diff --git a/Cargo.lock b/Cargo.lock index 6a2260b3b4841e46daa51b4d2d5fdb2444374f62..7bddc07d24e47b4d8a1be6b3a2bf46972deb133a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1344,6 +1344,16 @@ dependencies = [ "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "impl-codec" version = "0.4.0" @@ -1421,24 +1431,25 @@ dependencies = [ [[package]] name = "jsonrpc-client-transports" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "websocket 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-core" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1450,15 +1461,15 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-client-transports 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-client-transports 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-derive" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro-crate 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1469,12 +1480,12 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", 
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1483,10 +1494,10 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1494,12 +1505,12 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1510,15 +1521,15 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2298,7 +2309,7 @@ dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-executor 2.0.0", "node-primitives 2.0.0", @@ -2388,10 +2399,10 @@ name = "node-rpc" version = "2.0.0" dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 13.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "node-runtime 2.0.0", @@ -2412,7 +2423,7 @@ dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "substrate-rpc 2.0.0", @@ -2934,6 +2945,11 @@ name = "percent-encoding" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pin-utils" version = "0.1.0-alpha.4" @@ -4769,7 +4785,6 @@ dependencies = [ "substrate-keystore 2.0.0", "substrate-network 2.0.0", "substrate-primitives 2.0.0", - "substrate-service 2.0.0", "substrate-telemetry 2.0.0", "substrate-test-runtime-client 2.0.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4975,10 +4990,10 @@ dependencies = [ "derive_more 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5004,10 +5019,10 @@ dependencies = [ name = "substrate-rpc-servers" version = "2.0.0" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-ws-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-ws-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", 
@@ -5746,7 +5761,7 @@ name = "twox-hash" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5845,6 +5860,16 @@ dependencies = [ "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "url" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "utf8-ranges" version = "1.0.3" @@ -6157,7 +6182,7 @@ dependencies = [ [[package]] name = "ws" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6169,7 +6194,7 @@ dependencies = [ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -6389,6 +6414,7 @@ dependencies = [ "checksum hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)" = "7cb44cbce9d8ee4fb36e4c0ad7b794ac44ebaad924b9c8291a63215bb44c2c8f" "checksum hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" "checksum impl-codec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "78c441b3d2b5e24b407161e76d482b7bbd29b5da357707839ac40d95152f031f" "checksum impl-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5158079de9d4158e0ce1de3ae0bd7be03904efc40b3d7dd8b8c301cbf6b52b56" "checksum impl-serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d26be4b97d738552ea423f76c4f681012ff06c3fa36fa968656b3679f60b4a1" @@ -6400,14 +6426,14 @@ dependencies = [ "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" "checksum js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "da3ea71161651a4cd97d999b2da139109c537b15ab33abc8ae4ead38deac8a03" -"checksum jsonrpc-client-transports 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0bb6fd4acf48d1f17eb7b0e27ab7043c16f063ad0aa7020ec92a431648286c2f" -"checksum jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34d379861584fe4e3678f6ae9ee60b41726df2989578c1dc0f90190dfc92dbe0" -"checksum jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d6b0a3dc76953d88cdb47f5fe4ae21abcabc8d7edf4951ebce42db5c722d6698" -"checksum jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9e2d4475549bc0126690788ed5107573c8917f97db5298f0043fb73d46fc498" -"checksum jsonrpc-http-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad55e8dd67c2c5b16436738b0baf319a6b353feba7401dbc1508a0bd8bd451f" -"checksum jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "583f5930821dbc043236fe5d672d496ead7ff83d21351146598386c66fe8722a" -"checksum jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "04f18ca34046c249751fe90428e77e9570beaa03b33a108e74418a586063d07d" -"checksum jsonrpc-ws-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aee1265de937bd53ad0fc95ff5817314922ce009fa99a04a09fdf449b140ddf6" +"checksum jsonrpc-client-transports 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "39577db48b004cffb4c5b8e5c9b993c177c52599ecbee88711e815acf65144db" +"checksum jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd42951eb35079520ee29b7efbac654d85821b397ef88c8151600ef7e2d00217" +"checksum jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f047c10738edee7c3c6acf5241a0ce33df32ef9230c1a7fb03e4a77ee72c992f" +"checksum jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29f9149f785deaae92a4c834a9a1a83a4313b8cfedccf15362cd4cf039a64501" +"checksum jsonrpc-http-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4edd28922653d79e4f6c0f5d0a1034a4edbc5f9cf6cad8ec85e2a685713e3708" +"checksum jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c08b444cc0ed70263798834343d0ac875e664257df8079160f23ac1ea79446" +"checksum jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44561bfdd31401bad790527f1e951dde144f2341ddc3e1b859d32945e1a34eff" +"checksum jsonrpc-ws-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d230ff76a8e4a3fb068aab6ba23d0c4e7d6e3b41bca524daa33988b04b065265" "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum keccak-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3468207deea1359a0e921591ae9b4c928733d94eb9d6a2eeda994cfd59f42cf8" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" @@ -6512,6 +6538,7 @@ dependencies = [ "checksum pbkdf2 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" "checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a7c1d2cfa5a714db3b5f24f0915e74fcdf91d09d496ba61329705dda7774d2af" "checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b" @@ -6666,6 +6693,7 @@ dependencies = [ "checksum unsigned-varint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2c64cdf40b4a9645534a943668681bcb219faf51874d4b65d2e0abda1b10a2ab" "checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" "checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" "checksum utf8-ranges 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9d50aa7650df78abf942826607c62468ce18d9019673d4a2ebe1865dbb96ffde" "checksum vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "33dd455d0f96e90a75803cfeb7f948768c08d70a6de9a8d2362461935698bf95" "checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" @@ -6700,7 +6728,7 @@ dependencies = [ "checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" -"checksum ws 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ec91ea61b83ce033c43c06c52ddc7532f465c0153281610d44c58b74083aee1a" +"checksum ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a6f5bb86663ff4d1639408410f50bf6050367a8525d644d49a6894cd618a631" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum x25519-dalek 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7ee1585dc1484373cbc1cee7aafda26634665cf449436fd6e24bfd1fad230538" "checksum xdg 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57" diff --git a/core/cli/src/informant.rs b/core/cli/src/informant.rs index b5a2f03d795461a75e2ad0f02084875fed395bcb..52a5f67c26d595e39a7a7f24f20513b1fb6432cd 100644 --- a/core/cli/src/informant.rs +++ b/core/cli/src/informant.rs @@ -21,22 +21,12 @@ use futures::{Future, Stream}; use futures03::{StreamExt as _, TryStreamExt as _}; use log::{info, warn}; use sr_primitives::{generic::BlockId, traits::Header}; -use service::{Service, Components}; -use tokio::runtime::TaskExecutor; +use service::AbstractService; mod display; -/// Spawn informant on the event loop -#[deprecated(note = "Please use informant::build instead, and then create the task manually")] -pub fn start<C>(service: &Service<C>, exit: ::exit_future::Exit, handle: TaskExecutor) where - C: Components, -{ - handle.spawn(exit.until(build(service)).map(|_| ())); -} - /// Creates an informant in the form of a `Future` that must be polled regularly. 
-pub fn build<C>(service: &Service<C>) -> impl Future<Item = (), Error = ()> -where C: Components { +pub fn build(service: &impl AbstractService) -> impl Future<Item = (), Error = ()> { let client = service.client(); let mut display = display::InformantDisplay::new(); diff --git a/core/cli/src/lib.rs b/core/cli/src/lib.rs index ef5290413166d6a247d53f7ecf9c39f4b798b7ff..6e9955ca1a48b6e929fd793fa5d9664108c5a91a 100644 --- a/core/cli/src/lib.rs +++ b/core/cli/src/lib.rs @@ -29,8 +29,8 @@ pub mod informant; use client::ExecutionStrategies; use service::{ config::Configuration, - ServiceFactory, FactoryFullConfiguration, RuntimeGenesis, - FactoryGenesis, PruningMode, ChainSpec, + ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert, + RuntimeGenesis, PruningMode, ChainSpec, }; use network::{ self, multiaddr::Protocol, @@ -317,13 +317,17 @@ pub struct ParseAndPrepareExport<'a> { impl<'a> ParseAndPrepareExport<'a> { /// Runs the command and exports from the chain. - pub fn run<F, S, E>( + pub fn run_with_builder<C, G, F, B, S, E>( self, + builder: F, spec_factory: S, exit: E, ) -> error::Result<()> - where S: FnOnce(&str) -> Result<Option<ChainSpec<FactoryGenesis<F>>>, String>, - F: ServiceFactory, + where S: FnOnce(&str) -> Result<Option<ChainSpec<G>>, String>, + F: FnOnce(Configuration<C, G>) -> Result<B, error::Error>, + B: ServiceBuilderExport, + C: Default, + G: RuntimeGenesis, E: IntoExit { let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; @@ -338,9 +342,8 @@ impl<'a> ParseAndPrepareExport<'a> { None => Box::new(stdout()), }; - service::chain_ops::export_blocks::<F, _, _>( - config, exit.into_exit(), file, from.into(), to.map(Into::into), json - ).map_err(Into::into) + builder(config)?.export_blocks(exit.into_exit(), file, from.into(), to.map(Into::into), json)?; + Ok(()) } } @@ -352,13 +355,17 @@ pub struct ParseAndPrepareImport<'a> { impl<'a> ParseAndPrepareImport<'a> { /// Runs the command and imports to the chain. - pub fn run<F, S, E>( + pub fn run_with_builder<C, G, F, B, S, E>( self, + builder: F, spec_factory: S, exit: E, ) -> error::Result<()> - where S: FnOnce(&str) -> Result<Option<ChainSpec<FactoryGenesis<F>>>, String>, - F: ServiceFactory, + where S: FnOnce(&str) -> Result<Option<ChainSpec<G>>, String>, + F: FnOnce(Configuration<C, G>) -> Result<B, error::Error>, + B: ServiceBuilderImport, + C: Default, + G: RuntimeGenesis, E: IntoExit { let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; @@ -377,7 +384,7 @@ impl<'a> ParseAndPrepareImport<'a> { }, }; - let fut = service::chain_ops::import_blocks::<F, _, _>(config, exit.into_exit(), file)?; + let fut = builder(config)?.import_blocks(exit.into_exit(), file)?; tokio::run(fut); Ok(()) } @@ -440,67 +447,23 @@ pub struct ParseAndPrepareRevert<'a> { impl<'a> ParseAndPrepareRevert<'a> { /// Runs the command and reverts the chain. - pub fn run<F, S>( + pub fn run_with_builder<C, G, F, B, S>( self, + builder: F, spec_factory: S ) -> error::Result<()> - where S: FnOnce(&str) -> Result<Option<ChainSpec<FactoryGenesis<F>>>, String>, - F: ServiceFactory { + where S: FnOnce(&str) -> Result<Option<ChainSpec<G>>, String>, + F: FnOnce(Configuration<C, G>) -> Result<B, error::Error>, + B: ServiceBuilderRevert, + C: Default, + G: RuntimeGenesis { let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; let blocks = self.params.num; - Ok(service::chain_ops::revert_chain::<F>(config, blocks.into())?) 
+ builder(config)?.revert_chain(blocks.into())?; + Ok(()) } } -/// Parse command line interface arguments and executes the desired command. -/// -/// # Return value -/// -/// A result that indicates if any error occurred. -/// If no error occurred and a custom subcommand was found, the subcommand is returned. -/// The user needs to handle this subcommand on its own. -/// -/// # Remarks -/// -/// `CC` is a custom subcommand. This needs to be an `enum`! If no custom subcommand is required, -/// `NoCustom` can be used as type here. -/// `RP` are custom parameters for the run command. This needs to be a `struct`! The custom -/// parameters are visible to the user as if they were normal run command parameters. If no custom -/// parameters are required, `NoCustom` can be used as type here. -#[deprecated( - note = "Use parse_and_prepare instead; see the source code of parse_and_execute for how to transition" -)] -pub fn parse_and_execute<'a, F, CC, RP, S, RS, E, I, T>( - spec_factory: S, - version: &VersionInfo, - impl_name: &'static str, - args: I, - exit: E, - run_service: RS, -) -> error::Result<Option<CC>> -where - F: ServiceFactory, - S: FnOnce(&str) -> Result<Option<ChainSpec<FactoryGenesis<F>>>, String>, - CC: StructOpt + Clone + GetLogFilter, - RP: StructOpt + Clone + AugmentClap, - E: IntoExit, - RS: FnOnce(E, RunCmd, RP, FactoryFullConfiguration<F>) -> Result<(), String>, - I: IntoIterator<Item = T>, - T: Into<std::ffi::OsString> + Clone, -{ - match parse_and_prepare::<CC, RP, _>(version, impl_name, args) { - ParseAndPrepare::Run(cmd) => cmd.run(spec_factory, exit, run_service), - ParseAndPrepare::BuildSpec(cmd) => cmd.run(spec_factory), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::<F, _, _>(spec_factory, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::<F, _, _>(spec_factory, exit), - ParseAndPrepare::PurgeChain(cmd) => cmd.run(spec_factory), - ParseAndPrepare::RevertChain(cmd) => cmd.run::<F, _>(spec_factory), - ParseAndPrepare::CustomCommand(cmd) => return Ok(Some(cmd)) - }?; - - Ok(None) -} - /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context /// of an optional network config storage directory. 
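For downstream CLIs, the practical consequence is that each chain-ops subcommand now receives a closure turning the parsed `Configuration` into a service builder implementing the matching `ServiceBuilder*` trait. A hedged sketch of how a node binary might dispatch this (the `cli` alias and the `load_spec` and `build_service_builder` helpers are illustrative placeholders, not part of this patch):

```rust
// Hypothetical dispatch in a downstream node binary; `build_service_builder` would
// construct a `ServiceBuilder` via `new_full`/`new_light` plus the `with_*` calls.
fn run<I, T, E>(args: I, exit: E, version: &cli::VersionInfo) -> cli::error::Result<()>
where
    I: IntoIterator<Item = T>,
    T: Into<std::ffi::OsString> + Clone,
    E: cli::IntoExit,
{
    match cli::parse_and_prepare::<cli::NoCustom, cli::NoCustom, _>(version, "my-node", args) {
        // Only the chain-ops subcommands change shape: each takes a builder closure whose
        // result implements `ServiceBuilderExport`, `ServiceBuilderImport` or
        // `ServiceBuilderRevert` respectively.
        cli::ParseAndPrepare::ExportBlocks(cmd) =>
            cmd.run_with_builder(|config| build_service_builder(config), load_spec, exit),
        cli::ParseAndPrepare::ImportBlocks(cmd) =>
            cmd.run_with_builder(|config| build_service_builder(config), load_spec, exit),
        cli::ParseAndPrepare::RevertChain(cmd) =>
            cmd.run_with_builder(|config| build_service_builder(config), load_spec),
        // `Run`, `BuildSpec`, `PurgeChain` and custom subcommands keep their existing API.
        _ => Ok(()),
    }
}
```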
fn node_key_config<P>(params: NodeKeyParams, net_config_dir: &Option<P>) diff --git a/core/finality-grandpa/Cargo.toml b/core/finality-grandpa/Cargo.toml index 22237c5a0b5a0f886fe2853604270627b272cf79..393ee45db5776afce900c811e89eb92626838e22 100644 --- a/core/finality-grandpa/Cargo.toml +++ b/core/finality-grandpa/Cargo.toml @@ -23,7 +23,6 @@ serde_json = "1.0" client = { package = "substrate-client", path = "../client" } inherents = { package = "substrate-inherents", path = "../../core/inherents" } network = { package = "substrate-network", path = "../network" } -service = { package = "substrate-service", path = "../service", optional = true } srml-finality-tracker = { path = "../../srml/finality-tracker" } fg_primitives = { package = "substrate-finality-grandpa-primitives", path = "primitives" } grandpa = { package = "finality-grandpa", version = "0.9.0", features = ["derive-codec"] } @@ -37,7 +36,3 @@ babe_primitives = { package = "substrate-consensus-babe-primitives", path = "../ env_logger = "0.6" tokio = "0.1.17" tempfile = "3.1" - -[features] -default = ["service-integration"] -service-integration = ["service"] diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index b79b120e3571457fd2d5c13e6b600a99ec3537c6..d6f4d768472a372794dd1c19d5bfa255061a1c79 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -93,10 +93,6 @@ mod light_import; mod observer; mod until_imported; -#[cfg(feature="service-integration")] -mod service_integration; -#[cfg(feature="service-integration")] -pub use service_integration::{LinkHalfForService, BlockImportForService, BlockImportForLightService}; pub use communication::Network; pub use finality_proof::FinalityProofProvider; pub use light_import::light_block_import; @@ -107,7 +103,6 @@ use environment::{Environment, VoterSetState}; use import::GrandpaBlockImport; use until_imported::UntilGlobalMessageBlocksImported; use communication::NetworkBridge; -use service::TelemetryOnConnect; use fg_primitives::{AuthoritySignature, SetId, AuthorityWeight}; // Re-export these two because it's just so damn convenient. @@ -484,7 +479,7 @@ pub struct GrandpaParams<B, E, Block: BlockT<Hash=H256>, N, RA, SC, X> { /// Handle to a future that will resolve on exit. pub on_exit: X, /// If supplied, can be used to hook on telemetry connection established events. - pub telemetry_on_connect: Option<TelemetryOnConnect>, + pub telemetry_on_connect: Option<mpsc::UnboundedReceiver<()>>, } /// Run a GRANDPA voter as a task. Provide configuration and a link to a @@ -531,7 +526,7 @@ pub fn run_grandpa_voter<B, E, Block: BlockT<Hash=H256>, N, RA, SC, X>( let telemetry_task = if let Some(telemetry_on_connect) = telemetry_on_connect { let authorities = persistent_data.authority_set.clone(); - let events = telemetry_on_connect.telemetry_connection_sinks + let events = telemetry_on_connect .for_each(move |_| { telemetry!(CONSENSUS_INFO; "afg.authority_set"; "authority_set_id" => ?authorities.set_id(), diff --git a/core/finality-grandpa/src/service_integration.rs b/core/finality-grandpa/src/service_integration.rs deleted file mode 100644 index 9f19b9204190bdcc16f3766db3c20a400ed40331..0000000000000000000000000000000000000000 --- a/core/finality-grandpa/src/service_integration.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2018-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -/// Integrate grandpa finality with substrate service - -use client; -use service::{FullBackend, FullExecutor, LightBackend, LightExecutor, ServiceFactory}; - -pub type BlockImportForService<F> = crate::GrandpaBlockImport< - FullBackend<F>, - FullExecutor<F>, - <F as ServiceFactory>::Block, - <F as ServiceFactory>::RuntimeApi, - client::Client< - FullBackend<F>, - FullExecutor<F>, - <F as ServiceFactory>::Block, - <F as ServiceFactory>::RuntimeApi - >, - <F as ServiceFactory>::SelectChain, ->; - -pub type LinkHalfForService<F> = crate::LinkHalf< - FullBackend<F>, - FullExecutor<F>, - <F as ServiceFactory>::Block, - <F as ServiceFactory>::RuntimeApi, - <F as ServiceFactory>::SelectChain ->; - -pub type BlockImportForLightService<F> = crate::light_import::GrandpaLightBlockImport< - LightBackend<F>, - LightExecutor<F>, - <F as ServiceFactory>::Block, - <F as ServiceFactory>::RuntimeApi, ->; diff --git a/core/rpc-servers/Cargo.toml b/core/rpc-servers/Cargo.toml index 54a4b68eab9bf877848fb65db643da19d8ef1ae8..d4befd52e9f7a9c7f089d3f4192898e2664aa554 100644 --- a/core/rpc-servers/Cargo.toml +++ b/core/rpc-servers/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" [dependencies] -jsonrpc-core = "13.0.0" -pubsub = { package = "jsonrpc-pubsub", version = "13.0.0" } +jsonrpc-core = "13.1.0" +pubsub = { package = "jsonrpc-pubsub", version = "13.1.0" } log = "0.4" serde = "1.0" sr-primitives = { path = "../sr-primitives" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -http = { package = "jsonrpc-http-server", version = "13.0.0" } -ws = { package = "jsonrpc-ws-server", version = "13.0.0" } +http = { package = "jsonrpc-http-server", version = "13.1.0" } +ws = { package = "jsonrpc-ws-server", version = "13.1.0" } diff --git a/core/rpc/Cargo.toml b/core/rpc/Cargo.toml index 0a9cf108c9ede0857c656c4afe9107fb05636c41..f35408c7b3c9c60b61b84e0f4b920a65aedd5014 100644 --- a/core/rpc/Cargo.toml +++ b/core/rpc/Cargo.toml @@ -8,10 +8,10 @@ edition = "2018" derive_more = "0.14.0" futures = "0.1" futures03 = { package = "futures-preview", version = "0.3.0-alpha.17", features = ["compat"] } -jsonrpc-core = "13.0.0" -jsonrpc-core-client = "13.0.0" -jsonrpc-pubsub = "13.0.0" -jsonrpc-derive = "13.0.0" +jsonrpc-core = "13.1.0" +jsonrpc-core-client = "13.1.0" +jsonrpc-pubsub = "13.1.0" +jsonrpc-derive = "13.1.0" log = "0.4" parking_lot = "0.9.0" codec = { package = "parity-scale-codec", version = "1.0.0" } diff --git a/core/service/src/builder.rs b/core/service/src/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..3b079e549d8cdddb42c97a85a21ae1fd178085e9 --- /dev/null +++ b/core/service/src/builder.rs @@ -0,0 +1,809 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see <http://www.gnu.org/licenses/>. + +use crate::{NewService, NetworkStatus, NetworkState, error::{self, Error}, DEFAULT_PROTOCOL_ID}; +use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, TransactionPoolAdapter}; +use crate::TaskExecutor; +use crate::config::Configuration; +use client::{BlockchainEvents, Client, runtime_api}; +use codec::{Decode, Encode, IoReader}; +use consensus_common::import_queue::ImportQueue; +use futures::{prelude::*, sync::mpsc}; +use futures03::{FutureExt as _, compat::Compat, StreamExt as _, TryStreamExt as _}; +use keystore::{Store as Keystore, KeyStorePtr}; +use log::{info, warn}; +use network::{FinalityProofProvider, OnDemand, NetworkService, NetworkStateInfo}; +use network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpecialization}; +use parking_lot::{Mutex, RwLock}; +use primitives::{Blake2Hasher, H256, Hasher}; +use rpc::{self, system::SystemInfo}; +use sr_primitives::{BuildStorage, generic::BlockId}; +use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, NumberFor, One, Zero, Header, SaturatedConversion}; +use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; +use serde::{Serialize, de::DeserializeOwned}; +use std::{io::{Read, Write, Seek}, marker::PhantomData, sync::Arc, sync::atomic::AtomicBool}; +use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; +use tel::{telemetry, SUBSTRATE_INFO}; +use transaction_pool::txpool::{self, ChainApi, Pool as TransactionPool}; + +/// Aggregator for the components required to build a service. +/// +/// # Usage +/// +/// Call [`ServiceBuilder::new_full`] or [`ServiceBuilder::new_light`], then call the various +/// `with_` methods to add the required components that you built yourself: +/// +/// - [`with_select_chain`](ServiceBuilder::with_select_chain) +/// - [`with_import_queue`](ServiceBuilder::with_import_queue) +/// - [`with_network_protocol`](ServiceBuilder::with_network_protocol) +/// - [`with_finality_proof_provider`](ServiceBuilder::with_finality_proof_provider) +/// - [`with_transaction_pool`](ServiceBuilder::with_transaction_pool) +/// +/// After this is done, call [`build`](ServiceBuilder::build) to construct the service. +/// +/// The order in which the `with_*` methods are called doesn't matter, as the correct binding of +/// generics is done when you call `build`. 
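As a rough illustration of the call pattern described in the doc comment above, a full node might assemble its service along these lines; `MyBlock`, `MyRuntimeApi` and `MyExecutorDispatch` are placeholder types and the component constructors are only indicative of the era's client/transaction-pool APIs, not taken from this patch:

```rust
// Hypothetical full-node assembly using the builder.
let builder = ServiceBuilder::new_full::<MyBlock, MyRuntimeApi, MyExecutorDispatch>(config)?
    // Head-of-chain strategy: follow the longest chain known to the backend.
    .with_select_chain(|_config, client| {
        #[allow(deprecated)]
        Ok(client::LongestChain::new(client.backend().clone()))
    })?
    // Transaction pool backed by the full client.
    .with_transaction_pool(|options, client|
        Ok(transaction_pool::txpool::Pool::new(options, transaction_pool::ChainApi::new(client)))
    )?;
// ...continue with `.with_import_queue(..)`, `.with_network_protocol(..)`,
// `.with_finality_proof_provider(..)` and `.with_rpc_extensions(..)` as required,
// then call `.build()?` to obtain the service.
```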
+/// +pub struct ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> { + config: Configuration<TCfg, TGen>, + client: Arc<TCl>, + keystore: Arc<RwLock<Keystore>>, + fetcher: Option<TFchr>, + select_chain: Option<TSc>, + import_queue: TImpQu, + finality_proof_request_builder: Option<TFprb>, + finality_proof_provider: Option<TFpp>, + network_protocol: TNetP, + transaction_pool: Arc<TExPool>, + rpc_extensions: TRpc, + marker: PhantomData<(TBl, TRtApi)>, +} + +impl<TCfg, TGen> ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), (), ()> +where TGen: Serialize + DeserializeOwned + BuildStorage { + /// Start the service builder with a configuration. + pub fn new_full<TBl: BlockT<Hash=H256>, TRtApi, TExecDisp: NativeExecutionDispatch>( + config: Configuration<TCfg, TGen> + ) -> Result<ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + Client< + client_db::Backend<TBl>, + client::LocalCallExecutor<client_db::Backend<TBl>, NativeExecutor<TExecDisp>>, + TBl, + TRtApi + >, + Arc<OnDemand<TBl>>, + (), + (), + BoxFinalityProofRequestBuilder<TBl>, + (), + (), + (), + () + >, Error> { + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let db_settings = client_db::DatabaseSettings { + cache_size: None, + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + path: config.database_path.clone(), + pruning: config.pruning.clone(), + }; + + let executor = NativeExecutor::<TExecDisp>::new(config.default_heap_pages); + + let client = Arc::new(client_db::new_client( + db_settings, + executor, + &config.chain_spec, + config.execution_strategies.clone(), + Some(keystore.clone()), + )?); + + Ok(ServiceBuilder { + config, + client, + keystore, + fetcher: None, + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + network_protocol: (), + transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), + marker: PhantomData, + }) + } + + /// Start the service builder with a configuration. 
+ pub fn new_light<TBl: BlockT<Hash=H256>, TRtApi, TExecDisp: NativeExecutionDispatch + 'static>( + config: Configuration<TCfg, TGen> + ) -> Result<ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + Client< + client::light::backend::Backend<client_db::light::LightStorage<TBl>, network::OnDemand<TBl>, Blake2Hasher>, + client::light::call_executor::RemoteOrLocalCallExecutor< + TBl, + client::light::backend::Backend< + client_db::light::LightStorage<TBl>, + network::OnDemand<TBl>, + Blake2Hasher + >, + client::light::call_executor::RemoteCallExecutor< + client::light::blockchain::Blockchain< + client_db::light::LightStorage<TBl>, + network::OnDemand<TBl> + >, + network::OnDemand<TBl>, + >, + client::LocalCallExecutor< + client::light::backend::Backend< + client_db::light::LightStorage<TBl>, + network::OnDemand<TBl>, + Blake2Hasher + >, + NativeExecutor<TExecDisp> + > + >, + TBl, + TRtApi + >, + Arc<OnDemand<TBl>>, + (), + (), + BoxFinalityProofRequestBuilder<TBl>, + (), + (), + (), + () + >, Error> { + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let db_settings = client_db::DatabaseSettings { + cache_size: config.database_cache_size.map(|u| u as usize), + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + path: config.database_path.clone(), + pruning: config.pruning.clone(), + }; + + let executor = NativeExecutor::<TExecDisp>::new(config.default_heap_pages); + + let db_storage = client_db::light::LightStorage::new(db_settings)?; + let light_blockchain = client::light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor.clone())); + let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); + let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); + let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; + + Ok(ServiceBuilder { + config, + client: Arc::new(client), + keystore, + fetcher: Some(fetcher), + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + network_protocol: (), + transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), + marker: PhantomData, + }) + } +} + +impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> + ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> { + + /// Returns a reference to the client that was stored in this builder. + pub fn client(&self) -> &Arc<TCl> { + &self.client + } + + /// Returns a reference to the select-chain that was stored in this builder. + pub fn select_chain(&self) -> Option<&TSc> { + self.select_chain.as_ref() + } + + /// Defines which head-of-chain strategy to use. 
+ pub fn with_opt_select_chain<USc>( + mut self, + select_chain_builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>) -> Result<Option<USc>, Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, USc, TImpQu, TFprb, TFpp, + TNetP, TExPool, TRpc>, Error> { + let select_chain = select_chain_builder(&mut self.config, self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which head-of-chain strategy to use. + pub fn with_select_chain<USc>( + self, + builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>) -> Result<USc, Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, USc, TImpQu, TFprb, TFpp, + TNetP, TExPool, TRpc>, Error> { + self.with_opt_select_chain(|cfg, cl| builder(cfg, cl).map(Option::Some)) + } + + /// Defines which import queue to use. + pub fn with_import_queue<UImpQu>( + mut self, + builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>) + -> Result<UImpQu, Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, UImpQu, TFprb, TFpp, + TNetP, TExPool, TRpc>, Error> + where TSc: Clone { + let import_queue = builder( + &mut self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone() + )?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which network specialization protocol to use. + pub fn with_network_protocol<UNetP>( + self, + network_protocol_builder: impl FnOnce(&Configuration<TCfg, TGen>) -> Result<UNetP, Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, + UNetP, TExPool, TRpc>, Error> { + let network_protocol = network_protocol_builder(&self.config)?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. 
+ pub fn with_opt_finality_proof_provider( + self, + builder: impl FnOnce(Arc<TCl>) -> Result<Option<Arc<FinalityProofProvider<TBl>>>, Error> + ) -> Result<ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + TCl, + TFchr, + TSc, + TImpQu, + TFprb, + Arc<FinalityProofProvider<TBl>>, + TNetP, + TExPool, + TRpc + >, Error> { + let finality_proof_provider = builder(self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. + pub fn with_finality_proof_provider( + self, + build: impl FnOnce(Arc<TCl>) -> Result<Arc<FinalityProofProvider<TBl>>, Error> + ) -> Result<ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + TCl, + TFchr, + TSc, + TImpQu, + TFprb, + Arc<FinalityProofProvider<TBl>>, + TNetP, + TExPool, + TRpc + >, Error> { + self.with_opt_finality_proof_provider(|client| build(client).map(Option::Some)) + } + + /// Defines which import queue to use. + pub fn with_import_queue_and_opt_fprb<UImpQu, UFprb>( + mut self, + builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>) + -> Result<(UImpQu, Option<UFprb>), Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, UImpQu, UFprb, TFpp, + TNetP, TExPool, TRpc>, Error> + where TSc: Clone { + let (import_queue, fprb) = builder( + &mut self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone() + )?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: fprb, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines which import queue to use. + pub fn with_import_queue_and_fprb<UImpQu, UFprb>( + self, + builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>) + -> Result<(UImpQu, UFprb), Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, UImpQu, UFprb, TFpp, + TNetP, TExPool, TRpc>, Error> + where TSc: Clone { + self.with_import_queue_and_opt_fprb(|cfg, cl, sc, tx| builder(cfg, cl, sc, tx).map(|(q, f)| (q, Some(f)))) + } + + /// Defines which transaction pool to use. 
+ pub fn with_transaction_pool<UExPool>( + self, + transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc<TCl>) -> Result<UExPool, Error> + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, + TNetP, UExPool, TRpc>, Error> { + let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: Arc::new(transaction_pool), + rpc_extensions: self.rpc_extensions, + marker: self.marker, + }) + } + + /// Defines the RPC extensions to use. + pub fn with_rpc_extensions<URpc>( + self, + rpc_ext_builder: impl FnOnce(Arc<TCl>, Arc<TExPool>) -> URpc + ) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, + TNetP, TExPool, URpc>, Error> { + let rpc_extensions = rpc_ext_builder(self.client.clone(), self.transaction_pool.clone()); + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions, + marker: self.marker, + }) + } +} + +/// Implemented on `ServiceBuilder`. Allows importing blocks once you have given all the required +/// components to the builder. +pub trait ServiceBuilderImport { + /// Starts the process of importing blocks. + fn import_blocks( + self, + exit: impl Future<Item=(),Error=()> + Send + 'static, + input: impl Read + Seek, + ) -> Result<Box<dyn Future<Item = (), Error = ()> + Send>, Error>; +} + +/// Implemented on `ServiceBuilder`. Allows exporting blocks once you have given all the required +/// components to the builder. +pub trait ServiceBuilderExport { + /// Type of block of the builder. + type Block: BlockT; + + /// Performs the blocks export. + fn export_blocks( + &self, + exit: impl Future<Item=(),Error=()> + Send + 'static, + output: impl Write, + from: NumberFor<Self::Block>, + to: Option<NumberFor<Self::Block>>, + json: bool + ) -> Result<(), Error>; +} + +/// Implemented on `ServiceBuilder`. Allows reverting the chain once you have given all the +/// required components to the builder. +pub trait ServiceBuilderRevert { + /// Type of block of the builder. + type Block: BlockT; + + /// Performs a revert of `blocks` blocks.
+ fn revert_chain( + &self, + blocks: NumberFor<Self::Block> + ) -> Result<(), Error>; +} + +impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> + ServiceBuilderImport for ServiceBuilder<TBl, TRtApi, TCfg, TGen, Client<TBackend, TExec, TBl, TRtApi>, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +where + TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send, + TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone, + TImpQu: 'static + ImportQueue<TBl>, + TRtApi: 'static + Send + Sync, +{ + fn import_blocks( + self, + exit: impl Future<Item=(),Error=()> + Send + 'static, + input: impl Read + Seek, + ) -> Result<Box<dyn Future<Item = (), Error = ()> + Send>, Error> { + let client = self.client; + let mut queue = self.import_queue; + import_blocks!(TBl, client, queue, exit, input) + .map(|f| Box::new(f) as Box<_>) + } +} + +impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> + ServiceBuilderExport for ServiceBuilder<TBl, TRtApi, TCfg, TGen, Client<TBackend, TExec, TBl, TRtApi>, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +where + TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send, + TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone +{ + type Block = TBl; + + fn export_blocks( + &self, + exit: impl Future<Item=(),Error=()> + Send + 'static, + mut output: impl Write, + from: NumberFor<TBl>, + to: Option<NumberFor<TBl>>, + json: bool + ) -> Result<(), Error> { + let client = &self.client; + export_blocks!(client, exit, output, from, to, json) + } +} + +impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> + ServiceBuilderRevert for ServiceBuilder<TBl, TRtApi, TCfg, TGen, Client<TBackend, TExec, TBl, TRtApi>, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +where + TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send, + TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone +{ + type Block = TBl; + + fn revert_chain( + &self, + blocks: NumberFor<TBl> + ) -> Result<(), Error> { + let client = &self.client; + revert_chain!(client, blocks) + } +} + +impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TSc, TImpQu, TNetP, TExPoolApi, TRpc> +ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + Client<TBackend, TExec, TBl, TRtApi>, + Arc<OnDemand<TBl>>, + TSc, + TImpQu, + BoxFinalityProofRequestBuilder<TBl>, + Arc<FinalityProofProvider<TBl>>, + TNetP, + TransactionPool<TExPoolApi>, + TRpc +> where + Client<TBackend, TExec, TBl, TRtApi>: ProvideRuntimeApi, + <Client<TBackend, TExec, TBl, TRtApi> as ProvideRuntimeApi>::Api: + runtime_api::Metadata<TBl> + + offchain::OffchainWorkerApi<TBl> + + runtime_api::TaggedTransactionQueue<TBl> + + session::SessionKeys<TBl>, + TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + TRtApi: 'static + Send + Sync, + TCfg: Default, + TGen: Serialize + DeserializeOwned + BuildStorage, + TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send, + TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone, + TSc: Clone, + TImpQu: 'static + ImportQueue<TBl>, + TNetP: NetworkSpecialization<TBl>, + TExPoolApi: 'static + ChainApi<Block = TBl, Hash = <TBl as BlockT>::Hash>, + TRpc: 
rpc::RpcExtension<rpc::Metadata> + Clone, +{ + /// Builds the service. + pub fn build(self) -> Result<NewService< + Configuration<TCfg, TGen>, + TBl, + Client<TBackend, TExec, TBl, TRtApi>, + TSc, + NetworkStatus<TBl>, + NetworkService<TBl, TNetP, <TBl as BlockT>::Hash>, + TransactionPool<TExPoolApi>, + offchain::OffchainWorkers< + Client<TBackend, TExec, TBl, TRtApi>, + TBackend::OffchainStorage, + TBl + >, + >, Error> { + let mut config = self.config; + session::generate_initial_session_keys( + self.client.clone(), + config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default() + )?; + let ( + client, + fetcher, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool, + rpc_extensions + ) = ( + self.client, + self.fetcher, + self.keystore, + self.select_chain, + self.import_queue, + self.finality_proof_request_builder, + self.finality_proof_provider, + self.network_protocol, + self.transaction_pool, + self.rpc_extensions + ); + + new_impl!( + TBl, + config, + move |_| -> Result<_, Error> { + Ok(( + client, + fetcher, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool, + rpc_extensions + )) + }, + |h, c, tx| maintain_transaction_pool(h, c, tx), + |n, o, p, ns, v| offchain_workers(n, o, p, ns, v), + |c, ssb, si, te, tp, ext, ks| start_rpc(c, ssb, si, te, tp, ext, ks), + ) + } +} + +pub(crate) fn start_rpc<Api, Backend, Block, Executor, PoolApi>( + client: Arc<Client<Backend, Executor, Block, Api>>, + system_send_back: futures03::channel::mpsc::UnboundedSender<rpc::system::Request<Block>>, + rpc_system_info: SystemInfo, + task_executor: TaskExecutor, + transaction_pool: Arc<TransactionPool<PoolApi>>, + rpc_extensions: impl rpc::RpcExtension<rpc::Metadata>, + keystore: KeyStorePtr, +) -> rpc_servers::RpcHandler<rpc::Metadata> +where + Block: BlockT<Hash = <Blake2Hasher as primitives::Hasher>::Out>, + Backend: client::backend::Backend<Block, Blake2Hasher> + 'static, + Client<Backend, Executor, Block, Api>: ProvideRuntimeApi, + <Client<Backend, Executor, Block, Api> as ProvideRuntimeApi>::Api: + runtime_api::Metadata<Block> + session::SessionKeys<Block>, + Api: Send + Sync + 'static, + Executor: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone + 'static, + PoolApi: txpool::ChainApi<Hash = Block::Hash, Block = Block> + 'static { + use rpc::{chain, state, author, system}; + let subscriptions = rpc::Subscriptions::new(task_executor.clone()); + let chain = chain::Chain::new(client.clone(), subscriptions.clone()); + let state = state::State::new(client.clone(), subscriptions.clone()); + let author = rpc::author::Author::new( + client, + transaction_pool, + subscriptions, + keystore, + ); + let system = system::System::new(rpc_system_info, system_send_back); + + rpc_servers::rpc_handler(( + state::StateApi::to_delegate(state), + chain::ChainApi::to_delegate(chain), + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions, + )) +} + +pub(crate) fn maintain_transaction_pool<Api, Backend, Block, Executor, PoolApi>( + id: &BlockId<Block>, + client: &Client<Backend, Executor, Block, Api>, + transaction_pool: &TransactionPool<PoolApi>, +) -> error::Result<()> where + Block: BlockT<Hash = <Blake2Hasher as primitives::Hasher>::Out>, + Backend: client::backend::Backend<Block, Blake2Hasher>, + Client<Backend, Executor, Block, Api>: ProvideRuntimeApi, + <Client<Backend, 
Executor, Block, Api> as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue<Block>, + Executor: client::CallExecutor<Block, Blake2Hasher>, + PoolApi: txpool::ChainApi<Hash = Block::Hash, Block = Block>, +{ + // Avoid calling into runtime if there is nothing to prune from the pool anyway. + if transaction_pool.status().is_empty() { + return Ok(()) + } + + if let Some(block) = client.block(id)? { + let parent_id = BlockId::hash(*block.block.header().parent_hash()); + let extrinsics = block.block.extrinsics(); + transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; + } + + Ok(()) +} + +pub(crate) fn offchain_workers<Api, Backend, Block, Executor, PoolApi>( + number: &NumberFor<Block>, + offchain: &offchain::OffchainWorkers< + Client<Backend, Executor, Block, Api>, + <Backend as client::backend::Backend<Block, Blake2Hasher>>::OffchainStorage, + Block + >, + pool: &Arc<TransactionPool<PoolApi>>, + network_state: &Arc<dyn NetworkStateInfo + Send + Sync>, + is_validator: bool, +) -> error::Result<Box<dyn Future<Item = (), Error = ()> + Send>> +where + Block: BlockT<Hash = <Blake2Hasher as primitives::Hasher>::Out>, + Backend: client::backend::Backend<Block, Blake2Hasher> + 'static, + Api: 'static, + <Backend as client::backend::Backend<Block, Blake2Hasher>>::OffchainStorage: 'static, + Client<Backend, Executor, Block, Api>: ProvideRuntimeApi + Send + Sync, + <Client<Backend, Executor, Block, Api> as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi<Block>, + Executor: client::CallExecutor<Block, Blake2Hasher> + 'static, + PoolApi: txpool::ChainApi<Hash = Block::Hash, Block = Block> + 'static, +{ + let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) + .map(|()| Ok(())); + Ok(Box::new(Compat::new(future))) +} + +#[cfg(test)] +mod tests { + use super::*; + use consensus_common::{BlockOrigin, SelectChain}; + use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; + + #[test] + fn should_remove_transactions_from_the_pool() { + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + let best = longest_chain.best_chain().unwrap(); + + // store the transaction in the pool + pool.submit_one(&BlockId::hash(best.hash()), transaction.clone()).unwrap(); + + // import the block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push(transaction.clone()).unwrap(); + let block = builder.bake().unwrap(); + let id = BlockId::hash(block.header().hash()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should clean up the queue + assert_eq!(pool.status().ready, 1); + maintain_transaction_pool( + &id, + &client, + &pool, + ).unwrap(); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } +} diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index c801b81186f18cda9495bd6466f13788263b0e10..3a3677798b6add0be34516d7657cdccb871dafff 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -16,44 +16,19 @@ //! Chain utilities. 
-use std::{self, io::{Read, Write, Seek}}; -use futures::prelude::*; -use futures03::TryFutureExt as _; -use log::{info, warn}; - -use sr_primitives::generic::{SignedBlock, BlockId}; -use sr_primitives::traits::{SaturatedConversion, Zero, One, Block, Header, NumberFor}; -use consensus_common::import_queue::{ImportQueue, IncomingBlock, Link, BlockImportError, BlockImportResult}; -use network::message; - -use consensus_common::BlockOrigin; -use crate::components::{self, Components, ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis}; -use crate::new_client; -use codec::{Decode, Encode, IoReader}; +use crate::RuntimeGenesis; use crate::error; use crate::chain_spec::ChainSpec; -/// Export a range of blocks to a binary stream. -pub fn export_blocks<F, E, W>( - config: FactoryFullConfiguration<F>, - exit: E, - mut output: W, - from: FactoryBlockNumber<F>, - to: Option<FactoryBlockNumber<F>>, - json: bool -) -> error::Result<()> - where - F: ServiceFactory, - E: Future<Item=(),Error=()> + Send + 'static, - W: Write, -{ - let client = new_client::<F>(&config)?; - let mut block = from; +#[macro_export] +macro_rules! export_blocks { +($client:ident, $exit:ident, $output:ident, $from:ident, $to:ident, $json:ident) => {{ + let mut block = $from; - let last = match to { + let last = match $to { Some(v) if v.is_zero() => One::one(), Some(v) => v, - None => client.info().chain.best_number, + None => $client.info().chain.best_number, }; if last < block { @@ -62,28 +37,28 @@ pub fn export_blocks<F, E, W>( let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { - let _ = exit.wait(); + let _ = $exit.wait(); let _ = exit_send.send(()); }); info!("Exporting blocks from #{} to #{}", block, last); - if !json { + if !$json { let last_: u64 = last.saturated_into::<u64>(); let block_: u64 = block.saturated_into::<u64>(); let len: u64 = last_ - block_ + 1; - output.write(&len.encode())?; + $output.write(&len.encode())?; } loop { if exit_recv.try_recv().is_ok() { break; } - match client.block(&BlockId::number(block))? { + match $client.block(&BlockId::number(block))? { Some(block) => { - if json { - serde_json::to_writer(&mut output, &block) + if $json { + serde_json::to_writer(&mut $output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; } else { - output.write(&block.encode())?; + $output.write(&block.encode())?; } }, None => break, @@ -97,66 +72,59 @@ pub fn export_blocks<F, E, W>( block += One::one(); } Ok(()) +}} } -struct WaitLink { - imported_blocks: u64, - has_error: bool, -} +#[macro_export] +macro_rules! 
import_blocks { +($block:ty, $client:ident, $queue:ident, $exit:ident, $input:ident) => {{ + use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult}; + use consensus_common::BlockOrigin; + use network::message; + use sr_primitives::generic::SignedBlock; + use sr_primitives::traits::Block; + use futures03::TryFutureExt as _; + + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } -impl WaitLink { - fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } } } -} -impl<B: Block> Link<B> for WaitLink { - fn blocks_processed( - &mut self, - imported: usize, - _count: usize, - results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)> - ) { - self.imported_blocks += imported as u64; - - for result in results { - if let (Err(err), hash) = result { - warn!("There was an error importing block with hash {:?}: {:?}", hash, err); - self.has_error = true; - break; + impl<B: Block> Link<B> for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _count: usize, + results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)> + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!("There was an error importing block with hash {:?}: {:?}", hash, err); + self.has_error = true; + break; + } } } } -} - -/// Returns a future that import blocks from a binary stream. -pub fn import_blocks<F, E, R>( - mut config: FactoryFullConfiguration<F>, - exit: E, - input: R -) -> error::Result<impl Future<Item = (), Error = ()>> - where F: ServiceFactory, E: Future<Item=(),Error=()> + Send + 'static, R: Read + Seek, -{ - let client = new_client::<F>(&config)?; - // FIXME #1134 this shouldn't need a mutable config. 
- let select_chain = components::FullComponents::<F>::build_select_chain(&mut config, client.clone())?; - let (mut queue, _) = components::FullComponents::<F>::build_import_queue( - &mut config, - client.clone(), - select_chain, - None, - )?; let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { - let _ = exit.wait(); + let _ = $exit.wait(); let _ = exit_send.send(()); }); - let mut io_reader_input = IoReader(input); + let mut io_reader_input = IoReader($input); let count: u64 = Decode::decode(&mut io_reader_input) .map_err(|e| format!("Error reading file: {}", e))?; info!("Importing {} blocks", count); @@ -165,11 +133,11 @@ pub fn import_blocks<F, E, R>( if exit_recv.try_recv().is_ok() { break; } - match SignedBlock::<F::Block>::decode(&mut io_reader_input) { + match SignedBlock::<$block>::decode(&mut io_reader_input) { Ok(signed) => { let (header, extrinsics) = signed.block.deconstruct(); let hash = header.hash(); - let block = message::BlockData::<F::Block> { + let block = message::BlockData::<$block> { hash, justification: signed.justification, header: Some(header), @@ -178,8 +146,8 @@ pub fn import_blocks<F, E, R>( message_queue: None }; // import queue handles verification and importing it into the client - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock::<F::Block> { + $queue.import_blocks(BlockOrigin::File, vec![ + IncomingBlock::<$block> { hash: block.hash, header: block.header, body: block.body, @@ -208,7 +176,7 @@ pub fn import_blocks<F, E, R>( let blocks_before = link.imported_blocks; let _ = futures03::future::poll_fn(|cx| { - queue.poll_actions(cx, &mut link); + $queue.poll_actions(cx, &mut link); std::task::Poll::Pending::<Result<(), ()>> }).compat().poll(); if link.has_error { @@ -226,24 +194,20 @@ pub fn import_blocks<F, E, R>( ); } if link.imported_blocks >= count { - info!("Imported {} blocks. Best: #{}", block_count, client.info().chain.best_number); + info!("Imported {} blocks. Best: #{}", block_count, $client.info().chain.best_number); Ok(Async::Ready(())) } else { Ok(Async::NotReady) } })) +}} } -/// Revert the chain. -pub fn revert_chain<F>( - config: FactoryFullConfiguration<F>, - blocks: FactoryBlockNumber<F> -) -> error::Result<()> - where F: ServiceFactory, -{ - let client = new_client::<F>(&config)?; - let reverted = client.revert(blocks)?; - let info = client.info().chain; +#[macro_export] +macro_rules! revert_chain { +($client:ident, $blocks:ident) => {{ + let reverted = $client.revert($blocks)?; + let info = $client.info().chain; if reverted.is_zero() { info!("There aren't any non-finalized blocks to revert."); @@ -251,6 +215,7 @@ pub fn revert_chain<F>( info!("Reverted {} blocks. 
Best: #{} ({})", reverted, info.best_number, info.best_hash); } Ok(()) +}} } /// Build a chain spec json diff --git a/core/service/src/chain_spec.rs b/core/service/src/chain_spec.rs index 1683876c3f86ff7bd5833a488f0b43c1e1fd04dc..8b35b0bac9581b997ba5a09075bceb9fbbd8bc6f 100644 --- a/core/service/src/chain_spec.rs +++ b/core/service/src/chain_spec.rs @@ -24,7 +24,7 @@ use serde::{Serialize, Deserialize}; use primitives::storage::{StorageKey, StorageData}; use sr_primitives::{BuildStorage, StorageOverlay, ChildrenStorageOverlay}; use serde_json as json; -use crate::components::RuntimeGenesis; +use crate::RuntimeGenesis; use network::Multiaddr; use tel::TelemetryEndpoints; diff --git a/core/service/src/components.rs b/core/service/src/components.rs deleted file mode 100644 index a9aa2129f2498f1d4438fba8cf7badeba3201378..0000000000000000000000000000000000000000 --- a/core/service/src/components.rs +++ /dev/null @@ -1,808 +0,0 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -//! Substrate service components. - -use std::{sync::Arc, ops::Deref, ops::DerefMut}; -use serde::{Serialize, de::DeserializeOwned}; -use crate::chain_spec::ChainSpec; -use keystore::KeyStorePtr; -use client_db; -use client::{self, Client, runtime_api}; -use crate::{error, Service}; -use consensus_common::{import_queue::ImportQueue, SelectChain}; -use network::{ - self, OnDemand, FinalityProofProvider, NetworkStateInfo, config::BoxFinalityProofRequestBuilder -}; -use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; -use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool}; -use sr_primitives::{ - BuildStorage, traits::{Block as BlockT, Header as HeaderT, ProvideRuntimeApi}, generic::BlockId -}; -use crate::config::Configuration; -use primitives::{Blake2Hasher, H256, traits::BareCryptoStorePtr}; -use rpc::{self, system::SystemInfo}; -use futures::{prelude::*, future::Executor}; -use futures03::{FutureExt as _, channel::mpsc, compat::Compat}; - -// Type aliases. -// These exist mainly to avoid typing `<F as Factory>::Foo` all over the code. - -/// Network service type for `Components`. -pub type NetworkService<C> = network::NetworkService< - ComponentBlock<C>, - <<C as Components>::Factory as ServiceFactory>::NetworkProtocol, - ComponentExHash<C> ->; - -/// Code executor type for a factory. -pub type CodeExecutor<F> = NativeExecutor<<F as ServiceFactory>::RuntimeDispatch>; - -/// Full client backend type for a factory. -pub type FullBackend<F> = client_db::Backend<<F as ServiceFactory>::Block>; - -/// Full client executor type for a factory. -pub type FullExecutor<F> = client::LocalCallExecutor< - client_db::Backend<<F as ServiceFactory>::Block>, - CodeExecutor<F>, ->; - -/// Light client backend type for a factory. 
-pub type LightBackend<F> = client::light::backend::Backend< - client_db::light::LightStorage<<F as ServiceFactory>::Block>, - network::OnDemand<<F as ServiceFactory>::Block>, - Blake2Hasher, ->; - -/// Light client executor type for a factory. -pub type LightExecutor<F> = client::light::call_executor::RemoteOrLocalCallExecutor< - <F as ServiceFactory>::Block, - client::light::backend::Backend< - client_db::light::LightStorage<<F as ServiceFactory>::Block>, - network::OnDemand<<F as ServiceFactory>::Block>, - Blake2Hasher - >, - client::light::call_executor::RemoteCallExecutor< - client::light::blockchain::Blockchain< - client_db::light::LightStorage<<F as ServiceFactory>::Block>, - network::OnDemand<<F as ServiceFactory>::Block> - >, - network::OnDemand<<F as ServiceFactory>::Block>, - >, - client::LocalCallExecutor< - client::light::backend::Backend< - client_db::light::LightStorage<<F as ServiceFactory>::Block>, - network::OnDemand<<F as ServiceFactory>::Block>, - Blake2Hasher - >, - CodeExecutor<F> - > ->; - -/// Full client type for a factory. -pub type FullClient<F> = Client<FullBackend<F>, FullExecutor<F>, <F as ServiceFactory>::Block, <F as ServiceFactory>::RuntimeApi>; - -/// Light client type for a factory. -pub type LightClient<F> = Client<LightBackend<F>, LightExecutor<F>, <F as ServiceFactory>::Block, <F as ServiceFactory>::RuntimeApi>; - -/// `ChainSpec` specialization for a factory. -pub type FactoryChainSpec<F> = ChainSpec<<F as ServiceFactory>::Genesis>; - -/// `Genesis` specialization for a factory. -pub type FactoryGenesis<F> = <F as ServiceFactory>::Genesis; - -/// `Block` type for a factory. -pub type FactoryBlock<F> = <F as ServiceFactory>::Block; - -/// `Extrinsic` type for a factory. -pub type FactoryExtrinsic<F> = <<F as ServiceFactory>::Block as BlockT>::Extrinsic; - -/// `Number` type for a factory. -pub type FactoryBlockNumber<F> = <<FactoryBlock<F> as BlockT>::Header as HeaderT>::Number; - -/// Full `Configuration` type for a factory. -pub type FactoryFullConfiguration<F> = Configuration<<F as ServiceFactory>::Configuration, FactoryGenesis<F>>; - -/// Client type for `Components`. -pub type ComponentClient<C> = Client< - <C as Components>::Backend, - <C as Components>::Executor, - FactoryBlock<<C as Components>::Factory>, - <C as Components>::RuntimeApi, ->; - -/// A offchain workers storage backend type. -pub type ComponentOffchainStorage<C> = < - <C as Components>::Backend as client::backend::Backend<ComponentBlock<C>, Blake2Hasher> ->::OffchainStorage; - -/// Block type for `Components` -pub type ComponentBlock<C> = <<C as Components>::Factory as ServiceFactory>::Block; - -/// Extrinsic hash type for `Components` -pub type ComponentExHash<C> = <<C as Components>::TransactionPoolApi as txpool::ChainApi>::Hash; - -/// Extrinsic type. -pub type ComponentExtrinsic<C> = <ComponentBlock<C> as BlockT>::Extrinsic; - -/// Extrinsic pool API type for `Components`. -pub type PoolApi<C> = <C as Components>::TransactionPoolApi; - -/// A set of traits for the runtime genesis config. -pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} -impl<T: Serialize + DeserializeOwned + BuildStorage> RuntimeGenesis for T {} - -/// A transport-agnostic handler of the RPC queries. -pub type RpcHandler = rpc_servers::RpcHandler<rpc::Metadata>; - -/// Something that can create and store initial session keys from given seeds. 
-pub trait InitialSessionKeys<C: Components> { - /// Generate the initial session keys for the given seeds and store them in - /// an internal keystore. - fn generate_initial_session_keys( - client: Arc<ComponentClient<C>>, - seeds: Vec<String>, - ) -> error::Result<()>; -} - -impl<C: Components> InitialSessionKeys<Self> for C where - ComponentClient<C>: ProvideRuntimeApi, - <ComponentClient<C> as ProvideRuntimeApi>::Api: session::SessionKeys<ComponentBlock<C>>, -{ - fn generate_initial_session_keys( - client: Arc<ComponentClient<C>>, - seeds: Vec<String>, - ) -> error::Result<()> { - session::generate_initial_session_keys(client, seeds).map_err(Into::into) - } -} - -/// Something that can start the RPC service. -pub trait StartRpc<C: Components> { - fn start_rpc( - client: Arc<ComponentClient<C>>, - system_send_back: mpsc::UnboundedSender<rpc::system::Request<ComponentBlock<C>>>, - system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc<TransactionPool<C::TransactionPoolApi>>, - rpc_extensions: impl rpc::RpcExtension<rpc::Metadata>, - keystore: KeyStorePtr, - ) -> RpcHandler; -} - -impl<C: Components> StartRpc<C> for C where - ComponentClient<C>: ProvideRuntimeApi, - <ComponentClient<C> as ProvideRuntimeApi>::Api: - runtime_api::Metadata<ComponentBlock<C>> + session::SessionKeys<ComponentBlock<C>>, -{ - fn start_rpc( - client: Arc<ComponentClient<C>>, - system_send_back: mpsc::UnboundedSender<rpc::system::Request<ComponentBlock<C>>>, - rpc_system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc<TransactionPool<C::TransactionPoolApi>>, - rpc_extensions: impl rpc::RpcExtension<rpc::Metadata>, - keystore: KeyStorePtr, - ) -> RpcHandler { - use rpc::{chain, state, author, system}; - let subscriptions = rpc::Subscriptions::new(task_executor.clone()); - let chain = chain::Chain::new(client.clone(), subscriptions.clone()); - let state = state::State::new(client.clone(), subscriptions.clone()); - let author = rpc::author::Author::new( - client, - transaction_pool, - subscriptions, - keystore, - ); - let system = system::System::new(rpc_system_info, system_send_back); - - rpc_servers::rpc_handler(( - state::StateApi::to_delegate(state), - chain::ChainApi::to_delegate(chain), - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions, - )) - } -} - -/// Something that can maintain transaction pool on every imported block. -pub trait MaintainTransactionPool<C: Components> { - fn maintain_transaction_pool( - id: &BlockId<ComponentBlock<C>>, - client: &ComponentClient<C>, - transaction_pool: &TransactionPool<C::TransactionPoolApi>, - ) -> error::Result<()>; -} - -fn maintain_transaction_pool<Api, Backend, Block, Executor, PoolApi>( - id: &BlockId<Block>, - client: &Client<Backend, Executor, Block, Api>, - transaction_pool: &TransactionPool<PoolApi>, -) -> error::Result<()> where - Block: BlockT<Hash = <Blake2Hasher as primitives::Hasher>::Out>, - Backend: client::backend::Backend<Block, Blake2Hasher>, - Client<Backend, Executor, Block, Api>: ProvideRuntimeApi, - <Client<Backend, Executor, Block, Api> as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue<Block>, - Executor: client::CallExecutor<Block, Blake2Hasher>, - PoolApi: txpool::ChainApi<Hash = Block::Hash, Block = Block>, -{ - // Avoid calling into runtime if there is nothing to prune from the pool anyway. - if transaction_pool.status().is_empty() { - return Ok(()) - } - - if let Some(block) = client.block(id)? 
{ - let parent_id = BlockId::hash(*block.block.header().parent_hash()); - let extrinsics = block.block.extrinsics(); - transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; - } - - Ok(()) -} - -impl<C: Components> MaintainTransactionPool<Self> for C where - ComponentClient<C>: ProvideRuntimeApi, - <ComponentClient<C> as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue<ComponentBlock<C>>, -{ - fn maintain_transaction_pool( - id: &BlockId<ComponentBlock<C>>, - client: &ComponentClient<C>, - transaction_pool: &TransactionPool<C::TransactionPoolApi>, - ) -> error::Result<()> { - maintain_transaction_pool(id, client, transaction_pool) - } -} - -pub trait OffchainWorker<C: Components> { - fn offchain_workers( - number: &FactoryBlockNumber<C::Factory>, - offchain: &offchain::OffchainWorkers< - ComponentClient<C>, - ComponentOffchainStorage<C>, - ComponentBlock<C> - >, - pool: &Arc<TransactionPool<C::TransactionPoolApi>>, - network_state: &Arc<dyn NetworkStateInfo + Send + Sync>, - is_validator: bool, - ) -> error::Result<Box<dyn Future<Item = (), Error = ()> + Send>>; -} - -impl<C: Components> OffchainWorker<Self> for C where - ComponentClient<C>: ProvideRuntimeApi, - <ComponentClient<C> as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi<ComponentBlock<C>>, -{ - fn offchain_workers( - number: &FactoryBlockNumber<C::Factory>, - offchain: &offchain::OffchainWorkers< - ComponentClient<C>, - ComponentOffchainStorage<C>, - ComponentBlock<C> - >, - pool: &Arc<TransactionPool<C::TransactionPoolApi>>, - network_state: &Arc<dyn NetworkStateInfo + Send + Sync>, - is_validator: bool, - ) -> error::Result<Box<dyn Future<Item = (), Error = ()> + Send>> { - let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) - .map(|()| Ok(())); - Ok(Box::new(Compat::new(future))) - } -} - -/// The super trait that combines all required traits a `Service` needs to implement. -pub trait ServiceTrait<C: Components>: - Deref<Target = Service<C>> - + Send - + 'static - + StartRpc<C> - + MaintainTransactionPool<C> - + OffchainWorker<C> - + InitialSessionKeys<C> -{} -impl<C: Components, T> ServiceTrait<C> for T where - T: Deref<Target = Service<C>> - + Send - + 'static - + StartRpc<C> - + MaintainTransactionPool<C> - + OffchainWorker<C> - + InitialSessionKeys<C> -{} - -/// Alias for a an implementation of `futures::future::Executor`. -pub type TaskExecutor = Arc<dyn Executor<Box<dyn Future<Item = (), Error = ()> + Send>> + Send + Sync>; - -/// A collection of types and methods to build a service on top of the substrate service. -pub trait ServiceFactory: 'static + Sized { - /// Block type. - type Block: BlockT<Hash=H256>; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// Network protocol extensions. - type NetworkProtocol: network::specialization::NetworkSpecialization<Self::Block>; - /// Chain runtime. - type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static; - /// Extrinsic pool backend type for the full client. - type FullTransactionPoolApi: txpool::ChainApi<Hash = <Self::Block as BlockT>::Hash, Block = Self::Block> + Send + 'static; - /// Extrinsic pool backend type for the light client. - type LightTransactionPoolApi: txpool::ChainApi<Hash = <Self::Block as BlockT>::Hash, Block = Self::Block> + 'static; - /// Genesis configuration for the runtime. - type Genesis: RuntimeGenesis; - /// Other configuration for service members. - type Configuration: Default; - /// RPC initialisation. 
- type RpcExtensions: rpc::RpcExtension<rpc::Metadata>; - /// Extended full service type. - type FullService: ServiceTrait<FullComponents<Self>>; - /// Extended light service type. - type LightService: ServiceTrait<LightComponents<Self>>; - /// ImportQueue for full client - type FullImportQueue: ImportQueue<Self::Block> + 'static; - /// ImportQueue for light clients - type LightImportQueue: ImportQueue<Self::Block> + 'static; - /// The Fork Choice Strategy for the chain - type SelectChain: SelectChain<Self::Block> + 'static; - - //TODO: replace these with a constructor trait. that TransactionPool implements. (#1242) - /// Extrinsic pool constructor for the full client. - fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc<FullClient<Self>>) - -> Result<TransactionPool<Self::FullTransactionPoolApi>, error::Error>; - /// Extrinsic pool constructor for the light client. - fn build_light_transaction_pool(config: TransactionPoolOptions, client: Arc<LightClient<Self>>) - -> Result<TransactionPool<Self::LightTransactionPoolApi>, error::Error>; - - /// Build network protocol. - fn build_network_protocol(config: &FactoryFullConfiguration<Self>) - -> Result<Self::NetworkProtocol, error::Error>; - - /// Build finality proof provider for serving network requests on full node. - fn build_finality_proof_provider( - client: Arc<FullClient<Self>> - ) -> Result<Option<Arc<dyn FinalityProofProvider<Self::Block>>>, error::Error>; - - /// Build the Fork Choice algorithm for full client - fn build_select_chain( - config: &mut FactoryFullConfiguration<Self>, - client: Arc<FullClient<Self>>, - ) -> Result<Self::SelectChain, error::Error>; - - /// Build full service. - fn new_full(config: FactoryFullConfiguration<Self>) - -> Result<Self::FullService, error::Error>; - /// Build light service. - fn new_light(config: FactoryFullConfiguration<Self>) - -> Result<Self::LightService, error::Error>; - - /// ImportQueue for a full client - fn build_full_import_queue( - config: &mut FactoryFullConfiguration<Self>, - _client: Arc<FullClient<Self>>, - _select_chain: Self::SelectChain, - _transaction_pool: Option<Arc<TransactionPool<Self::FullTransactionPoolApi>>>, - ) -> Result<Self::FullImportQueue, error::Error> { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// ImportQueue for a light client - fn build_light_import_queue( - config: &mut FactoryFullConfiguration<Self>, - _client: Arc<LightClient<Self>> - ) -> Result<(Self::LightImportQueue, BoxFinalityProofRequestBuilder<Self::Block>), error::Error> { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// Create custom RPC method handlers for full node. - fn build_full_rpc_extensions( - client: Arc<FullClient<Self>>, - transaction_pool: Arc<TransactionPool<Self::FullTransactionPoolApi>>, - ) -> Self::RpcExtensions; - - /// Create custom RPC method handlers for light node. 
- fn build_light_rpc_extensions( - client: Arc<LightClient<Self>>, - transaction_pool: Arc<TransactionPool<Self::LightTransactionPoolApi>>, - ) -> Self::RpcExtensions; -} - -/// A collection of types and function to generalize over full / light client type. -pub trait Components: Sized + 'static { - /// Associated service factory. - type Factory: ServiceFactory; - /// Client backend. - type Backend: 'static + client::backend::Backend<FactoryBlock<Self::Factory>, Blake2Hasher>; - /// Client executor. - type Executor: 'static + client::CallExecutor<FactoryBlock<Self::Factory>, Blake2Hasher> + Send + Sync + Clone; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// The type that can start all runtime-dependent services. - type RuntimeServices: ServiceTrait<Self>; - /// The type that can extend the RPC methods. - type RpcExtensions: rpc::RpcExtension<rpc::Metadata>; - // TODO: Traitify transaction pool and allow people to implement their own. (#1242) - /// Extrinsic pool type. - type TransactionPoolApi: 'static + txpool::ChainApi< - Hash = <FactoryBlock<Self::Factory> as BlockT>::Hash, - Block = FactoryBlock<Self::Factory> - >; - /// Our Import Queue - type ImportQueue: ImportQueue<FactoryBlock<Self::Factory>> + 'static; - /// The Fork Choice Strategy for the chain - type SelectChain: SelectChain<FactoryBlock<Self::Factory>>; - - /// Create client. - fn build_client( - config: &FactoryFullConfiguration<Self::Factory>, - executor: CodeExecutor<Self::Factory>, - keystore: Option<BareCryptoStorePtr>, - ) -> Result< - ( - Arc<ComponentClient<Self>>, - Option<Arc<OnDemand<FactoryBlock<Self::Factory>>>> - ), - error::Error - >; - - /// Create extrinsic pool. - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc<ComponentClient<Self>>) - -> Result<TransactionPool<Self::TransactionPoolApi>, error::Error>; - - /// Build the queue that imports blocks from the network, and optionally a way for the network - /// to build requests for proofs of finality. - fn build_import_queue( - config: &mut FactoryFullConfiguration<Self::Factory>, - client: Arc<ComponentClient<Self>>, - select_chain: Option<Self::SelectChain>, - _transaction_pool: Option<Arc<TransactionPool<Self::TransactionPoolApi>>>, - ) -> Result<(Self::ImportQueue, Option<BoxFinalityProofRequestBuilder<FactoryBlock<Self::Factory>>>), error::Error>; - - /// Finality proof provider for serving network requests. - fn build_finality_proof_provider( - client: Arc<ComponentClient<Self>> - ) -> Result<Option<Arc<dyn FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error>; - - /// Build fork choice selector - fn build_select_chain( - config: &mut FactoryFullConfiguration<Self::Factory>, - client: Arc<ComponentClient<Self>> - ) -> Result<Option<Self::SelectChain>, error::Error>; - - /// Build RPC extensions - fn build_rpc_extensions( - client: Arc<ComponentClient<Self>>, - transaction_pool: Arc<TransactionPool<Self::TransactionPoolApi>>, - ) -> Self::RpcExtensions; -} - -/// A struct that implement `Components` for the full client. 
-pub struct FullComponents<Factory: ServiceFactory> { - service: Service<FullComponents<Factory>>, -} - -impl<Factory: ServiceFactory> FullComponents<Factory> { - /// Create new `FullComponents` - pub fn new( - config: FactoryFullConfiguration<Factory> - ) -> Result<Self, error::Error> { - Ok( - Self { - service: Service::new(config)?, - } - ) - } -} - -impl<Factory: ServiceFactory> Deref for FullComponents<Factory> { - type Target = Service<Self>; - - fn deref(&self) -> &Self::Target { - &self.service - } -} - -impl<Factory: ServiceFactory> DerefMut for FullComponents<Factory> { - fn deref_mut(&mut self) -> &mut Service<Self> { - &mut self.service - } -} - -impl<Factory: ServiceFactory> Future for FullComponents<Factory> { - type Item = (); - type Error = super::Error; - - fn poll(&mut self) -> Poll<Self::Item, Self::Error> { - self.service.poll() - } -} - -impl<Factory: ServiceFactory> Executor<Box<dyn Future<Item = (), Error = ()> + Send>> -for FullComponents<Factory> { - fn execute( - &self, - future: Box<dyn Future<Item = (), Error = ()> + Send> - ) -> Result<(), futures::future::ExecuteError<Box<dyn Future<Item = (), Error = ()> + Send>>> { - self.service.execute(future) - } -} - -impl<Factory: ServiceFactory> Components for FullComponents<Factory> { - type Factory = Factory; - type Executor = FullExecutor<Factory>; - type Backend = FullBackend<Factory>; - type TransactionPoolApi = <Factory as ServiceFactory>::FullTransactionPoolApi; - type ImportQueue = Factory::FullImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::FullService; - type RpcExtensions = Factory::RpcExtensions; - type SelectChain = Factory::SelectChain; - - fn build_client( - config: &FactoryFullConfiguration<Factory>, - executor: CodeExecutor<Self::Factory>, - keystore: Option<BareCryptoStorePtr>, - ) -> Result< - (Arc<ComponentClient<Self>>, Option<Arc<OnDemand<FactoryBlock<Self::Factory>>>>), - error::Error, - > - { - let db_settings = client_db::DatabaseSettings { - cache_size: config.database_cache_size.map(|u| u as usize), - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - path: config.database_path.clone(), - pruning: config.pruning.clone(), - }; - - Ok(( - Arc::new( - client_db::new_client( - db_settings, - executor, - &config.chain_spec, - config.execution_strategies.clone(), - keystore, - )? 
- ), - None, - )) - } - - fn build_transaction_pool( - config: TransactionPoolOptions, - client: Arc<ComponentClient<Self>> - ) -> Result<TransactionPool<Self::TransactionPoolApi>, error::Error> { - Factory::build_full_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration<Self::Factory>, - client: Arc<ComponentClient<Self>>, - select_chain: Option<Self::SelectChain>, - transaction_pool: Option<Arc<TransactionPool<Self::TransactionPoolApi>>>, - ) -> Result<(Self::ImportQueue, Option<BoxFinalityProofRequestBuilder<FactoryBlock<Self::Factory>>>), error::Error> { - let select_chain = select_chain - .ok_or(error::Error::SelectChainRequired)?; - Factory::build_full_import_queue(config, client, select_chain, transaction_pool) - .map(|queue| (queue, None)) - } - - fn build_select_chain( - config: &mut FactoryFullConfiguration<Self::Factory>, - client: Arc<ComponentClient<Self>> - ) -> Result<Option<Self::SelectChain>, error::Error> { - Self::Factory::build_select_chain(config, client).map(Some) - } - - fn build_finality_proof_provider( - client: Arc<ComponentClient<Self>> - ) -> Result<Option<Arc<dyn FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error> { - Factory::build_finality_proof_provider(client) - } - - fn build_rpc_extensions( - client: Arc<ComponentClient<Self>>, - transaction_pool: Arc<TransactionPool<Self::TransactionPoolApi>>, - ) -> Self::RpcExtensions { - Factory::build_full_rpc_extensions(client, transaction_pool) - } -} - -/// A struct that implement `Components` for the light client. -pub struct LightComponents<Factory: ServiceFactory> { - service: Service<LightComponents<Factory>>, -} - -impl<Factory: ServiceFactory> LightComponents<Factory> { - /// Create new `LightComponents` - pub fn new( - config: FactoryFullConfiguration<Factory>, - ) -> Result<Self, error::Error> { - Ok( - Self { - service: Service::new(config)?, - } - ) - } -} - -impl<Factory: ServiceFactory> Deref for LightComponents<Factory> { - type Target = Service<Self>; - - fn deref(&self) -> &Self::Target { - &self.service - } -} - -impl<Factory: ServiceFactory> DerefMut for LightComponents<Factory> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.service - } -} - -impl<Factory: ServiceFactory> Future for LightComponents<Factory> { - type Item = (); - type Error = super::Error; - - fn poll(&mut self) -> Poll<Self::Item, Self::Error> { - self.service.poll() - } -} - -impl<Factory: ServiceFactory> Executor<Box<dyn Future<Item = (), Error = ()> + Send>> -for LightComponents<Factory> { - fn execute( - &self, - future: Box<dyn Future<Item = (), Error = ()> + Send> - ) -> Result<(), futures::future::ExecuteError<Box<dyn Future<Item = (), Error = ()> + Send>>> { - self.service.execute(future) - } -} - -impl<Factory: ServiceFactory> Components for LightComponents<Factory> { - type Factory = Factory; - type Executor = LightExecutor<Factory>; - type Backend = LightBackend<Factory>; - type TransactionPoolApi = <Factory as ServiceFactory>::LightTransactionPoolApi; - type ImportQueue = <Factory as ServiceFactory>::LightImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::LightService; - type RpcExtensions = Factory::RpcExtensions; - type SelectChain = Factory::SelectChain; - - fn build_client( - config: &FactoryFullConfiguration<Factory>, - executor: CodeExecutor<Self::Factory>, - _: Option<BareCryptoStorePtr>, - ) - -> Result< - ( - Arc<ComponentClient<Self>>, - 
Option<Arc<OnDemand<FactoryBlock<Self::Factory>>>> - ), error::Error> - { - let db_settings = client_db::DatabaseSettings { - cache_size: None, - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - path: config.database_path.clone(), - pruning: config.pruning.clone(), - }; - - let db_storage = client_db::light::LightStorage::new(db_settings)?; - let light_blockchain = client::light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - client::light::new_fetch_checker(light_blockchain.clone(), executor.clone()) - ); - let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); - let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); - let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; - Ok((Arc::new(client), Some(fetcher))) - } - - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc<ComponentClient<Self>>) - -> Result<TransactionPool<Self::TransactionPoolApi>, error::Error> - { - Factory::build_light_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration<Self::Factory>, - client: Arc<ComponentClient<Self>>, - _select_chain: Option<Self::SelectChain>, - _transaction_pool: Option<Arc<TransactionPool<Self::TransactionPoolApi>>>, - ) -> Result<(Self::ImportQueue, Option<BoxFinalityProofRequestBuilder<FactoryBlock<Self::Factory>>>), error::Error> { - Factory::build_light_import_queue(config, client) - .map(|(queue, builder)| (queue, Some(builder))) - } - - fn build_finality_proof_provider( - _client: Arc<ComponentClient<Self>> - ) -> Result<Option<Arc<dyn FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error> { - Ok(None) - } - - fn build_select_chain( - _config: &mut FactoryFullConfiguration<Self::Factory>, - _client: Arc<ComponentClient<Self>> - ) -> Result<Option<Self::SelectChain>, error::Error> { - Ok(None) - } - - fn build_rpc_extensions( - client: Arc<ComponentClient<Self>>, - transaction_pool: Arc<TransactionPool<Self::TransactionPoolApi>>, - ) -> Self::RpcExtensions { - Factory::build_light_rpc_extensions(client, transaction_pool) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use consensus_common::BlockOrigin; - use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; - - #[test] - fn should_remove_transactions_from_the_pool() { - let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); - let client = Arc::new(client); - let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); - let transaction = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - let best = longest_chain.best_chain().unwrap(); - - // store the transaction in the pool - pool.submit_one(&BlockId::hash(best.hash()), transaction.clone()).unwrap(); - - // import the block - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push(transaction.clone()).unwrap(); - let block = builder.bake().unwrap(); - let id = BlockId::hash(block.header().hash()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should clean up the queue - assert_eq!(pool.status().ready, 1); - maintain_transaction_pool( - &id, - &client, - &pool, - ).unwrap(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - } -} diff --git 
a/core/service/src/lib.rs b/core/service/src/lib.rs index 33a42e87fe04a9190790f534bbdd6a5b54c6744c..363ad9cfdada3469bcebfa2dfc3d7d47fc397b5c 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -19,51 +19,43 @@ #![warn(missing_docs)] -mod components; mod chain_spec; pub mod config; +#[macro_use] pub mod chain_ops; pub mod error; use std::io; +use std::marker::PhantomData; use std::net::SocketAddr; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Duration, Instant}; +use serde::{Serialize, de::DeserializeOwned}; use futures::sync::mpsc; use parking_lot::Mutex; -use client::{BlockchainEvents, backend::Backend, runtime_api::BlockT}; +use client::{runtime_api::BlockT, Client}; use exit_future::Signal; use futures::prelude::*; use futures03::stream::{StreamExt as _, TryStreamExt as _}; -use keystore::Store as Keystore; -use network::{NetworkState, NetworkStateInfo}; -use log::{log, info, warn, debug, error, Level}; +use network::{NetworkService, NetworkState, specialization::NetworkSpecialization}; +use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; +use primitives::{Blake2Hasher, H256}; +use sr_primitives::BuildStorage; use sr_primitives::generic::BlockId; -use sr_primitives::traits::{Header, NumberFor, SaturatedConversion}; -use substrate_executor::NativeExecutor; -use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; -use tel::{telemetry, SUBSTRATE_INFO}; +use sr_primitives::traits::NumberFor; pub use self::error::Error; +pub use self::builder::{ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert}; pub use config::{Configuration, Roles, PruningMode}; pub use chain_spec::{ChainSpec, Properties}; pub use transaction_pool::txpool::{ self, Pool as TransactionPool, Options as TransactionPoolOptions, ChainApi, IntoPoolError }; pub use client::FinalityNotifications; - -pub use components::{ - ServiceFactory, FullBackend, FullExecutor, LightBackend, - LightExecutor, Components, PoolApi, ComponentClient, ComponentOffchainStorage, - ComponentBlock, FullClient, LightClient, FullComponents, LightComponents, - CodeExecutor, NetworkService, FactoryChainSpec, FactoryBlock, - FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis, - ComponentExHash, ComponentExtrinsic, FactoryExtrinsic, InitialSessionKeys, -}; -use components::{StartRpc, MaintainTransactionPool, OffchainWorker}; +pub use rpc::Metadata as RpcMetadata; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] @@ -74,15 +66,15 @@ pub use futures::future::Executor; const DEFAULT_PROTOCOL_ID: &str = "sup"; /// Substrate service. -pub struct Service<Components: components::Components> { - client: Arc<ComponentClient<Components>>, - select_chain: Option<Components::SelectChain>, - network: Arc<components::NetworkService<Components>>, +pub struct NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> { + client: Arc<TCl>, + select_chain: Option<TSc>, + network: Arc<TNet>, /// Sinks to propagate network status updates. network_status_sinks: Arc<Mutex<Vec<mpsc::UnboundedSender<( - NetworkStatus<ComponentBlock<Components>>, NetworkState + TNetStatus, NetworkState )>>>>, - transaction_pool: Arc<TransactionPool<Components::TransactionPoolApi>>, + transaction_pool: Arc<TTxPool>, /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. 
exit: exit_future::Exit, @@ -100,31 +92,22 @@ pub struct Service<Components: components::Components> { /// The elements must then be polled manually. to_poll: Vec<Box<dyn Future<Item = (), Error = ()> + Send>>, /// Configuration of this Service - config: FactoryFullConfiguration<Components::Factory>, - rpc_handlers: components::RpcHandler, + config: TCfg, + rpc_handlers: rpc_servers::RpcHandler<rpc::Metadata>, _rpc: Box<dyn std::any::Any + Send + Sync>, _telemetry: Option<tel::Telemetry>, _telemetry_on_connect_sinks: Arc<Mutex<Vec<mpsc::UnboundedSender<()>>>>, - _offchain_workers: Option<Arc<offchain::OffchainWorkers< - ComponentClient<Components>, - ComponentOffchainStorage<Components>, - ComponentBlock<Components>> - >>, + _offchain_workers: Option<Arc<TOc>>, keystore: keystore::KeyStorePtr, + marker: PhantomData<TBl>, } -/// Creates bare client without any networking. -pub fn new_client<Factory: components::ServiceFactory>( - config: &FactoryFullConfiguration<Factory>, -) -> Result<Arc<ComponentClient<components::FullComponents<Factory>>>, error::Error> { - let executor = NativeExecutor::new(config.default_heap_pages); - - components::FullComponents::<Factory>::build_client( - config, - executor, - None, - ).map(|r| r.0) -} +/// A set of traits for the runtime genesis config. +pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} +impl<T: Serialize + DeserializeOwned + BuildStorage> RuntimeGenesis for T {} + +/// Alias for a an implementation of `futures::future::Executor`. +pub type TaskExecutor = Arc<dyn Executor<Box<dyn Future<Item = (), Error = ()> + Send>> + Send + Sync>; /// An handle for spawning tasks in the service. #[derive(Clone)] @@ -146,59 +129,38 @@ impl Executor<Box<dyn Future<Item = (), Error = ()> + Send>> for SpawnTaskHandle } } -/// Stream of events for connection established to a telemetry server. -pub type TelemetryOnConnectNotifications = mpsc::UnboundedReceiver<()>; - -/// Used to hook on telemetry connection established events. -pub struct TelemetryOnConnect { - /// Event stream. - pub telemetry_connection_sinks: TelemetryOnConnectNotifications, -} - -impl<Components: components::Components> Service<Components> { - /// Creates a new service. - pub fn new( - mut config: FactoryFullConfiguration<Components::Factory>, - ) -> Result<Self, error::Error> { +macro_rules! new_impl { + ( + $block:ty, + $config:ident, + $build_components:expr, + $maintain_transaction_pool:expr, + $offchain_workers:expr, + $start_rpc:expr, + ) => {{ let (signal, exit) = exit_future::signal(); // List of asynchronous tasks to spawn. We collect them, then spawn them all at once. let (to_spawn_tx, to_spawn_rx) = mpsc::unbounded::<Box<dyn Future<Item = (), Error = ()> + Send>>(); - // Create client - let executor = NativeExecutor::new(config.default_heap_pages); - - let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; - - let (client, on_demand) = Components::build_client(&config, executor, Some(keystore.clone()))?; - let select_chain = Components::build_select_chain(&mut config, client.clone())?; - - let transaction_pool = Arc::new( - Components::build_transaction_pool(config.transaction_pool.clone(), client.clone())? 
- ); - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !config.roles.is_light(), - pool: transaction_pool.clone(), - client: client.clone(), - }); - - let (import_queue, finality_proof_request_builder) = Components::build_import_queue( - &mut config, - client.clone(), - select_chain.clone(), - Some(transaction_pool.clone()), - )?; + // Create all the components. + let ( + client, + on_demand, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool, + rpc_extensions + ) = $build_components(&mut $config)?; let import_queue = Box::new(import_queue); - let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?; let chain_info = client.info().chain; - Components::RuntimeServices::generate_initial_session_keys( - client.clone(), - config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; - - let version = config.full_version(); + let version = $config.full_version(); info!("Highest known block at #{}", chain_info.best_number); telemetry!( SUBSTRATE_INFO; @@ -207,10 +169,14 @@ impl<Components: components::Components> Service<Components> { "best" => ?chain_info.best_hash ); - let network_protocol = <Components::Factory>::build_network_protocol(&config)?; + let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { + imports_external_transactions: !$config.roles.is_light(), + pool: transaction_pool.clone(), + client: client.clone(), + }); let protocol_id = { - let protocol_id_full = match config.chain_spec.protocol_id() { + let protocol_id_full = match $config.chain_spec.protocol_id() { Some(pid) => pid, None => { warn!("Using default protocol ID {:?} because none is configured in the \ @@ -223,8 +189,8 @@ impl<Components: components::Components> Service<Components> { }; let network_params = network::config::Params { - roles: config.roles, - network_config: config.network.clone(), + roles: $config.roles, + network_config: $config.network.clone(), chain: client.clone(), finality_proof_provider, finality_proof_request_builder, @@ -242,7 +208,7 @@ impl<Components: components::Components> Service<Components> { #[allow(deprecated)] let offchain_storage = client.backend().offchain_storage(); - let offchain_workers = match (config.offchain_worker, offchain_storage) { + let offchain_workers = match ($config.offchain_worker, offchain_storage) { (true, Some(db)) => { Some(Arc::new(offchain::OffchainWorkers::new(client.clone(), db))) }, @@ -260,23 +226,25 @@ impl<Components: components::Components> Service<Components> { let offchain = offchain_workers.as_ref().map(Arc::downgrade); let to_spawn_tx_ = to_spawn_tx.clone(); let network_state_info: Arc<dyn NetworkStateInfo + Send + Sync> = network.clone(); - let is_validator = config.roles.is_authority(); + let is_validator = $config.roles.is_authority(); let events = client.import_notification_stream() .map(|v| Ok::<_, ()>(v)).compat() .for_each(move |notification| { let number = *notification.header.number(); + let txpool = txpool.upgrade(); - if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) { - Components::RuntimeServices::maintain_transaction_pool( + if let (Some(txpool), Some(client)) = (txpool.as_ref(), wclient.upgrade()) { + $maintain_transaction_pool( &BlockId::hash(notification.hash), &*client, &*txpool, ).map_err(|e| warn!("Pool error processing new block: {:?}", e))?; } - if let (Some(txpool), Some(offchain)) = (txpool.upgrade(), 
offchain.as_ref().and_then(|o| o.upgrade())) { - let future = Components::RuntimeServices::offchain_workers( + let offchain = offchain.as_ref().and_then(|o| o.upgrade()); + if let (Some(txpool), Some(offchain)) = (txpool, offchain) { + let future = $offchain_workers( &number, &offchain, &txpool, @@ -321,7 +289,7 @@ impl<Components: components::Components> Service<Components> { let client_ = client.clone(); let mut sys = System::new(); let self_pid = get_current_pid().ok(); - let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<ComponentBlock<Components>>, NetworkState)>(); + let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); network_status_sinks.lock().push(netstat_tx); let tel_task = netstat_rx.for_each(move |(net_status, network_state)| { let info = client_.info(); @@ -374,23 +342,23 @@ impl<Components: components::Components> Service<Components> { let (system_rpc_tx, system_rpc_rx) = futures03::channel::mpsc::unbounded(); let gen_handler = || { let system_info = rpc::system::SystemInfo { - chain_name: config.chain_spec.name().into(), - impl_name: config.impl_name.into(), - impl_version: config.impl_version.into(), - properties: config.chain_spec.properties(), + chain_name: $config.chain_spec.name().into(), + impl_name: $config.impl_name.into(), + impl_version: $config.impl_version.into(), + properties: $config.chain_spec.properties(), }; - Components::RuntimeServices::start_rpc( + $start_rpc( client.clone(), system_rpc_tx.clone(), system_info.clone(), Arc::new(SpawnTaskHandle { sender: to_spawn_tx.clone() }), transaction_pool.clone(), - Components::build_rpc_extensions(client.clone(), transaction_pool.clone()), + rpc_extensions.clone(), keystore.clone(), ) }; let rpc_handlers = gen_handler(); - let rpc = start_rpc_servers(&config, gen_handler)?; + let rpc = start_rpc_servers(&$config, gen_handler)?; let _ = to_spawn_tx.unbounded_send(Box::new(build_network_future( network_mut, @@ -406,17 +374,17 @@ impl<Components: components::Components> Service<Components> { let telemetry_connection_sinks: Arc<Mutex<Vec<mpsc::UnboundedSender<()>>>> = Default::default(); // Telemetry - let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let is_authority = config.roles.is_authority(); + let telemetry = $config.telemetry_endpoints.clone().map(|endpoints| { + let is_authority = $config.roles.is_authority(); let network_id = network.local_peer_id().to_base58(); - let name = config.name.clone(); - let impl_name = config.impl_name.to_owned(); + let name = $config.name.clone(); + let impl_name = $config.impl_name.to_owned(); let version = version.clone(); - let chain_name = config.chain_spec.name().to_owned(); + let chain_name = $config.chain_spec.name().to_owned(); let telemetry_connection_sinks_ = telemetry_connection_sinks.clone(); let telemetry = tel::init_telemetry(tel::TelemetryConfig { endpoints, - wasm_external_transport: config.telemetry_external_transport.take(), + wasm_external_transport: $config.telemetry_external_transport.take(), }); let future = telemetry.clone() .map(|ev| Ok::<_, ()>(ev)) @@ -446,7 +414,7 @@ impl<Components: components::Components> Service<Components> { telemetry }); - Ok(Service { + Ok(NewService { client, network, network_status_sinks, @@ -458,56 +426,145 @@ impl<Components: components::Components> Service<Components> { to_spawn_tx, to_spawn_rx, to_poll: Vec::new(), - config, + $config, rpc_handlers, _rpc: rpc, _telemetry: telemetry, _offchain_workers: offchain_workers, _telemetry_on_connect_sinks: 
telemetry_connection_sinks.clone(), keystore, + marker: PhantomData::<$block>, }) - } + }} +} + +mod builder; + +/// Abstraction over a Substrate service. +pub trait AbstractService: 'static + Future<Item = (), Error = Error> + + Executor<Box<dyn Future<Item = (), Error = ()> + Send>> + Send { + /// Type of block of this chain. + type Block: BlockT<Hash = H256>; + /// Backend storage for the client. + type Backend: 'static + client::backend::Backend<Self::Block, Blake2Hasher>; + /// How to execute calls towards the runtime. + type CallExecutor: 'static + client::CallExecutor<Self::Block, Blake2Hasher> + Send + Sync + Clone; + /// API that the runtime provides. + type RuntimeApi: Send + Sync; + /// Configuration struct of the service. + type Config; + /// Chain selection algorithm. + type SelectChain; + /// API of the transaction pool. + type TransactionPoolApi: ChainApi<Block = Self::Block>; + /// Network specialization. + type NetworkSpecialization: NetworkSpecialization<Self::Block>; + + /// Get event stream for telemetry connection established events. + fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()>; + + /// Returns the configuration passed on construction. + fn config(&self) -> &Self::Config; + + /// Returns the configuration passed on construction. + fn config_mut(&mut self) -> &mut Self::Config; + + /// return a shared instance of Telemetry (if enabled) + fn telemetry(&self) -> Option<tel::Telemetry>; + + /// Spawns a task in the background that runs the future passed as parameter. + fn spawn_task(&self, task: impl Future<Item = (), Error = ()> + Send + 'static); + + /// Spawns a task in the background that runs the future passed as + /// parameter. The given task is considered essential, i.e. if it errors we + /// trigger a service exit. + fn spawn_essential_task(&self, task: impl Future<Item = (), Error = ()> + Send + 'static); + + /// Returns a handle for spawning tasks. + fn spawn_task_handle(&self) -> SpawnTaskHandle; + + /// Returns the keystore that stores keys. + fn keystore(&self) -> keystore::KeyStorePtr; + + /// Starts an RPC query. + /// + /// The query is passed as a string and must be a JSON text similar to what an HTTP client + /// would for example send. + /// + /// Returns a `Future` that contains the optional response. + /// + /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to + /// send back spontaneous events. + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box<dyn Future<Item = Option<String>, Error = ()> + Send>; + + /// Get shared client instance. + fn client(&self) -> Arc<client::Client<Self::Backend, Self::CallExecutor, Self::Block, Self::RuntimeApi>>; + + /// Get clone of select chain. + fn select_chain(&self) -> Option<Self::SelectChain>; + + /// Get shared network instance. + fn network(&self) -> Arc<NetworkService<Self::Block, Self::NetworkSpecialization, H256>>; - /// Returns a reference to the config passed at initialization. - pub fn config(&self) -> &FactoryFullConfiguration<Components::Factory> { + /// Returns a receiver that periodically receives a status of the network. + fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus<Self::Block>, NetworkState)>; + + /// Get shared transaction pool instance. + fn transaction_pool(&self) -> Arc<TransactionPool<Self::TransactionPoolApi>>; + + /// Get a handle to a future that will resolve on exit. 
+ fn on_exit(&self) -> ::exit_future::Exit; +} + +impl<TCfg, TBl, TBackend, TExec, TRtApi, TSc, TNetSpec, TExPoolApi, TOc> AbstractService for + NewService<TCfg, TBl, Client<TBackend, TExec, TBl, TRtApi>, TSc, NetworkStatus<TBl>, + NetworkService<TBl, TNetSpec, H256>, TransactionPool<TExPoolApi>, TOc> +where TCfg: 'static + Send, + TBl: BlockT<Hash = H256>, + TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher>, + TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone, + TRtApi: 'static + Send + Sync, + TSc: 'static + Clone + Send, + TExPoolApi: 'static + ChainApi<Block = TBl>, + TOc: 'static + Send + Sync, + TNetSpec: NetworkSpecialization<TBl>, +{ + type Block = TBl; + type Backend = TBackend; + type CallExecutor = TExec; + type RuntimeApi = TRtApi; + type Config = TCfg; + type SelectChain = TSc; + type TransactionPoolApi = TExPoolApi; + type NetworkSpecialization = TNetSpec; + + fn config(&self) -> &Self::Config { &self.config } - /// Returns a reference to the config passed at initialization. - /// - /// > **Note**: This method is currently necessary because we extract some elements from the - /// > configuration at the end of the service initialization. It is intended to be - /// > removed. - pub fn config_mut(&mut self) -> &mut FactoryFullConfiguration<Components::Factory> { + fn config_mut(&mut self) -> &mut Self::Config { &mut self.config } - /// Get event stream for telemetry connection established events. - pub fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { + fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()> { let (sink, stream) = mpsc::unbounded(); self._telemetry_on_connect_sinks.lock().push(sink); stream } - /// Return a shared instance of Telemetry (if enabled) - pub fn telemetry(&self) -> Option<tel::Telemetry> { + fn telemetry(&self) -> Option<tel::Telemetry> { self._telemetry.as_ref().map(|t| t.clone()) } - /// Returns the keystore instance. - pub fn keystore(&self) -> keystore::KeyStorePtr { + fn keystore(&self) -> keystore::KeyStorePtr { self.keystore.clone() } - /// Spawns a task in the background that runs the future passed as parameter. - pub fn spawn_task(&self, task: impl Future<Item = (), Error = ()> + Send + 'static) { + fn spawn_task(&self, task: impl Future<Item = (), Error = ()> + Send + 'static) { let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); } - /// Spawns a task in the background that runs the future passed as - /// parameter. The given task is considered essential, i.e. if it errors we - /// trigger a service exit. - pub fn spawn_essential_task(&self, task: impl Future<Item = (), Error = ()> + Send + 'static) { + fn spawn_essential_task(&self, task: impl Future<Item = (), Error = ()> + Send + 'static) { let essential_failed = self.essential_failed.clone(); let essential_task = Box::new(task.map_err(move |_| { error!("Essential task failed. Shutting down service."); @@ -517,62 +574,45 @@ impl<Components: components::Components> Service<Components> { let _ = self.to_spawn_tx.unbounded_send(essential_task); } - /// Returns a handle for spawning tasks. - pub fn spawn_task_handle(&self) -> SpawnTaskHandle { + fn spawn_task_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { sender: self.to_spawn_tx.clone(), } } - /// Starts an RPC query. - /// - /// The query is passed as a string and must be a JSON text similar to what an HTTP client - /// would for example send. - /// - /// Returns a `Future` that contains the optional response. 
- /// - /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to - /// send back spontaneous events. - pub fn rpc_query(&self, mem: &RpcSession, request: &str) - -> impl Future<Item = Option<String>, Error = ()> - { - self.rpc_handlers.handle_request(request, mem.metadata.clone()) + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box<dyn Future<Item = Option<String>, Error = ()> + Send> { + Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) } - /// Get shared client instance. - pub fn client(&self) -> Arc<ComponentClient<Components>> { + fn client(&self) -> Arc<client::Client<Self::Backend, Self::CallExecutor, Self::Block, Self::RuntimeApi>> { self.client.clone() } - /// Get clone of select chain. - pub fn select_chain(&self) -> Option<<Components as components::Components>::SelectChain> { + fn select_chain(&self) -> Option<Self::SelectChain> { self.select_chain.clone() } - /// Get shared network instance. - pub fn network(&self) -> Arc<components::NetworkService<Components>> { + fn network(&self) -> Arc<NetworkService<Self::Block, Self::NetworkSpecialization, H256>> { self.network.clone() } - /// Returns a receiver that periodically receives a status of the network. - pub fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus<ComponentBlock<Components>>, NetworkState)> { + fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus<Self::Block>, NetworkState)> { let (sink, stream) = mpsc::unbounded(); self.network_status_sinks.lock().push(sink); stream } - /// Get shared transaction pool instance. - pub fn transaction_pool(&self) -> Arc<TransactionPool<Components::TransactionPoolApi>> { + fn transaction_pool(&self) -> Arc<TransactionPool<Self::TransactionPoolApi>> { self.transaction_pool.clone() } - /// Get a handle to a future that will resolve on exit. - pub fn on_exit(&self) -> ::exit_future::Exit { + fn on_exit(&self) -> ::exit_future::Exit { self.exit.clone() } } -impl<Components> Future for Service<Components> where Components: components::Components { +impl<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Future for +NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> { type Item = (); type Error = Error; @@ -603,9 +643,8 @@ impl<Components> Future for Service<Components> where Components: components::Co } } -impl<Components> Executor<Box<dyn Future<Item = (), Error = ()> + Send>> - for Service<Components> where Components: components::Components -{ +impl<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Executor<Box<dyn Future<Item = (), Error = ()> + Send>> for +NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> { fn execute( &self, future: Box<dyn Future<Item = (), Error = ()> + Send> @@ -746,7 +785,8 @@ pub struct NetworkStatus<B: BlockT> { pub average_upload_per_sec: u64, } -impl<Components> Drop for Service<Components> where Components: components::Components { +impl<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Drop for +NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> { fn drop(&mut self) { debug!(target: "service", "Substrate service shutdown"); if let Some(signal) = self.signal.take() { @@ -757,7 +797,7 @@ impl<Components> Drop for Service<Components> where Components: components::Comp /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. 
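Aside, for readers following the refactoring: code that previously called inherent methods on the factory-generated `Service<Components>` is now written against `impl AbstractService`. The sketch below is illustrative only and not part of the patch — the helper name is invented — but it uses only the trait methods defined above, with the same futures-0.1 `select(on_exit())` pattern the service itself uses for auxiliary tasks.

use futures::{Future, Stream};
use log::info;
use substrate_service::AbstractService;

/// Illustrative only: watch the node's network status until the service exits.
fn watch_network(service: &impl AbstractService) {
	// `network_status()` registers a fresh sink with the service and hands back the
	// receiving end, the same sink-per-caller pattern used for telemetry connections.
	let status_updates = service
		.network_status()
		.for_each(|(_status, _state)| {
			info!("received a network status report");
			Ok(())
		});

	// Tie the background task to the service lifetime, mirroring how the patch
	// itself spawns auxiliary futures.
	service.spawn_task(
		status_updates
			.select(service.on_exit())
			.map(|_| ())
			.map_err(|_| ()),
	);
}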
#[cfg(not(target_os = "unknown"))] -fn start_rpc_servers<C, G, H: FnMut() -> components::RpcHandler>( +fn start_rpc_servers<C, G, H: FnMut() -> rpc_servers::RpcHandler<rpc::Metadata>>( config: &Configuration<C, G>, mut gen_handler: H ) -> Result<Box<dyn std::any::Any + Send + Sync>, error::Error> { @@ -906,225 +946,6 @@ where } } -/// Constructs a service factory with the given name that implements the `ServiceFactory` trait. -/// The required parameters are required to be given in the exact order. Some parameters are followed -/// by `{}` blocks. These blocks are required and used to initialize the given parameter. -/// In these block it is required to write a closure that takes the same number of arguments, -/// the corresponding function in the `ServiceFactory` trait provides. -/// -/// # Example -/// -/// ``` -/// # use substrate_service::{ -/// # construct_service_factory, Service, FullBackend, FullExecutor, LightBackend, LightExecutor, -/// # FullComponents, LightComponents, FactoryFullConfiguration, FullClient -/// # }; -/// # use transaction_pool::{self, txpool::{Pool as TransactionPool}}; -/// # use network::{config::DummyFinalityProofRequestBuilder, construct_simple_protocol}; -/// # use client::{self, LongestChain}; -/// # use consensus_common::import_queue::{BasicQueue, Verifier}; -/// # use consensus_common::{BlockOrigin, BlockImportParams, well_known_cache_keys::Id as CacheKeyId}; -/// # use node_runtime::{GenesisConfig, RuntimeApi}; -/// # use std::sync::Arc; -/// # use node_primitives::Block; -/// # use babe_primitives::AuthorityPair as BabePair; -/// # use grandpa_primitives::AuthorityPair as GrandpaPair; -/// # use sr_primitives::Justification; -/// # use sr_primitives::traits::Block as BlockT; -/// # use grandpa; -/// # construct_simple_protocol! { -/// # pub struct NodeProtocol where Block = Block { } -/// # } -/// # struct MyVerifier; -/// # impl<B: BlockT> Verifier<B> for MyVerifier { -/// # fn verify( -/// # &mut self, -/// # origin: BlockOrigin, -/// # header: B::Header, -/// # justification: Option<Justification>, -/// # body: Option<Vec<B::Extrinsic>>, -/// # ) -> Result<(BlockImportParams<B>, Option<Vec<(CacheKeyId, Vec<u8>)>>), String> { -/// # unimplemented!(); -/// # } -/// # } -/// type FullChainApi<T> = transaction_pool::ChainApi< -/// client::Client<FullBackend<T>, FullExecutor<T>, Block, RuntimeApi>, Block>; -/// type LightChainApi<T> = transaction_pool::ChainApi< -/// client::Client<LightBackend<T>, LightExecutor<T>, Block, RuntimeApi>, Block>; -/// -/// construct_service_factory! { -/// struct Factory { -/// // Declare the block type -/// Block = Block, -/// RuntimeApi = RuntimeApi, -/// // Declare the network protocol and give an initializer. 
-/// NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, -/// RuntimeDispatch = node_executor::Executor, -/// FullTransactionPoolApi = FullChainApi<Self> -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// LightTransactionPoolApi = LightChainApi<Self> -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// Genesis = GenesisConfig, -/// Configuration = (), -/// FullService = FullComponents<Self> -/// { |config| <FullComponents<Factory>>::new(config) }, -/// // Setup as Consensus Authority (if the role and key are given) -/// AuthoritySetup = { -/// |service: Self::FullService| { -/// Ok(service) -/// }}, -/// LightService = LightComponents<Self> -/// { |config| <LightComponents<Factory>>::new(config) }, -/// FullImportQueue = BasicQueue<Block> -/// { |_, client, _, _| Ok(BasicQueue::new(MyVerifier, Box::new(client), None, None)) }, -/// LightImportQueue = BasicQueue<Block> -/// { |_, client| { -/// let fprb = Box::new(DummyFinalityProofRequestBuilder::default()) as Box<_>; -/// Ok((BasicQueue::new(MyVerifier, Box::new(client), None, None), fprb)) -/// }}, -/// SelectChain = LongestChain<FullBackend<Self>, Self::Block> -/// { |config: &FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| { -/// #[allow(deprecated)] -/// Ok(LongestChain::new(client.backend().clone())) -/// }}, -/// FinalityProofProvider = { |client: Arc<FullClient<Self>>| { -/// Ok(Some(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)) -/// }}, -/// RpcExtensions = (), -/// } -/// } -/// ``` -#[macro_export] -macro_rules! construct_service_factory { - ( - $(#[$attr:meta])* - struct $name:ident { - Block = $block:ty, - RuntimeApi = $runtime_api:ty, - NetworkProtocol = $protocol:ty { $( $protocol_init:tt )* }, - RuntimeDispatch = $dispatch:ty, - FullTransactionPoolApi = $full_transaction:ty { $( $full_transaction_init:tt )* }, - LightTransactionPoolApi = $light_transaction:ty { $( $light_transaction_init:tt )* }, - Genesis = $genesis:ty, - Configuration = $config:ty, - FullService = $full_service:ty { $( $full_service_init:tt )* }, - AuthoritySetup = { $( $authority_setup:tt )* }, - LightService = $light_service:ty { $( $light_service_init:tt )* }, - FullImportQueue = $full_import_queue:ty - { $( $full_import_queue_init:tt )* }, - LightImportQueue = $light_import_queue:ty - { $( $light_import_queue_init:tt )* }, - SelectChain = $select_chain:ty - { $( $select_chain_init:tt )* }, - FinalityProofProvider = { $( $finality_proof_provider_init:tt )* }, - RpcExtensions = $rpc_extensions_ty:ty - $( { $( $rpc_extensions:tt )* } )?, - } - ) => { - $( #[$attr] )* - pub struct $name {} - - #[allow(unused_variables)] - impl $crate::ServiceFactory for $name { - type Block = $block; - type RuntimeApi = $runtime_api; - type NetworkProtocol = $protocol; - type RuntimeDispatch = $dispatch; - type FullTransactionPoolApi = $full_transaction; - type LightTransactionPoolApi = $light_transaction; - type Genesis = $genesis; - type Configuration = $config; - type FullService = $full_service; - type LightService = $light_service; - type FullImportQueue = $full_import_queue; - type LightImportQueue = $light_import_queue; - type SelectChain = $select_chain; - type RpcExtensions = $rpc_extensions_ty; - - fn build_full_transaction_pool( - config: $crate::TransactionPoolOptions, - client: $crate::Arc<$crate::FullClient<Self>> - ) -> $crate::Result<$crate::TransactionPool<Self::FullTransactionPoolApi>, 
$crate::Error> - { - ( $( $full_transaction_init )* ) (config, client) - } - - fn build_light_transaction_pool( - config: $crate::TransactionPoolOptions, - client: $crate::Arc<$crate::LightClient<Self>> - ) -> $crate::Result<$crate::TransactionPool<Self::LightTransactionPoolApi>, $crate::Error> - { - ( $( $light_transaction_init )* ) (config, client) - } - - fn build_network_protocol(config: &$crate::FactoryFullConfiguration<Self>) - -> $crate::Result<Self::NetworkProtocol, $crate::Error> - { - ( $( $protocol_init )* ) (config) - } - - fn build_select_chain( - config: &mut $crate::FactoryFullConfiguration<Self>, - client: Arc<$crate::FullClient<Self>> - ) -> $crate::Result<Self::SelectChain, $crate::Error> { - ( $( $select_chain_init )* ) (config, client) - } - - fn build_full_import_queue( - config: &mut $crate::FactoryFullConfiguration<Self>, - client: $crate::Arc<$crate::FullClient<Self>>, - select_chain: Self::SelectChain, - transaction_pool: Option<Arc<$crate::TransactionPool<Self::FullTransactionPoolApi>>>, - ) -> $crate::Result<Self::FullImportQueue, $crate::Error> { - ( $( $full_import_queue_init )* ) (config, client, select_chain, transaction_pool) - } - - fn build_light_import_queue( - config: &mut FactoryFullConfiguration<Self>, - client: Arc<$crate::LightClient<Self>>, - ) -> Result<(Self::LightImportQueue, $crate::BoxFinalityProofRequestBuilder<$block>), $crate::Error> { - ( $( $light_import_queue_init )* ) (config, client) - } - - fn build_finality_proof_provider( - client: Arc<$crate::FullClient<Self>> - ) -> Result<Option<Arc<$crate::FinalityProofProvider<Self::Block>>>, $crate::Error> { - ( $( $finality_proof_provider_init )* ) (client) - } - - fn new_light( - config: $crate::FactoryFullConfiguration<Self> - ) -> $crate::Result<Self::LightService, $crate::Error> - { - ( $( $light_service_init )* ) (config) - } - - fn new_full( - config: $crate::FactoryFullConfiguration<Self> - ) -> Result<Self::FullService, $crate::Error> - { - ( $( $full_service_init )* ) (config).and_then(|service| { - ($( $authority_setup )*)(service) - }) - } - - fn build_full_rpc_extensions( - client: Arc<$crate::FullClient<Self>>, - transaction_pool: Arc<$crate::TransactionPool<Self::FullTransactionPoolApi>>, - ) -> Self::RpcExtensions { - $( ( $( $rpc_extensions )* ) (client, transaction_pool) )? - } - - fn build_light_rpc_extensions( - client: Arc<$crate::LightClient<Self>>, - transaction_pool: Arc<$crate::TransactionPool<Self::LightTransactionPoolApi>>, - ) -> Self::RpcExtensions { - $( ( $( $rpc_extensions )* ) (client, transaction_pool) )? - } - } - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index c2895c53294965a097e9c98f6a911e492622cdbc..870f287bff8f2911c985d4dba8cf16d10822129b 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -27,32 +27,31 @@ use tempdir::TempDir; use tokio::{runtime::Runtime, prelude::FutureExt}; use tokio::timer::Interval; use service::{ - ServiceFactory, + AbstractService, + ChainSpec, Configuration, - FactoryFullConfiguration, - FactoryChainSpec, Roles, - FactoryExtrinsic, + Error, }; use network::{multiaddr, Multiaddr}; use network::config::{NetworkConfiguration, TransportConfig, NodeKeyConfig, Secret, NonReservedPeerMode}; -use sr_primitives::generic::BlockId; +use sr_primitives::{generic::BlockId, traits::Block as BlockT}; use consensus::{BlockImportParams, BlockImport}; /// Maximum duration of single wait call. 
const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet<F: ServiceFactory> { +struct TestNet<G, F, L, U> { runtime: Runtime, - authority_nodes: Vec<(usize, SyncService<F::FullService>, Multiaddr)>, - full_nodes: Vec<(usize, SyncService<F::FullService>, Multiaddr)>, - light_nodes: Vec<(usize, SyncService<F::LightService>, Multiaddr)>, - chain_spec: FactoryChainSpec<F>, + authority_nodes: Vec<(usize, SyncService<F>, U, Multiaddr)>, + full_nodes: Vec<(usize, SyncService<F>, U, Multiaddr)>, + light_nodes: Vec<(usize, SyncService<L>, Multiaddr)>, + chain_spec: ChainSpec<G>, base_port: u16, nodes: usize, } -/// Wraps around an `Arc<Service>>` and implements `Future`. +/// Wraps around an `Arc<Service>` and implements `Future`. pub struct SyncService<T>(Arc<Mutex<T>>); impl<T> SyncService<T> { @@ -82,22 +81,24 @@ impl<T: Future<Item=(), Error=service::Error>> Future for SyncService<T> { } } -impl<F: ServiceFactory> TestNet<F> { +impl<G, F, L, U> TestNet<G, F, L, U> +where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static +{ pub fn run_until_all_full<FP, LP>( &mut self, full_predicate: FP, light_predicate: LP, ) where - FP: Send + Fn(usize, &SyncService<F::FullService>) -> bool + 'static, - LP: Send + Fn(usize, &SyncService<F::LightService>) -> bool + 'static, + FP: Send + Fn(usize, &SyncService<F>) -> bool + 'static, + LP: Send + Fn(usize, &SyncService<L>) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); let interval = Interval::new_interval(Duration::from_millis(100)) .map_err(|_| ()) .for_each(move |_| { - let full_ready = full_nodes.iter().all(|&(ref id, ref service, _)| + let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| full_predicate(*id, service) ); @@ -125,14 +126,14 @@ impl<F: ServiceFactory> TestNet<F> { } } -fn node_config<F: ServiceFactory> ( +fn node_config<G> ( index: usize, - spec: &FactoryChainSpec<F>, + spec: &ChainSpec<G>, role: Roles, key_seed: Option<String>, base_port: u16, root: &TempDir, -) -> FactoryFullConfiguration<F> +) -> Configuration<(), G> { let root = root.path().join(format!("node-{}", index)); @@ -194,18 +195,18 @@ fn node_config<F: ServiceFactory> ( } } -impl<F: ServiceFactory> TestNet<F> where - F::FullService: Future<Item=(), Error=service::Error>, - F::LightService: Future<Item=(), Error=service::Error>, +impl<G, F, L, U> TestNet<G, F, L, U> where + F: AbstractService, + L: AbstractService, { fn new( temp: &TempDir, - spec: FactoryChainSpec<F>, - full: usize, - light: usize, - authorities: Vec<String>, + spec: ChainSpec<G>, + full: impl Iterator<Item = impl FnOnce(Configuration<(), G>) -> Result<(F, U), Error>>, + light: impl Iterator<Item = impl FnOnce(Configuration<(), G>) -> Result<L, Error>>, + authorities: impl Iterator<Item = (String, impl FnOnce(Configuration<(), G>) -> Result<(F, U), Error>)>, base_port: u16 - ) -> TestNet<F> { + ) -> TestNet<G, F, L, U> { let _ = env_logger::try_init(); fdlimit::raise_fd_limit(); let runtime = Runtime::new().expect("Error creating tokio runtime"); @@ -222,79 +223,89 @@ impl<F: ServiceFactory> TestNet<F> where net } - fn insert_nodes(&mut self, temp: &TempDir, full: usize, light: usize, authorities: Vec<String>) { - let mut nodes = self.nodes; - let base_port = self.base_port; - let spec = &self.chain_spec; + fn insert_nodes( + &mut self, + temp: &TempDir, + full: impl Iterator<Item = impl FnOnce(Configuration<(), G>) -> Result<(F, U), Error>>, + light: impl Iterator<Item = impl FnOnce(Configuration<(), G>) 
-> Result<L, Error>>, + authorities: impl Iterator<Item = (String, impl FnOnce(Configuration<(), G>) -> Result<(F, U), Error>)> + ) { let executor = self.runtime.executor(); - self.authority_nodes.extend(authorities.iter().enumerate().map(|(index, key)| { - let node_config = node_config::<F>( - index, - &spec, + + for (key, authority) in authorities { + let node_config = node_config( + self.nodes, + &self.chain_spec, Roles::AUTHORITY, - Some(key.clone()), - base_port, + Some(key), + self.base_port, &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_full(node_config).expect("Error creating test node service")); + let (service, user_data) = authority(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - ((index + nodes), service, addr) - })); - nodes += authorities.len(); + self.authority_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } - self.full_nodes.extend((nodes..nodes + full).map(|index| { - let node_config = node_config::<F>(index, &spec, Roles::FULL, None, base_port, &temp); + for full in full { + let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_full(node_config).expect("Error creating test node service")); + let (service, user_data) = full(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - (index, service, addr) - })); - nodes += full; + self.full_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } - self.light_nodes.extend((nodes..nodes + light).map(|index| { - let node_config = node_config::<F>(index, &spec, Roles::LIGHT, None, base_port, &temp); + for light in light { + let node_config = node_config(self.nodes, &self.chain_spec, Roles::LIGHT, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_light(node_config).expect("Error creating test node service")); + let service = SyncService::from(light(node_config).expect("Error creating test node service")); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - (index, service, addr) - })); - nodes += light; - - self.nodes = nodes; + self.light_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } } } -pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) where - F::FullService: Future<Item=(), Error=service::Error>, - F::LightService: Future<Item=(), Error=service::Error>, +pub fn connectivity<G, Fb, F, Lb, L>(spec: ChainSpec<G>, full_builder: Fb, light_builder: Lb) where + Fb: Fn(Configuration<(), G>) -> Result<F, Error>, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result<L, Error>, + L: AbstractService, { const NUM_FULL_NODES: usize = 5; const NUM_LIGHT_NODES: usize = 5; { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); let runtime = { - let mut network = TestNet::<F>::new( + let mut 
network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30400, ); info!("Checking star topology"); - let first_address = network.full_nodes[0].2.clone(); - for (_, service, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + let first_address = network.full_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { + service.get().network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } network.run_until_all_full( |_index, service| service.get().network().num_connected() == NUM_FULL_NODES - 1 @@ -311,27 +322,31 @@ pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) where { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); { - let mut network = TestNet::<F>::new( + let mut network = TestNet::new( &temp, spec, - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
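+				// (Mapping over a zero-length range keeps the closure literal in the
+				// expression, so the compiler can still name its concrete type, whereas
+				// `iter::empty()` would give type inference nothing to work with.)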
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30400, ); info!("Checking linked topology"); - let mut address = network.full_nodes[0].2.clone(); + let mut address = network.full_nodes[0].3.clone(); let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); for i in 0..max_nodes { if i != 0 { - if let Some((_, service, node_id)) = network.full_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()).expect("Error adding reserved peer"); + if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { + service.get().network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); address = node_id.clone(); } } if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); address = node_id.clone(); } } @@ -345,42 +360,53 @@ pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) where } } -pub fn sync<F, B, E>(spec: FactoryChainSpec<F>, mut block_factory: B, mut extrinsic_factory: E) where - F: ServiceFactory, - F::FullService: Future<Item=(), Error=service::Error>, - F::LightService: Future<Item=(), Error=service::Error>, - B: FnMut(&SyncService<F::FullService>) -> BlockImportParams<F::Block>, - E: FnMut(&SyncService<F::FullService>) -> FactoryExtrinsic<F>, +pub fn sync<G, Fb, F, Lb, L, B, E, U>( + spec: ChainSpec<G>, + full_builder: Fb, + light_builder: Lb, + mut block_factory: B, + mut extrinsic_factory: E +) where + Fb: Fn(Configuration<(), G>) -> Result<(F, U), Error>, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result<L, Error>, + L: AbstractService, + B: FnMut(&F, &U) -> BlockImportParams<F::Block>, + E: FnMut(&F, &U) -> <F::Block as BlockT>::Extrinsic, + U: Clone + Send + 'static, { const NUM_FULL_NODES: usize = 10; // FIXME: BABE light client support is currently not working. const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 512; let temp = TempDir::new("substrate-sync-test").expect("Error creating test dir"); - let mut network = TestNet::<F>::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), 30500, ); info!("Checking block sync"); let first_address = { let first_service = &network.full_nodes[0].1; + let first_user_data = &network.full_nodes[0].2; let mut client = first_service.get().client(); for i in 0 .. 
NUM_BLOCKS { if i % 128 == 0 { info!("Generating #{}", i); } - let import_data = block_factory(&first_service); + let import_data = block_factory(&first_service.get(), first_user_data); client.import_block(import_data, HashMap::new()).expect("Error importing test block"); } - network.full_nodes[0].2.clone() + network.full_nodes[0].3.clone() }; info!("Running sync"); - for (_, service, _) in network.full_nodes.iter().skip(1) { + for (_, service, _, _) in network.full_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { @@ -395,8 +421,9 @@ pub fn sync<F, B, E>(spec: FactoryChainSpec<F>, mut block_factory: B, mut extrin info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); + let first_user_data = &network.full_nodes[0].2; let best_block = BlockId::number(first_service.get().client().info().chain.best_number); - let extrinsic = extrinsic_factory(&first_service); + let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); first_service.get().transaction_pool().submit_one(&best_block, extrinsic).unwrap(); network.run_until_all_full( |_index, service| service.get().transaction_pool().ready().count() == 1, @@ -404,33 +431,39 @@ pub fn sync<F, B, E>(spec: FactoryChainSpec<F>, mut block_factory: B, mut extrin ); } -pub fn consensus<F>(spec: FactoryChainSpec<F>, authorities: Vec<String>) where - F: ServiceFactory, - F::FullService: Future<Item=(), Error=service::Error>, - F::LightService: Future<Item=(), Error=service::Error>, +pub fn consensus<G, Fb, F, Lb, L>( + spec: ChainSpec<G>, + full_builder: Fb, + light_builder: Lb, + authorities: impl IntoIterator<Item = String> +) where + Fb: Fn(Configuration<(), G>) -> Result<F, Error>, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result<L, Error>, + L: AbstractService, { const NUM_FULL_NODES: usize = 10; const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds let temp = TempDir::new("substrate-conensus-test").expect("Error creating test dir"); - let mut network = TestNet::<F>::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES / 2, - NUM_LIGHT_NODES / 2, - authorities, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30600, ); info!("Checking consensus"); - let first_address = network.authority_nodes[0].2.clone(); - for (_, service, _) in network.full_nodes.iter() { + let first_address = network.authority_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } - for (_, service, _) in network.authority_nodes.iter().skip(1) { + for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } network.run_until_all_full( @@ -441,8 +474,15 @@ pub fn consensus<F>(spec: FactoryChainSpec<F>, authorities: Vec<String>) where ); info!("Adding more peers"); - network.insert_nodes(&temp, NUM_FULL_NODES / 2, 
NUM_LIGHT_NODES / 2, vec![]); - for (_, service, _) in network.full_nodes.iter() { + network.insert_nodes( + &temp, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + ); + for (_, service, _, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { diff --git a/node-template/src/cli.rs b/node-template/src/cli.rs index 4d672491c18e658b69fa9114261f2ad4a1b38ec1..f6edbb2cc3ee33fd9a956c6ea86152f5a23e7a15 100644 --- a/node-template/src/cli.rs +++ b/node-template/src/cli.rs @@ -4,9 +4,8 @@ use std::cell::RefCell; use tokio::runtime::Runtime; pub use substrate_cli::{VersionInfo, IntoExit, error}; use substrate_cli::{informant, parse_and_prepare, ParseAndPrepare, NoCustom}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; +use substrate_service::{AbstractService, Roles as ServiceRoles}; use crate::chain_spec; -use std::ops::Deref; use log::info; /// Parse command line arguments into service configuration. @@ -16,7 +15,8 @@ pub fn run<I, T, E>(args: I, exit: E, version: VersionInfo) -> error::Result<()> E: IntoExit, { match parse_and_prepare::<NoCustom, NoCustom, _>(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, + |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by {}, 2017, 2018", version.author); @@ -27,21 +27,24 @@ pub fn run<I, T, E>(args: I, exit: E, version: VersionInfo) -> error::Result<()> match config.roles { ServiceRoles::LIGHT => run_until_exit( runtime, - service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?, + service::new_light(config).map_err(|e| format!("{:?}", e))?, exit ), _ => run_until_exit( runtime, - service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?, + service::new_full(config).map_err(|e| format!("{:?}", e))?, exit ), }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::<service::Factory, _, _>(load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::<service::Factory, _, _>(load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run::<service::Factory, _>(load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(_) => Ok(()) }?; @@ -55,14 +58,13 @@ fn load_spec(id: &str) -> Result<Option<chain_spec::ChainSpec>, String> { }) } -fn run_until_exit<T, C, E>( +fn run_until_exit<T, E>( mut runtime: Runtime, service: T, e: E, -) -> error::Result<()> where - T: Deref<Target=substrate_service::Service<C>>, - T: 
Future<Item = (), Error = substrate_service::error::Error> + Send + 'static, - C: substrate_service::Components, +) -> error::Result<()> +where + T: AbstractService, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); @@ -99,7 +101,8 @@ impl IntoExit for Exit { let exit_send_cell = RefCell::new(Some(exit_send)); ctrlc::set_handler(move || { - if let Some(exit_send) = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take() { + let exit_send = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take(); + if let Some(exit_send) = exit_send { exit_send.send(()).expect("Error sending exit notification"); } }).expect("Error setting Ctrl-C handler"); diff --git a/node-template/src/main.rs b/node-template/src/main.rs index 18e9638833fd22230ae292df41d9268acf6054b4..024efcc7db5413eef320efa722ef3c02c5f9b42a 100644 --- a/node-template/src/main.rs +++ b/node-template/src/main.rs @@ -4,6 +4,7 @@ #![warn(unused_extern_crates)] mod chain_spec; +#[macro_use] mod service; mod cli; diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 2baa0c76313734020cafef3bb91adc1a54d3dce8..07c0aa26cff3136ffc5ee457fef1a42980d15998 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -1,24 +1,17 @@ -#![warn(unused_extern_crates)] - //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. use std::sync::Arc; use std::time::Duration; -use substrate_client::{self as client, LongestChain}; -use babe::{import_queue, start_babe, BabeImportQueue, Config}; +use substrate_client::LongestChain; +use babe::{import_queue, start_babe, Config}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use futures::prelude::*; use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi, WASM_BINARY}; -use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, -}; +use substrate_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; use substrate_executor::native_executor_instance; -use substrate_service::{ServiceFactory, construct_service_factory, TelemetryOnConnect}; pub use substrate_executor::NativeExecutor; // Our native executor instance. @@ -34,235 +27,205 @@ construct_simple_protocol! { pub struct NodeProtocol where Block = Block { } } -type BabeBlockImportForService<F> = babe::BabeBlockImport< - FullBackend<F>, - FullExecutor<F>, - <F as ServiceFactory>::Block, - grandpa::BlockImportForService<F>, - <F as ServiceFactory>::RuntimeApi, - client::Client< - FullBackend<F>, - FullExecutor<F>, - <F as ServiceFactory>::Block, - <F as ServiceFactory>::RuntimeApi - >, ->; - -pub struct NodeConfig<F: ServiceFactory> { - /// GRANDPA and BABE connection to import block. - // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub import_setup: Option<( - BabeBlockImportForService<F>, - grandpa::LinkHalfForService<F>, - babe::BabeLink, - )>, - /// Tasks that were created by previous setup steps and should be spawned. 
- pub tasks_to_spawn: Option<Vec<Box<dyn Future<Item = (), Error = ()> + Send>>>, - inherent_data_providers: InherentDataProviders, -} - -impl<F> Default for NodeConfig<F> where F: ServiceFactory { - fn default() -> NodeConfig<F> { - NodeConfig { - import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - tasks_to_spawn: None, - } - } -} - -construct_service_factory! { - struct Factory { - Block = Block, - RuntimeApi = RuntimeApi, - NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, - RuntimeDispatch = Executor, - FullTransactionPoolApi = - transaction_pool::ChainApi< - client::Client<FullBackend<Self>, FullExecutor<Self>, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - LightTransactionPoolApi = - transaction_pool::ChainApi< - client::Client<LightBackend<Self>, LightExecutor<Self>, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - Genesis = GenesisConfig, - Configuration = NodeConfig<Self>, - FullService = FullComponents<Self> { - |config: FactoryFullConfiguration<Self>| FullComponents::<Factory>::new(config) - }, - AuthoritySetup = { - |mut service: Self::FullService| { - let (block_import, link_half, babe_link) = - service.config_mut().custom.import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); - } - } - - if service.config().roles.is_authority() { - let proposer = basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: service.config() - .custom.inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - - // the BABE authoring task is considered infallible, i.e. if it - // fails we take down the service with it. 
- service.spawn_essential_task(select); - } - - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; - - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), - inherent_data_providers: - service.config().custom.inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &service.config().custom.inherent_data_providers, - service.network(), - )?; - }, - } - - Ok(service) - } - }, - LightService = LightComponents<Self> - { |config| <LightComponents<Factory>>::new(config) }, - FullImportQueue = BabeImportQueue<Self::Block> { - | - config: &mut FactoryFullConfiguration<Self>, - client: Arc<FullClient<Self>>, - select_chain: Self::SelectChain, - transaction_pool: Option<Arc<TransactionPool<Self::FullTransactionPoolApi>>>, - | { +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = inherents::InherentDataProviders::new(); + let mut tasks_to_spawn = None; + + let builder = substrate_service::ServiceBuilder::new_full::< + node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor + >($config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(substrate_client::LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) + )? 
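+		// The import-queue closure below also stashes the BABE/GRANDPA block-import
+		// handles in `import_setup` and the pruning task in `tasks_to_spawn`;
+		// `new_full` takes them back out of these captured locals once the builder
+		// has produced the service.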
+ .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + let select_chain = select_chain.take() + .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; let (block_import, link_half) = - grandpa::block_import::<_, _, _, RuntimeApi, FullClient<Self>, _>( + grandpa::block_import::<_, _, _, node_template_runtime::RuntimeApi, _, _>( client.clone(), client.clone(), select_chain )?; let justification_import = block_import.clone(); - let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue( - Config::get_or_compute(&*client)?, + + let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + babe::Config::get_or_compute(&*client)?, block_import, Some(Box::new(justification_import)), None, client.clone(), client, - config.custom.inherent_data_providers.clone(), - transaction_pool, + inherent_data_providers.clone(), + Some(transaction_pool) )?; - config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + + import_setup = Some((babe_block_import.clone(), link_half, babe_link)); + tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + Ok(import_queue) - } - }, - LightImportQueue = BabeImportQueue<Self::Block> - { |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| { - #[allow(deprecated)] - let fetch_checker = client.backend().blockchain().fetcher() - .upgrade() - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient<Self>>( - client.clone(), Arc::new(fetch_checker), client.clone() - )?; + })?; + + (builder, import_setup, inherent_data_providers, tasks_to_spawn) + }} +} - let finality_proof_import = block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); +/// Builds a new service for a full client. +pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisConfig>) + -> Result<impl AbstractService, ServiceError> +{ + + let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config); + + let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .build()?; + + let (block_import, link_half, babe_link) = + import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } + } - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) 
= import_queue::<_, _, _, _, _, _, TransactionPool<Self::FullTransactionPoolApi>>( - Config::get_or_compute(&*client)?, - block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - None, - )?; + if service.config().roles.is_authority() { + let proposer = basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(ServiceError::SelectChainRequired)?; + + let babe_config = babe::BabeParams { + config: Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring: service.config().force_authoring, + time_source: babe_link, + }; + + let babe = start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + + // the BABE authoring task is considered infallible, i.e. if it + // fails we take down the service with it. + service.spawn_essential_task(select); + } - Ok((import_queue, finality_proof_request_builder)) - }}, - SelectChain = LongestChain<FullBackend<Self>, Self::Block> - { |config: &FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| { - #[allow(deprecated)] - Ok(LongestChain::new(client.backend().clone())) - } + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; }, - FinalityProofProvider = { |client: Arc<FullClient<Self>>| { - Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)) - }}, - RpcExtensions = (), } + + Ok(service) +} + +/// Builds a new service for a light client. +pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisConfig>) + -> Result<impl AbstractService, ServiceError> +{ + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::<Block, RuntimeApi, Executor>(config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) + )? 
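+		// In contrast to the full path, the light client wires its import queue up
+		// together with a finality-proof request builder, driving block import
+		// through the backend's fetch checker.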
+ .with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| { + #[allow(deprecated)] + let fetch_checker = client.backend().blockchain().fetcher() + .upgrade() + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( + client.clone(), Arc::new(fetch_checker), client.clone() + )?; + + let finality_proof_import = block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let (import_queue, ..) = import_queue( + Config::get_or_compute(&*client)?, + block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .build() } diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 7b4ebb0c5f32d36dcd1256f07e3f79ff1a93b8ef..1f35f7b86b41c4a96842f91891a0c5dab106d112 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -11,7 +11,7 @@ log = "0.4" tokio = "0.1.7" futures = "0.1" exit-future = "0.1" -jsonrpc-core = "13.0.0" +jsonrpc-core = "13.1.0" cli = { package = "substrate-cli", path = "../../core/cli" } codec = { package = "parity-scale-codec", version = "1.0.0" } sr-io = { path = "../../core/sr-io" } diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index fca4c78b892ee5344b4292a6359a22d7173f6b51..f83958eef482cd49f0351a93371d017662ff0a36 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -350,8 +350,8 @@ pub fn local_testnet_config() -> ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; + use crate::service::{new_full, new_light}; use service_test; - use crate::service::Factory; fn local_testnet_genesis_instant_single() -> GenesisConfig { testnet_genesis( @@ -395,6 +395,10 @@ pub(crate) mod tests { #[test] #[ignore] fn test_connectivity() { - service_test::connectivity::<Factory>(integration_test_config_with_two_authorities()); + service_test::connectivity( + integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), + ); } } diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 4e3cfa7f0109217f81512f29fc3905e5cb1e97a2..b7679be1764e0fed1a105b1cb2686d2f3d589194 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -21,14 +21,14 @@ pub use cli::error; pub mod chain_spec; +#[macro_use] mod service; mod factory_impl; use tokio::prelude::Future; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; pub use cli::{VersionInfo, IntoExit, NoCustom, SharedParams, ExecutionStrategyParam}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; -use std::ops::Deref; +use substrate_service::{AbstractService, Roles as ServiceRoles}; use log::info; use structopt::{StructOpt, clap::App}; use cli::{AugmentClap, GetLogFilter, parse_and_prepare, ParseAndPrepare}; @@ -159,7 +159,8 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul E: IntoExit, { match parse_and_prepare::<CustomSubcommands, NoCustom, _>(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, 
|exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, + |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by Parity Technologies, 2017-2019"); @@ -171,23 +172,26 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul match config.roles { ServiceRoles::LIGHT => run_until_exit( runtime, - service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?, + service::new_light(config).map_err(|e| format!("{:?}", e))?, exit ), _ => run_until_exit( runtime, - service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?, + service::new_full(config).map_err(|e| format!("{:?}", e))?, exit ), }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::<service::Factory, _, _>(load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::<service::Factory, _, _>(load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run::<service::Factory, _>(load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(CustomSubcommands::Factory(cli_args)) => { - let mut config = cli::create_config_with_db_path( + let mut config = cli::create_config_with_db_path::<(), _, _>( load_spec, &cli_args.shared_params, &version, @@ -209,9 +213,13 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul cli_args.num, cli_args.rounds, ); - transaction_factory::factory::<service::Factory, FactoryState<_>>( + + let service_builder = new_full_start!(config).0; + transaction_factory::factory::<FactoryState<_>, _, _, _, _, _>( factory_state, - config, + service_builder.client(), + service_builder.select_chain() + .expect("The select_chain is always initialized by new_full_start!; QED") ).map_err(|e| format!("Error in transaction factory: {}", e))?; Ok(()) @@ -219,14 +227,13 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul } } -fn run_until_exit<T, C, E>( +fn run_until_exit<T, E>( mut runtime: Runtime, service: T, e: E, -) -> error::Result<()> where - T: Deref<Target=substrate_service::Service<C>>, - T: Future<Item = (), Error = substrate_service::error::Error> + Send + 'static, - C: substrate_service::Components, +) -> error::Result<()> +where + T: AbstractService, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 7022d12d69a0f89ed61183db0aad15cd4a10d45b..c47e764c4294e3b7026d35653ec869e6fc2ff69e 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -16,271 +16,240 @@ #![warn(unused_extern_crates)] -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +//! Service implementation. Specialized wrapper over substrate service. 
use std::sync::Arc; -use std::time::Duration; -use babe::{import_queue, start_babe, BabeImportQueue, Config}; +use babe::{import_queue, Config}; use client::{self, LongestChain}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_executor; -use futures::prelude::*; use node_primitives::Block; use node_runtime::{GenesisConfig, RuntimeApi}; use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, + AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; -use substrate_service::construct_service_factory; -use substrate_service::TelemetryOnConnect; construct_simple_protocol! { /// Demo protocol attachment for substrate. pub struct NodeProtocol where Block = Block { } } -type BabeBlockImportForService<F> = babe::BabeBlockImport< - FullBackend<F>, - FullExecutor<F>, - <F as crate::ServiceFactory>::Block, - grandpa::BlockImportForService<F>, - <F as crate::ServiceFactory>::RuntimeApi, - client::Client< - FullBackend<F>, - FullExecutor<F>, - <F as crate::ServiceFactory>::Block, - <F as crate::ServiceFactory>::RuntimeApi - >, ->; - -/// Node specific configuration -pub struct NodeConfig<F: substrate_service::ServiceFactory> { - /// GRANDPA and BABE connection to import block. - // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub import_setup: Option<( - BabeBlockImportForService<F>, - grandpa::LinkHalfForService<F>, - babe::BabeLink, - )>, - /// Tasks that were created by previous setup steps and should be spawned. - pub tasks_to_spawn: Option<Vec<Box<dyn Future<Item = (), Error = ()> + Send>>>, - inherent_data_providers: InherentDataProviders, -} - -impl<F> Default for NodeConfig<F> where F: substrate_service::ServiceFactory { - fn default() -> NodeConfig<F> { - NodeConfig { - import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - tasks_to_spawn: None, - } - } -} - -construct_service_factory! { - struct Factory { - Block = Block, - RuntimeApi = RuntimeApi, - NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, - RuntimeDispatch = node_executor::Executor, - FullTransactionPoolApi = - transaction_pool::ChainApi< - client::Client<FullBackend<Self>, FullExecutor<Self>, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - LightTransactionPoolApi = - transaction_pool::ChainApi< - client::Client<LightBackend<Self>, LightExecutor<Self>, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - Genesis = GenesisConfig, - Configuration = NodeConfig<Self>, - FullService = FullComponents<Self> { - |config: FactoryFullConfiguration<Self>| FullComponents::<Factory>::new(config) - }, - AuthoritySetup = { - |mut service: Self::FullService| { - let (block_import, link_half, babe_link) = - service.config_mut().custom.import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); - } - } - - if service.config().roles.is_authority() { - let proposer = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: service.config() - .custom.inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - - // the BABE authoring task is considered infallible, i.e. if it - // fails we take down the service with it. - service.spawn_essential_task(select); - } - - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; - - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), - inherent_data_providers: - service.config().custom.inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &service.config().custom.inherent_data_providers, - service.network(), - )?; - }, - } - - Ok(service) - } - }, - LightService = LightComponents<Self> - { |config| <LightComponents<Factory>>::new(config) }, - FullImportQueue = BabeImportQueue<Self::Block> - { - | - config: &mut FactoryFullConfiguration<Self>, - client: Arc<FullClient<Self>>, - select_chain: Self::SelectChain, - transaction_pool: Option<Arc<TransactionPool<Self::FullTransactionPoolApi>>>, - | - { +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = inherents::InherentDataProviders::new(); + let mut tasks_to_spawn = None; + + let builder = substrate_service::ServiceBuilder::new_full::< + node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor + >($config)? 
+ .with_select_chain(|_config, client| {
+ #[allow(deprecated)]
+ Ok(client::LongestChain::new(client.backend().clone()))
+ })?
+ .with_transaction_pool(|config, client|
+ Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client)))
+ )?
+ .with_import_queue(|_config, client, mut select_chain, transaction_pool| {
+ let select_chain = select_chain.take()
+ .ok_or_else(|| substrate_service::Error::SelectChainRequired)?;
 let (block_import, link_half) =
- grandpa::block_import::<_, _, _, RuntimeApi, FullClient<Self>, _>(
+ grandpa::block_import::<_, _, _, node_runtime::RuntimeApi, _, _>(
 client.clone(), client.clone(), select_chain
 )?;
 let justification_import = block_import.clone();
- let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue(
- Config::get_or_compute(&*client)?,
+ let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue(
+ babe::Config::get_or_compute(&*client)?,
 block_import,
 Some(Box::new(justification_import)),
 None,
 client.clone(),
 client,
- config.custom.inherent_data_providers.clone(),
- transaction_pool,
+ inherent_data_providers.clone(),
+ Some(transaction_pool)
 )?;
- config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link));
- config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]);
+ import_setup = Some((babe_block_import.clone(), link_half, babe_link));
+ tasks_to_spawn = Some(vec![Box::new(pruning_task)]);
 Ok(import_queue)
- }},
- LightImportQueue = BabeImportQueue<Self::Block>
- { |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| {
- #[allow(deprecated)]
- let fetch_checker = client.backend().blockchain().fetcher()
- .upgrade()
- .map(|fetcher| fetcher.checker().clone())
- .ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
- let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient<Self>>(
- client.clone(), Arc::new(fetch_checker), client.clone()
- )?;
+ })?
+ .with_rpc_extensions(|client, pool| {
+ use node_rpc::accounts::{Accounts, AccountsApi};
+
+ let mut io = jsonrpc_core::IoHandler::<substrate_service::RpcMetadata>::default();
+ io.extend_with(
+ AccountsApi::to_delegate(Accounts::new(client, pool))
+ );
+ io
+ })?;
+
+ (builder, import_setup, inherent_data_providers, tasks_to_spawn)
+ }}
 }

- let finality_proof_import = block_import.clone();
- let finality_proof_request_builder =
- finality_proof_import.create_finality_proof_request_builder();
+/// Creates a full service from the configuration.
+///
+/// We need to use a macro because the test suite doesn't work with an opaque service. It expects
+/// concrete types instead.
+macro_rules! new_full {
+ ($config:expr) => {{
+ use futures::Future;
+
+ let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!($config);
+
+ let service = builder.with_network_protocol(|_| Ok(crate::service::NodeProtocol::new()))?
+ .with_finality_proof_provider(|client|
+ Ok(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)
+ )?
+ .build()?;
+
+ let (block_import, link_half, babe_link) = import_setup.take()
+ .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } + } - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) = import_queue::<_, _, _, _, _, _, TransactionPool<Self::FullTransactionPoolApi>>( - Config::get_or_compute(&*client)?, - block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - None, + if service.config().roles.is_authority() { + let proposer = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(substrate_service::Error::SelectChainRequired)?; + + let babe_config = babe::BabeParams { + config: babe::Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring: service.config().force_authoring, + time_source: babe_link, + }; + + let babe = babe::start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + service.spawn_task(Box::new(select)); + } + + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + }; + service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?)); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), )?; + }, + } - Ok((import_queue, finality_proof_request_builder)) - }}, - SelectChain = LongestChain<FullBackend<Self>, Self::Block> - { |config: &FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| { - #[allow(deprecated)] - Ok(LongestChain::new(client.backend().clone())) - } - }, - FinalityProofProvider = { |client: Arc<FullClient<Self>>| { - Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)) - }}, + Ok((service, inherent_data_providers)) + }} +} - RpcExtensions = jsonrpc_core::IoHandler<substrate_rpc::Metadata> - { |client, pool| { +/// Builds a new service for a full client. +pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisConfig>) +-> Result<impl AbstractService, ServiceError> { + new_full!(config).map(|(service, _)| service) +} + +/// Builds a new service for a light client. 
+pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisConfig>) +-> Result<impl AbstractService, ServiceError> { + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::<Block, RuntimeApi, node_executor::Executor>(config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| { + #[allow(deprecated)] + let fetch_checker = client.backend().blockchain().fetcher() + .upgrade() + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( + client.clone(), Arc::new(fetch_checker), client.clone() + )?; + + let finality_proof_import = block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let (import_queue, ..) = import_queue( + Config::get_or_compute(&*client)?, + block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .with_rpc_extensions(|client, pool| { use node_rpc::accounts::{Accounts, AccountsApi}; let mut io = jsonrpc_core::IoHandler::default(); @@ -288,11 +257,10 @@ construct_service_factory! { AccountsApi::to_delegate(Accounts::new(client, pool)) ); io - }}, - } + })? 
+ .build() } - #[cfg(test)] mod tests { use std::sync::Arc; @@ -312,9 +280,8 @@ mod tests { use timestamp; use finality_tracker; use keyring::AccountKeyring; - use substrate_service::ServiceFactory; - use service_test::SyncService; - use crate::service::Factory; + use substrate_service::AbstractService; + use crate::service::{new_full, new_light}; #[cfg(feature = "rhd")] fn test_sync() { @@ -369,8 +336,10 @@ mod tests { let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap(); OpaqueExtrinsic(v) }; - service_test::sync::<Factory, _, _>( + service_test::sync( chain_spec::integration_test_config(), + |config| new_full(config), + |config| new_light(config), block_factory, extrinsic_factory, ); @@ -387,130 +356,127 @@ mod tests { let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); + // For the block factory let mut slot_num = 1u64; - let block_factory = |service: &SyncService<<Factory as ServiceFactory>::FullService>| { - let service = service.get(); - let mut inherent_data = service - .config() - .custom - .inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); - inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); - - let parent_id = BlockId::number(service.client().info().chain.best_number); - let parent_header = service.client().header(&parent_id).unwrap().unwrap(); - let mut proposer_factory = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let mut digest = Digest::<H256>::default(); - - // even though there's only one authority some slots might be empty, - // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { - inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); - if let Some(babe_pre_digest) = babe::test_helpers::claim_slot( - slot_num, - &parent_header, - &*service.client(), - (278, 1000), - &keystore, - ) { - break babe_pre_digest; - } - - slot_num += 1; - }; - - digest.push(<DigestItem as CompatibleDigestItem>::babe_pre_digest(babe_pre_digest)); - - let mut proposer = proposer_factory.init(&parent_header).unwrap(); - let new_block = futures03::executor::block_on(proposer.propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - )).expect("Error making test block"); - - let (new_header, new_body) = new_block.deconstruct(); - let pre_hash = new_header.hash(); - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let to_sign = pre_hash.encode(); - let signature = alice.sign(&to_sign[..]); - let item = <DigestItem as CompatibleDigestItem>::babe_seal( - signature.into(), - ); - slot_num += 1; - - BlockImportParams { - origin: BlockOrigin::File, - header: new_header, - justification: None, - post_digests: vec![item], - body: Some(new_body), - finalized: true, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } - }; + // For the extrinsics factory let bob = Arc::new(AccountKeyring::Bob.pair()); let charlie = Arc::new(AccountKeyring::Charlie.pair()); - let mut index = 0; - let extrinsic_factory = |service: &SyncService<<Factory as ServiceFactory>::FullService>| { - let amount = 5 * CENTS; - let to = AddressPublic::from_raw(bob.public().0); - let from = AddressPublic::from_raw(charlie.public().0); - let genesis_hash = service.get().client().block_hash(0).unwrap().unwrap(); - let best_block_id = BlockId::number(service.get().client().info().chain.best_number); - let version = service.get().client().runtime_version_at(&best_block_id).unwrap().spec_version; - let signer = charlie.clone(); - - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let check_version = system::CheckVersion::new(); - let check_genesis = system::CheckGenesis::new(); - let check_era = system::CheckEra::from(Era::Immortal); - let check_nonce = system::CheckNonce::from(index); - let check_weight = system::CheckWeight::new(); - let take_fees = balances::TakeFees::from(0); - let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees); - - let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash); - let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 { - signer.sign(&blake2_256(payload)[..]) - } else { - signer.sign(payload) - }); - let xt = UncheckedExtrinsic::new_signed( - raw_payload.0, - from.into(), - signature.into(), - extra, - ).encode(); - let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap(); - index += 1; - OpaqueExtrinsic(v) - }; - - service_test::sync::<Factory, _, _>( + service_test::sync( chain_spec, - block_factory, - extrinsic_factory, + |config| new_full!(config), + |config| new_light(config), + |service, inherent_data_providers| { + let mut inherent_data = inherent_data_providers + .create_inherent_data() + .expect("Creates inherent data."); + inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); + + let parent_id = BlockId::number(service.client().info().chain.best_number); + let parent_header = service.client().header(&parent_id).unwrap().unwrap(); + let mut proposer_factory = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let mut digest = Digest::<H256>::default(); + + // even though there's only one authority some slots might be empty, + // so we must keep trying the next slots until we can claim one. 
+ let babe_pre_digest = loop { + inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); + if let Some(babe_pre_digest) = babe::test_helpers::claim_slot( + slot_num, + &parent_header, + &*service.client(), + (278, 1000), + &keystore, + ) { + break babe_pre_digest; + } + + slot_num += 1; + }; + + digest.push(<DigestItem as CompatibleDigestItem>::babe_pre_digest(babe_pre_digest)); + + let mut proposer = proposer_factory.init(&parent_header).unwrap(); + let new_block = futures03::executor::block_on(proposer.propose( + inherent_data, + digest, + std::time::Duration::from_secs(1), + )).expect("Error making test block"); + + let (new_header, new_body) = new_block.deconstruct(); + let pre_hash = new_header.hash(); + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let to_sign = pre_hash.encode(); + let signature = alice.sign(&to_sign[..]); + let item = <DigestItem as CompatibleDigestItem>::babe_seal( + signature.into(), + ); + slot_num += 1; + + BlockImportParams { + origin: BlockOrigin::File, + header: new_header, + justification: None, + post_digests: vec![item], + body: Some(new_body), + finalized: true, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + } + }, + |service, _| { + let amount = 5 * CENTS; + let to = AddressPublic::from_raw(bob.public().0); + let from = AddressPublic::from_raw(charlie.public().0); + let genesis_hash = service.client().block_hash(0).unwrap().unwrap(); + let best_block_id = BlockId::number(service.client().info().chain.best_number); + let version = service.client().runtime_version_at(&best_block_id).unwrap().spec_version; + let signer = charlie.clone(); + + let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); + + let check_version = system::CheckVersion::new(); + let check_genesis = system::CheckGenesis::new(); + let check_era = system::CheckEra::from(Era::Immortal); + let check_nonce = system::CheckNonce::from(index); + let check_weight = system::CheckWeight::new(); + let take_fees = balances::TakeFees::from(0); + let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees); + + let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash); + let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 { + signer.sign(&blake2_256(payload)[..]) + } else { + signer.sign(payload) + }); + let xt = UncheckedExtrinsic::new_signed( + raw_payload.0, + from.into(), + signature.into(), + extra, + ).encode(); + let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap(); + + index += 1; + OpaqueExtrinsic(v) + }, ); } #[test] #[ignore] fn test_consensus() { - use super::Factory; - - service_test::consensus::<Factory>( + service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), vec![ "//Alice".into(), "//Bob".into(), diff --git a/node/rpc-client/Cargo.toml b/node/rpc-client/Cargo.toml index bc492bc00339495eb5d7816dd6b8dda804a3b1a6..b98df224dfcf11795031cd5bd0325f22aa2e8146 100644 --- a/node/rpc-client/Cargo.toml +++ b/node/rpc-client/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" env_logger = "0.6" futures = "0.1.26" hyper = "0.12" -jsonrpc-core-client = { version = "13.0.0", features = ["http", "ws"] } +jsonrpc-core-client = { version = "13.1.0", features = ["http", "ws"] } log = "0.4" node-primitives = { path = "../primitives" } substrate-rpc = { path = "../../core/rpc", version = "2.0.0" } diff --git 
a/node/rpc/Cargo.toml b/node/rpc/Cargo.toml index 6042380c8379feaa8f68017ea7d47f931ed92f2e..55371daad6b0f49c449d41baf0867f25711f71b3 100644 --- a/node/rpc/Cargo.toml +++ b/node/rpc/Cargo.toml @@ -6,10 +6,10 @@ edition = "2018" [dependencies] client = { package = "substrate-client", path = "../../core/client" } -jsonrpc-core = "13.0.0" -jsonrpc-core-client = "13.0.0" -jsonrpc-derive = "13.0.0" -jsonrpc-pubsub = "13.0.0" +jsonrpc-core = "13.1.0" +jsonrpc-core-client = "13.1.0" +jsonrpc-derive = "13.1.0" +jsonrpc-pubsub = "13.1.0" keyring = { package = "substrate-keyring", path = "../../core/keyring" } log = "0.4" node-primitives = { path = "../primitives" } diff --git a/test-utils/transaction-factory/src/complex_mode.rs b/test-utils/transaction-factory/src/complex_mode.rs index 85b12248d80ba636fe0a4a859d1ebb7fd51c9182..ed76a66b09083e72ad9f8a19bdec4a5610db14aa 100644 --- a/test-utils/transaction-factory/src/complex_mode.rs +++ b/test-utils/transaction-factory/src/complex_mode.rs @@ -41,29 +41,30 @@ use std::sync::Arc; use log::info; +use client::Client; use client::block_builder::api::BlockBuilder; use client::runtime_api::ConstructRuntimeApi; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, One, Zero}; -use substrate_service::{ - FactoryBlock, FullClient, ServiceFactory, ComponentClient, FullComponents -}; use crate::{RuntimeAdapter, create_block}; -pub fn next<F, RA>( +pub fn next<RA, Backend, Exec, Block, RtApi>( factory_state: &mut RA, - client: &Arc<ComponentClient<FullComponents<F>>>, + client: &Arc<Client<Backend, Exec, Block, RtApi>>, version: u32, genesis_hash: <RA::Block as BlockT>::Hash, prior_block_hash: <RA::Block as BlockT>::Hash, - prior_block_id: BlockId<F::Block>, -) -> Option<<F as ServiceFactory>::Block> + prior_block_id: BlockId<Block>, +) -> Option<Block> where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi<FactoryBlock<F>, FullClient<F>>, - FullClient<F>: ProvideRuntimeApi, - <FullClient<F> as ProvideRuntimeApi>::Api: BlockBuilder<FactoryBlock<F>>, + Block: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + Exec: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone, + Backend: client::backend::Backend<Block, Blake2Hasher> + Send, + Client<Backend, Exec, Block, RtApi>: ProvideRuntimeApi, + <Client<Backend, Exec, Block, RtApi> as ProvideRuntimeApi>::Api: BlockBuilder<Block>, + RtApi: ConstructRuntimeApi<Block, Client<Backend, Exec, Block, RtApi>> + Send + Sync, RA: RuntimeAdapter, { let total = factory_state.start_number() + factory_state.num() * factory_state.rounds(); @@ -102,7 +103,7 @@ where let inherents = client.runtime_api().inherent_extrinsics(&prior_block_id, inherents) .expect("Failed to create inherent extrinsics"); - let block = create_block::<F, RA>(&client, transfer, inherents); + let block = create_block::<RA, _, _, _, _>(&client, transfer, inherents); info!( "Created block {} with hash {}. 
Transferring {} from {} to {}.", factory_state.block_no() + RA::Number::one(), diff --git a/test-utils/transaction-factory/src/lib.rs b/test-utils/transaction-factory/src/lib.rs index 16bb08a2b436d154c4377cdbd4b8febd88424f8f..5d63f906a73cf5a9e372be8828bb59d32a6ea4ba 100644 --- a/test-utils/transaction-factory/src/lib.rs +++ b/test-utils/transaction-factory/src/lib.rs @@ -26,22 +26,19 @@ use std::fmt::Display; use log::info; -use client::block_builder::api::BlockBuilder; -use client::runtime_api::ConstructRuntimeApi; +use client::{Client, block_builder::api::BlockBuilder, runtime_api::ConstructRuntimeApi}; use consensus_common::{ BlockOrigin, BlockImportParams, InherentData, ForkChoiceStrategy, SelectChain }; use consensus_common::block_import::BlockImport; use codec::{Decode, Encode}; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{ Block as BlockT, Header as HeaderT, ProvideRuntimeApi, SimpleArithmetic, One, Zero, }; -use substrate_service::{ - FactoryBlock, FactoryFullConfiguration, FullClient, new_client, - ServiceFactory, ComponentClient, FullComponents}; pub use crate::modes::Mode; pub mod modes; @@ -95,15 +92,19 @@ pub trait RuntimeAdapter { /// Manufactures transactions. The exact amount depends on /// `mode`, `num` and `rounds`. -pub fn factory<F, RA>( +pub fn factory<RA, Backend, Exec, Block, RtApi, Sc>( mut factory_state: RA, - mut config: FactoryFullConfiguration<F>, + client: &Arc<Client<Backend, Exec, Block, RtApi>>, + select_chain: &Sc, ) -> cli::error::Result<()> where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi<FactoryBlock<F>, FullClient<F>>, - FullClient<F>: ProvideRuntimeApi, - <FullClient<F> as ProvideRuntimeApi>::Api: BlockBuilder<FactoryBlock<F>>, + Block: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + Exec: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone, + Backend: client::backend::Backend<Block, Blake2Hasher> + Send, + Client<Backend, Exec, Block, RtApi>: ProvideRuntimeApi, + <Client<Backend, Exec, Block, RtApi> as ProvideRuntimeApi>::Api: BlockBuilder<Block>, + RtApi: ConstructRuntimeApi<Block, Client<Backend, Exec, Block, RtApi>> + Send + Sync, + Sc: SelectChain<Block>, RA: RuntimeAdapter, <<RA as RuntimeAdapter>::Block as BlockT>::Hash: From<primitives::H256>, { @@ -112,20 +113,16 @@ where return Err(cli::error::Error::Input(msg)); } - let client = new_client::<F>(&config)?; - - let select_chain = F::build_select_chain(&mut config, client.clone())?; - - let best_header: Result<<F::Block as BlockT>::Header, cli::error::Error> = + let best_header: Result<<Block as BlockT>::Header, cli::error::Error> = select_chain.best_chain().map_err(|e| format!("{:?}", e).into()); let mut best_hash = best_header?.hash(); - let best_block_id = BlockId::<F::Block>::hash(best_hash); + let best_block_id = BlockId::<Block>::hash(best_hash); let version = client.runtime_version_at(&best_block_id)?.spec_version; let genesis_hash = client.block_hash(Zero::zero())? 
.expect("Genesis block always exists; qed").into(); while let Some(block) = match factory_state.mode() { - Mode::MasterToNToM => complex_mode::next::<F, RA>( + Mode::MasterToNToM => complex_mode::next::<RA, _, _, _, _>( &mut factory_state, &client, version, @@ -133,7 +130,7 @@ where best_hash.into(), best_block_id, ), - _ => simple_modes::next::<F, RA>( + _ => simple_modes::next::<RA, _, _, _, _>( &mut factory_state, &client, version, @@ -143,7 +140,7 @@ where ), } { best_hash = block.header().hash(); - import_block::<F>(&client, block); + import_block(&client, block); info!("Imported block at {}", factory_state.block_no()); } @@ -152,16 +149,18 @@ where } /// Create a baked block from a transfer extrinsic and timestamp inherent. -pub fn create_block<F, RA>( - client: &Arc<ComponentClient<FullComponents<F>>>, +pub fn create_block<RA, Backend, Exec, Block, RtApi>( + client: &Arc<Client<Backend, Exec, Block, RtApi>>, transfer: <RA::Block as BlockT>::Extrinsic, - inherent_extrinsics: Vec<<F::Block as BlockT>::Extrinsic>, -) -> <F as ServiceFactory>::Block + inherent_extrinsics: Vec<<Block as BlockT>::Extrinsic>, +) -> Block where - F: ServiceFactory, - FullClient<F>: ProvideRuntimeApi, - F::RuntimeApi: ConstructRuntimeApi<FactoryBlock<F>, FullClient<F>>, - <FullClient<F> as ProvideRuntimeApi>::Api: BlockBuilder<FactoryBlock<F>>, + Block: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + Exec: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone, + Backend: client::backend::Backend<Block, Blake2Hasher> + Send, + Client<Backend, Exec, Block, RtApi>: ProvideRuntimeApi, + RtApi: ConstructRuntimeApi<Block, Client<Backend, Exec, Block, RtApi>> + Send + Sync, + <Client<Backend, Exec, Block, RtApi> as ProvideRuntimeApi>::Api: BlockBuilder<Block>, RA: RuntimeAdapter, { let mut block = client.new_block(Default::default()).expect("Failed to create new block"); @@ -177,10 +176,13 @@ where block.bake().expect("Failed to bake block") } -fn import_block<F>( - client: &Arc<ComponentClient<FullComponents<F>>>, - block: <F as ServiceFactory>::Block -) -> () where F: ServiceFactory +fn import_block<Backend, Exec, Block, RtApi>( + client: &Arc<Client<Backend, Exec, Block, RtApi>>, + block: Block +) -> () where + Block: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + Exec: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone, + Backend: client::backend::Backend<Block, Blake2Hasher> + Send, { let import = BlockImportParams { origin: BlockOrigin::File, diff --git a/test-utils/transaction-factory/src/simple_modes.rs b/test-utils/transaction-factory/src/simple_modes.rs index ec4f484fa98271c8fec20570dd0b22a0481124f0..bcbb91200657f2a0bd33031f9df021cd911299e5 100644 --- a/test-utils/transaction-factory/src/simple_modes.rs +++ b/test-utils/transaction-factory/src/simple_modes.rs @@ -36,29 +36,30 @@ use std::sync::Arc; use log::info; +use client::Client; use client::block_builder::api::BlockBuilder; use client::runtime_api::ConstructRuntimeApi; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, One}; use sr_primitives::generic::BlockId; -use substrate_service::{ - FactoryBlock, FullClient, ServiceFactory, ComponentClient, FullComponents -}; use crate::{Mode, RuntimeAdapter, create_block}; -pub fn next<F, RA>( +pub fn next<RA, Backend, Exec, Block, RtApi>( factory_state: &mut RA, - client: &Arc<ComponentClient<FullComponents<F>>>, + client: &Arc<Client<Backend, Exec, Block, RtApi>>, version: u32, genesis_hash: <RA::Block as BlockT>::Hash, 
prior_block_hash: <RA::Block as BlockT>::Hash, - prior_block_id: BlockId<F::Block>, -) -> Option<<F as ServiceFactory>::Block> + prior_block_id: BlockId<Block>, +) -> Option<Block> where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi<FactoryBlock<F>, FullClient<F>>, - FullClient<F>: ProvideRuntimeApi, - <FullClient<F> as ProvideRuntimeApi>::Api: BlockBuilder<FactoryBlock<F>>, + Block: BlockT<Hash = <Blake2Hasher as Hasher>::Out>, + Exec: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone, + Backend: client::backend::Backend<Block, Blake2Hasher> + Send, + Client<Backend, Exec, Block, RtApi>: ProvideRuntimeApi, + <Client<Backend, Exec, Block, RtApi> as ProvideRuntimeApi>::Api: BlockBuilder<Block>, + RtApi: ConstructRuntimeApi<Block, Client<Backend, Exec, Block, RtApi>> + Send + Sync, RA: RuntimeAdapter, { if factory_state.block_no() >= factory_state.num() { @@ -93,7 +94,7 @@ where let inherents = client.runtime_api().inherent_extrinsics(&prior_block_id, inherents) .expect("Failed to create inherent extrinsics"); - let block = create_block::<F, RA>(&client, transfer, inherents); + let block = create_block::<RA, _, _, _, _>(&client, transfer, inherents); factory_state.set_block_no(factory_state.block_no() + RA::Number::one());