// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Substrate Primitives: IO
//!
//! This crate contains interfaces for the runtime to communicate with the outside world, ergo `io`.
//! In other contexts, such interfaces are referred to as "**host functions**".
//!
//! Each set of host functions is defined with an instance of the
//! [`sp_runtime_interface::runtime_interface`] macro.
//!
//! Most notably, this crate contains host functions for:
//!
//! - [`hashing`]
//! - [`crypto`]
//! - [`trie`]
//! - [`offchain`]
//! - [`storage`]
//! - [`allocator`]
//! - [`logging`]
//!
//! All of the default host functions provided by this crate, and by default contained in all
//! substrate-based clients, are amalgamated in [`SubstrateHostFunctions`].
//!
//! ## Externalities
//!
//! Host functions go hand in hand with the concept of externalities. Externalities are an
//! environment in which host functions are provided, and thus can be accessed. Some host functions
//! are only accessible in an externality environment that provides them.
//!
//! A typical error for substrate developers is the following:
//!
//! ```should_panic
//! use sp_io::storage::get;
//! # fn main() {
//! let data = get(b"hello world");
//! # }
//! ```
//!
//! This code will panic with the following error:
//!
//! ```no_compile
//! thread 'main' panicked at '`get_version_1` called outside of an Externalities-provided environment.'
//! ```
//!
//! Such error messages should always be interpreted as "host functions being accessed outside of
//! an externalities-provided environment".
//!
//! An externality is any type that implements [`sp_externalities::Externalities`]. A simple example
//! of which is [`TestExternalities`], which is commonly used in tests and is exported from this
//! crate.
//!
//! ```
//! use sp_io::{storage::get, TestExternalities};
//! # fn main() {
//! TestExternalities::default().execute_with(|| {
//! let data = get(b"hello world");
//! });
//! # }
//! ```
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(enable_alloc_error_handler, feature(alloc_error_handler))]
use sp_std::vec::Vec;
#[cfg(feature = "std")]
use tracing;
#[cfg(feature = "std")]
use sp_core::{
crypto::Pair,
hexdisplay::HexDisplay,
offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt},
storage::ChildInfo,
};
#[cfg(feature = "std")]
use sp_keystore::KeystoreExt;
#[cfg(feature = "bandersnatch-experimental")]
use sp_core::bandersnatch;
use sp_core::{
crypto::KeyTypeId,
ecdsa, ed25519,
offchain::{
HttpError, HttpRequestId, HttpRequestStatus, OpaqueNetworkState, StorageKind, Timestamp,
},
sr25519,
storage::StateVersion,
LogLevel, LogLevelFilter, OpaquePeerId, H256,
};
#[cfg(feature = "bls-experimental")]
use sp_core::bls377;
use sp_trie::{LayoutV0, LayoutV1, TrieConfiguration};
use sp_runtime_interface::{
pass_by::{PassBy, PassByCodec},
runtime_interface, Pointer,
};
use codec::{Decode, Encode};
#[cfg(feature = "std")]
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
Message, SECP256K1,
};
use sp_externalities::{Externalities, ExternalitiesExt};
pub use sp_externalities::MultiRemovalResults;
const LOG_TARGET: &str = "runtime::io";
/// Error verifying ECDSA signature
pub enum EcdsaVerifyError {
/// Incorrect value of R or S
BadRS,
/// Incorrect value of V
BadV,
/// Invalid signature
BadSignature,
}
/// The outcome of calling `storage_kill`. Returned value is the number of storage items
/// removed from the backend by the `storage_kill` call.
#[derive(PassByCodec, Encode, Decode)]
pub enum KillStorageResult {
/// All keys to remove were removed, return number of iterations performed during the
/// operation.
AllRemoved(u32),
/// Not all keys to remove were removed, return number of iterations performed during the
/// operation.
SomeRemaining(u32),
}
impl From<MultiRemovalResults> for KillStorageResult {
fn from(r: MultiRemovalResults) -> Self {
// We use `loops` here rather than `backend` because that's the same as the original
// functionality pre-#11490. This won't matter once we switch to the new host function
// since we won't be using the `KillStorageResult` type in the runtime any more.
match r.maybe_cursor {
None => Self::AllRemoved(r.loops),
Some(..) => Self::SomeRemaining(r.loops),
}
}
}

/// Interface for accessing the storage from within the runtime.
#[runtime_interface]
pub trait Storage {
/// Returns the data for `key` in the storage or `None` if the key can not be found.
fn get(&self, key: &[u8]) -> Option<bytes::Bytes> {
self.storage(key).map(|s| bytes::Bytes::from(s.to_vec()))
}
/// Get `key` from storage, placing the value into `value_out` and returning the number of
/// bytes that the entry in storage has beyond the offset, or `None` if the storage entry
/// doesn't exist at all.
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
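///
/// As an illustrative sketch (assuming an externalities environment such as
/// `TestExternalities`), a partial read could look like this:
///
/// ```ignore
/// sp_io::storage::set(b"key", b"hello world");
/// let mut out = [0u8; 5];
/// // The value is 11 bytes long; 5 bytes remain beyond offset 6, so the call returns
/// // `Some(5)` and copies "world" into `out`.
/// let len = sp_io::storage::read(b"key", &mut out, 6);
/// assert_eq!(len, Some(5));
/// assert_eq!(&out, b"world");
/// ```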
fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option<u32> {
self.storage(key).map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
data.len() as u32
})
}

/// Set `key` to `value` in the storage.
fn set(&mut self, key: &[u8], value: &[u8]) {
self.set_storage(key.to_vec(), value.to_vec());
}
/// Clear the storage of the given `key` and its value.
fn clear(&mut self, key: &[u8]) {
self.clear_storage(key)
}
/// Check whether the given `key` exists in storage.
fn exists(&self, key: &[u8]) -> bool {
self.exists_storage(key)
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, prefix: &[u8]) {
let _ = Externalities::clear_prefix(*self, prefix, None, None);
}

/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
///
/// # Limit
///
/// Deletes all keys from the overlay and up to `limit` keys from the backend if
/// it is set to `Some`. No limit is applied when `limit` is set to `None`.
///
/// The limit can be used to partially delete a prefix storage in case it is too large
/// to delete in one go (block).
///
/// Returns [`KillStorageResult`] to inform about the result.
///
/// # Note
///
/// Please note that keys that are residing in the overlay for that prefix when
/// issuing this call are all deleted without counting towards the `limit`. Only keys
/// written during the current block are part of the overlay. Deleting with a `limit`
/// mostly makes sense with an empty overlay for that prefix.
///
/// Calling this function multiple times per block for the same `prefix` does
/// not make much sense because it is not cumulative when called inside the same block.
/// The deletion would always start from `prefix` resulting in the same keys being deleted
/// every time this function is called with the exact same arguments per block. This happens
/// because the keys in the overlay are not taken into account when deleting keys in the
/// backend.
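///
/// A minimal usage sketch (assuming an externalities environment); the prefix and limit are
/// placeholders:
///
/// ```ignore
/// match sp_io::storage::clear_prefix(b"some_prefix", Some(100)) {
///     sp_io::KillStorageResult::AllRemoved(iterations) => {
///         // Everything under the prefix was removed within `iterations` backend iterations.
///     },
///     sp_io::KillStorageResult::SomeRemaining(iterations) => {
///         // The limit was reached; continue the deletion in a later block.
///     },
/// }
/// ```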
#[version(2)]
fn clear_prefix(&mut self, prefix: &[u8], limit: Option<u32>) -> KillStorageResult {
Externalities::clear_prefix(*self, prefix, limit, None).into()
}
/// Partially clear the storage of each key-value pair where the key starts with the given
/// prefix.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete a prefix storage in case it is too large or costly
/// to delete in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given `maybe_prefix` value. Subsequent calls
/// operating on the same prefix should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given prefix, it is important that no further keys
/// under the same prefix are inserted. If so, then they may or may not be deleted by
/// subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay for that prefix when
/// issuing this call are deleted without counting towards the `limit`.
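///
/// A sketch of the intended cursor protocol. The call is shown unqualified because this
/// version is `register_only` and not what `sp_io::storage::clear_prefix` resolves to:
///
/// ```ignore
/// let mut maybe_cursor = None;
/// loop {
///     // Pass `None` on the first call, then the cursor returned by the previous call.
///     let results = clear_prefix(b"some_prefix", Some(100), maybe_cursor);
///     match results.maybe_cursor {
///         None => break, // nothing remains to be deleted
///         Some(cursor) => maybe_cursor = Some(cursor),
///     }
/// }
/// ```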
#[version(3, register_only)]
fn clear_prefix(
&mut self,
maybe_prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>, //< TODO Make work or just Option<Vec<u8>>?
) -> MultiRemovalResults {
Externalities::clear_prefix(
*self,
maybe_prefix,
maybe_limit,
maybe_cursor.as_ref().map(|x| &x[..]),
)
.into()
}
/// Append the encoded `value` to the storage item at `key`.
///
/// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend).
///
/// # Warning
///
/// If the storage item does not support [`EncodeAppend`](codec::EncodeAppend) or
/// something else fails at appending, the storage item will be set to `[value]`.
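///
/// A brief sketch (assuming an externalities environment) of appending SCALE encoded `u32`s
/// to a storage item that decodes as `Vec<u32>`:
///
/// ```ignore
/// use codec::Encode;
///
/// sp_io::storage::append(b"numbers", 1u32.encode());
/// sp_io::storage::append(b"numbers", 2u32.encode());
/// // Appending item by item yields the same bytes as encoding the whole vector at once.
/// assert_eq!(
///     sp_io::storage::get(b"numbers").map(|b| b.to_vec()),
///     Some(vec![1u32, 2u32].encode()),
/// );
/// ```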
fn append(&mut self, key: &[u8], value: Vec<u8>) {
self.storage_append(key.to_vec(), value);
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
fn root(&mut self) -> Vec<u8> {
self.storage_root(StateVersion::V0)
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
#[version(2)]
fn root(&mut self, version: StateVersion) -> Vec<u8> {
self.storage_root(version)
}

/// Always returns `None`. This function exists for compatibility reasons.
fn changes_root(&mut self, _parent_hash: &[u8]) -> Option<Vec<u8>> {
None
}
/// Get the next key in storage after the given one in lexicographic order.
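///
/// A small sketch (assuming an externalities environment) of walking all keys under a prefix
/// with `next_key`:
///
/// ```ignore
/// let prefix = b"prefix".to_vec();
/// let mut current = prefix.clone();
/// while let Some(next) = sp_io::storage::next_key(&current) {
///     if !next.starts_with(&prefix) {
///         break; // left the prefix range
///     }
///     // ... use `next` ...
///     current = next;
/// }
/// ```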
fn next_key(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.next_storage_key(key)
}
/// Start a new nested transaction.
///
/// This allows to either commit or roll back all changes that are made after this call.
/// For every transaction there must be a matching call to either `rollback_transaction`
/// or `commit_transaction`. This is also effective for all values manipulated using the
/// `DefaultChildStorage` API.
///
/// # Warning
///
/// This is a low level API that is potentially dangerous as it can easily result
/// in unbalanced transactions. FRAME users, for example, should use the high level storage
/// abstractions instead.
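///
/// A minimal sketch of balanced usage (assuming an externalities environment):
///
/// ```ignore
/// sp_io::storage::start_transaction();
/// sp_io::storage::set(b"key", b"value");
/// // Discard everything written since `start_transaction`.
/// sp_io::storage::rollback_transaction();
/// assert!(sp_io::storage::get(b"key").is_none());
/// ```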
fn start_transaction(&mut self) {
self.storage_start_transaction();
}
/// Rollback the last transaction started by `start_transaction`.
///
/// Any changes made during that transaction are discarded.
///
/// # Panics
///
/// Will panic if there is no open transaction.
fn rollback_transaction(&mut self) {
self.storage_rollback_transaction()
.expect("No open transaction that can be rolled back.");
}
/// Commit the last transaction started by `start_transaction`.
///
/// Any changes made during that transaction are committed.
///
/// # Panics
///
/// Will panic if there is no open transaction.
fn commit_transaction(&mut self) {
self.storage_commit_transaction()
.expect("No open transaction that can be committed.");
}
}
/// Interface for accessing the child storage for default child trie,
/// from within the runtime.
#[runtime_interface]
pub trait DefaultChildStorage {
/// Get a default child storage value for a given key.
///
/// Parameter `storage_key` is the unprefixed location of the root of the child trie in the
/// parent trie. Result is `None` if the value for `key` in the child storage can not be found.
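///
/// A short sketch (assuming an externalities environment); `b"unique_id"` stands in for the
/// unprefixed child trie location:
///
/// ```ignore
/// sp_io::default_child_storage::set(b"unique_id", b"key", b"value");
/// assert_eq!(
///     sp_io::default_child_storage::get(b"unique_id", b"key"),
///     Some(b"value".to_vec()),
/// );
/// ```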
fn get(&self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|s| s.to_vec())
}
/// Allocation efficient variant of `get`.
///
/// Get `key` from child storage, placing the value into `value_out` and returning the number
/// of bytes that the entry in storage has beyond the offset, or `None` if the storage entry
/// doesn't exist at all.
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
fn read(
&self,
storage_key: &[u8],
key: &[u8],
value_out: &mut [u8],
value_offset: u32,
) -> Option<u32> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
data.len() as u32
})
}

/// Set `key` to `value` in the child storage denoted by `storage_key`.
fn set(&mut self, storage_key: &[u8], key: &[u8], value: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.set_child_storage(&child_info, key.to_vec(), value.to_vec());
}

/// For the default child storage at `storage_key`, clear value at `key`.
fn clear(&mut self, storage_key: &[u8], key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_storage(&child_info, key);
}
/// Clear an entire child storage.
/// If it exists, the child storage for `storage_key`
/// is removed.
fn storage_kill(&mut self, storage_key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
let _ = self.kill_child_storage(&child_info, None, None);
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(2)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> bool {
let child_info = ChildInfo::new_default(storage_key);
let r = self.kill_child_storage(&child_info, limit, None);
r.maybe_cursor.is_none()
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(3)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, limit, None).into()
}

/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(4, register_only)]
fn storage_kill(
&mut self,
storage_key: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>,
) -> MultiRemovalResults {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, maybe_limit, maybe_cursor.as_ref().map(|x| &x[..]))
.into()
}
/// Check whether the given `key` exists in default child defined at `storage_key`.
fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool {
let child_info = ChildInfo::new_default(storage_key);
self.exists_child_storage(&child_info, key)
}

/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
let _ = self.clear_child_prefix(&child_info, prefix, None, None);
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(2)]
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
limit: Option<u32>,
) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_prefix(&child_info, prefix, limit, None).into()
}

/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(3, register_only)]
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>,
) -> MultiRemovalResults {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_prefix(
&child_info,
prefix,
maybe_limit,
maybe_cursor.as_ref().map(|x| &x[..]),
)
.into()
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
fn root(&mut self, storage_key: &[u8]) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info, StateVersion::V0)
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
#[version(2)]
fn root(&mut self, storage_key: &[u8], version: StateVersion) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info, version)
}

/// Child storage key iteration.
///
/// Get the next key in storage after the given one in lexicographic order in child storage.
fn next_key(&mut self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.next_child_storage_key(&child_info, key)
}
}

/// Interface that provides trie related functionality.
#[runtime_interface]
pub trait Trie {
/// A trie root formed from the iterated items.
fn blake2_256_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
LayoutV0::<sp_core::Blake2Hasher>::trie_root(input)
}
/// A trie root formed from the iterated items.
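///
/// A quick sketch; this is a pure function and needs no externalities. The key-value pairs
/// are placeholders:
///
/// ```ignore
/// use sp_core::storage::StateVersion;
///
/// let pairs = vec![
///     (b"key1".to_vec(), b"val1".to_vec()),
///     (b"key2".to_vec(), b"val2".to_vec()),
/// ];
/// let root: sp_core::H256 = sp_io::trie::blake2_256_root(pairs, StateVersion::V1);
/// ```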
#[version(2)]
fn blake2_256_root(input: Vec<(Vec<u8>, Vec<u8>)>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::Blake2Hasher>::trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::Blake2Hasher>::trie_root(input),
}
}
/// A trie root formed from the enumerated items.
fn blake2_256_ordered_root(input: Vec<Vec<u8>>) -> H256 {
LayoutV0::<sp_core::Blake2Hasher>::ordered_trie_root(input)
}
/// A trie root formed from the enumerated items.
#[version(2)]
fn blake2_256_ordered_root(input: Vec<Vec<u8>>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::Blake2Hasher>::ordered_trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::Blake2Hasher>::ordered_trie_root(input),
}
}

/// A trie root formed from the iterated items.
fn keccak_256_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
LayoutV0::<sp_core::KeccakHasher>::trie_root(input)
}
/// A trie root formed from the iterated items.
#[version(2)]
fn keccak_256_root(input: Vec<(Vec<u8>, Vec<u8>)>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::KeccakHasher>::trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::KeccakHasher>::trie_root(input),
}
}
/// A trie root formed from the enumerated items.
fn keccak_256_ordered_root(input: Vec<Vec<u8>>) -> H256 {
LayoutV0::<sp_core::KeccakHasher>::ordered_trie_root(input)
}
/// A trie root formed from the enumerated items.
#[version(2)]
fn keccak_256_ordered_root(input: Vec<Vec<u8>>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::KeccakHasher>::ordered_trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::KeccakHasher>::ordered_trie_root(input),
}
}

/// Verify trie proof
fn blake2_256_verify_proof(root: H256, proof: &[Vec<u8>], key: &[u8], value: &[u8]) -> bool {
sp_trie::verify_trie_proof::<LayoutV0<sp_core::Blake2Hasher>, _, _, _>(
&root,
proof,
&[(key, Some(value))],
)
.is_ok()
}
/// Verify trie proof
#[version(2)]
fn blake2_256_verify_proof(
root: H256,
proof: &[Vec<u8>],
key: &[u8],
value: &[u8],
version: StateVersion,
) -> bool {
match version {
StateVersion::V0 => sp_trie::verify_trie_proof::<
LayoutV0<sp_core::Blake2Hasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
StateVersion::V1 => sp_trie::verify_trie_proof::<
LayoutV1<sp_core::Blake2Hasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
}
}
/// Verify trie proof
fn keccak_256_verify_proof(root: H256, proof: &[Vec<u8>], key: &[u8], value: &[u8]) -> bool {
sp_trie::verify_trie_proof::<LayoutV0<sp_core::KeccakHasher>, _, _, _>(
&root,
proof,
&[(key, Some(value))],
)
.is_ok()
}
/// Verify trie proof
#[version(2)]
fn keccak_256_verify_proof(
root: H256,
proof: &[Vec<u8>],
key: &[u8],
value: &[u8],
version: StateVersion,
) -> bool {
match version {
StateVersion::V0 => sp_trie::verify_trie_proof::<
LayoutV0<sp_core::KeccakHasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
StateVersion::V1 => sp_trie::verify_trie_proof::<
LayoutV1<sp_core::KeccakHasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
}
}
}

/// Interface that provides miscellaneous functions for communicating between the runtime and the
/// node.
#[runtime_interface]
pub trait Misc {
// NOTE: We use the target 'runtime' for messages produced by general printing functions,
// instead of LOG_TARGET.
/// Print a number.
fn print_num(val: u64) {
log::debug!(target: "runtime", "{}", val);
}
/// Print any valid `utf8` buffer.
fn print_utf8(utf8: &[u8]) {
if let Ok(data) = std::str::from_utf8(utf8) {
log::debug!(target: "runtime", "{}", data)
}
}

/// Print any `u8` slice as hex.
fn print_hex(data: &[u8]) {
log::debug!(target: "runtime", "{}", HexDisplay::from(&data));
}

/// Extract the runtime version of the given wasm blob by calling `Core_version`.
///
/// Returns `None` if calling the function failed for any reason or `Some(Vec<u8>)` where
/// the `Vec<u8>` holds the SCALE encoded runtime version.
///
/// # Performance
///
/// This function may be very expensive to call depending on the wasm binary. It may be
/// relatively cheap if the wasm binary contains version information. In that case,
/// decompression of the wasm blob is the dominating factor.
///
/// If the wasm binary does not have the version information attached, then a legacy mechanism
/// may be involved. This means that a runtime call will be performed to query the version.
///
/// Calling into the runtime may be incredibly expensive and should be approached with care.
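///
/// A rough sketch, assuming an environment with a `ReadRuntimeVersionExt` extension
/// registered and `wasm_blob` as a placeholder for the runtime code:
///
/// ```ignore
/// use codec::Decode;
///
/// let encoded = sp_io::misc::runtime_version(&wasm_blob);
/// let version = encoded.and_then(|v| sp_version::RuntimeVersion::decode(&mut &v[..]).ok());
/// ```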
fn runtime_version(&mut self, wasm: &[u8]) -> Option<Vec<u8>> {
use sp_core::traits::ReadRuntimeVersionExt;
let mut ext = sp_state_machine::BasicExternalities::default();
match self
.extension::<ReadRuntimeVersionExt>()
.expect("No `ReadRuntimeVersionExt` associated for the current context!")
.read_runtime_version(wasm, &mut ext)
{
Ok(v) => Some(v),
Err(err) => {
log::debug!(
target: LOG_TARGET,
"cannot read version from the given runtime: {}",
err,
);
None
},
}
}
}
#[cfg(feature = "std")]
sp_externalities::decl_extension! {
/// Extension to signal to [`crypto::ed25519_verify`] to use the dalek crate.
///
/// The switch from `ed25519-dalek` to `ed25519-zebra` was a breaking change.
/// `ed25519-zebra` is more permissive when it comes to the verification of signatures.
/// This means that some chains may fail to sync from genesis when using `ed25519-zebra`.
/// So, this extension can be registered to the runtime execution environment to signal
/// that `ed25519-dalek` should be used for verification. The extension can be registered
/// in the following way:
///
/// ```nocompile
/// client.execution_extensions().set_extensions_factory(
/// // Let the `UseDalekExt` extension be registered for each runtime invocation
/// // until the execution happens in the context of block `1000`.
/// sc_client_api::execution_extensions::ExtensionBeforeBlock::<Block, UseDalekExt>::new(1000)
/// );
/// ```
pub struct UseDalekExt;
}
#[cfg(feature = "std")]
impl Default for UseDalekExt {
fn default() -> Self {
Self
}
}
/// Interfaces for working with crypto related types from within the runtime.
#[runtime_interface]
pub trait Crypto {
/// Returns all `ed25519` public keys for the given key id from the keystore.
fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec<ed25519::Public> {
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.ed25519_public_keys(id)
}

/// Generate an `ed25519` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> ed25519::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!"));
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.ed25519_generate_new(id, seed)
.expect("`ed25519_generate` failed")
}

/// Sign the given `msg` with the `ed25519` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
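///
/// A keystore round-trip sketch, assuming a `KeystoreExt` is registered in the current
/// externalities environment and using a placeholder key type:
///
/// ```ignore
/// let key_type = sp_core::crypto::KeyTypeId(*b"demo");
/// let public = sp_io::crypto::ed25519_generate(key_type, None);
/// let sig = sp_io::crypto::ed25519_sign(key_type, &public, b"message");
/// assert!(sig.map_or(false, |s| sp_io::crypto::ed25519_verify(&s, b"message", &public)));
/// ```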
fn ed25519_sign(
&mut self,
id: KeyTypeId,
pub_key: &ed25519::Public,
msg: &[u8],
) -> Option<ed25519::Signature> {
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.ed25519_sign(id, pub_key, msg)
.ok()
.flatten()
}

/// Verify `ed25519` signature.
///
/// Returns `true` when the verification was successful.
fn ed25519_verify(sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public) -> bool {
// We don't want to force everyone to call this function within an externalities context.
// So, we assume that we should not use dalek when we are not in externalities context.
// Otherwise, we check if the extension is present.
if sp_externalities::with_externalities(|mut e| e.extension::<UseDalekExt>().is_some())
.unwrap_or_default()
{
use ed25519_dalek::Verifier;
let Ok(public_key) = ed25519_dalek::VerifyingKey::from_bytes(&pub_key.0) else {
return false
};
let sig = ed25519_dalek::Signature::from_bytes(&sig.0);
public_key.verify(msg, &sig).is_ok()
} else {
ed25519::Pair::verify(sig, msg, pub_key)
}
}

/// Register an `ed25519` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
///
/// NOTE: Is tagged with `register_only` to keep the functions around for backwards
/// compatibility with old runtimes, but it should not be used anymore by new runtimes.
/// The implementation emulates the old behavior, but isn't doing any batch verification
/// anymore.
#[version(1, register_only)]
fn ed25519_batch_verify(
&mut self,
sig: &ed25519::Signature,
msg: &[u8],
pub_key: &ed25519::Public,
) -> bool {
let res = ed25519_verify(sig, msg, pub_key);
if let Some(ext) = self.extension::<VerificationExtDeprecated>() {
ext.0 &= res;
}
res
}
/// Verify `sr25519` signature.
///
/// Returns `true` when the verification was successful.
#[version(2)]
fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public) -> bool {
sr25519::Pair::verify(sig, msg, pub_key)
}
/// Register a `sr25519` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
///
/// NOTE: Is tagged with `register_only` to keep the functions around for backwards
/// compatibility with old runtimes, but it should not be used anymore by new runtimes.
/// The implementation emulates the old behavior, but isn't doing any batch verification
/// anymore.
#[version(1, register_only)]
fn sr25519_batch_verify(
&mut self,
sig: &sr25519::Signature,
msg: &[u8],
pub_key: &sr25519::Public,
) -> bool {
let res = sr25519_verify(sig, msg, pub_key);
if let Some(ext) = self.extension::<VerificationExtDeprecated>() {
ext.0 &= res;
}
res
}
/// Start verification extension.
///
/// NOTE: Is tagged with `register_only` to keep the functions around for backwards
/// compatibility with old runtimes, but it should not be used anymore by new runtimes.
/// The implementation emulates the old behavior, but isn't doing any batch verification
/// anymore.
#[version(1, register_only)]
fn start_batch_verify(&mut self) {
self.register_extension(VerificationExtDeprecated(true))
.expect("Failed to register required extension: `VerificationExt`");
}
/// Finish batch-verification of signatures.
///
/// Verify or wait for verification to finish for all signatures which were previously
/// deferred by `sr25519_verify`/`ed25519_verify`.
///
/// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called).
///
/// NOTE: Is tagged with `register_only` to keep the functions around for backwards
/// compatibility with old runtimes, but it should not be used anymore by new runtimes.
/// The implementation emulates the old behavior, but isn't doing any batch verification
/// anymore.
#[version(1, register_only)]
fn finish_batch_verify(&mut self) -> bool {
let result = self
.extension::<VerificationExtDeprecated>()
.expect("`finish_batch_verify` should only be called after `start_batch_verify`")
.0;

self.deregister_extension::<VerificationExtDeprecated>()
.expect("No verification extension in current context!");
result
}

/// Returns all `sr25519` public keys for the given key id from the keystore.
fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec<sr25519::Public> {
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.sr25519_public_keys(id)
}

/// Generate an `sr25519` key for the given key type using an optional seed and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> sr25519::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!"));
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.sr25519_generate_new(id, seed)
.expect("`sr25519_generate` failed")
}

/// Sign the given `msg` with the `sr25519` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
fn sr25519_sign(
&mut self,
id: KeyTypeId,
pub_key: &sr25519::Public,
msg: &[u8],
) -> Option<sr25519::Signature> {
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.sr25519_sign(id, pub_key, msg)
.ok()
.flatten()
}

/// Verify an `sr25519` signature.
///
/// Returns `true` when the verification is successful regardless of
/// signature version.
fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool {
sr25519::Pair::verify_deprecated(sig, msg, pubkey)
}
/// Returns all `ecdsa` public keys for the given key id from the keystore.
fn ecdsa_public_keys(&mut self, id: KeyTypeId) -> Vec<ecdsa::Public> {
self.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!")
.ecdsa_public_keys(id)
}
/// Generate an `ecdsa` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.