// This file is part of Substrate.
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! I/O host interface for substrate runtime.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(alloc_error_handler))]
#![cfg_attr(
feature = "std",
doc = "Substrate runtime standard library as compiled when linked with Rust's standard library."
)]
#![cfg_attr(
not(feature = "std"),
doc = "Substrate's runtime standard library as compiled without Rust's standard library."
)]
use sp_std::vec::Vec;
#[cfg(feature = "std")]
use tracing;
#[cfg(feature = "std")]
use sp_core::{
crypto::Pair,
hexdisplay::HexDisplay,
offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt},
storage::ChildInfo,
traits::TaskExecutorExt,
};
#[cfg(feature = "std")]
use sp_keystore::{KeystoreExt, SyncCryptoStore};
use sp_core::{
crypto::KeyTypeId,
ecdsa, ed25519,
offchain::{
HttpError, HttpRequestId, HttpRequestStatus, OpaqueNetworkState, StorageKind, Timestamp,
},
sr25519,
storage::StateVersion,
LogLevel, LogLevelFilter, OpaquePeerId, H256,
};
use sp_trie::{LayoutV0, LayoutV1, TrieConfiguration};
use sp_runtime_interface::{
pass_by::{PassBy, PassByCodec},
runtime_interface, Pointer,
};
use codec::{Decode, Encode};
#[cfg(feature = "std")]
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
Message, SECP256K1,
};
use sp_externalities::{Externalities, ExternalitiesExt};
#[cfg(feature = "std")]
mod batch_verifier;
#[cfg(feature = "std")]
use batch_verifier::BatchVerifier;
pub use sp_externalities::MultiRemovalResults;
const LOG_TARGET: &str = "runtime::io";
/// Error verifying ECDSA signature
pub enum EcdsaVerifyError {
/// Incorrect value of R or S
BadRS,
/// Incorrect value of V
BadV,
/// Invalid signature
BadSignature,
}
/// The outcome of calling `storage_kill`. Returned value is the number of storage items
/// removed from the backend by the `storage_kill` call.
#[derive(PassByCodec, Encode, Decode)]
pub enum KillStorageResult {
/// All keys to remove were removed, return number of iterations performed during the
/// operation.
AllRemoved(u32),
/// Not all keys to remove were removed, return number of iterations performed during the
/// operation.
SomeRemaining(u32),
}
impl From<MultiRemovalResults> for KillStorageResult {
fn from(r: MultiRemovalResults) -> Self {
// We use `loops` here rather than `backend` because that's the same as the original
// functionality pre-#11490. This won't matter once we switch to the new host function
// since we won't be using the `KillStorageResult` type in the runtime any more.
match r.maybe_cursor {
None => Self::AllRemoved(r.loops),
Some(..) => Self::SomeRemaining(r.loops),
}
}
}
/// Interface for accessing the storage from within the runtime.
#[runtime_interface]
pub trait Storage {
/// Returns the data for `key` in the storage or `None` if the key can not be found.
fn get(&self, key: &[u8]) -> Option<bytes::Bytes> {
self.storage(key).map(|s| bytes::Bytes::from(s.to_vec()))
}
/// Get `key` from storage, placing the value into `value_out` and return the number of
/// bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
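///
/// # Example
///
/// An illustrative sketch (not compiled as a doc-test); it assumes an externalities
/// environment such as `TestExternalities` is in place:
///
/// ```ignore
/// sp_io::storage::set(b"key", b"hello world");
/// let mut buf = [0u8; 5];
/// // Copy at most 5 bytes starting at offset 6; the return value is the number of
/// // bytes the stored value has beyond that offset.
/// assert_eq!(sp_io::storage::read(b"key", &mut buf, 6), Some(5));
/// assert_eq!(&buf, b"world");
/// ```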
fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option<u32> {
self.storage(key).map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
data.len() as u32
})
}
/// Set `key` to `value` in the storage.
fn set(&mut self, key: &[u8], value: &[u8]) {
self.set_storage(key.to_vec(), value.to_vec());
}
/// Clear the storage of the given `key` and its value.
fn clear(&mut self, key: &[u8]) {
self.clear_storage(key)
}
/// Check whether the given `key` exists in storage.
fn exists(&self, key: &[u8]) -> bool {
self.exists_storage(key)
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, prefix: &[u8]) {
let _ = Externalities::clear_prefix(*self, prefix, None, None);
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
///
/// # Limit
///
/// Deletes all keys from the overlay and up to `limit` keys from the backend if
/// it is set to `Some`. No limit is applied when `limit` is set to `None`.
///
/// The limit can be used to partially delete a prefix storage in case it is too large
/// to delete in one go (block).
///
/// Returns [`KillStorageResult`] to inform about the result.
///
/// # Note
///
/// Please note that keys that are residing in the overlay for that prefix when
/// issuing this call are all deleted without counting towards the `limit`. Only keys
/// written during the current block are part of the overlay. Deleting with a `limit`
/// mostly makes sense with an empty overlay for that prefix.
///
/// Calling this function multiple times per block for the same `prefix` does
/// not make much sense because it is not cumulative when called inside the same block.
/// The deletion would always start from `prefix` resulting in the same keys being deleted
/// every time this function is called with the exact same arguments per block. This happens
/// because the keys in the overlay are not taken into account when deleting keys in the
/// backend.
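///
/// # Example
///
/// An illustrative sketch of the runtime-side call, assuming an externalities environment:
///
/// ```ignore
/// use sp_io::KillStorageResult;
///
/// // Remove at most 100 keys under the prefix from the backend in this call.
/// match sp_io::storage::clear_prefix(b"my_prefix", Some(100)) {
///     KillStorageResult::AllRemoved(n) => { /* done after `n` iterations */ },
///     KillStorageResult::SomeRemaining(n) => { /* call again in a later block */ },
/// }
/// ```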
#[version(2)]
fn clear_prefix(&mut self, prefix: &[u8], limit: Option<u32>) -> KillStorageResult {
Externalities::clear_prefix(*self, prefix, limit, None).into()
}
/// Partially clear the storage of each key-value pair where the key starts with the given
/// prefix.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete a prefix storage in case it is too large or costly
/// to delete in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given `maybe_prefix` value. Subsequent calls
/// operating on the same prefix should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given prefix, it is important that no further
/// keys under the same prefix are inserted. If so, then they may or may not be deleted by
/// subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay for that prefix when
/// issuing this call are deleted without counting towards the `limit`.
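///
/// # Example
///
/// An illustrative sketch of driving this cursor-based interface; `clear_prefix_v3` is a
/// hypothetical stand-in name for a binding to this version of the host function:
///
/// ```ignore
/// let mut cursor: Option<Vec<u8>> = None;
/// loop {
///     let res = clear_prefix_v3(b"my_prefix", Some(100), cursor.take());
///     match res.maybe_cursor {
///         None => break,               // nothing is left under the prefix
///         Some(c) => cursor = Some(c), // resume from here in a follow-up call
///     }
/// }
/// ```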
#[version(3, register_only)]
fn clear_prefix(
&mut self,
maybe_prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>, //< TODO Make work or just Option<Vec<u8>>?
) -> MultiRemovalResults {
Externalities::clear_prefix(
*self,
maybe_prefix,
maybe_limit,
maybe_cursor.as_ref().map(|x| &x[..]),
)
.into()
}
/// Append the encoded `value` to the storage item at `key`.
///
/// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend).
///
/// # Warning
///
/// If the storage item does not support [`EncodeAppend`](codec::EncodeAppend) or
/// something else fails at appending, the storage item will be set to `[value]`.
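///
/// # Example
///
/// An illustrative sketch (not compiled as a doc-test); the item at `b"numbers"` is a
/// SCALE-encoded `Vec<u32>` and each call appends the encoding of one element:
///
/// ```ignore
/// use codec::{Decode, Encode};
///
/// sp_io::storage::append(b"numbers", 1u32.encode());
/// sp_io::storage::append(b"numbers", 2u32.encode());
///
/// let raw = sp_io::storage::get(b"numbers").unwrap();
/// assert_eq!(Vec::<u32>::decode(&mut &raw[..]).unwrap(), vec![1, 2]);
/// ```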
fn append(&mut self, key: &[u8], value: Vec<u8>) {
self.storage_append(key.to_vec(), value);
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
fn root(&mut self) -> Vec<u8> {
self.storage_root(StateVersion::V0)
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
#[version(2)]
fn root(&mut self, version: StateVersion) -> Vec<u8> {
self.storage_root(version)
}
/// Always returns `None`. This function exists for compatibility reasons.
fn changes_root(&mut self, _parent_hash: &[u8]) -> Option<Vec<u8>> {
None
}
/// Get the next key in storage after the given one in lexicographic order.
fn next_key(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.next_storage_key(key)
}
/// Start a new nested transaction.
///
/// This allows to either commit or roll back all changes that are made after this call.
/// For every transaction there must be a matching call to either `rollback_transaction`
/// or `commit_transaction`. This is also effective for all values manipulated using the
/// `DefaultChildStorage` API.
///
/// # Warning
///
/// This is a low level API that is potentially dangerous as it can easily result
/// in unbalanced transactions. For example, FRAME users should use high level storage
/// abstractions.
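///
/// # Example
///
/// An illustrative sketch of a balanced transaction (`all_checks_passed` is a placeholder
/// condition):
///
/// ```ignore
/// sp_io::storage::start_transaction();
/// sp_io::storage::set(b"key", b"value");
/// if all_checks_passed {
///     sp_io::storage::commit_transaction();
/// } else {
///     // Discards the `set` above.
///     sp_io::storage::rollback_transaction();
/// }
/// ```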
fn start_transaction(&mut self) {
self.storage_start_transaction();
}
/// Rollback the last transaction started by `start_transaction`.
///
/// Any changes made during that transaction are discarded.
///
/// # Panics
///
/// Will panic if there is no open transaction.
fn rollback_transaction(&mut self) {
self.storage_rollback_transaction()
.expect("No open transaction that can be rolled back.");
}
/// Commit the last transaction started by `start_transaction`.
///
/// Any changes made during that transaction are committed.
///
/// # Panics
///
/// Will panic if there is no open transaction.
fn commit_transaction(&mut self) {
self.storage_commit_transaction()
.expect("No open transaction that can be committed.");
}
}
/// Interface for accessing the child storage for default child trie,
/// from within the runtime.
#[runtime_interface]
pub trait DefaultChildStorage {
/// Get a default child storage value for a given key.
///
/// Parameter `storage_key` is the unprefixed location of the root of the child trie in the
/// parent trie. Result is `None` if the value for `key` in the child storage can not be found.
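///
/// # Example
///
/// An illustrative sketch (not compiled as a doc-test); `b"my_child"` is an arbitrary
/// unprefixed child trie location:
///
/// ```ignore
/// sp_io::default_child_storage::set(b"my_child", b"key", b"value");
/// assert_eq!(
///     sp_io::default_child_storage::get(b"my_child", b"key"),
///     Some(b"value".to_vec()),
/// );
/// ```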
fn get(&self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|s| s.to_vec())
}
/// Allocation efficient variant of `get`.
///
/// Get `key` from child storage, placing the value into `value_out` and return the number
/// of bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
fn read(
&self,
storage_key: &[u8],
key: &[u8],
value_out: &mut [u8],
value_offset: u32,
) -> Option<u32> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
data.len() as u32
})
}
/// Set `key` to `value` in the child storage denoted by `storage_key`.
fn set(&mut self, storage_key: &[u8], key: &[u8], value: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.set_child_storage(&child_info, key.to_vec(), value.to_vec());
}
/// For the default child storage at `storage_key`, clear value at `key`.
fn clear(&mut self, storage_key: &[u8], key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_storage(&child_info, key);
}
/// Clear an entire child storage.
/// If it exists, the child storage for `storage_key`
/// is removed.
fn storage_kill(&mut self, storage_key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
let _ = self.kill_child_storage(&child_info, None, None);
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(2)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> bool {
let child_info = ChildInfo::new_default(storage_key);
let r = self.kill_child_storage(&child_info, limit, None);
r.maybe_cursor.is_none()
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(3)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, limit, None).into()
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(4, register_only)]
fn storage_kill(
&mut self,
storage_key: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>,
) -> MultiRemovalResults {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, maybe_limit, maybe_cursor.as_ref().map(|x| &x[..]))
.into()
}
/// Check whether the given `key` exists in default child defined at `storage_key`.
fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool {
let child_info = ChildInfo::new_default(storage_key);
self.exists_child_storage(&child_info, key)
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
let _ = self.clear_child_prefix(&child_info, prefix, None, None);
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(2)]
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
limit: Option<u32>,
) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_prefix(&child_info, prefix, limit, None).into()
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(3, register_only)]
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>,
) -> MultiRemovalResults {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_prefix(
&child_info,
prefix,
maybe_limit,
maybe_cursor.as_ref().map(|x| &x[..]),
)
.into()
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
fn root(&mut self, storage_key: &[u8]) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info, StateVersion::V0)
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
#[version(2)]
fn root(&mut self, storage_key: &[u8], version: StateVersion) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info, version)
}
/// Child storage key iteration.
///
/// Get the next key in storage after the given one in lexicographic order in child storage.
fn next_key(&mut self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.next_child_storage_key(&child_info, key)
}
}
/// Interface that provides trie related functionality.
#[runtime_interface]
pub trait Trie {
/// A trie root formed from the iterated items.
fn blake2_256_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
LayoutV0::<sp_core::Blake2Hasher>::trie_root(input)
}
/// A trie root formed from the iterated items.
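///
/// # Example
///
/// An illustrative sketch; `blake2_256_root` takes explicit `(key, value)` pairs, while
/// `blake2_256_ordered_root` derives the keys from the item indices:
///
/// ```ignore
/// use sp_core::storage::StateVersion;
///
/// let pairs = vec![(b"key".to_vec(), b"value".to_vec())];
/// let root = sp_io::trie::blake2_256_root(pairs, StateVersion::V1);
///
/// let items = vec![b"first".to_vec(), b"second".to_vec()];
/// let ordered_root = sp_io::trie::blake2_256_ordered_root(items, StateVersion::V1);
/// ```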
#[version(2)]
fn blake2_256_root(input: Vec<(Vec<u8>, Vec<u8>)>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::Blake2Hasher>::trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::Blake2Hasher>::trie_root(input),
}
}
/// A trie root formed from the enumerated items.
fn blake2_256_ordered_root(input: Vec<Vec<u8>>) -> H256 {
LayoutV0::<sp_core::Blake2Hasher>::ordered_trie_root(input)
}
/// A trie root formed from the enumerated items.
#[version(2)]
fn blake2_256_ordered_root(input: Vec<Vec<u8>>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::Blake2Hasher>::ordered_trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::Blake2Hasher>::ordered_trie_root(input),
}
}
/// A trie root formed from the iterated items.
fn keccak_256_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
LayoutV0::<sp_core::KeccakHasher>::trie_root(input)
}
/// A trie root formed from the iterated items.
#[version(2)]
fn keccak_256_root(input: Vec<(Vec<u8>, Vec<u8>)>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::KeccakHasher>::trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::KeccakHasher>::trie_root(input),
}
}
/// A trie root formed from the enumerated items.
fn keccak_256_ordered_root(input: Vec<Vec<u8>>) -> H256 {
LayoutV0::<sp_core::KeccakHasher>::ordered_trie_root(input)
}
/// A trie root formed from the enumerated items.
#[version(2)]
fn keccak_256_ordered_root(input: Vec<Vec<u8>>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::KeccakHasher>::ordered_trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::KeccakHasher>::ordered_trie_root(input),
}
}
/// Verify trie proof
fn blake2_256_verify_proof(root: H256, proof: &[Vec<u8>], key: &[u8], value: &[u8]) -> bool {
sp_trie::verify_trie_proof::<LayoutV0<sp_core::Blake2Hasher>, _, _, _>(
&root,
proof,
&[(key, Some(value))],
)
.is_ok()
}
/// Verify trie proof
#[version(2)]
fn blake2_256_verify_proof(
root: H256,
proof: &[Vec<u8>],
key: &[u8],
value: &[u8],
version: StateVersion,
) -> bool {
match version {
StateVersion::V0 => sp_trie::verify_trie_proof::<
LayoutV0<sp_core::Blake2Hasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
StateVersion::V1 => sp_trie::verify_trie_proof::<
LayoutV1<sp_core::Blake2Hasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
}
}
/// Verify trie proof
fn keccak_256_verify_proof(root: H256, proof: &[Vec<u8>], key: &[u8], value: &[u8]) -> bool {
sp_trie::verify_trie_proof::<LayoutV0<sp_core::KeccakHasher>, _, _, _>(
&root,
proof,
&[(key, Some(value))],
)
.is_ok()
}
/// Verify trie proof
#[version(2)]
fn keccak_256_verify_proof(
root: H256,
proof: &[Vec<u8>],
key: &[u8],
value: &[u8],
version: StateVersion,
) -> bool {
match version {
StateVersion::V0 => sp_trie::verify_trie_proof::<
LayoutV0<sp_core::KeccakHasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
StateVersion::V1 => sp_trie::verify_trie_proof::<
LayoutV1<sp_core::KeccakHasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
}
}
}
/// Interface that provides miscellaneous functions for communicating between the runtime and the
/// node.
#[runtime_interface]
pub trait Misc {
// NOTE: We use the target 'runtime' for messages produced by general printing functions,
// instead of LOG_TARGET.
/// Print a number.
fn print_num(val: u64) {
log::debug!(target: "runtime", "{}", val);
}
/// Print any valid `utf8` buffer.
fn print_utf8(utf8: &[u8]) {
if let Ok(data) = std::str::from_utf8(utf8) {
log::debug!(target: "runtime", "{}", data)
}
}
/// Print any `u8` slice as hex.
fn print_hex(data: &[u8]) {
log::debug!(target: "runtime", "{}", HexDisplay::from(&data));
}
/// Extract the runtime version of the given wasm blob by calling `Core_version`.
///
/// Returns `None` if calling the function failed for any reason or `Some(Vec<u8>)` where
/// the `Vec<u8>` holds the SCALE encoded runtime version.
///
/// # Performance
///
/// This function may be very expensive to call depending on the wasm binary. It may be
/// relatively cheap if the wasm binary contains version information. In that case,
/// uncompression of the wasm blob is the dominating factor.
///
/// If the wasm binary does not have the version information attached, then a legacy mechanism
/// may be involved. This means that a runtime call will be performed to query the version.
///
/// Calling into the runtime may be incredibly expensive and should be approached with care.
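///
/// # Example
///
/// An illustrative sketch; `wasm_blob` is a placeholder for the runtime code to inspect,
/// and a `ReadRuntimeVersionExt` extension must be registered in the environment:
///
/// ```ignore
/// use codec::Decode;
///
/// if let Some(encoded) = sp_io::misc::runtime_version(&wasm_blob) {
///     // The returned bytes are the SCALE encoded runtime version.
///     let version = sp_version::RuntimeVersion::decode(&mut &encoded[..]).ok();
/// }
/// ```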
fn runtime_version(&mut self, wasm: &[u8]) -> Option<Vec<u8>> {
use sp_core::traits::ReadRuntimeVersionExt;
let mut ext = sp_state_machine::BasicExternalities::default();
match self
.extension::<ReadRuntimeVersionExt>()
.expect("No `ReadRuntimeVersionExt` associated for the current context!")
.read_runtime_version(wasm, &mut ext)
{
Ok(v) => Some(v),
Err(err) => {
log::debug!(
target: LOG_TARGET,
"cannot read version from the given runtime: {}",
err,
);
None
},
}
}
}
/// Interfaces for working with crypto related types from within the runtime.
#[runtime_interface]
pub trait Crypto {
/// Returns all `ed25519` public keys for the given key id from the keystore.
fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec<ed25519::Public> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ed25519_public_keys(keystore, id)
}
/// Generate an `ed25519` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> ed25519::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!"));
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ed25519_generate_new(keystore, id, seed)
.expect("`ed25519_generate` failed")
}
/// Sign the given `msg` with the `ed25519` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
fn ed25519_sign(
&mut self,
id: KeyTypeId,
pub_key: &ed25519::Public,
msg: &[u8],
) -> Option<ed25519::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg)
.flatten()
.and_then(|sig| ed25519::Signature::from_slice(&sig))
}
/// Verify `ed25519` signature.
///
/// Returns `true` when the verification was successful.
fn ed25519_verify(sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public) -> bool {
ed25519::Pair::verify(sig, msg, pub_key)
}
/// Register a `ed25519` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
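///
/// # Example
///
/// An illustrative sketch of the batching flow; the signatures, messages and public keys
/// are placeholders:
///
/// ```ignore
/// sp_io::crypto::start_batch_verify();
/// // `true` here only means "accepted for batching", not "valid".
/// sp_io::crypto::ed25519_batch_verify(&sig_a, &msg_a[..], &pub_a);
/// sp_io::crypto::sr25519_batch_verify(&sig_b, &msg_b[..], &pub_b);
/// // Only the final result tells whether *all* batched signatures were valid.
/// let all_valid = sp_io::crypto::finish_batch_verify();
/// ```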
fn ed25519_batch_verify(
&mut self,
sig: &ed25519::Signature,
msg: &[u8],
pub_key: &ed25519::Public,
) -> bool {
self.extension::<VerificationExt>()
.map(|extension| extension.push_ed25519(sig.clone(), *pub_key, msg.to_vec()))
.unwrap_or_else(|| ed25519_verify(sig, msg, pub_key))
}
/// Verify `sr25519` signature.
///
/// Returns `true` when the verification was successful.
fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public) -> bool {
sr25519::Pair::verify(sig, msg, pub_key)
}
/// Register a `sr25519` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
fn sr25519_batch_verify(
&mut self,
sig: &sr25519::Signature,
msg: &[u8],
pub_key: &sr25519::Public,
) -> bool {
self.extension::<VerificationExt>()
.map(|extension| extension.push_sr25519(sig.clone(), *pub_key, msg.to_vec()))
.unwrap_or_else(|| sr25519_verify(sig, msg, pub_key))
}
/// Start verification extension.
fn start_batch_verify(&mut self) {
let scheduler = self
.extension::<TaskExecutorExt>()
.expect("No task executor associated with the current context!")
.clone();
self.register_extension(VerificationExt(BatchVerifier::new(scheduler)))
.expect("Failed to register required extension: `VerificationExt`");
}
/// Finish batch-verification of signatures.
///
/// Verify or wait for verification to finish for all signatures which were previously
/// deferred by `sr25519_verify`/`ed25519_verify`.
///
/// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called).
fn finish_batch_verify(&mut self) -> bool {
let result = self
.extension::<VerificationExt>()
.expect("`finish_batch_verify` should only be called after `start_batch_verify`")
.verify_and_clear();
self.deregister_extension::<VerificationExt>()
.expect("No verification extension in current context!");
result
}
/// Returns all `sr25519` public keys for the given key id from the keystore.
fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec<sr25519::Public> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sr25519_public_keys(keystore, id)
}
/// Generate an `sr25519` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> sr25519::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!"));
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sr25519_generate_new(keystore, id, seed)
.expect("`sr25519_generate` failed")
}
/// Sign the given `msg` with the `sr25519` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
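///
/// # Example
///
/// An illustrative sketch (requires a keystore, e.g. via `KeystoreExt` in tests or
/// off-chain workers); `KEY_TYPE` is a placeholder `KeyTypeId`:
///
/// ```ignore
/// let public = sp_io::crypto::sr25519_generate(KEY_TYPE, None);
/// let sig = sp_io::crypto::sr25519_sign(KEY_TYPE, &public, b"payload")
///     .expect("key was just generated into the keystore");
/// assert!(sp_io::crypto::sr25519_verify(&sig, b"payload", &public));
/// ```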
fn sr25519_sign(
&mut self,
id: KeyTypeId,
pub_key: &sr25519::Public,
msg: &[u8],
) -> Option<sr25519::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg)
.flatten()
.and_then(|sig| sr25519::Signature::from_slice(&sig))
}
/// Verify an `sr25519` signature.
///
/// Returns `true` when the verification is successful regardless of
/// signature version.
#[version(2)]
fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool {
sr25519::Pair::verify_deprecated(sig, msg, pubkey)
}
/// Returns all `ecdsa` public keys for the given key id from the keystore.
fn ecdsa_public_keys(&mut self, id: KeyTypeId) -> Vec<ecdsa::Public> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ecdsa_public_keys(keystore, id)
}
/// Generate an `ecdsa` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn ecdsa_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> ecdsa::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!"));
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ecdsa_generate_new(keystore, id, seed).expect("`ecdsa_generate` failed")
}
/// Sign the given `msg` with the `ecdsa` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
fn ecdsa_sign(
&mut self,
id: KeyTypeId,
pub_key: &ecdsa::Public,
msg: &[u8],
) -> Option<ecdsa::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg)
.flatten()
.and_then(|sig| ecdsa::Signature::from_slice(&sig))
}
/// Sign the given pre-hashed `msg` with the `ecdsa` key that corresponds to the given public
/// key and key type in the keystore.
///
/// Returns the signature.
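///
/// # Example
///
/// An illustrative sketch (requires a keystore); `KEY_TYPE` and `public` are placeholders
/// for an existing key:
///
/// ```ignore
/// let hash = sp_io::hashing::blake2_256(b"payload");
/// let sig = sp_io::crypto::ecdsa_sign_prehashed(KEY_TYPE, &public, &hash)
///     .expect("key exists in the keystore");
/// assert!(sp_io::crypto::ecdsa_verify_prehashed(&sig, &hash, &public));
/// ```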
fn ecdsa_sign_prehashed(
&mut self,
id: KeyTypeId,
pub_key: &ecdsa::Public,
msg: &[u8; 32],
) -> Option<ecdsa::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ecdsa_sign_prehashed(keystore, id, pub_key, msg).ok().flatten()
}
/// Verify `ecdsa` signature.
///
/// Returns `true` when the verification was successful.
/// This version is able to handle non-standard, overflowing signatures.
fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool {
ecdsa::Pair::verify_deprecated(sig, msg, pub_key)
}
/// Verify `ecdsa` signature.
///
/// Returns `true` when the verification was successful.
#[version(2)]
fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool {
ecdsa::Pair::verify(sig, msg, pub_key)
}
/// Verify `ecdsa` signature with pre-hashed `msg`.
///
/// Returns `true` when the verification was successful.
fn ecdsa_verify_prehashed(
sig: &ecdsa::Signature,
msg: &[u8; 32],
pub_key: &ecdsa::Public,
) -> bool {
ecdsa::Pair::verify_prehashed(sig, msg, pub_key)
}
/// Register a `ecdsa` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
fn ecdsa_batch_verify(
&mut self,
sig: &ecdsa::Signature,
msg: &[u8],
pub_key: &ecdsa::Public,
) -> bool {
self.extension::<VerificationExt>()
.map(|extension| extension.push_ecdsa(sig.clone(), *pub_key, msg.to_vec()))
.unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key))
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey
/// (doesn't include the 0x04 prefix).
/// This version is able to handle non-standard, overflowing signatures.
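///
/// # Example
///
/// An illustrative sketch; `sig` (65 bytes, R || S || V) and `msg` (32-byte blake2-256
/// hash) are placeholders:
///
/// ```ignore
/// match sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) {
///     Ok(pubkey) => { /* 64-byte public key without the `0x04` prefix */ },
///     Err(sp_io::EcdsaVerifyError::BadV) => { /* invalid recovery id */ },
///     Err(_) => { /* bad R/S or an invalid signature */ },
/// }
/// ```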
fn secp256k1_ecdsa_recover(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 64], EcdsaVerifyError> {
let rid = libsecp256k1::RecoveryId::parse(
if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8,
)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = libsecp256k1::Signature::parse_overflowing_slice(&sig[..64])
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = libsecp256k1::Message::parse(msg);