// This file is part of Substrate.
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! I/O host interface for substrate runtime.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(alloc_error_handler))]
#![cfg_attr(
feature = "std",
doc = "Substrate runtime standard library as compiled when linked with Rust's standard library."
)]
#![cfg_attr(
not(feature = "std"),
doc = "Substrate's runtime standard library as compiled without Rust's standard library."
)]
use sp_std::vec::Vec;
#[cfg(feature = "std")]
use tracing;
#[cfg(feature = "std")]
use sp_core::{
crypto::Pair,
hexdisplay::HexDisplay,
offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt},
storage::ChildInfo,
traits::{RuntimeSpawnExt, TaskExecutorExt},
};
#[cfg(feature = "std")]
use sp_keystore::{KeystoreExt, SyncCryptoStore};
use sp_core::{
crypto::KeyTypeId,
ecdsa, ed25519,
offchain::{
HttpError, HttpRequestId, HttpRequestStatus, OpaqueNetworkState, StorageKind, Timestamp,
},
sr25519,
storage::StateVersion,
LogLevel, LogLevelFilter, OpaquePeerId, H256,
};
use sp_trie::{LayoutV0, LayoutV1, TrieConfiguration};
use sp_runtime_interface::{
pass_by::{PassBy, PassByCodec},
runtime_interface, Pointer,
};
use codec::{Decode, Encode};
#[cfg(feature = "std")]
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
Message, SECP256K1,
};
use sp_externalities::{Externalities, ExternalitiesExt};
#[cfg(feature = "std")]
mod batch_verifier;
#[cfg(feature = "std")]
use batch_verifier::BatchVerifier;
const LOG_TARGET: &str = "runtime::io";
/// Error verifying ECDSA signature
#[derive(Encode, Decode)]
pub enum EcdsaVerifyError {
/// Incorrect value of R or S
BadRS,
/// Incorrect value of V
BadV,
/// Invalid signature
BadSignature,
}
/// The outcome of calling `storage_kill`. The returned value is the number of storage items
/// removed from the backend by the `storage_kill` call.
#[derive(PassByCodec, Encode, Decode)]
pub enum KillStorageResult {
/// All keys to remove were removed, returns the number of keys removed from the backend.
AllRemoved(u32),
/// Not all keys to remove were removed, returns the number of keys removed from the backend.
SomeRemaining(u32),
}
/// Interface for accessing the storage from within the runtime.
#[runtime_interface]
pub trait Storage {
/// Returns the data for `key` in the storage or `None` if the key can not be found.
fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
self.storage(key).map(|s| s.to_vec())
}
/// Get `key` from storage, placing the value into `value_out` and return the number of
/// bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
///
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
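///
/// A minimal usage sketch from runtime code (illustrative only; assumes the generated
/// `sp_io::storage` module is in scope and `b"some_key"` is a placeholder key):
///
/// ```ignore
/// let mut buf = [0u8; 16];
/// // Read at most 16 bytes of the value, skipping the first 4 bytes.
/// match sp_io::storage::read(b"some_key", &mut buf, 4) {
///     // `remaining` is the number of bytes the entry holds beyond the offset; only
///     // `remaining.min(16)` bytes of `buf` were actually written.
///     Some(remaining) => {
///         let written = (remaining as usize).min(buf.len());
///         let _value_part = &buf[..written];
///     },
///     // The key does not exist at all.
///     None => {},
/// }
/// ```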
fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option<u32> {
self.storage(key).map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
data.len() as u32
})
}
/// Set `key` to `value` in the storage.
fn set(&mut self, key: &[u8], value: &[u8]) {
self.set_storage(key.to_vec(), value.to_vec());
}
/// Clear the storage of the given `key` and its value.
fn clear(&mut self, key: &[u8]) {
self.clear_storage(key)
}
/// Check whether the given `key` exists in storage.
fn exists(&self, key: &[u8]) -> bool {
self.exists_storage(key)
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, prefix: &[u8]) {
let _ = Externalities::clear_prefix(*self, prefix, None);
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
///
/// # Limit
///
/// Deletes all keys from the overlay and up to `limit` keys from the backend if
/// it is set to `Some`. No limit is applied when `limit` is set to `None`.
///
/// The limit can be used to partially delete storage under a prefix when it is too large
/// to delete in one go (block).
///
/// Returns [`KillStorageResult::AllRemoved`] carrying the number of removed keys if no keys
/// remain under the prefix after the call, or [`KillStorageResult::SomeRemaining`] carrying
/// the number of keys removed so far if some keys are left.
///
/// # Note
///
/// Please note that keys that are residing in the overlay for that prefix when
/// issuing this call are all deleted without counting towards the `limit`. Only keys
/// written during the current block are part of the overlay. Deleting with a `limit`
/// mostly makes sense with an empty overlay for that prefix.
///
/// Calling this function multiple times per block for the same `prefix` does
/// not make much sense because it is not cumulative when called inside the same block.
/// Use this function to distribute the deletion of a large prefix across multiple
/// blocks.
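///
/// A minimal sketch of driving a limited deletion to completion over several blocks
/// (illustrative only; assumes this latest version is what the generated
/// `sp_io::storage::clear_prefix` resolves to, and `b"some_prefix"` is a placeholder):
///
/// ```ignore
/// // Delete at most 100 backend keys under the prefix in this block.
/// match sp_io::storage::clear_prefix(b"some_prefix", Some(100)) {
///     KillStorageResult::AllRemoved(n) => { /* done, `n` keys removed */ },
///     KillStorageResult::SomeRemaining(n) => {
///         // `n` keys removed so far; call again in a later block to continue.
///     },
/// }
/// ```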
#[version(2)]
fn clear_prefix(&mut self, prefix: &[u8], limit: Option<u32>) -> KillStorageResult {
let (all_removed, num_removed) = Externalities::clear_prefix(*self, prefix, limit);
match all_removed {
true => KillStorageResult::AllRemoved(num_removed),
false => KillStorageResult::SomeRemaining(num_removed),
}
}
/// Append the encoded `value` to the storage item at `key`.
///
/// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend).
///
/// # Warning
///
/// If the storage item does not support [`EncodeAppend`](codec::EncodeAppend) or
/// something else fails at appending, the storage item will be set to `[value]`.
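///
/// A minimal sketch (illustrative only; assumes `codec::{Encode, Decode}` and the generated
/// `sp_io::storage` module are in scope, and `b"numbers"` is a placeholder key):
///
/// ```ignore
/// // Appending SCALE encoded `u32`s makes the stored item behave like a `Vec<u32>`.
/// sp_io::storage::append(b"numbers", 1u32.encode());
/// sp_io::storage::append(b"numbers", 2u32.encode());
/// let raw = sp_io::storage::get(b"numbers").expect("the key was just written");
/// assert_eq!(<Vec<u32>>::decode(&mut &raw[..]).unwrap(), vec![1, 2]);
/// ```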
fn append(&mut self, key: &[u8], value: Vec<u8>) {
self.storage_append(key.to_vec(), value);
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
fn root(&mut self) -> Vec<u8> {
self.storage_root(StateVersion::V0)
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
#[version(2)]
fn root(&mut self, version: StateVersion) -> Vec<u8> {
self.storage_root(version)
}
/// Always returns `None`. This function exists for compatibility reasons.
fn changes_root(&mut self, _parent_hash: &[u8]) -> Option<Vec<u8>> {
None
}
/// Get the next key in storage after the given one in lexicographic order.
fn next_key(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.next_storage_key(&key)
}
/// Start a new nested transaction.
///
/// This allows to either commit or roll back all changes that are made after this call.
/// For every transaction there must be a matching call to either `rollback_transaction`
/// or `commit_transaction`. This is also effective for all values manipulated using the
/// `DefaultChildStorage` API.
///
/// # Warning
///
/// This is a low level API that is potentially dangerous as it can easily result
/// in unbalanced transactions. FRAME users, for example, should use the high level
/// storage abstractions instead.
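///
/// A minimal sketch of a balanced transaction (illustrative only; assumes the generated
/// `sp_io::storage` module is in scope and `all_checks_passed` is a placeholder condition):
///
/// ```ignore
/// sp_io::storage::start_transaction();
/// sp_io::storage::set(b"key", b"value");
/// if all_checks_passed {
///     // Keep everything written since `start_transaction`.
///     sp_io::storage::commit_transaction();
/// } else {
///     // Discard it.
///     sp_io::storage::rollback_transaction();
/// }
/// ```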
fn start_transaction(&mut self) {
self.storage_start_transaction();
}
/// Rollback the last transaction started by `start_transaction`.
///
/// Any changes made during that transaction are discarded.
///
/// # Panics
///
/// Will panic if there is no open transaction.
fn rollback_transaction(&mut self) {
self.storage_rollback_transaction()
.expect("No open transaction that can be rolled back.");
}
/// Commit the last transaction started by `start_transaction`.
///
/// Any changes made during that transaction are committed.
///
/// # Panics
///
/// Will panic if there is no open transaction.
fn commit_transaction(&mut self) {
self.storage_commit_transaction()
.expect("No open transaction that can be committed.");
}
}
/// Interface for accessing the child storage for default child trie,
/// from within the runtime.
#[runtime_interface]
pub trait DefaultChildStorage {
/// Get a default child storage value for a given key.
///
/// Parameter `storage_key` is the unprefixed location of the root of the child trie in the
/// parent trie. Result is `None` if the value for `key` in the child storage can not be found.
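///
/// A minimal sketch (illustrative only; assumes the generated `sp_io::default_child_storage`
/// module is in scope and `b"my_child"` is a placeholder child trie location):
///
/// ```ignore
/// sp_io::default_child_storage::set(b"my_child", b"key", b"value");
/// assert_eq!(
///     sp_io::default_child_storage::get(b"my_child", b"key"),
///     Some(b"value".to_vec()),
/// );
/// ```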
fn get(&self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|s| s.to_vec())
}
/// Allocation efficient variant of `get`.
///
/// Get `key` from child storage, placing the value into `value_out` and return the number
/// of bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
///
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
fn read(
&self,
storage_key: &[u8],
key: &[u8],
value_out: &mut [u8],
value_offset: u32,
) -> Option<u32> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
data.len() as u32
})
}
/// Set `key` to `value` in the child storage denoted by `storage_key`.
fn set(&mut self, storage_key: &[u8], key: &[u8], value: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.set_child_storage(&child_info, key.to_vec(), value.to_vec());
}
/// For the default child storage at `storage_key`, clear value at `key`.
fn clear(&mut self, storage_key: &[u8], key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_storage(&child_info, key);
}
/// Clear an entire child storage.
/// If it exists, the child storage for `storage_key`
/// is removed.
fn storage_kill(&mut self, storage_key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, None);
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(2)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> bool {
let child_info = ChildInfo::new_default(storage_key);
let (all_removed, _num_removed) = self.kill_child_storage(&child_info, limit);
all_removed
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(3)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
let (all_removed, num_removed) = self.kill_child_storage(&child_info, limit);
match all_removed {
true => KillStorageResult::AllRemoved(num_removed),
false => KillStorageResult::SomeRemaining(num_removed),
}
}
/// Check whether the given `key` exists in default child defined at `storage_key`.
fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool {
let child_info = ChildInfo::new_default(storage_key);
self.exists_child_storage(&child_info, key)
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
let _ = self.clear_child_prefix(&child_info, prefix, None);
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(2)]
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
limit: Option<u32>,
) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
let (all_removed, num_removed) = self.clear_child_prefix(&child_info, prefix, limit);
match all_removed {
true => KillStorageResult::AllRemoved(num_removed),
false => KillStorageResult::SomeRemaining(num_removed),
}
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
fn root(&mut self, storage_key: &[u8]) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info, StateVersion::V0)
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns a `Vec<u8>` that holds the SCALE encoded hash.
#[version(2)]
fn root(&mut self, storage_key: &[u8], version: StateVersion) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info, version)
}
/// Child storage key iteration.
///
/// Get the next key in storage after the given one in lexicographic order in child storage.
fn next_key(&mut self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.next_child_storage_key(&child_info, key)
}
}
/// Interface that provides trie related functionality.
#[runtime_interface]
pub trait Trie {
/// A trie root formed from the iterated items.
fn blake2_256_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
LayoutV0::<sp_core::Blake2Hasher>::trie_root(input)
}
/// A trie root formed from the iterated items.
#[version(2)]
fn blake2_256_root(input: Vec<(Vec<u8>, Vec<u8>)>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::Blake2Hasher>::trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::Blake2Hasher>::trie_root(input),
}
}
/// A trie root formed from the enumerated items.
fn blake2_256_ordered_root(input: Vec<Vec<u8>>) -> H256 {
LayoutV0::<sp_core::Blake2Hasher>::ordered_trie_root(input)
}
/// A trie root formed from the enumerated items.
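///
/// A minimal sketch (illustrative only; assumes this two-argument version is what the
/// generated `sp_io::trie::blake2_256_ordered_root` resolves to for current runtimes):
///
/// ```ignore
/// // Root of a trie keyed by the SCALE encoded index (0, 1, 2, ...) of each item.
/// let items: Vec<Vec<u8>> = vec![b"a".to_vec(), b"b".to_vec()];
/// let root: H256 = sp_io::trie::blake2_256_ordered_root(items, StateVersion::V1);
/// ```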
#[version(2)]
fn blake2_256_ordered_root(input: Vec<Vec<u8>>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::Blake2Hasher>::ordered_trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::Blake2Hasher>::ordered_trie_root(input),
}
}
/// A trie root formed from the iterated items.
fn keccak_256_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
LayoutV0::<sp_core::KeccakHasher>::trie_root(input)
}
/// A trie root formed from the iterated items.
#[version(2)]
fn keccak_256_root(input: Vec<(Vec<u8>, Vec<u8>)>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::KeccakHasher>::trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::KeccakHasher>::trie_root(input),
}
}
/// A trie root formed from the enumerated items.
fn keccak_256_ordered_root(input: Vec<Vec<u8>>) -> H256 {
LayoutV0::<sp_core::KeccakHasher>::ordered_trie_root(input)
}
/// A trie root formed from the enumerated items.
#[version(2)]
fn keccak_256_ordered_root(input: Vec<Vec<u8>>, version: StateVersion) -> H256 {
match version {
StateVersion::V0 => LayoutV0::<sp_core::KeccakHasher>::ordered_trie_root(input),
StateVersion::V1 => LayoutV1::<sp_core::KeccakHasher>::ordered_trie_root(input),
}
}
/// Verify trie proof
fn blake2_256_verify_proof(root: H256, proof: &[Vec<u8>], key: &[u8], value: &[u8]) -> bool {
sp_trie::verify_trie_proof::<LayoutV0<sp_core::Blake2Hasher>, _, _, _>(
&root,
proof,
&[(key, Some(value))],
)
.is_ok()
}
/// Verify trie proof
#[version(2)]
fn blake2_256_verify_proof(
root: H256,
proof: &[Vec<u8>],
key: &[u8],
value: &[u8],
version: StateVersion,
) -> bool {
match version {
StateVersion::V0 => sp_trie::verify_trie_proof::<
LayoutV0<sp_core::Blake2Hasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
StateVersion::V1 => sp_trie::verify_trie_proof::<
LayoutV1<sp_core::Blake2Hasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
}
}
/// Verify trie proof
fn keccak_256_verify_proof(root: H256, proof: &[Vec<u8>], key: &[u8], value: &[u8]) -> bool {
sp_trie::verify_trie_proof::<LayoutV0<sp_core::KeccakHasher>, _, _, _>(
&root,
proof,
&[(key, Some(value))],
)
.is_ok()
}
/// Verify trie proof
#[version(2)]
fn keccak_256_verify_proof(
root: H256,
proof: &[Vec<u8>],
key: &[u8],
value: &[u8],
version: StateVersion,
) -> bool {
match version {
StateVersion::V0 => sp_trie::verify_trie_proof::<
LayoutV0<sp_core::KeccakHasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
StateVersion::V1 => sp_trie::verify_trie_proof::<
LayoutV1<sp_core::KeccakHasher>,
_,
_,
_,
>(&root, proof, &[(key, Some(value))])
.is_ok(),
}
}
}
/// Interface that provides miscellaneous functions for communicating between the runtime and the
/// node.
#[runtime_interface]
pub trait Misc {
// NOTE: We use the target 'runtime' for messages produced by general printing functions,
// instead of LOG_TARGET.
/// Print a number.
fn print_num(val: u64) {
log::debug!(target: "runtime", "{}", val);
}
/// Print any valid `utf8` buffer.
fn print_utf8(utf8: &[u8]) {
if let Ok(data) = std::str::from_utf8(utf8) {
log::debug!(target: "runtime", "{}", data)
}
}
/// Print any `u8` slice as hex.
fn print_hex(data: &[u8]) {
log::debug!(target: "runtime", "{}", HexDisplay::from(&data));
}
/// Extract the runtime version of the given wasm blob by calling `Core_version`.
///
/// Returns `None` if calling the function failed for any reason or `Some(Vec<u8>)` where
/// the `Vec<u8>` holds the SCALE encoded runtime version.
///
/// # Performance
///
/// This function may be very expensive to call depending on the wasm binary. It may be
/// relatively cheap if the wasm binary contains version information. In that case,
/// uncompression of the wasm blob is the dominating factor.
///
/// If the wasm binary does not have the version information attached, then a legacy mechanism
/// may be involved. This means that a runtime call will be performed to query the version.
///
/// Calling into the runtime may be incredibly expensive and should be approached with care.
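///
/// A minimal host-side sketch (illustrative only; assumes `sp_version::RuntimeVersion`,
/// `codec::Decode` and a `wasm_blob` byte slice are available):
///
/// ```ignore
/// if let Some(encoded) = sp_io::misc::runtime_version(&wasm_blob) {
///     // The returned bytes are the SCALE encoded `RuntimeVersion` of the given runtime.
///     let version = sp_version::RuntimeVersion::decode(&mut &encoded[..]).ok();
/// }
/// ```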
fn runtime_version(&mut self, wasm: &[u8]) -> Option<Vec<u8>> {
use sp_core::traits::ReadRuntimeVersionExt;
let mut ext = sp_state_machine::BasicExternalities::default();
match self
.extension::<ReadRuntimeVersionExt>()
.expect("No `ReadRuntimeVersionExt` associated for the current context!")
.read_runtime_version(wasm, &mut ext)
{
Ok(v) => Some(v),
Err(err) => {
log::debug!(
target: LOG_TARGET,
"cannot read version from the given runtime: {}",
err,
);
None
},
}
}
}
#[cfg(feature = "std")]
sp_externalities::decl_extension! {
/// Batch verification extension to register/retrieve from the externalities.
pub struct VerificationExt(BatchVerifier);
}
/// Interfaces for working with crypto related types from within the runtime.
#[runtime_interface]
pub trait Crypto {
/// Returns all `ed25519` public keys for the given key id from the keystore.
fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec<ed25519::Public> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ed25519_public_keys(keystore, id)
}
/// Generate an `ed25519` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> ed25519::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!"));
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ed25519_generate_new(keystore, id, seed)
.expect("`ed25519_generate` failed")
}
/// Sign the given `msg` with the `ed25519` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
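///
/// A minimal sign/verify round trip (illustrative only; assumes a keystore (`KeystoreExt`)
/// is registered for the calling context and `KEY_TYPE` is a placeholder `KeyTypeId`):
///
/// ```ignore
/// let pub_key = sp_io::crypto::ed25519_generate(KEY_TYPE, None);
/// if let Some(sig) = sp_io::crypto::ed25519_sign(KEY_TYPE, &pub_key, b"message") {
///     assert!(sp_io::crypto::ed25519_verify(&sig, b"message", &pub_key));
/// }
/// ```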
fn ed25519_sign(
&mut self,
id: KeyTypeId,
pub_key: &ed25519::Public,
msg: &[u8],
) -> Option<ed25519::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg)
.ok()
.flatten()
.and_then(|sig| ed25519::Signature::from_slice(&sig))
}
/// Verify `ed25519` signature.
///
/// Returns `true` when the verification was successful.
fn ed25519_verify(sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public) -> bool {
ed25519::Pair::verify(sig, msg, pub_key)
}
/// Register a `ed25519` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
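///
/// A minimal sketch of the batch verification flow (illustrative only; `sigs` is a
/// placeholder collection of `(signature, message, public key)` tuples):
///
/// ```ignore
/// sp_io::crypto::start_batch_verify();
/// for (sig, msg, pub_key) in &sigs {
///     // Each signature is either verified immediately or pushed to the batch.
///     sp_io::crypto::ed25519_batch_verify(sig, msg, pub_key);
/// }
/// // `true` iff every signature in the batch verified successfully.
/// let all_valid = sp_io::crypto::finish_batch_verify();
/// ```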
fn ed25519_batch_verify(
&mut self,
sig: &ed25519::Signature,
msg: &[u8],
pub_key: &ed25519::Public,
) -> bool {
self.extension::<VerificationExt>()
.map(|extension| extension.push_ed25519(sig.clone(), pub_key.clone(), msg.to_vec()))
.unwrap_or_else(|| ed25519_verify(sig, msg, pub_key))
}
/// Verify `sr25519` signature.
///
/// Returns `true` when the verification was successful.
#[version(2)]
fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public) -> bool {
sr25519::Pair::verify(sig, msg, pub_key)
}
/// Register a `sr25519` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
fn sr25519_batch_verify(
&mut self,
sig: &sr25519::Signature,
msg: &[u8],
pub_key: &sr25519::Public,
) -> bool {
self.extension::<VerificationExt>()
.map(|extension| extension.push_sr25519(sig.clone(), pub_key.clone(), msg.to_vec()))
.unwrap_or_else(|| sr25519_verify(sig, msg, pub_key))
}
/// Start verification extension.
fn start_batch_verify(&mut self) {
let scheduler = self
.extension::<TaskExecutorExt>()
.expect("No task executor associated with the current context!")
.clone();
self.register_extension(VerificationExt(BatchVerifier::new(scheduler)))
.expect("Failed to register required extension: `VerificationExt`");
}
/// Finish batch-verification of signatures.
///
/// Verify or wait for verification to finish for all signatures which were previously
/// deferred by `sr25519_batch_verify`/`ed25519_batch_verify`/`ecdsa_batch_verify`.
///
/// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called).
fn finish_batch_verify(&mut self) -> bool {
let result = self
.extension::<VerificationExt>()
.expect("`finish_batch_verify` should only be called after `start_batch_verify`")
.verify_and_clear();
self.deregister_extension::<VerificationExt>()
.expect("No verification extension in current context!");
result
}
/// Returns all `sr25519` public keys for the given key id from the keystore.
fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec<sr25519::Public> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sr25519_public_keys(keystore, id)
}
/// Generate an `sr25519` key for the given key type using an optional seed and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> sr25519::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!"));
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sr25519_generate_new(keystore, id, seed)
.expect("`sr25519_generate` failed")
}
/// Sign the given `msg` with the `sr25519` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
fn sr25519_sign(
&mut self,
id: KeyTypeId,
pub_key: &sr25519::Public,
msg: &[u8],
) -> Option<sr25519::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg)
.ok()
.flatten()
.and_then(|sig| sr25519::Signature::from_slice(&sig))
}
/// Verify an `sr25519` signature.
///
/// Returns `true` when the verification is successful regardless of
/// signature version.
fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool {
sr25519::Pair::verify_deprecated(sig, msg, pubkey)
}
/// Returns all `ecdsa` public keys for the given key id from the keystore.
fn ecdsa_public_keys(&mut self, id: KeyTypeId) -> Vec<ecdsa::Public> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ecdsa_public_keys(keystore, id)
}
/// Generate an `ecdsa` key for the given key type using an optional `seed` and
/// store it in the keystore.
///
/// The `seed` needs to be a valid utf8.
///
/// Returns the public key.
fn ecdsa_generate(&mut self, id: KeyTypeId, seed: Option<Vec<u8>>) -> ecdsa::Public {
let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!"));
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ecdsa_generate_new(keystore, id, seed).expect("`ecdsa_generate` failed")
}
/// Sign the given `msg` with the `ecdsa` key that corresponds to the given public key and
/// key type in the keystore.
///
/// Returns the signature.
fn ecdsa_sign(
&mut self,
id: KeyTypeId,
pub_key: &ecdsa::Public,
msg: &[u8],
) -> Option<ecdsa::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg)
.ok()
.flatten()
.and_then(|sig| ecdsa::Signature::from_slice(&sig))
}
/// Sign the given pre-hashed `msg` with the `ecdsa` key that corresponds to the given public
/// key and key type in the keystore.
///
/// Returns the signature.
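///
/// A minimal sketch (illustrative only; assumes a keystore is registered for the calling
/// context, `KEY_TYPE` is a placeholder `KeyTypeId` and `pub_key` an `ecdsa::Public` from it):
///
/// ```ignore
/// let hash = sp_io::hashing::keccak_256(b"payload");
/// if let Some(sig) = sp_io::crypto::ecdsa_sign_prehashed(KEY_TYPE, &pub_key, &hash) {
///     assert!(sp_io::crypto::ecdsa_verify_prehashed(&sig, &hash, &pub_key));
/// }
/// ```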
fn ecdsa_sign_prehashed(
&mut self,
id: KeyTypeId,
pub_key: &ecdsa::Public,
msg: &[u8; 32],
) -> Option<ecdsa::Signature> {
let keystore = &***self
.extension::<KeystoreExt>()
.expect("No `keystore` associated for the current context!");
SyncCryptoStore::ecdsa_sign_prehashed(keystore, id, pub_key, msg).ok().flatten()
}
/// Verify `ecdsa` signature.
///
/// Returns `true` when the verification was successful.
/// This version is able to handle non-standard, overflowing signatures.
fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool {
ecdsa::Pair::verify_deprecated(sig, msg, pub_key)
}
/// Verify `ecdsa` signature.
///
/// Returns `true` when the verification was successful.
#[version(2)]
fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool {
ecdsa::Pair::verify(sig, msg, pub_key)
}
/// Verify `ecdsa` signature with pre-hashed `msg`.
///
/// Returns `true` when the verification was successful.
fn ecdsa_verify_prehashed(
sig: &ecdsa::Signature,
msg: &[u8; 32],
pub_key: &ecdsa::Public,
) -> bool {
ecdsa::Pair::verify_prehashed(sig, msg, pub_key)
}
/// Register a `ecdsa` signature for batch verification.
///
/// Batch verification must be enabled by calling [`start_batch_verify`].
/// If batch verification is not enabled, the signature will be verified immediately.
/// To get the result of the batch verification, [`finish_batch_verify`]
/// needs to be called.
///
/// Returns `true` when the verification is either successful or batched.
fn ecdsa_batch_verify(
&mut self,
sig: &ecdsa::Signature,
msg: &[u8],
pub_key: &ecdsa::Public,
) -> bool {
self.extension::<VerificationExt>()
.map(|extension| extension.push_ecdsa(sig.clone(), pub_key.clone(), msg.to_vec()))
.unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key))
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey
/// (doesn't include the 0x04 prefix).
/// This version is able to handle non-standard, overflowing signatures.
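///
/// A minimal sketch (illustrative only; `sig_rsv` is a placeholder 65 byte RSV signature and
/// `message` the signed payload):
///
/// ```ignore
/// let msg_hash = sp_io::hashing::blake2_256(&message);
/// match sp_io::crypto::secp256k1_ecdsa_recover(&sig_rsv, &msg_hash) {
///     Ok(pubkey) => { /* 64 byte uncompressed public key, without the `0x04` prefix */ },
///     Err(EcdsaVerifyError::BadV) => { /* invalid recovery id */ },
///     Err(_) => { /* bad R/S or invalid signature */ },
/// }
/// ```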
fn secp256k1_ecdsa_recover(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 64], EcdsaVerifyError> {
let rid = libsecp256k1::RecoveryId::parse(
if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8,
)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = libsecp256k1::Signature::parse_overflowing_slice(&sig[..64])
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = libsecp256k1::Message::parse(msg);
let pubkey =
libsecp256k1::recover(&msg, &sig, &rid).map_err(|_| EcdsaVerifyError::BadSignature)?;
let mut res = [0u8; 64];
res.copy_from_slice(&pubkey.serialize()[1..65]);
Ok(res)
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey
/// (doesn't include the 0x04 prefix).
#[version(2)]
fn secp256k1_ecdsa_recover(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 64], EcdsaVerifyError> {
let rid = RecoveryId::from_i32(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as i32)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = RecoverableSignature::from_compact(&sig[..64], rid)
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed");
let pubkey = SECP256K1
.recover_ecdsa(&msg, &sig)
.map_err(|_| EcdsaVerifyError::BadSignature)?;
let mut res = [0u8; 64];
res.copy_from_slice(&pubkey.serialize_uncompressed()[1..]);
Ok(res)
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey.
fn secp256k1_ecdsa_recover_compressed(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 33], EcdsaVerifyError> {
let rid = libsecp256k1::RecoveryId::parse(
if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8,
)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = libsecp256k1::Signature::parse_overflowing_slice(&sig[0..64])
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = libsecp256k1::Message::parse(msg);
let pubkey =
libsecp256k1::recover(&msg, &sig, &rid).map_err(|_| EcdsaVerifyError::BadSignature)?;
Ok(pubkey.serialize_compressed())
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey.
#[version(2)]
fn secp256k1_ecdsa_recover_compressed(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 33], EcdsaVerifyError> {
let rid = RecoveryId::from_i32(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as i32)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = RecoverableSignature::from_compact(&sig[..64], rid)
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed");
let pubkey = SECP256K1
.recover_ecdsa(&msg, &sig)
.map_err(|_| EcdsaVerifyError::BadSignature)?;
Ok(pubkey.serialize())
}
}
/// Interface that provides functions for hashing with different algorithms.
#[runtime_interface]
pub trait Hashing {
/// Conduct a 256-bit Keccak hash.
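///
/// A minimal sketch (illustrative only):
///
/// ```ignore
/// let digest: [u8; 32] = sp_io::hashing::keccak_256(b"hello");
/// ```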
fn keccak_256(data: &[u8]) -> [u8; 32] {
sp_core::hashing::keccak_256(data)
}
/// Conduct a 512-bit Keccak hash.
fn keccak_512(data: &[u8]) -> [u8; 64] {