let pubkey =
libsecp256k1::recover(&msg, &sig, &rid).map_err(|_| EcdsaVerifyError::BadSignature)?;
let mut res = [0u8; 64];
res.copy_from_slice(&pubkey.serialize()[1..65]);
Ok(res)
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey
/// (doesn't include the 0x04 prefix).
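/// # Example
///
/// A minimal sketch of recovering a signer's key from an RSV signature (illustrative only;
/// assumes a 65-byte `sig` is already in scope):
///
/// ```ignore
/// // `sig` layout: r (32 bytes) || s (32 bytes) || v (1 byte), with v being 0/1 or 27/28.
/// let msg = sp_io::hashing::blake2_256(b"payload the signer committed to");
/// let pubkey = sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg)
///     .expect("`sig` was produced over `msg`; recovery succeeds");
/// // `pubkey` is the 64-byte uncompressed key without the 0x04 prefix.
/// ```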
#[version(2)]
fn secp256k1_ecdsa_recover(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 64], EcdsaVerifyError> {
let rid = RecoveryId::from_i32(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as i32)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = RecoverableSignature::from_compact(&sig[..64], rid)
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed");
let pubkey = SECP256K1
.recover_ecdsa(&msg, &sig)
.map_err(|_| EcdsaVerifyError::BadSignature)?;
let mut res = [0u8; 64];
res.copy_from_slice(&pubkey.serialize_uncompressed()[1..]);
Ok(res)
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey.
fn secp256k1_ecdsa_recover_compressed(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 33], EcdsaVerifyError> {
let rid = libsecp256k1::RecoveryId::parse(
if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8,
)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = libsecp256k1::Signature::parse_overflowing_slice(&sig[0..64])
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = libsecp256k1::Message::parse(msg);
let pubkey =
libsecp256k1::recover(&msg, &sig, &rid).map_err(|_| EcdsaVerifyError::BadSignature)?;
Ok(pubkey.serialize_compressed())
}
/// Verify and recover a SECP256k1 ECDSA signature.
///
/// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`.
/// - `msg` is the blake2-256 hash of the message.
///
/// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey.
#[version(2)]
fn secp256k1_ecdsa_recover_compressed(
sig: &[u8; 65],
msg: &[u8; 32],
) -> Result<[u8; 33], EcdsaVerifyError> {
let rid = RecoveryId::from_i32(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as i32)
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = RecoverableSignature::from_compact(&sig[..64], rid)
.map_err(|_| EcdsaVerifyError::BadRS)?;
let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed");
let pubkey = SECP256K1
.recover_ecdsa(&msg, &sig)
.map_err(|_| EcdsaVerifyError::BadSignature)?;
Ok(pubkey.serialize_compressed())
}
}
/// Interface that provides functions for hashing with different algorithms.
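/// # Example
///
/// A minimal, illustrative sketch of calling the generated host functions:
///
/// ```ignore
/// let hash: [u8; 32] = sp_io::hashing::blake2_256(b"hello");
/// let short: [u8; 16] = sp_io::hashing::twox_128(b"hello");
/// ```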
#[runtime_interface]
pub trait Hashing {
/// Conduct a 256-bit Keccak hash.
fn keccak_256(data: &[u8]) -> [u8; 32] {
sp_core::hashing::keccak_256(data)
}
/// Conduct a 512-bit Keccak hash.
fn keccak_512(data: &[u8]) -> [u8; 64] {
sp_core::hashing::keccak_512(data)
}
/// Conduct a 256-bit Sha2 hash.
fn sha2_256(data: &[u8]) -> [u8; 32] {
sp_core::hashing::sha2_256(data)
}
/// Conduct a 128-bit Blake2 hash.
fn blake2_128(data: &[u8]) -> [u8; 16] {
sp_core::hashing::blake2_128(data)
}
/// Conduct a 256-bit Blake2 hash.
fn blake2_256(data: &[u8]) -> [u8; 32] {
sp_core::hashing::blake2_256(data)
}
/// Conduct four XX hashes to give a 256-bit result.
fn twox_256(data: &[u8]) -> [u8; 32] {
sp_core::hashing::twox_256(data)
}
/// Conduct two XX hashes to give a 128-bit result.
fn twox_128(data: &[u8]) -> [u8; 16] {
sp_core::hashing::twox_128(data)
}
/// Conduct two XX hashes to give a 64-bit result.
fn twox_64(data: &[u8]) -> [u8; 8] {
sp_core::hashing::twox_64(data)
}
}
/// Interface that provides transaction indexing API.
#[runtime_interface]
pub trait TransactionIndex {
/// Add transaction index. Returns indexed content hash.
fn index(&mut self, extrinsic: u32, size: u32, context_hash: [u8; 32]) {
self.storage_index_transaction(extrinsic, &context_hash, size);
}
/// Renew existing transaction storage.
fn renew(&mut self, extrinsic: u32, context_hash: [u8; 32]) {
self.storage_renew_transaction_index(extrinsic, &context_hash);
}
}
/// Interface that provides functions to access the Offchain DB.
#[runtime_interface]
pub trait OffchainIndex {
/// Write a key value pair to the Offchain DB database in a buffered fashion.
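/// A minimal sketch (assumes `payload` implements `Encode`):
///
/// ```ignore
/// sp_io::offchain_index::set(b"my-key", &payload.encode());
/// ```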
fn set(&mut self, key: &[u8], value: &[u8]) {
self.set_offchain_storage(key, Some(value));
}
/// Remove a key and its associated value from the Offchain DB.
fn clear(&mut self, key: &[u8]) {
self.set_offchain_storage(key, None);
}
}
#[cfg(feature = "std")]
sp_externalities::decl_extension! {
/// Batch verification extension to register/retrieve from the externalities.
pub struct VerificationExt(BatchVerifier);
}
/// Interface that provides functions to access the offchain functionality.
///
/// These functions are being made available to the runtime and are called by the runtime.
#[runtime_interface]
pub trait Offchain {
/// Returns if the local node is a potential validator.
///
/// Even if this function returns `true`, it does not mean that any keys are configured
/// and that the validator is registered in the chain.
fn is_validator(&mut self) -> bool {
self.extension::<OffchainWorkerExt>()
.expect("is_validator can be called only in the offchain worker context")
.is_validator()
}
/// Submit an encoded transaction to the pool.
///
/// The transaction will end up in the pool.
fn submit_transaction(&mut self, data: Vec<u8>) -> Result<(), ()> {
self.extension::<TransactionPoolExt>()
.expect(
"submit_transaction can be called only in the offchain call context with
TransactionPool capabilities enabled",
)
.submit_transaction(data)
}
/// Returns information about the local node's network state.
fn network_state(&mut self) -> Result<OpaqueNetworkState, ()> {
self.extension::<OffchainWorkerExt>()
.expect("network_state can be called only in the offchain worker context")
.network_state()
}
/// Returns current UNIX timestamp (in millis)
fn timestamp(&mut self) -> Timestamp {
self.extension::<OffchainWorkerExt>()
.expect("timestamp can be called only in the offchain worker context")
.timestamp()
}
/// Pause the execution until `deadline` is reached.
fn sleep_until(&mut self, deadline: Timestamp) {
self.extension::<OffchainWorkerExt>()
.expect("sleep_until can be called only in the offchain worker context")
.sleep_until(deadline)
}
/// Returns a random seed.
///
/// This is a truly random, non-deterministic seed generated by host environment.
/// Obviously fine in the off-chain worker context.
fn random_seed(&mut self) -> [u8; 32] {
self.extension::<OffchainWorkerExt>()
.expect("random_seed can be called only in the offchain worker context")
.random_seed()
}
/// Sets a value in the local storage.
///
/// Note this storage is not part of the consensus, it's only accessible by
/// offchain worker tasks running on the same machine. It IS persisted between runs.
fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) {
self.extension::<OffchainDbExt>()
.expect(
"local_storage_set can be called only in the offchain call context with
OffchainDb extension",
)
.local_storage_set(kind, key, value)
}
/// Remove a value from the local storage.
///
/// Note this storage is not part of the consensus, it's only accessible by
/// offchain worker tasks running on the same machine. It IS persisted between runs.
fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) {
self.extension::<OffchainDbExt>()
.expect(
"local_storage_clear can be called only in the offchain call context with
OffchainDb extension",
)
.local_storage_clear(kind, key)
}
/// Sets a value in the local storage if it matches current value.
///
/// Since multiple offchain workers may be running concurrently, to prevent
/// data races use CAS to coordinate between them.
///
/// Returns `true` if the value has been set, `false` otherwise.
///
/// Note this storage is not part of the consensus, it's only accessible by
/// offchain worker tasks running on the same machine. It IS persisted between runs.
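/// A rough sketch of a CAS retry loop (the `bump` helper is hypothetical):
///
/// ```ignore
/// loop {
///     let old = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, b"counter");
///     let new = bump(old.as_deref());
///     if sp_io::offchain::local_storage_compare_and_set(
///         StorageKind::PERSISTENT,
///         b"counter",
///         old,
///         &new,
///     ) {
///         break
///     }
///     // another worker changed the value in the meantime; re-read and retry
/// }
/// ```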
fn local_storage_compare_and_set(
&mut self,
kind: StorageKind,
key: &[u8],
old_value: Option<Vec<u8>>,
new_value: &[u8],
) -> bool {
self.extension::<OffchainDbExt>()
.expect(
"local_storage_compare_and_set can be called only in the offchain call context
with OffchainDb extension",
)
.local_storage_compare_and_set(kind, key, old_value.as_deref(), new_value)
}
/// Gets a value from the local storage.
///
/// If the value does not exist in the storage `None` will be returned.
/// Note this storage is not part of the consensus, it's only accessible by
/// offchain worker tasks running on the same machine. It IS persisted between runs.
fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option<Vec<u8>> {
self.extension::<OffchainDbExt>()
.expect(
"local_storage_get can be called only in the offchain call context with
OffchainDb extension",
)
.local_storage_get(kind, key)
}
/// Initiates a http request given HTTP verb and the URL.
///
/// Meta is a future-reserved field containing additional, parity-scale-codec encoded
/// parameters. Returns the id of newly started request.
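/// An illustrative sketch of a full request cycle (error handling elided; `deadline` is assumed
/// to be a `Timestamp` already in scope):
///
/// ```ignore
/// let id = sp_io::offchain::http_request_start("GET", "https://example.com", &[])?;
/// sp_io::offchain::http_request_add_header(id, "Accept", "application/json")?;
/// sp_io::offchain::http_request_write_body(id, &[], None)?; // empty chunk finalizes the request
/// let _status = sp_io::offchain::http_response_wait(&[id], Some(deadline));
/// let mut body = vec![0u8; 1024];
/// let n = sp_io::offchain::http_response_read_body(id, &mut body, Some(deadline))?;
/// ```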
fn http_request_start(
&mut self,
method: &str,
uri: &str,
meta: &[u8],
) -> Result<HttpRequestId, ()> {
self.extension::<OffchainWorkerExt>()
.expect("http_request_start can be called only in the offchain worker context")
.http_request_start(method, uri, meta)
}
/// Append header to the request.
fn http_request_add_header(
&mut self,
request_id: HttpRequestId,
name: &str,
value: &str,
) -> Result<(), ()> {
self.extension::<OffchainWorkerExt>()
.expect("http_request_add_header can be called only in the offchain worker context")
.http_request_add_header(request_id, name, value)
}
/// Write a chunk of request body.
///
/// Writing an empty chunk finalizes the request.
/// Passing `None` as deadline blocks forever.
///
/// Returns an error in case deadline is reached or the chunk couldn't be written.
fn http_request_write_body(
&mut self,
request_id: HttpRequestId,
chunk: &[u8],
deadline: Option<Timestamp>,
) -> Result<(), HttpError> {
self.extension::<OffchainWorkerExt>()
.expect("http_request_write_body can be called only in the offchain worker context")
.http_request_write_body(request_id, chunk, deadline)
}
/// Block and wait for the responses for given requests.
///
/// Returns a vector of request statuses (the len is the same as ids).
/// Note that if deadline is not provided the method will block indefinitely,
/// otherwise unready responses will produce `DeadlineReached` status.
///
/// Passing `None` as deadline blocks forever.
fn http_response_wait(
&mut self,
ids: &[HttpRequestId],
deadline: Option<Timestamp>,
) -> Vec<HttpRequestStatus> {
self.extension::<OffchainWorkerExt>()
.expect("http_response_wait can be called only in the offchain worker context")
.http_response_wait(ids, deadline)
}
/// Read all response headers.
///
/// Returns a vector of pairs `(HeaderKey, HeaderValue)`.
/// NOTE response headers have to be read before response body.
fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec<u8>, Vec<u8>)> {
self.extension::<OffchainWorkerExt>()
.expect("http_response_headers can be called only in the offchain worker context")
.http_response_headers(request_id)
}
/// Read a chunk of body response to given buffer.
///
/// Returns the number of bytes written or an error in case a deadline
/// is reached or server closed the connection.
/// If `0` is returned it means that the response has been fully consumed
/// and the `request_id` is now invalid.
/// NOTE this implies that response headers must be read before draining the body.
/// Passing `None` as a deadline blocks forever.
fn http_response_read_body(
&mut self,
request_id: HttpRequestId,
buffer: &mut [u8],
deadline: Option<Timestamp>,
) -> Result<u32, HttpError> {
self.extension::<OffchainWorkerExt>()
.expect("http_response_read_body can be called only in the offchain worker context")
.http_response_read_body(request_id, buffer, deadline)
.map(|r| r as u32)
}
/// Set the authorized nodes and authorized_only flag.
fn set_authorized_nodes(&mut self, nodes: Vec<OpaquePeerId>, authorized_only: bool) {
self.extension::<OffchainWorkerExt>()
.expect("set_authorized_nodes can be called only in the offchain worker context")
.set_authorized_nodes(nodes, authorized_only)
}
}
/// Wasm only interface that provides functions for calling into the allocator.
#[runtime_interface(wasm_only)]
pub trait Allocator {
/// Malloc the given number of bytes and return the pointer to the allocated memory location.
fn malloc(&mut self, size: u32) -> Pointer<u8> {
self.allocate_memory(size).expect("Failed to allocate memory")
}
/// Free the given pointer.
fn free(&mut self, ptr: Pointer<u8>) {
self.deallocate_memory(ptr).expect("Failed to deallocate memory")
}
}
/// WASM-only interface which allows for aborting the execution in case
/// of an unrecoverable error.
#[runtime_interface(wasm_only)]
pub trait PanicHandler {
/// Aborts the current execution with the given error message.
#[trap_on_return]
fn abort_on_panic(&mut self, message: &str) {
self.register_panic_error_message(message);
}
}
/// Interface that provides functions for logging from within the runtime.
#[runtime_interface]
pub trait Logging {
/// Request to print a log message on the host.
///
/// Note that this will only be displayed if the host is configured to display log messages
/// with the given level and target.
///
/// Instead of using directly, prefer setting up `RuntimeLogger` and using `log` macros.
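/// A minimal sketch of both routes (illustrative only):
///
/// ```ignore
/// // Direct host call:
/// sp_io::logging::log(LogLevel::Info, "runtime", b"block imported");
/// // Preferred, once a `RuntimeLogger` is initialised:
/// log::info!(target: "runtime", "block imported");
/// ```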
fn log(level: LogLevel, target: &str, message: &[u8]) {
if let Ok(message) = std::str::from_utf8(message) {
log::log!(target: target, log::Level::from(level), "{}", message)
}
}
/// Returns the max log level used by the host.
fn max_level() -> LogLevelFilter {
log::max_level().into()
}
}
/// Crossing is a helper wrapping any Encode-Decodeable type
/// for transferring over the wasm barrier.
#[derive(Encode, Decode)]
pub struct Crossing<T: Encode + Decode>(T);
impl<T: Encode + Decode> PassBy for Crossing<T> {
type PassBy = sp_runtime_interface::pass_by::Codec<Self>;
}
impl<T: Encode + Decode> Crossing<T> {
/// Convert into the inner type
pub fn into_inner(self) -> T {
self.0
}
}
// useful for testing
impl<T> core::default::Default for Crossing<T>
where
T: core::default::Default + Encode + Decode,
{
fn default() -> Self {
Self(Default::default())
}
}
/// Interface to provide tracing facilities for wasm. Modelled after Tokio's `tracing` crate
/// interfaces. See `sp-tracing` for more information.
#[runtime_interface(wasm_only, no_tracing)]
pub trait WasmTracing {
/// Whether the span described in `WasmMetadata` should be traced wasm-side.
/// On the host this converts into a static `Metadata` and checks against the global `tracing`
/// dispatcher.
///
/// When returning false the calling code should skip any tracing-related execution. In general
/// within the same block execution this is not expected to change and it doesn't have to be
/// checked more than once per metadata. This exists for optimisation purposes but is still not
/// cheap, as it crosses the wasm-native barrier every time it is called. So an implementation
/// might choose to cache the result for the execution of the entire block.
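/// A rough sketch of such caching (the statics and helper below are hypothetical, not part of
/// this interface):
///
/// ```ignore
/// static TRACING_CHECKED: AtomicBool = AtomicBool::new(false);
/// static TRACING_ENABLED: AtomicBool = AtomicBool::new(false);
///
/// fn tracing_enabled_cached(metadata: Crossing<sp_tracing::WasmMetadata>) -> bool {
///     if !TRACING_CHECKED.swap(true, Ordering::Relaxed) {
///         TRACING_ENABLED.store(wasm_tracing::enabled(metadata), Ordering::Relaxed);
///     }
///     TRACING_ENABLED.load(Ordering::Relaxed)
/// }
/// ```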
fn enabled(&mut self, metadata: Crossing<sp_tracing::WasmMetadata>) -> bool {
let metadata: &tracing_core::metadata::Metadata<'static> = (&metadata.into_inner()).into();
tracing::dispatcher::get_default(|d| d.enabled(metadata))
}
/// Open a new span with the given attributes. Return the u64 Id of the span.
///
/// On the native side this goes through the default `tracing` dispatcher to register the span
/// and then calls `clone_span` with the ID to signal that we are keeping it around on the wasm-
/// side even after the local span is dropped. The resulting ID is then handed over to the wasm-
/// side.
fn enter_span(&mut self, span: Crossing<sp_tracing::WasmEntryAttributes>) -> u64 {
let span: tracing::Span = span.into_inner().into();
match span.id() {
Some(id) => tracing::dispatcher::get_default(|d| {
// inform dispatch that we'll keep the ID around
// then enter it immediately
let final_id = d.clone_span(&id);
d.enter(&final_id);
final_id.into_u64()
}),
_ => 0,
}
}
/// Emit the given event to the global tracer on the native side
fn event(&mut self, event: Crossing<sp_tracing::WasmEntryAttributes>) {
event.into_inner().emit();
}
/// Signal that a given span-id has been exited. On native, this directly
/// proxies the span to the global dispatcher.
fn exit(&mut self, span: u64) {
tracing::dispatcher::get_default(|d| {
let id = tracing_core::span::Id::from_u64(span);
d.exit(&id);
});
}
}
#[cfg(all(not(feature = "std"), feature = "with-tracing"))]
mod tracing_setup {
use super::{wasm_tracing, Crossing};
use core::sync::atomic::{AtomicBool, Ordering};
use tracing_core::{
dispatcher::{set_global_default, Dispatch},
span::{Attributes, Id, Record},
Event, Metadata,
};
static TRACING_SET: AtomicBool = AtomicBool::new(false);
/// The PassingTracingSubscriber implements `tracing_core::Subscriber`
/// and pushes the information across the runtime interface to the host
struct PassingTracingSubsciber;
impl tracing_core::Subscriber for PassingTracingSubsciber {
fn enabled(&self, metadata: &Metadata<'_>) -> bool {
wasm_tracing::enabled(Crossing(metadata.into()))
}
fn new_span(&self, attrs: &Attributes<'_>) -> Id {
Id::from_u64(wasm_tracing::enter_span(Crossing(attrs.into())))
}
fn enter(&self, _: &Id) {
// Do nothing, we already entered the span previously
}
/// Not implemented! We do not support recording values later
/// Will panic when used.
fn record(&self, _: &Id, _: &Record<'_>) {
unimplemented! {} // this usage is not supported
}
/// Not implemented! We do not support recording values later
/// Will panic when used.
fn record_follows_from(&self, _: &Id, _: &Id) {
unimplemented! {} // this usage is not supported
}
fn event(&self, event: &Event<'_>) {
wasm_tracing::event(Crossing(event.into()))
}
fn exit(&self, span: &Id) {
wasm_tracing::exit(span.into_u64())
}
}
/// Initialize tracing of sp_tracing on wasm with `with-tracing` enabled.
/// Can be called multiple times from within the same process and will only
/// set the global bridging subscriber once.
pub fn init_tracing() {
if !TRACING_SET.load(Ordering::Relaxed) {
set_global_default(Dispatch::new(PassingTracingSubsciber {}))
.expect("We only ever call this once");
TRACING_SET.store(true, Ordering::Relaxed);
}
}
}
#[cfg(not(all(not(feature = "std"), feature = "with-tracing")))]
mod tracing_setup {
/// Initializing tracing of sp_tracing is not necessary here – this is a no-op. It allows
/// building without `std` when the `with-tracing` feature is not enabled.
pub fn init_tracing() {}
}
pub use tracing_setup::init_tracing;
/// Wasm-only interface that provides functions for interacting with the sandbox.
#[runtime_interface(wasm_only)]
pub trait Sandbox {
/// Instantiate a new sandbox instance with the given `wasm_code`.
fn instantiate(
&mut self,
dispatch_thunk: u32,
wasm_code: &[u8],
env_def: &[u8],
state_ptr: Pointer<u8>,
) -> u32 {
self.sandbox()
.instance_new(dispatch_thunk, wasm_code, env_def, state_ptr.into())
.expect("Failed to instantiate a new sandbox")
}
/// Invoke `function` in the sandbox with `sandbox_idx`.
fn invoke(
&mut self,
instance_idx: u32,
function: &str,
args: &[u8],
return_val_ptr: Pointer<u8>,
return_val_len: u32,
state_ptr: Pointer<u8>,
) -> u32 {
self.sandbox()
.invoke(instance_idx, function, args, return_val_ptr, return_val_len, state_ptr.into())
.expect("Failed to invoke function with sandbox")
}
/// Create a new memory instance with the given `initial` and `maximum` size.
fn memory_new(&mut self, initial: u32, maximum: u32) -> u32 {
self.sandbox()
.memory_new(initial, maximum)
.expect("Failed to create new memory with sandbox")
}
/// Get the memory starting at `offset` from the instance with `memory_idx` into the buffer.
fn memory_get(
&mut self,
memory_idx: u32,
offset: u32,
buf_ptr: Pointer<u8>,
buf_len: u32,
) -> u32 {
self.sandbox()
.memory_get(memory_idx, offset, buf_ptr, buf_len)
.expect("Failed to get memory with sandbox")
}
/// Set the memory in the given `memory_idx` to the given value at `offset`.
fn memory_set(
&mut self,
memory_idx: u32,
offset: u32,
val_ptr: Pointer<u8>,
val_len: u32,
) -> u32 {
self.sandbox()
.memory_set(memory_idx, offset, val_ptr, val_len)
.expect("Failed to set memory with sandbox")
}
/// Teardown the memory instance with the given `memory_idx`.
fn memory_teardown(&mut self, memory_idx: u32) {
self.sandbox()
.memory_teardown(memory_idx)
.expect("Failed to teardown memory with sandbox")
}
/// Teardown the sandbox instance with the given `instance_idx`.
fn instance_teardown(&mut self, instance_idx: u32) {
self.sandbox()
.instance_teardown(instance_idx)
.expect("Failed to teardown sandbox instance")
/// Get the value from a global with the given `name`. The sandbox is determined by the given
/// `instance_idx`.
///
/// Returns `Some(_)` when the requested global variable could be found.
fn get_global_val(
&mut self,
instance_idx: u32,
name: &str,
) -> Option<sp_wasm_interface::Value> {
self.sandbox()
.get_global_val(instance_idx, name)
.expect("Failed to get global from sandbox")
}
}
/// Wasm host functions for managing tasks.
///
/// This should not be used directly. Use `sp_tasks` for running parallel tasks instead.
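/// An illustrative sketch of the higher-level API (see the `sp_tasks` crate for the exact
/// signatures):
///
/// ```ignore
/// fn double(payload: Vec<u8>) -> Vec<u8> {
///     payload.iter().map(|b| b.wrapping_mul(2)).collect()
/// }
///
/// let handle = sp_tasks::spawn(double, b"data".to_vec());
/// let result = handle.join();
/// ```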
#[runtime_interface(wasm_only)]
pub trait RuntimeTasks {
/// Wasm host function for spawning task.
///
/// This should not be used directly. Use `sp_tasks::spawn` instead.
fn spawn(dispatcher_ref: u32, entry: u32, payload: Vec<u8>) -> u64 {
sp_externalities::with_externalities(|mut ext| {
let runtime_spawn = ext
.extension::<RuntimeSpawnExt>()
.expect("Cannot spawn without dynamic runtime dispatcher (RuntimeSpawnExt)");
runtime_spawn.spawn_call(dispatcher_ref, entry, payload)
})
.expect("`RuntimeTasks::spawn`: called outside of externalities context")
}
/// Wasm host function for joining a task.
///
/// This should not be used directly. Use `join` of `sp_tasks::spawn` result instead.
fn join(handle: u64) -> Vec<u8> {
sp_externalities::with_externalities(|mut ext| {
let runtime_spawn = ext
.extension::<RuntimeSpawnExt>()
.expect("Cannot join without dynamic runtime dispatcher (RuntimeSpawnExt)");
runtime_spawn.join(handle)
})
.expect("`RuntimeTasks::join`: called outside of externalities context")
/// Allocator used by Substrate when executing the Wasm runtime.
#[cfg(all(target_arch = "wasm32", not(feature = "std")))]
#[cfg(all(target_arch = "wasm32", not(feature = "disable_allocator"), not(feature = "std")))]
#[global_allocator]
static ALLOCATOR: WasmAllocator = WasmAllocator;
#[cfg(all(target_arch = "wasm32", not(feature = "std")))]
use super::*;
use core::alloc::{GlobalAlloc, Layout};
unsafe impl GlobalAlloc for WasmAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
allocator::malloc(layout.size() as u32)
}
unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
allocator::free(ptr)
}
}
}
/// A default panic handler for WASM environment.
#[cfg(all(not(feature = "disable_panic_handler"), not(feature = "std")))]
#[panic_handler]
#[no_mangle]
pub fn panic(info: &core::panic::PanicInfo) -> ! {
let message = sp_std::alloc::format!("{}", info);
#[cfg(feature = "improved_panic_error_reporting")]
{
panic_handler::abort_on_panic(&message);
}
#[cfg(not(feature = "improved_panic_error_reporting"))]
{
logging::log(LogLevel::Error, "runtime", message.as_bytes());
core::arch::wasm32::unreachable();
}
}
/// A default OOM handler for WASM environment.
#[cfg(all(not(feature = "disable_oom"), not(feature = "std")))]
#[alloc_error_handler]
pub fn oom(_: core::alloc::Layout) -> ! {
#[cfg(feature = "improved_panic_error_reporting")]
{
panic_handler::abort_on_panic("Runtime memory exhausted.");
}
#[cfg(not(feature = "improved_panic_error_reporting"))]
{
logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting");
core::arch::wasm32::unreachable();
}
}
/// Type alias for Externalities implementation used in tests.
#[cfg(feature = "std")]
pub type TestExternalities = sp_state_machine::TestExternalities<sp_core::Blake2Hasher>;
/// The host functions Substrate provides for the Wasm runtime environment.
///
/// All these host functions will be callable from inside the Wasm environment.
#[cfg(feature = "std")]
pub type SubstrateHostFunctions = (
storage::HostFunctions,
wasm_tracing::HostFunctions,
offchain::HostFunctions,
crypto::HostFunctions,
hashing::HostFunctions,
allocator::HostFunctions,
panic_handler::HostFunctions,
logging::HostFunctions,
sandbox::HostFunctions,
crate::trie::HostFunctions,
transaction_index::HostFunctions,
);
#[cfg(test)]
mod tests {
use super::*;
use sp_core::{
crypto::UncheckedInto, map, storage::Storage, testing::TaskExecutor,
traits::TaskExecutorExt,
};
use sp_state_machine::BasicExternalities;
use std::any::TypeId;
#[test]
fn storage_works() {
let mut t = BasicExternalities::default();
t.execute_with(|| {
assert_eq!(storage::get(b"hello"), None);
storage::set(b"hello", b"world");
assert_eq!(storage::get(b"hello"), Some(b"world".to_vec().into()));
assert_eq!(storage::get(b"foo"), None);
storage::set(b"foo", &[1, 2, 3][..]);
});
t = BasicExternalities::new(Storage {
top: map![b"foo".to_vec() => b"bar".to_vec()],
t.execute_with(|| {
assert_eq!(storage::get(b"hello"), None);
assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec().into()));
let value = vec![7u8; 35];
let storage =
Storage { top: map![b"foo00".to_vec() => value.clone()], children_default: map![] };
t = BasicExternalities::new(storage);
t.execute_with(|| {
assert_eq!(storage::get(b"hello"), None);
assert_eq!(storage::get(b"foo00"), Some(value.clone().into()));
}
#[test]
fn read_storage_works() {
let value = b"\x0b\0\0\0Hello world".to_vec();
let mut t = BasicExternalities::new(Storage {
top: map![b":test".to_vec() => value.clone()],
t.execute_with(|| {
let mut v = [0u8; 4];
assert_eq!(storage::read(b":test", &mut v[..], 0).unwrap(), value.len() as u32);
assert_eq!(v, [11u8, 0, 0, 0]);
let mut w = [0u8; 11];
assert_eq!(storage::read(b":test", &mut w[..], 4).unwrap(), value.len() as u32 - 4);
assert_eq!(&w, b"Hello world");
});
}
#[test]
fn clear_prefix_works() {
let mut t = BasicExternalities::new(Storage {
top: map![
b":a".to_vec() => b"\x0b\0\0\0Hello world".to_vec(),
b":abcd".to_vec() => b"\x0b\0\0\0Hello world".to_vec(),
b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(),
b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec()
],
children_default: map![],
});
t.execute_with(|| {
// We can switch to this once we enable v3 of the `clear_prefix`.
//assert!(matches!(
// storage::clear_prefix(b":abc", None),
// MultiRemovalResults::NoneLeft { db: 2, total: 2 }
//));
assert!(matches!(
storage::clear_prefix(b":abc", None),
KillStorageResult::AllRemoved(2),
));
assert!(storage::get(b":a").is_some());
assert!(storage::get(b":abdd").is_some());
assert!(storage::get(b":abcd").is_none());
assert!(storage::get(b":abc").is_none());
// We can switch to this once we enable v3 of the `clear_prefix`.
//assert!(matches!(
// storage::clear_prefix(b":abc", None),
// MultiRemovalResults::NoneLeft { db: 0, total: 0 }
//));
assert!(matches!(
storage::clear_prefix(b":abc", None),
KillStorageResult::AllRemoved(0),
));
});
}
#[test]
fn batch_verify_start_finish_works() {
let mut ext = BasicExternalities::default();
ext.register_extension(TaskExecutorExt::new(TaskExecutor::new()));
ext.execute_with(|| {
crypto::start_batch_verify();
});
assert!(ext.extensions().get_mut(TypeId::of::<VerificationExt>()).is_some());
ext.execute_with(|| {
assert!(crypto::finish_batch_verify());
});
assert!(ext.extensions().get_mut(TypeId::of::<VerificationExt>()).is_none());
}
#[test]
fn long_sr25519_batching() {
let mut ext = BasicExternalities::default();
ext.register_extension(TaskExecutorExt::new(TaskExecutor::new()));
ext.execute_with(|| {
let pair = sr25519::Pair::generate_with_phrase(None).0;
let pair_unused = sr25519::Pair::generate_with_phrase(None).0;
crypto::start_batch_verify();
for it in 0..70 {
let msg = format!("Schnorrkel {}!", it);
let signature = pair.sign(msg.as_bytes());
crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public());
}
// push invalid
let msg = b"asdf!";
let signature = pair.sign(msg);
crypto::sr25519_batch_verify(&signature, msg, &pair_unused.public());
assert!(!crypto::finish_batch_verify());
crypto::start_batch_verify();
for it in 0..70 {
let msg = format!("Schnorrkel {}!", it);
let signature = pair.sign(msg.as_bytes());
crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public());
}
assert!(crypto::finish_batch_verify());
});
}
fn zero_ed_pub() -> ed25519::Public {
[0u8; 32].unchecked_into()
}
fn zero_ed_sig() -> ed25519::Signature {
ed25519::Signature::from_raw([0u8; 64])
}
fn zero_sr_pub() -> sr25519::Public {
[0u8; 32].unchecked_into()
}
fn zero_sr_sig() -> sr25519::Signature {
sr25519::Signature::from_raw([0u8; 64])
}
#[test]
fn batching_works() {
let mut ext = BasicExternalities::default();
ext.register_extension(TaskExecutorExt::new(TaskExecutor::new()));
ext.execute_with(|| {
// invalid ed25519 signature
crypto::start_batch_verify();
crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub());
assert!(!crypto::finish_batch_verify());
// 2 valid ed25519 signatures
crypto::start_batch_verify();
let pair = ed25519::Pair::generate_with_phrase(None).0;
let msg = b"Important message";
let signature = pair.sign(msg);
crypto::ed25519_batch_verify(&signature, msg, &pair.public());
let pair = ed25519::Pair::generate_with_phrase(None).0;
let msg = b"Even more important message";
let signature = pair.sign(msg);
crypto::ed25519_batch_verify(&signature, msg, &pair.public());
assert!(crypto::finish_batch_verify());
// 1 valid, 1 invalid ed25519 signature
crypto::start_batch_verify();
let pair1 = ed25519::Pair::generate_with_phrase(None).0;
let pair2 = ed25519::Pair::generate_with_phrase(None).0;
let signature = pair1.sign(msg);
crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub());
crypto::ed25519_batch_verify(&signature, msg, &pair1.public());
crypto::ed25519_batch_verify(&signature, msg, &pair2.public());
assert!(!crypto::finish_batch_verify());
// 1 valid ed25519, 2 valid sr25519
crypto::start_batch_verify();
let pair = ed25519::Pair::generate_with_phrase(None).0;
let msg = b"Ed25519 batching";
let signature = pair.sign(msg);
crypto::ed25519_batch_verify(&signature, msg, &pair.public());
let pair = sr25519::Pair::generate_with_phrase(None).0;
let msg = b"Schnorrkel rules";
let signature = pair.sign(msg);
crypto::sr25519_batch_verify(&signature, msg, &pair.public());
let pair = sr25519::Pair::generate_with_phrase(None).0;
let msg = b"Schnorrkel batches!";
let signature = pair.sign(msg);
crypto::sr25519_batch_verify(&signature, msg, &pair.public());
assert!(crypto::finish_batch_verify());
// 1 valid sr25519, 1 invalid sr25519
crypto::start_batch_verify();