Commit aad569a1 authored by Bastian Köcher, committed by GitHub

Revert new wasm allocator (#1502)

* Revert "Better allocator for wasm (#1460)"

This reverts commit 1ccb590d.

* Update wasm files
parent 3857d8aa
@@ -3560,7 +3560,6 @@ dependencies = [
 "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
...
@@ -19,7 +19,6 @@ byteorder = "1.1"
 lazy_static = "1.0"
 parking_lot = "0.7.1"
 log = "0.4"
-fnv = "1.0.6"
 [dev-dependencies]
 assert_matches = "1.1"
...
// Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! This module implements a buddy allocation heap.
//! It uses a binary tree and follows the concepts outlined in
//! https://en.wikipedia.org/wiki/Buddy_memory_allocation.
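//!
//! Each tree node covers a power-of-two run of blocks: splitting a free
//! node yields two "buddies" of half the size, and on deallocation two
//! free buddies are merged back into their parent, which keeps external
//! fragmentation bounded.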
extern crate fnv;
use std::vec;
use self::fnv::FnvHashMap;
// The pointers need to be aligned to 8 bytes.
const ALIGNMENT: u32 = 8;
// The block size needs to be a multiple of the memory alignment
// requirement. This is so that the pointer returned by `allocate()`
// always fulfills the alignment. In buddy allocation a pointer always
// points to the start of a block, which with a fitting block size
// will then be a multiple of the alignment requirement.
const BLOCK_SIZE: u32 = 8192; // 2^13 bytes
#[allow(path_statements)]
fn _assert_block_size_aligned() {
// `mem::transmute` only compiles when the source and target types have
// the same size. Transmuting `[u8; BLOCK_SIZE % ALIGNMENT]` into
// `[u8; 0]` therefore asserts, at compile time, that BLOCK_SIZE is a
// multiple of ALIGNMENT.
::std::mem::transmute::<[u8; BLOCK_SIZE as usize % ALIGNMENT as usize], [u8; 0]>;
}
#[derive(PartialEq, Copy, Clone)]
enum Node {
Free,
Full,
Split,
}
/// A buddy allocation heap, which tracks allocations and deallocations
/// using a binary tree.
pub struct Heap {
allocated_bytes: FnvHashMap<u32, u32>,
levels: u32,
ptr_offset: u32,
tree: vec::Vec<Node>,
total_size: u32,
}
impl Heap {
/// Creates a new buddy allocation heap.
///
/// # Arguments
///
/// * `ptr_offset` - The pointers returned by `allocate()`
/// start from this offset on. The pointer offset needs
/// to be aligned to a multiple of 8, hence a padding might
/// be added to align `ptr_offset` properly.
///
/// * `heap_size` - The size available to this heap instance
/// (in bytes) for allocating memory.
///
pub fn new(mut ptr_offset: u32, heap_size: u32) -> Self {
let padding = ptr_offset % ALIGNMENT;
if padding != 0 {
ptr_offset += ALIGNMENT - padding;
}
let leaves = heap_size / BLOCK_SIZE;
let levels = Heap::get_tree_levels(leaves);
let node_count: usize = (1 << (levels + 1)) - 1;
Heap {
allocated_bytes: FnvHashMap::default(),
levels,
ptr_offset,
tree: vec![Node::Free; node_count],
total_size: 0,
}
}
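// Worked example for `new`: heap_size = 4 * BLOCK_SIZE gives 4 leaf
// blocks, levels = 2, and node_count = 2^(2 + 1) - 1 = 7 tree nodes
// (1 root, 2 level-1 nodes, 4 leaves).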
/// Allocates the requested number of bytes and returns a pointer into
/// the heap, or `0` if the allocation cannot be satisfied.
pub fn allocate(&mut self, size: u32) -> u32 {
// Get the requested level from number of blocks requested
let blocks_needed = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
let block_offset = match self.allocate_block_in_tree(blocks_needed) {
Some(v) => v,
None => return 0,
};
let ptr = BLOCK_SIZE * block_offset as u32;
self.allocated_bytes.insert(ptr, size);
self.total_size += size;
trace!(target: "wasm-heap", "Heap size is {} bytes after allocation", self.total_size);
self.ptr_offset + ptr
}
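// Example for `allocate`: with BLOCK_SIZE = 8192, `allocate(10_000)`
// needs ceil(10_000 / 8192) = 2 blocks, so it is served by a level-1
// node and the returned pointer is `ptr_offset` plus that node's byte
// offset within the heap.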
fn allocate_block_in_tree(&mut self, blocks_needed: u32) -> Option<usize> {
let levels_needed = Heap::get_tree_levels(blocks_needed);
if levels_needed > self.levels {
trace!(target: "wasm-heap", "Heap is too small: {:?} > {:?}", levels_needed, self.levels);
return None;
}
// Start at tree root and traverse down
let mut index = 0;
let mut current_level = self.levels;
'down: loop {
let buddy_exists = index & 1 == 1;
if current_level == levels_needed {
if self.tree[index] == Node::Free {
self.tree[index] = Node::Full;
if index > 0 {
let parent = self.get_parent_node_index(index);
self.update_parent_nodes(parent);
}
break 'down;
}
} else {
match self.tree[index] {
Node::Full => {
if buddy_exists {
// Check if buddy is free
index += 1;
} else {
break 'down;
}
continue 'down;
},
Node::Free => {
// If node is free we split it and descend further down
self.tree[index] = Node::Split;
index = index * 2 + 1;
current_level -= 1;
continue 'down;
},
Node::Split => {
// Descend further
index = index * 2 + 1;
current_level -= 1;
continue 'down;
},
}
}
if buddy_exists {
// If a buddy exists it needs to be checked as well
index += 1;
continue 'down;
}
// Backtrack once we're at the bottom and haven't matched a free block yet
'up: loop {
if index == 0 {
trace!(target: "wasm-heap", "Heap is too small: tree root reached.");
return None;
}
index = self.get_parent_node_index(index);
current_level += 1;
let has_buddy = index & 1 == 1;
if has_buddy {
index += 1;
break 'up;
}
}
}
let current_level_offset = (1 << (self.levels - current_level)) - 1;
let level_offset = index - current_level_offset;
let block_offset = level_offset * (1 << current_level);
Some(block_offset as usize)
}
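// Offset math in `allocate_block_in_tree`: level `l` (leaves are level
// 0) starts at array index `2^(levels - l) - 1`; subtracting that start
// gives a node's position within its level, and a node at level `l`
// spans `2^l` leaf blocks.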
/// Deallocates all blocks which were allocated for a pointer.
pub fn deallocate(&mut self, mut ptr: u32) {
ptr -= self.ptr_offset;
let allocated_size = match self.allocated_bytes.get(&ptr) {
Some(v) => *v,
// If nothing was allocated for this pointer, do nothing
None => return,
};
let count_blocks = (allocated_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
let block_offset = ptr / BLOCK_SIZE;
self.free(block_offset, count_blocks);
self.allocated_bytes.remove(&ptr);
self.total_size = self.total_size.checked_sub(allocated_size).unwrap_or(0);
trace!(target: "wasm-heap", "Heap size is {} bytes after deallocation", self.total_size);
}
fn free(&mut self, block_offset: u32, count_blocks: u32) {
let requested_level = Heap::get_tree_levels(count_blocks);
let current_level_offset = (1 << (self.levels - requested_level)) - 1;
let level_offset = block_offset / (1 << requested_level);
let index_offset = current_level_offset + level_offset;
if index_offset > self.tree.len() as u32 - 1 {
trace!(target: "wasm-heap", "Index offset {} is > length of tree {}", index_offset, self.tree.len());
// Bail out here; indexing the tree below would panic otherwise.
return;
}
self.free_and_merge(index_offset as usize);
let parent = self.get_parent_node_index(index_offset as usize);
self.update_parent_nodes(parent);
}
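// The tree lives in a flat array: the children of node `i` sit at
// `2 * i + 1` and `2 * i + 2`, so the parent of node `i` is
// `(i + 1) / 2 - 1` with integer division.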
fn get_parent_node_index(&mut self, index: usize) -> usize {
(index + 1) / 2 - 1
}
fn free_and_merge(&mut self, index: usize) {
self.tree[index] = Node::Free;
if index == 0 {
return;
}
let has_right_buddy = (index & 1) == 1;
let other_node = if has_right_buddy {
index + 1
} else {
index - 1
};
if self.tree[other_node] == Node::Free {
let parent = self.get_parent_node_index(index);
self.free_and_merge(parent);
}
}
fn update_parent_nodes(&mut self, index: usize) {
let left_child = index * 2 + 1;
let right_child = index * 2 + 2;
let children_free = self.tree[left_child] == Node::Free && self.tree[right_child] == Node::Free;
let children_full = self.tree[left_child] == Node::Full && self.tree[right_child] == Node::Full;
if children_free {
self.tree[index] = Node::Free;
} else if children_full {
self.tree[index] = Node::Full;
} else {
self.tree[index] = Node::Split;
}
if index == 0 {
// Tree root
return;
}
let parent = self.get_parent_node_index(index);
self.update_parent_nodes(parent);
}
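// Returns floor(log2(count_blocks)): the tree level needed to cover
// `count_blocks` leaf blocks when the count is a power of two, e.g.
// 1 block -> level 0, 2 -> 1, 4 -> 2. Note that a non-power-of-two
// count rounds down (5 -> 2), so callers effectively work in
// power-of-two block granularity.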
fn get_tree_levels(mut count_blocks: u32) -> u32 {
if count_blocks == 0 {
0
} else {
let mut counter = 0;
loop {
	count_blocks >>= 1;
	if count_blocks == 0 {
		break;
	}
	counter += 1;
}
counter
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn should_always_align_pointers_to_multiples_of_8() {
let heap_size = BLOCK_SIZE * 4;
let mut heap = super::Heap::new(13, heap_size);
let ptr = heap.allocate(1);
assert_eq!(ptr, 16); // 16 is the next multiple of 8 from 13
}
#[test]
fn should_start_first_pointer_at_offset() {
let start_offset = 40;
let heap_size = BLOCK_SIZE * 4;
let mut heap = super::Heap::new(start_offset, heap_size);
let ptr = heap.allocate(BLOCK_SIZE - 1);
assert_eq!(ptr, start_offset);
}
#[test]
fn should_start_second_pointer_at_second_block() {
let start_offset = 40;
let heap_size = BLOCK_SIZE * 4;
let mut heap = super::Heap::new(start_offset, heap_size);
let _ptr1 = heap.allocate(BLOCK_SIZE - 1);
let ptr2 = heap.allocate(BLOCK_SIZE - 1);
assert_eq!(ptr2, start_offset + BLOCK_SIZE);
}
#[test]
fn should_not_panic_on_deallocation_of_nonexistent_pointer() {
let heap_size = BLOCK_SIZE * 4;
let mut heap = super::Heap::new(1, heap_size);
let ret = heap.deallocate(heap_size + 1);
assert_eq!(ret, ());
}
#[test]
fn should_calculate_tree_size_from_heap_size() {
let heap_size = BLOCK_SIZE * 4;
let heap = super::Heap::new(1, heap_size);
assert_eq!(heap.levels, 2);
}
#[test]
fn should_round_tree_size_to_nearest_possible() {
let heap_size = BLOCK_SIZE * 4 + 1;
let heap = super::Heap::new(1, heap_size);
assert_eq!(heap.levels, 2);
}
#[test]
fn heap_size_should_stay_zero_in_total() {
let heap_size = BLOCK_SIZE * 4;
let mut heap = super::Heap::new(1, heap_size);
assert_eq!(heap.total_size, 0);
let ptr = heap.allocate(42);
assert_eq!(heap.total_size, 42);
heap.deallocate(ptr);
assert_eq!(heap.total_size, 0);
}
#[test]
fn heap_size_should_stay_constant() {
let heap_size = BLOCK_SIZE * 4;
let mut heap = super::Heap::new(9, heap_size);
for _ in 1..10 {
assert_eq!(heap.total_size, 0);
let ptr = heap.allocate(42);
assert_eq!(ptr, 16);
assert_eq!(heap.total_size, 42);
heap.deallocate(ptr);
assert_eq!(heap.total_size, 0);
}
assert_eq!(heap.total_size, 0);
}
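// An end-to-end example added for illustration (not part of the
// original file): allocate two single-block regions, free both, and
// check the heap is empty again.
#[test]
fn example_allocate_free_cycle() {
	let heap_size = BLOCK_SIZE * 4;
	let mut heap = super::Heap::new(0, heap_size);
	let a = heap.allocate(1);
	let b = heap.allocate(1);
	assert_eq!(b, a + BLOCK_SIZE);
	heap.deallocate(a);
	heap.deallocate(b);
	assert_eq!(heap.total_size, 0);
}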
}
@@ -63,7 +63,6 @@ mod wasm_executor;
 #[macro_use]
 mod native_executor;
 mod sandbox;
-mod heap;
 pub mod error;
 pub use wasm_executor::WasmExecutor;
...
@@ -22,7 +22,7 @@ use wasmi::{
 	Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef,
 };
 use wasmi::RuntimeValue::{I32, I64};
-use wasmi::memory_units::{Bytes, Pages};
+use wasmi::memory_units::Pages;
 use state_machine::Externalities;
 use error::{Error, ErrorKind, Result};
 use wasm_utils::UserError;
@@ -32,7 +32,35 @@ use primitives::sandbox as sandbox_primitives;
 use primitives::{H256, Blake2Hasher};
 use trie::ordered_trie_root;
 use sandbox;
-use heap;
+
+struct Heap {
+	end: u32,
+}
+
+impl Heap {
+	/// Construct a new `Heap` that starts allocating at the end of the
+	/// memory area the module already uses.
+	///
+	/// This is a bump allocator: `allocate` advances `end` by the
+	/// requested size and `deallocate` is a no-op, so memory is only
+	/// reclaimed when the whole `Heap` is dropped.
+	fn new(memory: &MemoryRef) -> Self {
+		Heap {
+			end: memory.used_size().0 as u32,
+		}
+	}
+
+	fn allocate(&mut self, size: u32) -> u32 {
+		let r = self.end;
+		self.end += size;
+		r
+	}
+
+	fn deallocate(&mut self, _offset: u32) {
+	}
+}
 #[cfg(feature="wasm-extern-trace")]
 macro_rules! debug_trace {
@@ -45,7 +73,7 @@ macro_rules! debug_trace {
 struct FunctionExecutor<'e, E: Externalities<Blake2Hasher> + 'e> {
 	sandbox_store: sandbox::Store,
-	heap: heap::Heap,
+	heap: Heap,
 	memory: MemoryRef,
 	table: Option<TableRef>,
 	ext: &'e mut E,
@@ -54,14 +82,9 @@ struct FunctionExecutor<'e, E: Externalities<Blake2Hasher> + 'e> {
 impl<'e, E: Externalities<Blake2Hasher>> FunctionExecutor<'e, E> {
 	fn new(m: MemoryRef, t: Option<TableRef>, e: &'e mut E) -> Result<Self> {
-		let current_size: Bytes = m.current_size().into();
-		let current_size = current_size.0 as u32;
-		let used_size = m.used_size().0 as u32;
-		let heap_size = current_size - used_size;
 		Ok(FunctionExecutor {
 			sandbox_store: sandbox::Store::new(),
-			heap: heap::Heap::new(used_size, heap_size),
+			heap: Heap::new(&m),
 			memory: m,
 			table: t,
 			ext: e,
...
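The restored `Heap` above is a simple bump allocator: every call into the wasm runtime gets a fresh heap that starts where the module's already-used memory ends, `allocate` just advances `end`, and `deallocate` never reclaims anything. A minimal standalone sketch of that behaviour (illustrative only, not part of the diff):

struct Heap {
	end: u32,
}

impl Heap {
	fn allocate(&mut self, size: u32) -> u32 {
		let ptr = self.end;
		self.end += size; // bump forward; no free list, no reuse
		ptr
	}

	fn deallocate(&mut self, _ptr: u32) {
		// Deliberately a no-op: memory comes back only when the whole
		// heap is dropped after the runtime call returns.
	}
}

fn main() {
	let mut heap = Heap { end: 1024 };
	let a = heap.allocate(100);
	let b = heap.allocate(50);
	assert_eq!(b, a + 100); // allocations are laid out back to back
	heap.deallocate(a); // no-op
}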
@@ -132,10 +132,9 @@ pub fn storage(key: &[u8]) -> Option<Vec<u8>> {
 		if length == u32::max_value() {
 			None
 		} else {
-			// Invariants required by Vec::from_raw_parts are not formally fulfilled.
-			// We don't allocate via String/Vec<T>, but use a custom allocator instead.
-			// See #300 for more details.
-			Some(<Vec<u8>>::from_raw_parts(ptr, length as usize, length as usize))
+			let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
+			ext_free(ptr);
+			Some(ret)
 		}
 	}
 }
@@ -148,10 +147,9 @@ pub fn child_storage(storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
 		if length == u32::max_value() {
 			None
 		} else {
-			// Invariants required by Vec::from_raw_parts are not formally fulfilled.
-			// We don't allocate via String/Vec<T>, but use a custom allocator instead.
-			// See #300 for more details.
-			Some(<Vec<u8>>::from_raw_parts(ptr, length as usize, length as usize))
+			let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
+			ext_free(ptr);
+			Some(ret)
 		}
 	}
 }
......
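On the sr-io side the revert restores the copy-out convention: `Vec::from_raw_parts` is only sound when the pointer came from Rust's own allocator with matching size, alignment, and capacity (the invariant the removed comment described as "not formally fulfilled"), so the restored code copies the bytes into a fresh `Vec` and returns the original buffer via `ext_free`. A hedged sketch of that pattern; the `ext_free` declaration mirrors the shape used in the diff:

use std::slice;

extern "C" {
	fn ext_free(ptr: *mut u8);
}

/// Copy `length` bytes out of a buffer owned by the executor-side
/// allocator, then hand the buffer back.
unsafe fn copy_and_free(ptr: *mut u8, length: u32) -> Vec<u8> {
	let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
	ext_free(ptr);
	ret
}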
(Binary wasm files updated; no preview for this file type.)