Unverified commit a9ab40db authored by Michael Müller, committed by GitHub
Browse files

Add automated spell-checking (#718)



* Add `.config`

* Fix hunspell complaints

* Run spellcheck in CI

* Improve hunspell config

* Fix 'pre' prefix

* Remove `|` from dictionary

* Add missing words to dictionary

* Add missing words to dictionary

* Add missing words to dictionary

* Fix `n`-th → nth

* Fix indentation

* Remove comment

* Fix number

* Fix spacing

* Remove regex

* Apply suggestions from code review
Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>

* Fix dictionary

* Fix dictionary

* Implement review comments

* Replace SRML with FRAME

* Replace occurrences of 'spreaded'

* Tuning text regarding 'spreaded'

* Update crates/storage/src/lazy/entry.rs
Co-authored-by: Hero Bird <robin.freyler@gmail.com>
Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>
Co-authored-by: Hero Bird <robin.freyler@gmail.com>
parent f329a928
Pipeline #129415 failed with stages
in 7 minutes and 28 seconds
......@@ -21,7 +21,7 @@ use super::{
Bits256BitsIterMut,
};
/// A reference to a subslice within a 256-bit chunk.
/// A reference to a sub-slice within a 256-bit chunk.
///
/// This is a reference wrapper around either a shared 256-bit chunk
/// or an exclusive 256-bit chunk. Also it prevents accesses to out of bounds
......
......@@ -53,10 +53,10 @@ use crate::{
/// The index of a bit pack within the bit vector.
type Index = u32;
/// A bit position within a 256-bit package.
/// The position of a bit within a 256-bit package.
type Index256 = u8;
/// A bit position within a `u64`.
/// The position of a bit within a `u64`.
type Index64 = u8;
/// A pack of 64 bits.
......@@ -127,7 +127,7 @@ impl Bitvec {
Bits256IterMut::new(self)
}
/// Splits the given index into a 256-bit pack index and bit position index.
/// Splits the given index into a 256-bit pack index and a position index of the bit.
fn split_index(&self, at: Index) -> Option<(Index, Index256)> {
if at >= self.len() {
return None
......
......@@ -36,7 +36,7 @@ use std::{
/// Conducts repeated insert and remove operations into the map by iterating
/// over `xs`. For each odd `x` in `xs` a defined number of insert operations
/// (`inserts_each`) is executed. For each even `x` it's asserted that the
/// previously inserted elements are in the map and they are removed subsequently.
/// previously inserted elements are in the map, and they are removed subsequently.
///
/// The reasoning behind this even/odd sequence is to introduce some
/// randomness into when elements are inserted/removed.
......
......@@ -348,7 +348,7 @@ where
// what normally a hash map implementation does because we do not resolve
// or prevent collisions in this hash map implementation at any level.
// Having a collision is virtually impossible since we
// are using a keyspace of 2^256 bit.
// are using a keyspace of `2^256` bit.
self.values.get(key).is_some()
}
......@@ -492,7 +492,7 @@ where
K: Ord + Clone + PackedLayout,
V: PackedLayout,
{
/// Gets a reference to the key that would be used when inserting a value through the VacantEntry.
/// Gets a reference to the key that would be used when inserting a value through the `VacantEntry`.
pub fn key(&self) -> &K {
&self.values_entry.key()
}
......@@ -502,7 +502,7 @@ where
self.values_entry.into_key()
}
/// Sets the value of the entry with the `VacantEntry`'s key, and returns a mutable reference to it.
/// Sets the value of the entry with the `VacantEntry`'s key, and returns a mutable reference to it.
pub fn insert(self, value: V) -> &'a mut V {
// At this point we know that `key` does not yet exist in the map.
let key_index = self.keys.put(self.key().to_owned());
......@@ -556,7 +556,7 @@ where
self.remove_entry().1
}
/// Converts the OccupiedEntry into a mutable reference to the value in the entry
/// Converts the `OccupiedEntry` into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself.
pub fn into_mut(self) -> &'a mut V {
&mut self.values_entry.into_mut().value
......
......@@ -41,12 +41,12 @@ pub use self::{
#[doc(inline)]
pub use self::smallvec::SmallVec;
/// Extends the lifetime 'a to the outliving lifetime 'b for the given reference.
/// Extends the lifetime `'a` to the outliving lifetime `'b` for the given reference.
///
/// # Note
///
/// This interface is a bit more constraint than a simple
/// [transmut](`core::mem::transmute`) and therefore preferred
/// [transmute](`core::mem::transmute`) and therefore preferred
/// for extending lifetimes only.
///
/// # Safety
......
......@@ -48,7 +48,7 @@ type Index = u32;
/// - The `storage::SmallVec` has a very similar API compared to a `storage::Vec`.
/// The major difference between both data structures is that the `SmallVec`
/// can only contain up to a fixed amount of elements given by `N` whereas the
/// `Vec` can contain up to 2^32 elements which is the maximum for 32-bit Wasm
/// `Vec` can contain up to `2^32` elements which is the maximum for 32-bit Wasm
/// targets.
/// - The performance characteristics may be different from Rust's
/// `Vec` due to the internal differences.
......@@ -117,7 +117,7 @@ where
self.elems.capacity()
}
/// Returns the number of elements in the vector, also referred to as its 'length'.
/// Returns the number of elements in the vector, also referred to as its length.
#[inline]
pub fn len(&self) -> u32 {
*self.len
......@@ -156,7 +156,7 @@ where
IterMut::new(self)
}
/// Returns the index if it is witihn bounds or `None` otherwise.
/// Returns the index if it is within bounds or `None` otherwise.
fn within_bounds(&self, index: Index) -> Option<Index> {
if index < self.len() {
return Some(index)
......
......@@ -291,7 +291,7 @@ impl<T> Stash<T>
where
T: PackedLayout,
{
/// Rebinds the `prev` and `next` bindings of the neighbours of the vacant entry.
/// Rebinds the `prev` and `next` bindings of the neighbors of the vacant entry.
///
/// # Note
///
......@@ -377,13 +377,13 @@ where
(root_vacant.prev, index)
}
} else {
// Default prev and next to the given at index.
// Default previous and next to the given at index.
// So the resulting vacant index is pointing to itself.
(at, at)
}
}
/// Updates links from and to neighbouring vacant entries.
/// Updates links from and to neighboring vacant entries.
fn update_neighboring_vacant_entry_links(
&mut self,
prev: Index,
......@@ -459,7 +459,7 @@ where
// Early return since `at` index is out of bounds.
return None
}
// Precompute prev and next vacant entries as we might need them later.
// Precompute previous and next vacant entries as we might need them later.
// Due to borrow checker constraints we cannot have this at a later stage.
let (prev, next) = self.fetch_prev_and_next_vacant_entry(at);
let entry_mut = self.entries.get_mut(at).expect("index is out of bounds");
......@@ -511,7 +511,7 @@ where
// Early return since `at` index is out of bounds.
return None
}
// Precompute prev and next vacant entries as we might need them later.
// Precompute previous and next vacant entries as we might need them later.
// Due to borrow checker constraints we cannot have this at a later stage.
let (prev, next) = self.fetch_prev_and_next_vacant_entry(at);
let new_vacant_entry = Entry::Vacant(VacantEntry { next, prev });
......@@ -527,7 +527,7 @@ where
///
/// Returns the number of storage cells freed this way.
///
/// This might invalidate indices stored outside of the stash.
/// This might invalidate indices stored outside the stash.
///
/// # Callback
///
......@@ -567,7 +567,7 @@ where
.expect("index is out of bounds")
{
Entry::Vacant(vacant_entry) => {
// Remove the vacant entry and rebind its neighbours.
// Remove the vacant entry and rebind its neighbors.
self.remove_vacant_entry(index, vacant_entry);
}
Entry::Occupied(value) => {
......
......@@ -445,10 +445,11 @@ fn simple_defrag_works() {
/// Returns a storage stash that looks internally like this:
///
/// i | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
/// next | | | | | | | | |
/// prev | | | | | | | | |
/// val | | | | | E | | | H |
/// i | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
/// ----------|---|---|---|---|---|---|---|---
/// next | | | | | | | |
/// previous | | | | | | | |
/// val | | | | | E | | | H
fn complex_defrag_setup() -> StorageStash<u8> {
let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F', b'G', b'H']
.iter()
......
......@@ -15,7 +15,7 @@
//! A storage vector used to store elements in a contiguous sequenced order.
//!
//! This is by default the go-to collection for most smart contracts if there
//! are not special requirements to the storage data structure.
//! are no special requirements to the storage data structure.
mod impls;
mod iter;
......@@ -39,7 +39,7 @@ use crate::{
traits::PackedLayout,
};
/// A contiguous growable array type, written `Vec<T>` but pronounced 'vector'.
/// A contiguous growable array type, written `Vec<T>` but pronounced "vector".
///
/// # Note
///
......@@ -89,7 +89,7 @@ where
}
}
/// Returns the number of elements in the vector, also referred to as its 'length'.
/// Returns the number of elements in the vector, also referred to as its length.
pub fn len(&self) -> u32 {
*self.len
}
......
......@@ -22,8 +22,8 @@ use core::{
/// A cache for a `T` that allows mutating the inner `T` through `&self`.
///
/// Internally this is a thin wrapper around an `UnsafeCell<T>`.
/// The main difference to `UnsafeCell` is that this type provides an out of the
/// box API to safely access the inner `T` as well for single threaded contexts.
/// The main difference to `UnsafeCell` is that this type provides an out-of-the-box
/// API to safely access the inner `T` as well for single threaded contexts.
pub struct CacheCell<T: ?Sized> {
/// The inner value that is allowed to be mutated in shared contexts.
inner: UnsafeCell<T>,
......
......@@ -177,7 +177,8 @@ impl<T> StorageEntry<T>
where
T: SpreadLayout,
{
/// Pulls the entity from the underlying associated storage as spreaded representation.
/// Pulls the entity from the underlying associated storage as a `SpreadLayout`
/// storage layout representation.
///
/// # Note
///
......@@ -187,7 +188,8 @@ where
Self::new(pull_spread_root_opt::<T>(&root_key), EntryState::Preserved)
}
/// Pushes the underlying associated storage as spreaded representation.
/// Pushes the underlying associated data to the contract storage using
/// the `SpreadLayout` storage layout.
///
/// # Note
///
......@@ -200,7 +202,7 @@ where
}
}
/// Clears the underlying associated storage as spreaded representation.
/// Clears the underlying associated storage as `SpreadLayout` storage layout representation.
///
/// # Note
///
......
......@@ -286,7 +286,7 @@ where
/// Care should be taken when using this API.
///
/// The general use of this API is to streamline `Drop` implementations of
/// high-level abstractions that build upon this low-level data strcuture.
/// high-level abstractions that build upon this low-level data structure.
pub fn clear_packed_at(&self, index: Index) {
let root_key = self.key_at(index).expect("cannot clear in lazy state");
if <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
......
......@@ -51,7 +51,7 @@ where
key: Option<Key>,
/// The low-level cache for the lazily loaded storage value.
///
/// # Safety (Dev)
/// # Developer Note: Safety
///
/// We use `UnsafeCell` instead of `RefCell` because
/// the intended use-case is to hand out references (`&` and `&mut`)
......
......@@ -71,7 +71,7 @@ pub type EntryMap<K, V> = BTreeMap<K, Box<StorageEntry<V>>>;
/// storage primitives in order to manage the contract storage for a whole
/// mapping of storage cells.
///
/// This storage data structure might store its entires anywhere in the contract
/// This storage data structure might store its entries anywhere in the contract
/// storage. It is the user's responsibility to keep track of the entries if it
/// is necessary to do so.
pub struct LazyHashMap<K, V, H> {
......@@ -97,7 +97,7 @@ pub struct LazyHashMap<K, V, H> {
/// in storage we insert it into the cache.
///
/// The problem now is that in this case we only have the `Vacant` object
/// which we got from searching in the cache, but we need to return an
/// which we got from searching in the cache, but we need to return
/// `Occupied` here, since the object is now in the cache. We could do this
/// by querying the cache another time -- but this would be an additional
/// search. So what we do instead is to save a reference to the inserted
......@@ -139,7 +139,7 @@ where
/// In a `BTreeMapEntry::Occupied` state the entry has been marked to
/// be removed (with `None`), but we still want to expose the `VacantEntry` API
/// to the user.
/// In a `BTreeMapEntry::Vacant` state the entry is vacant, and we want to expose
/// In an `BTreeMapEntry::Vacant` state the entry is vacant, and we want to expose
/// the `VacantEntry` API.
entry: BTreeMapEntry<'a, K, Box<StorageEntry<V>>>,
}
......@@ -553,8 +553,8 @@ where
///
/// This is an `unsafe` operation because it has a `&self` receiver but returns
/// a `*mut Entry<T>` pointer that allows for exclusive access. This is safe
/// within internal use only and should never be given outside of the lazy
/// entity for public `&self` methods.
/// within internal use only and should never be given outside the lazy entity
/// for public `&self` methods.
unsafe fn lazily_load<Q>(&self, key: &Q) -> NonNull<StorageEntry<V>>
where
K: Borrow<Q>,
......@@ -625,7 +625,7 @@ where
/// Care should be taken when using this API.
///
/// The general use of this API is to streamline `Drop` implementations of
/// high-level abstractions that build upon this low-level data strcuture.
/// high-level abstractions that build upon this low-level data structure.
pub fn clear_packed_at<Q>(&self, index: &Q)
where
K: Borrow<Q>,
......@@ -819,7 +819,7 @@ where
K: Ord + Clone + PackedLayout,
V: PackedLayout,
{
/// Gets a reference to the key that would be used when inserting a value through the VacantEntry.
/// Gets a reference to the key that would be used when inserting a value through the `VacantEntry`.
pub fn key(&self) -> &K {
&self.key
}
......@@ -829,7 +829,7 @@ where
self.key
}
/// Sets the value of the entry with the VacantEntry's key, and returns a mutable reference to it.
/// Sets the value of the entry with the `VacantEntry`'s key, and returns a mutable reference to it.
pub fn insert(self, value: V) -> &'a mut V {
let new = Box::new(StorageEntry::new(Some(value), EntryState::Mutated));
match self.entry {
......@@ -946,7 +946,7 @@ where
self.remove_entry().1
}
/// Converts the OccupiedEntry into a mutable reference to the value in the entry
/// Converts the `OccupiedEntry` into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself.
pub fn into_mut(self) -> &'a mut V {
match self.entry {
......@@ -1022,7 +1022,7 @@ mod tests {
fn key_at_works() {
let key = Key::from([0x42; 32]);
// BLAKE2 256-bit hasher:
// BLAKE-2 256-bit hasher:
let hmap1 = <LazyHashMap<i32, u8, Blake2x256>>::lazy(key);
// Key must be some.
assert_eq!(hmap1.key(), Some(&key));
......@@ -1050,7 +1050,7 @@ mod tests {
\xFB\x85\x36\x3B\x82\x94\x85\x3F"
))
);
// SHA2 256-bit hasher:
// SHA-2 256-bit hasher:
let hmap2 = <LazyHashMap<i32, u8, Sha2x256>>::lazy(key);
// Key must be some.
assert_eq!(hmap2.key(), Some(&key));
......
......@@ -47,7 +47,7 @@ pub type Index = u32;
/// storage primitives in order to manage the contract storage for a whole
/// chunk of storage cells.
///
/// A chunk of storage cells is a contiguous range of 2^32 storage cells.
/// A chunk of storage cells is a contiguous range of `2^32` storage cells.
pub struct LazyIndexMap<V> {
/// The offset key for the chunk of cells.
///
......@@ -276,7 +276,7 @@ where
/// Care should be taken when using this API.
///
/// The general use of this API is to streamline `Drop` implementations of
/// high-level abstractions that build upon this low-level data strcuture.
/// high-level abstractions that build upon this low-level data structure.
pub fn clear_packed_at(&self, index: Index) {
let root_key = self.key_at(index).expect("cannot clear in lazy state");
if <V as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
......@@ -321,8 +321,8 @@ where
///
/// This is an `unsafe` operation because it has a `&self` receiver but returns
/// a `*mut Entry<T>` pointer that allows for exclusive access. This is safe
/// within internal use only and should never be given outside of the lazy
/// entity for public `&self` methods.
/// within internal use only and should never be given outside the lazy entity
/// for public `&self` methods.
unsafe fn lazily_load(&self, index: Index) -> NonNull<StorageEntry<V>> {
// SAFETY: We have put the whole `cached_entries` mapping into an
// `UnsafeCell` because of this caching functionality. The
......
......@@ -17,7 +17,7 @@
//! Mainly provides entities to work on a contract's storage
//! as well as high-level collections on top of those.
//! Also provides environmental utilities, such as storage allocators,
//! FFI to interface with SRML contracts and a primitive blockchain
//! FFI to interface with FRAME contracts and a primitive blockchain
//! emulator for simple off-chain testing.
#![cfg_attr(not(feature = "std"), no_std)]
......
......@@ -29,7 +29,7 @@ use ink_primitives::Key;
/// # Note
///
/// This is an important modular building stone in order to manage contract
/// storage occupation. By default types try to distribute themselves onto
/// storage occupation. By default, types try to distribute themselves onto
/// their respective contract storage area. However, upon packing them into
/// `Pack<T>` they will be compressed to only ever make use of a single
/// contract storage cell. Sometimes this can be advantageous for performance
......@@ -37,12 +37,12 @@ use ink_primitives::Key;
///
/// # Usage
///
/// - A `Pack<i32>` is equivalent to `i32` in its storage occupation.
/// - A `Pack<(i32, i32)>` will occupy a single cell compared to `(i32, i32)`
/// which occupies a cell per `i32`.
/// - A `Pack<i32>` instance is equivalent to `i32` in its storage occupation.
/// - A `Pack<(i32, i32)>` instance will occupy a single cell compared to
/// `(i32, i32)` which occupies a cell per `i32`.
/// - A `Lazy<Pack<[u8; 8]>>` lazily loads a `Pack<[u8; 8]>` which occupies
/// a single cell whereas a `[u8; 8]` would occupy 8 cells in total - one for
/// each `u8`.
/// a single cell whereas a `[u8; 8]` array would occupy 8 cells in total,
/// one for each `u8`.
/// - Rust collections will never use more than a single cell. So
/// `Pack<LinkedList<T>>` and `LinkedList<T>` will occupy the same amount of
/// cells, namely 1.
......
......@@ -157,7 +157,7 @@ macro_rules! fuzz_storage {
}
/// Does some basic storage interaction tests whilst mutating
/// *all* of the data structure's entries.
/// *all* the data structure's entries.
#[allow(trivial_casts)]
#[quickcheck]
fn [< fuzz_ $id _mutate_all >] (
......
......@@ -112,7 +112,7 @@ const fn max(a: u64, b: u64) -> u64 {
/// # Note
///
/// Use this utility function to use a packed pull operation for the type
/// instead of a spreaded pull operation.
/// instead of a spread storage layout pull operation.
#[inline]
pub fn forward_pull_packed<T>(ptr: &mut KeyPtr) -> T
where
......@@ -129,7 +129,7 @@ where
/// # Note
///
/// Use this utility function to use a packed push operation for the type
/// instead of a spreaded push operation.
/// instead of a spread storage layout push operation.
#[inline]
pub fn forward_push_packed<T>(entity: &T, ptr: &mut KeyPtr)
where
......@@ -146,7 +146,7 @@ where
/// # Note
///
/// Use this utility function to use a packed clear operation for the type
/// instead of a spreaded clear operation.
/// instead of a spread storage layout clear operation.
#[inline]
pub fn forward_clear_packed<T>(entity: &T, ptr: &mut KeyPtr)
where
......
......@@ -14,7 +14,7 @@
//! Implement specialized routines for managing Option<T> storage entities.
//!
//! These are mere optimizations compared to the non specialized root functions.
//! These are mere optimizations compared to the non-specialized root functions.
//! The specializations make use of the storage entry state (occupied or vacant)
//! in order to store the option's state thus using less storage in total.
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment