Unverified commit f590442e authored by Peter Goodspeed-Niklaus, committed by GitHub

Move `MaxEncodedLen` from Substrate (#268)



* move MaxEncodedLen trait from Substrate

* Move derive macro and tests from Substrate

* only run ui tests when derive feature enabled

* reduce note-taking documentation
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Bless trybuild `tests/max_encoded_len_ui/union.rs` test

* Update docs (these files are not part of substrate)
Co-authored-by: Andronik Ordian <write@reusable.software>

* Mention new `MaxEncodedLen` trait in the CHANGELOG.md

* Prepare a 2.2.0-rc.1 release

This is a pre-release rather than a full release in order to help shape
the new `MaxEncodedLen` trait used in Substrate, in case more involved
changes turn out to be required.
The API has not changed since its introduction, so chances are slim,
but it's good to leave some leeway.

* incorporate changes made to Substrate version of MaxEncodedLen

Note: doesn't include the MaxEncodedLen impl for H160, H256, H512.
A substrate companion will be necessary to re-add those.

* remove redundant no_std

* Rewrite fn max_encoded_len_trait for clarity

* simplify logic checking for invalid attr
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* remove bogus whitespace
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* use Path::is_ident() helper
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* rm unused import
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: Igor Matuszewski <Xanewok@gmail.com>
Co-authored-by: Andronik Ordian <write@reusable.software>
parent c5d97543
Pipeline #143845 passed with stages in 19 minutes and 47 seconds
......@@ -4,6 +4,13 @@ All notable changes to this crate are documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this crate adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
## [2.2.0-rc.1] - 2021-06-22
### Added
- `MaxEncodedLen` trait for items that have a statically known maximum encoded size. ([#268](https://github.com/paritytech/parity-scale-codec/pull/268))
## [2.1.3] - 2021-06-14
### Changed
......
......@@ -297,6 +297,12 @@ dependencies = [
"wasi",
]
[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "half"
version = "1.7.1"
......@@ -323,6 +329,17 @@ dependencies = [
"memmap",
]
[[package]]
name = "impl-trait-for-tuples"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "itertools"
version = "0.9.0"
......@@ -435,7 +452,7 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "parity-scale-codec"
version = "2.1.3"
version = "2.2.0-rc.1"
dependencies = [
"arbitrary",
"arrayvec",
......@@ -443,15 +460,17 @@ dependencies = [
"byte-slice-cast",
"criterion",
"generic-array",
"impl-trait-for-tuples",
"parity-scale-codec-derive",
"quickcheck",
"serde",
"serde_derive",
"trybuild",
]
[[package]]
name = "parity-scale-codec-derive"
version = "2.1.3"
version = "2.2.0-rc.1"
dependencies = [
"parity-scale-codec",
"proc-macro-crate",
......@@ -653,6 +672,9 @@ name = "serde"
version = "1.0.115"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e54c9a88f2da7238af84b5101443f0c0d0a3bbdc455e34a5c9497b1903ed55d5"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_cbor"
......@@ -666,9 +688,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.118"
version = "1.0.115"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df"
checksum = "609feed1d0a73cc36a0182a840a9b37b4a82f0b1150369f0536a9e3f2a31dc48"
dependencies = [
"proc-macro2",
"quote",
......@@ -703,6 +725,15 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36474e732d1affd3a6ed582781b3683df3d0563714c59c39591e8ff707cf078e"
[[package]]
name = "termcolor"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
dependencies = [
"winapi-util",
]
[[package]]
name = "textwrap"
version = "0.11.0"
......@@ -760,6 +791,20 @@ dependencies = [
"serde",
]
[[package]]
name = "trybuild"
version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1768998d9a3b179411618e377dbb134c58a88cda284b0aa71c42c40660127d46"
dependencies = [
"glob",
"lazy_static",
"serde",
"serde_json",
"termcolor",
"toml",
]
[[package]]
name = "typenum"
version = "1.12.0"
......
[package]
name = "parity-scale-codec"
description = "SCALE - Simple Concatenating Aggregated Little Endians"
version = "2.1.3"
version = "2.2.0-rc.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "Apache-2.0"
repository = "https://github.com/paritytech/parity-scale-codec"
......@@ -11,17 +11,19 @@ edition = "2018"
[dependencies]
arrayvec = { version = "0.7", default-features = false }
serde = { version = "1.0.102", optional = true }
parity-scale-codec-derive = { path = "derive", version = "2.1.3", default-features = false, optional = true }
parity-scale-codec-derive = { path = "derive", version = "2.2.0-rc.1", default-features = false, optional = true }
bitvec = { version = "0.20.1", default-features = false, features = ["alloc"], optional = true }
byte-slice-cast = { version = "1.0.0", default-features = false }
generic-array = { version = "0.14.4", optional = true }
arbitrary = { version = "1.0.1", features = ["derive"], optional = true }
impl-trait-for-tuples = "0.2.1"
[dev-dependencies]
criterion = "0.3.0"
serde_derive = { version = "1.0" }
parity-scale-codec-derive = { path = "derive", version = "2.1.3", default-features = false }
parity-scale-codec-derive = { path = "derive", version = "2.2.0-rc.1", default-features = false }
quickcheck = "1.0"
trybuild = "1.0.42"
[[bench]]
name = "benches"
......
[package]
name = "parity-scale-codec-derive"
description = "Serialization and deserialization derive macro for Parity SCALE Codec"
version = "2.1.3"
version = "2.2.0-rc.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "Apache-2.0"
edition = "2018"
......@@ -16,4 +16,4 @@ proc-macro2 = "1.0.6"
proc-macro-crate = "1.0.0"
[dev-dependencies]
parity-scale-codec = { path = "..", version = "2.0.1" }
parity-scale-codec = { path = "..", version = "2.2.0-rc.1" }
// Copyright 2017-2018 Parity Technologies
// Copyright 2017-2021 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
......@@ -32,6 +32,7 @@ use crate::utils::is_lint_attribute;
mod decode;
mod encode;
mod max_encoded_len;
mod utils;
mod trait_bounds;
......@@ -333,3 +334,9 @@ pub fn compact_as_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStr
wrap_with_dummy_const(input, impl_block)
}
/// Derive `MaxEncodedLen`.
#[proc_macro_derive(MaxEncodedLen, attributes(max_encoded_len_mod))]
pub fn derive_max_encoded_len(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
max_encoded_len::derive_max_encoded_len(input)
}
// Copyright (C) 2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::utils::codec_crate_path;
use quote::{quote, quote_spanned};
use syn::{
Data, DeriveInput, Fields, GenericParam, Generics, TraitBound, Type, TypeParamBound,
parse_quote, spanned::Spanned,
};
/// impl for `#[derive(MaxEncodedLen)]`
pub fn derive_max_encoded_len(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let input: DeriveInput = match syn::parse(input) {
Ok(input) => input,
Err(e) => return e.to_compile_error().into(),
};
let mel_trait = match max_encoded_len_trait(&input) {
Ok(mel_trait) => mel_trait,
Err(e) => return e.to_compile_error().into(),
};
let name = &input.ident;
let generics = add_trait_bounds(input.generics, mel_trait.clone());
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let data_expr = data_length_expr(&input.data);
quote::quote!(
const _: () = {
impl #impl_generics #mel_trait for #name #ty_generics #where_clause {
fn max_encoded_len() -> usize {
#data_expr
}
}
};
)
.into()
}
fn max_encoded_len_trait(input: &DeriveInput) -> syn::Result<TraitBound> {
let mel = codec_crate_path(&input.attrs)?;
Ok(parse_quote!(#mel::MaxEncodedLen))
}
// Add a bound `T: MaxEncodedLen` to every type parameter T.
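// For a hypothetical `struct Wrap<T>(T)`, this is what turns the generated impl into
// `impl<T: MaxEncodedLen> MaxEncodedLen for Wrap<T>`.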
fn add_trait_bounds(mut generics: Generics, mel_trait: TraitBound) -> Generics {
for param in &mut generics.params {
if let GenericParam::Type(ref mut type_param) = *param {
type_param.bounds.push(TypeParamBound::Trait(mel_trait.clone()));
}
}
generics
}
/// generate an expression to sum up the max encoded length from several fields
fn fields_length_expr(fields: &Fields) -> proc_macro2::TokenStream {
let type_iter: Box<dyn Iterator<Item = &Type>> = match fields {
Fields::Named(ref fields) => Box::new(fields.named.iter().map(|field| &field.ty)),
Fields::Unnamed(ref fields) => Box::new(fields.unnamed.iter().map(|field| &field.ty)),
Fields::Unit => Box::new(std::iter::empty()),
};
// expands to an expression like
//
// 0
// .saturating_add(<type of first field>::max_encoded_len())
// .saturating_add(<type of second field>::max_encoded_len())
//
// We match the span of each field to the span of the corresponding
// `max_encoded_len` call. This way, if one field's type doesn't implement
// `MaxEncodedLen`, the compiler's error message will underline which field
// caused the issue.
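//
// As a concrete sketch, a hypothetical `struct Foo(u8, u32)` would produce
//
//     0_usize
//         .saturating_add(<u8>::max_encoded_len())
//         .saturating_add(<u32>::max_encoded_len())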
let expansion = type_iter.map(|ty| {
quote_spanned! {
ty.span() => .saturating_add(<#ty>::max_encoded_len())
}
});
quote! {
0_usize #( #expansion )*
}
}
// generate an expression to sum up the max encoded length of each field
fn data_length_expr(data: &Data) -> proc_macro2::TokenStream {
match *data {
Data::Struct(ref data) => fields_length_expr(&data.fields),
Data::Enum(ref data) => {
// We need an expression expanded for each variant like
//
// 0
// .max(<variant expression>)
// .max(<variant expression>)
// .saturating_add(1)
//
// The 1 derives from the discriminant; see
// https://github.com/paritytech/parity-scale-codec/
// blob/f0341dabb01aa9ff0548558abb6dcc5c31c669a1/derive/src/encode.rs#L211-L216
//
// Each variant expression's sum is computed the way an equivalent struct's would be.
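//
// As a concrete sketch, a hypothetical `enum Bar { A(u8), B(u32) }` would produce
//
//     0_usize
//         .max(0_usize.saturating_add(<u8>::max_encoded_len()))
//         .max(0_usize.saturating_add(<u32>::max_encoded_len()))
//         .saturating_add(1)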
let expansion = data.variants.iter().map(|variant| {
let variant_expression = fields_length_expr(&variant.fields);
quote! {
.max(#variant_expression)
}
});
quote! {
0_usize #( #expansion )* .saturating_add(1)
}
}
Data::Union(ref data) => {
// https://github.com/paritytech/parity-scale-codec/
// blob/f0341dabb01aa9ff0548558abb6dcc5c31c669a1/derive/src/encode.rs#L290-L293
syn::Error::new(data.union_token.span(), "Union types are not supported")
.to_compile_error()
}
}
}
......@@ -19,11 +19,13 @@
use std::str::FromStr;
use proc_macro2::TokenStream;
use proc_macro_crate::{crate_name, FoundCrate};
use proc_macro2::{Span, Ident, TokenStream};
use quote::quote;
use syn::{
spanned::Spanned,
Meta, NestedMeta, Lit, Attribute, Variant, Field, DeriveInput, Fields, Data, FieldsUnnamed,
FieldsNamed, MetaNameValue, punctuated::Punctuated, token, parse::Parse,
Attribute, Data, DeriveInput, Error, Field, Fields, FieldsNamed, FieldsUnnamed, Lit, Meta,
MetaNameValue, NestedMeta, parse::Parse, Path, punctuated::Punctuated,
spanned::Spanned, token, Variant,
};
fn find_meta_item<'a, F, R, I, M>(mut itr: I, mut pred: F) -> Option<R> where
......@@ -119,6 +121,48 @@ pub fn has_dumb_trait_bound(attrs: &[Attribute]) -> bool {
}).is_some()
}
/// Generate the crate access for the crate using 2018 syntax.
fn crate_access() -> syn::Result<Ident> {
const DEF_CRATE: &str = "parity-scale-codec";
match crate_name(DEF_CRATE) {
Ok(FoundCrate::Itself) => {
let name = DEF_CRATE.to_string().replace("-", "_");
Ok(syn::Ident::new(&name, Span::call_site()))
}
Ok(FoundCrate::Name(name)) => Ok(Ident::new(&name, Span::call_site())),
Err(e) => Err(Error::new(Span::call_site(), e)),
}
}
/// Match `#[codec(crate = ...)]` and return the `...`
fn codec_crate_path_lit(attr: &Attribute) -> Option<Lit> {
// match `#[codec ...]`
if !attr.path.is_ident("codec") {
return None;
};
// match `#[codec(crate = ...)]` and return the `...`
match attr.parse_meta() {
Ok(Meta::NameValue(MetaNameValue { path, lit, .. })) if path.is_ident("crate") => {
Some(lit)
}
_ => None,
}
}
/// Match `#[codec(crate = "...")]` and return the contents as a `Path`
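/// For instance (hypothetical usage), `#[codec(crate = "my_reexports::codec")]` on the deriving
/// type resolves to the path `my_reexports::codec`; without the attribute, the name under which
/// `parity-scale-codec` is imported at the call site (as found by `crate_access`) is used.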
pub fn codec_crate_path(attrs: &[Attribute]) -> syn::Result<Path> {
match attrs.iter().find_map(codec_crate_path_lit) {
Some(Lit::Str(lit_str)) => lit_str.parse::<Path>(),
Some(lit) => {
Err(Error::new(
lit.span(),
"Expected format: #[codec(crate = \"path::to::codec\")]",
))
}
None => crate_access().map(|ident| ident.into()),
}
}
/// Trait bounds.
pub type TraitBounds = Punctuated<syn::WherePredicate, token::Comma>;
......@@ -179,12 +223,20 @@ pub fn filter_skip_unnamed<'a>(fields: &'a syn::FieldsUnnamed) -> impl Iterator<
/// Ensure attributes are correctly applied. This *must* be called before using
/// any of the attribute finder methods or the macro may panic if it encounters
/// misapplied attributes.
/// `#[codec(dumb_trait_bound)]` is the only accepted top attribute.
///
/// The top level can have the following attributes:
///
/// * `#[codec(dumb_trait_bound)]`
/// * `#[codec(crate = "path::to::crate")]`
///
/// Fields can have the following attributes:
///
/// * `#[codec(skip)]`
/// * `#[codec(compact)]`
/// * `#[codec(encoded_as = "$EncodeAs")]` with $EncodedAs a valid TokenStream
///
/// Variants can have the following attributes:
///
/// * `#[codec(skip)]`
/// * `#[codec(index = $int)]`
pub fn check_attributes(input: &DeriveInput) -> syn::Result<()> {
......@@ -293,26 +345,24 @@ fn check_variant_attribute(attr: &Attribute) -> syn::Result<()> {
// Only `#[codec(dumb_trait_bound)]` is accepted as top attribute
fn check_top_attribute(attr: &Attribute) -> syn::Result<()> {
let top_error =
"Invalid attribute only `#[codec(dumb_trait_bound)]`, `#[codec(encode_bound(T: Encode))]` or \
let top_error = "Invalid attribute: only `#[codec(dumb_trait_bound)]`, \
`#[codec(encode_bound(T: Encode))]`, `#[codec(crate = \"path::to::crate\")]`, or \
`#[codec(decode_bound(T: Decode))]` are accepted as top attribute";
if attr.path.is_ident("codec") {
if attr.parse_args::<CustomTraitBound<encode_bound>>().is_ok() {
return Ok(())
} else if attr.parse_args::<CustomTraitBound<decode_bound>>().is_ok() {
return Ok(())
} else {
match attr.parse_meta()? {
Meta::List(ref meta_list) if meta_list.nested.len() == 1 => {
match meta_list.nested.first().expect("Just checked that there is one item; qed") {
if attr.path.is_ident("codec")
&& attr.parse_args::<CustomTraitBound<encode_bound>>().is_err()
&& attr.parse_args::<CustomTraitBound<decode_bound>>().is_err()
&& codec_crate_path_lit(attr).is_none()
{
match attr.parse_meta()? {
Meta::List(ref meta_list) if meta_list.nested.len() == 1 => {
match meta_list.nested.first().expect("Just checked that there is one item; qed") {
NestedMeta::Meta(Meta::Path(path))
if path.get_ident().map_or(false, |i| i == "dumb_trait_bound") => Ok(()),
elt @ _ => Err(syn::Error::new(elt.span(), top_error)),
}
},
_ => Err(syn::Error::new(attr.span(), top_error)),
}
_ => Err(syn::Error::new(attr.span(), top_error)),
}
} else {
Ok(())
......
// Copyright 2017, 2018 Parity Technologies
// Copyright 2017-2021 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
......@@ -21,8 +21,8 @@
//! suitable for resource-constrained execution environments like blockchain runtimes and low-power,
//! low-memory devices.
//!
//! It is important to note that the encoding context (knowledge of how the types and data structures look)
//! needs to be known separately at both encoding and decoding ends.
//! It is important to note that the encoding context (knowledge of how the types and data
//! structures look) needs to be known separately at both encoding and decoding ends.
//! The encoded data does not include this contextual information.
//!
//! To get a better understanding of how the encoding is done for different types,
......@@ -35,46 +35,55 @@
//!
//! ### Encode
//!
//! The `Encode` trait is used for encoding of data into the SCALE format. The `Encode` trait contains the following functions:
//! The `Encode` trait is used for encoding of data into the SCALE format. The `Encode` trait
//! contains the following functions:
//!
//! * `size_hint(&self) -> usize`: Gets the capacity (in bytes) required for the encoded data.
//! This is to avoid double-allocation of memory needed for the encoding.
//! It can be an estimate and does not need to be an exact number.
//! If the size is not known, even no good maximum, then we can skip this function from the trait implementation.
//! This is required to be a cheap operation, so should not involve iterations etc.
//! * `encode_to<T: Output>(&self, dest: &mut T)`: Encodes the value and appends it to a destination buffer.
//! If the size is not known, even no good maximum, then we can skip this function from the trait
//! implementation. This is required to be a cheap operation, so should not involve iterations etc.
//! * `encode_to<T: Output>(&self, dest: &mut T)`: Encodes the value and appends it to a destination
//! buffer.
//! * `encode(&self) -> Vec<u8>`: Encodes the type data and returns the encoded bytes.
//! * `using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R`: Encodes the type data and executes a closure on the encoded value.
//! * `using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R`: Encodes the type data and
//! executes a closure on the encoded value.
//! Returns the result from the executed closure.
//!
//! **Note:** Implementations should override `using_encoded` for value types and `encode_to` for allocating types.
//! `size_hint` should be implemented for all types, wherever possible. Wrapper types should override all methods.
//! **Note:** Implementations should override `using_encoded` for value types and `encode_to` for
//! allocating types. `size_hint` should be implemented for all types, wherever possible. Wrapper
//! types should override all methods.
//!
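//! As a minimal sketch of the main entry point (fixed-width integers encode to their
//! little-endian bytes), encoding a `u16` looks like this:
//!
//! ```
//! # use parity_scale_codec::Encode;
//! assert_eq!(42u16.encode(), vec![42u8, 0]);
//! ```
//!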
//! ### Decode
//!
//! The `Decode` trait is used for deserialization/decoding of encoded data into the respective types.
//! The `Decode` trait is used for deserialization/decoding of encoded data into the respective
//! types.
//!
//! * `fn decode<I: Input>(value: &mut I) -> Result<Self, Error>`: Tries to decode the value from SCALE format to the type it is called on.
//! * `fn decode<I: Input>(value: &mut I) -> Result<Self, Error>`: Tries to decode the value from
//! SCALE format to the type it is called on.
//! Returns an `Err` if the decoding fails.
//!
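//! A minimal sketch decoding the bytes produced above:
//!
//! ```
//! # use parity_scale_codec::Decode;
//! let mut input: &[u8] = &[42, 0];
//! assert_eq!(u16::decode(&mut input).unwrap(), 42u16);
//! ```
//!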
//! ### CompactAs
//!
//! The `CompactAs` trait is used for wrapping custom types/structs as compact types, which makes them even more space/memory efficient.
//! The compact encoding is described [here](https://substrate.dev/docs/en/knowledgebase/advanced/codec#compactgeneral-integers).
//! The `CompactAs` trait is used for wrapping custom types/structs as compact types, which makes
//! them even more space/memory efficient. The compact encoding is described [here](https://substrate.dev/docs/en/knowledgebase/advanced/codec#compactgeneral-integers).
//!
//! * `encode_as(&self) -> &Self::As`: Encodes the type (self) as a compact type.
//! The type `As` is defined in the same trait and its implementation should be compact encode-able.
//! * `decode_from(_: Self::As) -> Result<Self, Error>`: Decodes the type (self) from a compact encode-able type.
//! * `decode_from(_: Self::As) -> Result<Self, Error>`: Decodes the type (self) from a compact
//! encode-able type.
//!
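//! As a quick sketch of the compact encoding itself (not a `CompactAs` implementation), small
//! integers collapse into a single byte:
//!
//! ```
//! # use parity_scale_codec::{Compact, Encode};
//! // Single-byte mode: the value shifted left past the two mode bits.
//! assert_eq!(Compact(1u32).encode(), vec![0b0000_0100]);
//! ```
//!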
//! ### HasCompact
//!
//! The `HasCompact` trait, if implemented, tells that the corresponding type is a compact encode-able type.
//! The `HasCompact` trait, if implemented, tells that the corresponding type is a compact
//! encode-able type.
//!
//! ### EncodeLike
//!
//! The `EncodeLike` trait needs to be implemented for each type manually. When using derive, it is
//! done automatically for you. Basically the trait gives you the opportunity to accept multiple types
//! to a function that all encode to the same representation.
//! done automatically for you. Basically the trait gives you the opportunity to accept multiple
//! types to a function that all encode to the same representation.
//!
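//! A sketch of such a function (a hypothetical helper, relying on the reference impls this
//! crate provides):
//!
//! ```
//! # use parity_scale_codec::{Encode, EncodeLike};
//! fn encode_like<T: Encode, R: EncodeLike<T>>(value: R) -> Vec<u8> {
//!     value.encode()
//! }
//!
//! // An owned `u32` and a reference to one encode identically.
//! assert_eq!(encode_like::<u32, _>(5u32), encode_like::<u32, _>(&5u32));
//! ```
//!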
//! ## Usage Examples
//!
......@@ -212,24 +221,22 @@
//! ## Derive attributes
//!
//! The derive implementation supports the following attributes:
//! - `codec(dumb_trait_bound)`: This attribute needs to be placed above the type that one of the trait
//! should be implemented for. It will make the algorithm that determines the to-add trait bounds
//! fall back to just use the type parameters of the type. This can be useful for situation where
//! the algorithm includes private types in the public interface. By using this attribute, you should
//! not get this error/warning again.
//! - `codec(dumb_trait_bound)`: This attribute needs to be placed above the type that one of the
//! trait should be implemented for. It will make the algorithm that determines the to-add trait
//! bounds fall back to just use the type parameters of the type. This can be useful for situation
//! where the algorithm includes private types in the public interface. By using this attribute,
//! you should not get this error/warning again.
//! - `codec(skip)`: Needs to be placed above a field or variant and makes it to be skipped while
//! encoding/decoding.
//! - `codec(compact)`: Needs to be placed above a field and makes the field use compact encoding.
//! (The type needs to support compact encoding.)
//! - `codec(encoded_as = "OtherType")`: Needs to be placed above a field and makes the field being encoded
//! by using `OtherType`.
//! - `codec(encoded_as = "OtherType")`: Needs to be placed above a field and makes the field being
//! encoded by using `OtherType`.
//! - `codec(index = 0)`: Needs to be placed above an enum variant to make the variant use the given
//! index when encoded. By default the index is determined by counting from `0` beginning with the
//! first variant.
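//!
//! A short sketch combining several of these attributes on a hypothetical enum:
//!
//! ```
//! # use parity_scale_codec::{Encode, Decode};
//! #[derive(Encode, Decode)]
//! enum Example {
//!     #[codec(index = 10)]
//!     A(#[codec(compact)] u64),
//!     B {
//!         #[codec(skip)]
//!         cached: bool,
//!         value: u32,
//!     },
//! }
//! ```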
//!
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
......@@ -274,6 +281,7 @@ mod depth_limit;
mod encode_append;
mod encode_like;
mod error;
mod max_encoded_len;
pub use self::error::Error;
pub use self::codec::{
......@@ -289,3 +297,49 @@ pub use self::decode_all::DecodeAll;
pub use self::depth_limit::DecodeLimit;
pub use self::encode_append::EncodeAppend;
pub use self::encode_like::{EncodeLike, Ref};
pub use max_encoded_len::MaxEncodedLen;
/// Derive macro for [`MaxEncodedLen`][max_encoded_len::MaxEncodedLen].
///
/// # Examples
///
/// ```
/// # use parity_scale_codec::{Encode, MaxEncodedLen};
/// #[derive(Encode, MaxEncodedLen)]
/// struct Example;
/// ```
///
/// ```
/// # use parity_scale_codec::{Encode, MaxEncodedLen};
/// #[derive(Encode, MaxEncodedLen)]
/// struct TupleStruct(u8, u32);
///
/// assert_eq!(TupleStruct::max_encoded_len(), u8::max_encoded_len() + u32::max_encoded_len());
/// ```
///
/// ```
/// # use parity_scale_codec::{Encode, MaxEncodedLen};
/// #[derive(Encode, MaxEncodedLen)]
/// enum GenericEnum<T> {
/// A,
/// B(T),
/// }
///
/// assert_eq!(GenericEnum::<u8>::max_encoded_len(), u8::max_encoded_len() + u8::max_encoded_len());
/// assert_eq!(GenericEnum::<u128>::max_encoded_len(), u8::max_encoded_len() + u128::max_encoded_len());
/// ```
///
/// # Within other macros
///
/// Sometimes the `MaxEncodedLen` trait and macro are used within another macro, and it can't be
/// guaranteed that the `max_encoded_len` module is available at the call site. In that case, the
/// macro should reexport the `max_encoded_len` module and specify the path to the reexport:
///
/// ```ignore
/// pub use parity_scale_codec::max_encoded_len;