diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock
index 5b84808ad4449fa9954b0f446a5e9854e8654dab..d3f1d83495b4924b59f27e942c95b901b966aebb 100644
--- a/polkadot/Cargo.lock
+++ b/polkadot/Cargo.lock
@@ -705,6 +705,15 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "bounded-vec"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afdd1dffefe5fc66262a524b91087c43b16e478b2e3dc49eb11b0e2fd6b6ec90"
+dependencies = [
+ "thiserror",
+]
+
 [[package]]
 name = "bp-header-chain"
 version = "0.1.0"
@@ -6319,6 +6328,7 @@ dependencies = [
 name = "polkadot-node-primitives"
 version = "0.9.9"
 dependencies = [
+ "bounded-vec",
  "futures 0.3.16",
  "parity-scale-codec",
  "polkadot-parachain",
diff --git a/polkadot/erasure-coding/src/lib.rs b/polkadot/erasure-coding/src/lib.rs
index 3c8dd35077b1532807be77fa9c12975b31460328..3c5e7f10e73a6bac37f885b95e910c34859d662e 100644
--- a/polkadot/erasure-coding/src/lib.rs
+++ b/polkadot/erasure-coding/src/lib.rs
@@ -24,8 +24,10 @@
 //! f is the maximum number of faulty validators in the system.
 //! The data is coded so any f+1 chunks can be used to reconstruct the full data.
 
+use std::convert::TryFrom;
+
 use parity_scale_codec::{Decode, Encode};
-use polkadot_node_primitives::AvailableData;
+use polkadot_node_primitives::{AvailableData, Proof};
 use polkadot_primitives::v0::{self, BlakeTwo256, Hash as H256, HashT};
 use sp_core::Blake2Hasher;
 use thiserror::Error;
@@ -245,7 +247,7 @@ impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
 }
 
 impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
-	type Item = (Vec<Vec<u8>>, &'a [u8]);
+	type Item = (Proof, &'a [u8]);
 
 	fn next(&mut self) -> Option<Self::Item> {
 		use trie::Recorder;
@@ -258,13 +260,12 @@ impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
 
 		match res.expect("all nodes in trie present; qed") {
 			Some(_) => {
-				let nodes = recorder.drain().into_iter().map(|r| r.data).collect();
+				let nodes: Vec<Vec<u8>> = recorder.drain().into_iter().map(|r| r.data).collect();
 				let chunk = self.chunks.get(self.current_pos).expect(
 					"there is a one-to-one mapping of chunks to valid merkle branches; qed",
 				);
-
 				self.current_pos += 1;
-				Some((nodes, chunk.as_ref()))
+				Proof::try_from(nodes).ok().map(|proof| (proof, chunk.as_ref()))
 			},
 			None => None,
 		}
@@ -421,7 +422,10 @@ mod tests {
 		assert_eq!(proofs.len(), 10);
 
 		for (i, proof) in proofs.into_iter().enumerate() {
-			assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(&chunks[i]));
+			assert_eq!(
+				branch_hash(&root, &proof.as_vec(), i).unwrap(),
+				BlakeTwo256::hash(&chunks[i])
+			);
 		}
 	}
 }
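// A minimal verification sketch (not part of this diff): `Branches` now yields a bounded
// `Proof`, while `branch_hash` still takes the unbounded `&[Vec<u8>]` form, so call sites
// such as the fetch task further below go through `proof_as_vec()` / `Proof::as_vec()`.
// The function name and imports here are illustrative only.

use polkadot_erasure_coding::branch_hash;
use polkadot_node_primitives::ErasureChunk;
use polkadot_primitives::v0::{BlakeTwo256, Hash, HashT};

fn chunk_matches_erasure_root(erasure_root: &Hash, chunk: &ErasureChunk) -> bool {
	// Recompute the leaf hash expected at this chunk's index from the Merkle branch,
	// then compare it against the hash of the chunk data actually received.
	match branch_hash(erasure_root, &chunk.proof_as_vec(), chunk.index.0 as usize) {
		Ok(anticipated_hash) => anticipated_hash == BlakeTwo256::hash(&chunk.chunk),
		Err(_) => false,
	}
}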
diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs
index c96b2dc4f6c63d1c8006d0718182ae2acebfeb7d..1101edde928ffd70d5a16bb51a364042002f1f71 100644
--- a/polkadot/node/core/av-store/src/tests.rs
+++ b/polkadot/node/core/av-store/src/tests.rs
@@ -16,11 +16,13 @@
 
 use super::*;
 
+use std::convert::TryFrom;
+
 use assert_matches::assert_matches;
 use futures::{channel::oneshot, executor, future, Future};
 
 use parking_lot::Mutex;
-use polkadot_node_primitives::{AvailableData, BlockData, PoV};
+use polkadot_node_primitives::{AvailableData, BlockData, PoV, Proof};
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::v1::{
@@ -287,7 +289,7 @@ fn store_chunk_works() {
 		let chunk = ErasureChunk {
 			chunk: vec![1, 2, 3],
 			index: validator_index,
-			proof: vec![vec![3, 4, 5]],
+			proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 		};
 
 		// Ensure an entry already exists. In reality this would come from watching
@@ -333,7 +335,7 @@ fn store_chunk_does_nothing_if_no_entry_already() {
 		let chunk = ErasureChunk {
 			chunk: vec![1, 2, 3],
 			index: validator_index,
-			proof: vec![vec![3, 4, 5]],
+			proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 		};
 
 		let (tx, rx) = oneshot::channel();
@@ -441,8 +443,11 @@ fn store_block_works() {
 		let mut branches = erasure::branches(chunks.as_ref());
 
 		let branch = branches.nth(5).unwrap();
-		let expected_chunk =
-			ErasureChunk { chunk: branch.1.to_vec(), index: ValidatorIndex(5), proof: branch.0 };
+		let expected_chunk = ErasureChunk {
+			chunk: branch.1.to_vec(),
+			index: ValidatorIndex(5),
+			proof: Proof::try_from(branch.0).unwrap(),
+		};
 
 		assert_eq!(chunk, expected_chunk);
 		virtual_overseer
@@ -545,7 +550,7 @@ fn query_all_chunks_works() {
 			let chunk = ErasureChunk {
 				chunk: vec![1, 2, 3],
 				index: ValidatorIndex(1),
-				proof: vec![vec![3, 4, 5]],
+				proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 			};
 
 			let (tx, rx) = oneshot::channel();
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
index 4800de26d523f26a4d0a44a899080c7bc97a2829..4eed9440952aa3b78cdaf1caaa7b17145b935619 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
@@ -363,7 +363,7 @@ impl RunningTask {
 
 	fn validate_chunk(&self, validator: &AuthorityDiscoveryId, chunk: &ErasureChunk) -> bool {
 		let anticipated_hash =
-			match branch_hash(&self.erasure_root, &chunk.proof, chunk.index.0 as usize) {
+			match branch_hash(&self.erasure_root, &chunk.proof_as_vec(), chunk.index.0 as usize) {
 				Ok(hash) => hash,
 				Err(e) => {
 					tracing::warn!(
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
index 48616e336ae5be6acfca70e6789e71882ae6b2c8..e39a9d5ef16416b8fb03745e5f39d7cf88627e3a 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::HashMap;
+use std::{collections::HashMap, convert::TryFrom};
 
 use parity_scale_codec::Encode;
 
@@ -29,7 +29,7 @@ use sc_network as network;
 use sp_keyring::Sr25519Keyring;
 
 use polkadot_node_network_protocol::request_response::{v1, Recipient};
-use polkadot_node_primitives::{BlockData, PoV};
+use polkadot_node_primitives::{BlockData, PoV, Proof};
 use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
 
 use super::*;
@@ -60,7 +60,7 @@ fn task_does_not_accept_invalid_chunk() {
 				Recipient::Authority(Sr25519Keyring::Alice.public().into()),
 				ChunkFetchingResponse::Chunk(v1::ChunkResponse {
 					chunk: vec![1, 2, 3],
-					proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
+					proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(),
 				}),
 			);
 			m
@@ -170,7 +170,7 @@ fn task_stores_valid_chunk_if_there_is_one() {
 				Recipient::Authority(Sr25519Keyring::Charlie.public().into()),
 				ChunkFetchingResponse::Chunk(v1::ChunkResponse {
 					chunk: vec![1, 2, 3],
-					proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
+					proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(),
 				}),
 			);
 
diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs
index 0b11b22bf3bea635804bdea74c6a7ae68e28a64a..fb01e0b2f9c7ac9fe346a2eccd383350c53d3b6e 100644
--- a/polkadot/node/network/availability-distribution/src/tests/mock.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs
@@ -16,12 +16,12 @@
 
 //! Helper functions and tools to generate mock data useful for testing this subsystem.
 
-use std::sync::Arc;
+use std::{convert::TryFrom, sync::Arc};
 
 use sp_keyring::Sr25519Keyring;
 
 use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
-use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV};
+use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV, Proof};
 use polkadot_primitives::v1::{
 	CandidateCommitments, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt,
 	GroupIndex, Hash, HeadData, Id as ParaId, OccupiedCore, PersistedValidationData, SessionInfo,
@@ -139,7 +139,7 @@ pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) {
 		.map(|(index, (proof, chunk))| ErasureChunk {
 			chunk: chunk.to_vec(),
 			index: ValidatorIndex(index as _),
-			proof,
+			proof: Proof::try_from(proof).unwrap(),
 		})
 		.next()
 		.expect("There really should be 10 chunks.");
diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs
index ea5822e896a9f8f82fa9cfa4d22585d6fb1f7af6..732bb373f1f44977afa3d7a2955694f774c4dae9 100644
--- a/polkadot/node/network/availability-recovery/src/lib.rs
+++ b/polkadot/node/network/availability-recovery/src/lib.rs
@@ -301,9 +301,11 @@ impl RequestChunksPhase {
 
 					let validator_index = chunk.index;
 
-					if let Ok(anticipated_hash) =
-						branch_hash(&params.erasure_root, &chunk.proof, chunk.index.0 as usize)
-					{
+					if let Ok(anticipated_hash) = branch_hash(
+						&params.erasure_root,
+						&chunk.proof_as_vec(),
+						chunk.index.0 as usize,
+					) {
 						let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
 
 						if erasure_chunk_hash != anticipated_hash {
diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs
index fcd2575026e14742006c2f53b4200278917260f0..ed9b5b7ebaa1f967bda15a06f3fc1638e2893771 100644
--- a/polkadot/node/network/availability-recovery/src/tests.rs
+++ b/polkadot/node/network/availability-recovery/src/tests.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use std::{sync::Arc, time::Duration};
+use std::{convert::TryFrom, sync::Arc, time::Duration};
 
 use assert_matches::assert_matches;
 use futures::{executor, future};
@@ -28,7 +28,7 @@ use super::*;
 use sc_network::config::RequestResponseConfig;
 
 use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
-use polkadot_node_primitives::{BlockData, PoV};
+use polkadot_node_primitives::{BlockData, PoV, Proof};
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::v1::{AuthorityDiscoveryId, HeadData, PersistedValidationData};
 use polkadot_subsystem::{
@@ -371,7 +371,7 @@ fn derive_erasure_chunks_with_proofs_and_root(
 		.map(|(index, (proof, chunk))| ErasureChunk {
 			chunk: chunk.to_vec(),
 			index: ValidatorIndex(index as _),
-			proof,
+			proof: Proof::try_from(proof).unwrap(),
 		})
 		.collect::<Vec<ErasureChunk>>();
 
diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs
index 184bcf5f030ec624f28ff1d78699213c0cfb99b8..8a63d653e8c71cd64d243f382834087b09c1581f 100644
--- a/polkadot/node/network/protocol/src/request_response/v1.rs
+++ b/polkadot/node/network/protocol/src/request_response/v1.rs
@@ -19,7 +19,7 @@
 use parity_scale_codec::{Decode, Encode};
 
 use polkadot_node_primitives::{
-	AvailableData, DisputeMessage, ErasureChunk, PoV, UncheckedDisputeMessage,
+	AvailableData, DisputeMessage, ErasureChunk, PoV, Proof, UncheckedDisputeMessage,
 };
 use polkadot_primitives::v1::{
 	CandidateHash, CandidateReceipt, CommittedCandidateReceipt, Hash, Id as ParaId, ValidatorIndex,
@@ -67,7 +67,7 @@ pub struct ChunkResponse {
 	/// The erasure-encoded chunk of data belonging to the candidate block.
 	pub chunk: Vec<u8>,
 	/// Proof for this chunk's branch in the Merkle tree.
-	pub proof: Vec<Vec<u8>>,
+	pub proof: Proof,
 }
 
 impl From<ErasureChunk> for ChunkResponse {
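// A hedged sketch (not part of this diff): since `ChunkResponse::proof` is now a bounded
// `Proof`, an over-deep or oversized proof fails SCALE decoding of the network response
// up front, assuming the derived codecs on `ChunkResponse`. Helper name is illustrative.

use parity_scale_codec::Decode;
use polkadot_node_network_protocol::request_response::v1::ChunkResponse;

fn decode_chunk_response(bytes: &[u8]) -> Option<ChunkResponse> {
	// Rejects proofs with nodes over MERKLE_NODE_MAX_SIZE bytes or with more than
	// MERKLE_PROOF_MAX_DEPTH nodes, instead of allocating for them first.
	ChunkResponse::decode(&mut &bytes[..]).ok()
}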
diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml
index e1aa4ebf3de0209e18e7206e937f85d0374dde34..5f7146eddd82a305f5a055016bd31183d8ae120b 100644
--- a/polkadot/node/primitives/Cargo.toml
+++ b/polkadot/node/primitives/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2018"
 description = "Primitives types for the Node-side"
 
 [dependencies]
+bounded-vec = "0.4"
 futures = "0.3.15"
 polkadot-primitives = { path = "../../primitives" }
 polkadot-statement-table = { path = "../../statement-table" }
diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs
index 19ee14f05acf5f37cc5677ab81e10404485dbff1..ae982708c8cbf44232e58c29b0f0d50c9e393dc1 100644
--- a/polkadot/node/primitives/src/lib.rs
+++ b/polkadot/node/primitives/src/lib.rs
@@ -22,11 +22,12 @@
 
 #![deny(missing_docs)]
 
-use std::pin::Pin;
+use std::{convert::TryFrom, pin::Pin};
 
+use bounded_vec::BoundedVec;
 use futures::Future;
-use parity_scale_codec::{Decode, Encode};
-use serde::{Deserialize, Serialize};
+use parity_scale_codec::{Decode, Encode, Error as CodecError, Input};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 
 pub use sp_consensus_babe::{
 	AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
@@ -51,6 +52,11 @@ pub use disputes::{
 	SignedDisputeStatement, UncheckedDisputeMessage, ValidDisputeVote,
 };
 
+// A 16-ary Merkle Prefix Trie node holds at most 16 hashes of 32 bytes each, i.e. 512 bytes.
+const MERKLE_NODE_MAX_SIZE: usize = 512;
+// A 16-ary Merkle Prefix Trie keyed by a 32-bit ValidatorIndex has depth at most 32 / 4 = 8.
+const MERKLE_PROOF_MAX_DEPTH: usize = 8;
+
 /// The bomb limit for decompressing code blobs.
 pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize;
 
@@ -287,6 +293,95 @@ pub struct AvailableData {
 	pub validation_data: PersistedValidationData,
 }
 
+/// A wrapper around the erasure-chunk proof that decodes into nested `BoundedVec`s, bounding both the proof depth and the size of each node.
+#[derive(PartialEq, Eq, Clone, Debug, Hash)]
+pub struct Proof(BoundedVec<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, 1, MERKLE_PROOF_MAX_DEPTH>);
+
+impl Proof {
+	/// This function allows to convert back to the standard nested Vec format
+	pub fn as_vec(&self) -> Vec<Vec<u8>> {
+		self.0.as_vec().iter().map(|v| v.as_vec().clone()).collect()
+	}
+}
+
+/// Errors raised when constructing a bounded [`Proof`] from unbounded proof data.
+#[derive(thiserror::Error, Debug)]
+pub enum MerkleProofError {
+	#[error("Merkle max proof depth exceeded {0} > {} .", MERKLE_PROOF_MAX_DEPTH)]
+	/// This error signifies that the Proof length exceeds the trie's max depth
+	MerkleProofDepthExceeded(usize),
+
+	#[error("Merkle node max size exceeded {0} > {} .", MERKLE_NODE_MAX_SIZE)]
+	/// This error signifies that a Proof node exceeds the 16-ary max node size
+	MerkleProofNodeSizeExceeded(usize),
+}
+
+impl TryFrom<Vec<Vec<u8>>> for Proof {
+	type Error = MerkleProofError;
+
+	fn try_from(input: Vec<Vec<u8>>) -> Result<Self, Self::Error> {
+		if input.len() > MERKLE_PROOF_MAX_DEPTH {
+			return Err(Self::Error::MerkleProofDepthExceeded(input.len()))
+		}
+		let mut out = Vec::new();
+		for element in input.into_iter() {
+			let length = element.len();
+			let data: BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE> = BoundedVec::from_vec(element)
+				.map_err(|_| Self::Error::MerkleProofNodeSizeExceeded(length))?;
+			out.push(data);
+		}
+		Ok(Proof(BoundedVec::from_vec(out).expect("Buffer size is determined above; qed")))
+	}
+}
+
+impl Decode for Proof {
+	fn decode<I: Input>(value: &mut I) -> Result<Self, CodecError> {
+		let temp: Vec<Vec<u8>> = Decode::decode(value)?;
+		let mut out = Vec::new();
+		for element in temp.into_iter() {
+			let bounded_temp: Result<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, CodecError> =
+				BoundedVec::from_vec(element)
+					.map_err(|_| "Inner node exceeds maximum node size.".into());
+			out.push(bounded_temp?);
+		}
+		BoundedVec::from_vec(out)
+			.map(Self)
+			.map_err(|_| "Merkle proof depth exceeds maximum trie depth".into())
+	}
+}
+
+impl Encode for Proof {
+	fn size_hint(&self) -> usize {
+		MERKLE_NODE_MAX_SIZE * MERKLE_PROOF_MAX_DEPTH
+	}
+
+	fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
+		let temp = self.as_vec();
+		temp.using_encoded(f)
+	}
+}
+
+impl Serialize for Proof {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: Serializer,
+	{
+		serializer.serialize_bytes(&self.encode())
+	}
+}
+
+impl<'de> Deserialize<'de> for Proof {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: Deserializer<'de>,
+	{
+		// Deserialize to a byte vector and SCALE-decode it into a bounded proof
+		let s = Vec::<u8>::deserialize(deserializer)?;
+		let mut slice = s.as_slice();
+		Decode::decode(&mut slice).map_err(de::Error::custom)
+	}
+}
+
 /// A chunk of erasure-encoded block data.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Debug, Hash)]
 pub struct ErasureChunk {
@@ -295,7 +390,14 @@ pub struct ErasureChunk {
 	/// The index of this erasure-encoded chunk of data.
 	pub index: ValidatorIndex,
 	/// Proof for this chunk's branch in the Merkle tree.
-	pub proof: Vec<Vec<u8>>,
+	pub proof: Proof,
+}
+
+impl ErasureChunk {
+	/// Convert the bounded `Proof` into a plain nested `Vec<Vec<u8>>`.
+	pub fn proof_as_vec(&self) -> Vec<Vec<u8>> {
+		self.proof.as_vec()
+	}
 }
 
 /// Compress a PoV, unless it exceeds the [`POV_BOMB_LIMIT`].
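// A minimal round-trip sketch (not part of this diff) for the bounded `Proof` introduced
// above: construction from a plain `Vec<Vec<u8>>` is fallible, SCALE decoding re-applies
// the same depth and node-size bounds, and `as_vec()` recovers the unbounded form.

use std::convert::TryFrom;

use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::Proof;

fn proof_round_trip() {
	let proof = Proof::try_from(vec![vec![1u8; 32], vec![2u8; 32]]).expect("within bounds");

	// Encoding goes through the plain nested-vec representation; decoding re-checks bounds.
	let encoded = proof.encode();
	let decoded = Proof::decode(&mut &encoded[..]).expect("just encoded; still within bounds");
	assert_eq!(decoded, proof);

	// Nine nodes exceed MERKLE_PROOF_MAX_DEPTH (8), so conversion is rejected.
	assert!(Proof::try_from(vec![vec![0u8; 32]; 9]).is_err());
}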