diff --git a/Cargo.lock b/Cargo.lock
index 55cc1721bddeb3ec5209a4db3732d8042393e55b..7e41b7e99374472fdebce9f107f642ece9b6a267 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17263,6 +17263,16 @@ dependencies = [
  "itertools 0.10.5",
 ]
 
+[[package]]
+name = "polkadot-ckb-merkle-mountain-range"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "221c71b432b38e494a0fdedb5f720e4cb974edf03a0af09e5b2238dbac7e6947"
+dependencies = [
+ "cfg-if",
+ "itertools 0.10.5",
+]
+
 [[package]]
 name = "polkadot-cli"
 version = "7.0.0"
@@ -26958,7 +26968,7 @@ dependencies = [
  "array-bytes",
  "log",
  "parity-scale-codec",
- "polkadot-ckb-merkle-mountain-range",
+ "polkadot-ckb-merkle-mountain-range 0.8.1",
  "scale-info",
  "serde",
  "sp-api 26.0.0",
@@ -26976,7 +26986,7 @@ checksum = "9a12dd76e368f1e48144a84b4735218b712f84b3f976970e2f25a29b30440e10"
 dependencies = [
  "log",
  "parity-scale-codec",
- "polkadot-ckb-merkle-mountain-range",
+ "polkadot-ckb-merkle-mountain-range 0.7.0",
  "scale-info",
  "serde",
  "sp-api 34.0.0",
diff --git a/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs b/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
index 317c9149ec6c57c8e1c9d5bc5deb5ee648175ada..54989c4f549c55a16abd14139334a2f2a513a7e6 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_beefy_mmr`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -48,14 +48,25 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_beefy_mmr`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(1_166_954, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 65
+			.saturating_add(Weight::from_parts(1_356, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 7_116_000 picoseconds.
-		Weight::from_parts(7_343_000, 0)
+		// Minimum execution time: 6_272_000 picoseconds.
+		Weight::from_parts(6_452_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -63,10 +74,10 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `234`
+		//  Measured:  `254`
 		//  Estimated: `3505`
-		// Minimum execution time: 5_652_000 picoseconds.
-		Weight::from_parts(5_963_000, 0)
+		// Minimum execution time: 6_576_000 picoseconds.
+		Weight::from_parts(6_760_000, 0)
 			.saturating_add(Weight::from_parts(0, 3505))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -77,13 +88,13 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `226`
+		//  Measured:  `246`
 		//  Estimated: `1517`
-		// Minimum execution time: 11_953_000 picoseconds.
-		Weight::from_parts(15_978_891, 0)
+		// Minimum execution time: 12_538_000 picoseconds.
+		Weight::from_parts(24_516_023, 0)
 			.saturating_add(Weight::from_parts(0, 1517))
-			// Standard Error: 1_780
-			.saturating_add(Weight::from_parts(1_480_582, 0).saturating_mul(n.into()))
+			// Standard Error: 1_923
+			.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 	}
 }
diff --git a/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs b/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
index 5be207e3fcff484a789d209b3f3cdd7db7c49e15..8de9f6ab53e6a8f252ea13102aa7534e6fe4f179 100644
--- a/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_beefy_mmr`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -48,14 +48,25 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_beefy_mmr`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 628_000 picoseconds.
+		Weight::from_parts(1_200_102, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 63
+			.saturating_add(Weight::from_parts(1_110, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 7_850_000 picoseconds.
-		Weight::from_parts(8_169_000, 0)
+		// Minimum execution time: 9_862_000 picoseconds.
+		Weight::from_parts(10_329_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -63,10 +74,10 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `201`
+		//  Measured:  `221`
 		//  Estimated: `3505`
-		// Minimum execution time: 6_852_000 picoseconds.
-		Weight::from_parts(7_448_000, 0)
+		// Minimum execution time: 6_396_000 picoseconds.
+		Weight::from_parts(6_691_000, 0)
 			.saturating_add(Weight::from_parts(0, 3505))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -77,13 +88,13 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `193`
+		//  Measured:  `213`
 		//  Estimated: `1517`
-		// Minimum execution time: 12_860_000 picoseconds.
-		Weight::from_parts(17_158_162, 0)
+		// Minimum execution time: 12_553_000 picoseconds.
+		Weight::from_parts(24_003_920, 0)
 			.saturating_add(Weight::from_parts(0, 1517))
-			// Standard Error: 1_732
-			.saturating_add(Weight::from_parts(1_489_410, 0).saturating_mul(n.into()))
+			// Standard Error: 2_023
+			.saturating_add(Weight::from_parts(1_390_986, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 	}
 }
diff --git a/prdoc/pr_6856.prdoc b/prdoc/pr_6856.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..480c3acea1952c1e7642cca93437e901c8b7359e
--- /dev/null
+++ b/prdoc/pr_6856.prdoc
@@ -0,0 +1,28 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Enable report_fork_voting()
+
+doc:
+  - audience:
+    - Runtime Dev
+    - Runtime User
+    description: |
+      This PR enables calling `report_fork_voting`.
+      In order to do this we needed to also check that the ancestry proof is optimal.
+
+crates:
+  - name: pallet-mmr
+    bump: minor
+  - name: sp-mmr-primitives
+    bump: minor
+  - name: sp-consensus-beefy
+    bump: minor
+  - name: rococo-runtime
+    bump: minor
+  - name: pallet-beefy
+    bump: minor
+  - name: pallet-beefy-mmr
+    bump: minor
+  - name: westend-runtime
+    bump: minor
diff --git a/substrate/frame/beefy-mmr/src/benchmarking.rs b/substrate/frame/beefy-mmr/src/benchmarking.rs
index fea6a2078f0f1b1fd50b4e29e9d14abcb2c5b5fa..4fddd1bccf115c351a9ffe52519f2828fdcad4e9 100644
--- a/substrate/frame/beefy-mmr/src/benchmarking.rs
+++ b/substrate/frame/beefy-mmr/src/benchmarking.rs
@@ -49,6 +49,24 @@ fn init_block<T: Config>(block_num: u32) {
 mod benchmarks {
 	use super::*;
 
+	/// Generate ancestry proofs with `n` leaves and benchmark the logic that checks
+	/// if the proof is optimal.
+	#[benchmark]
+	fn n_leafs_proof_is_optimal(n: Linear<2, 512>) {
+		pallet_mmr::UseLocalStorage::<T>::set(true);
+
+		for block_num in 1..=n {
+			init_block::<T>(block_num);
+		}
+		let proof = Mmr::<T>::generate_mock_ancestry_proof().unwrap();
+		assert_eq!(proof.leaf_count, n as u64);
+
+		#[block]
+		{
+			<BeefyMmr<T> as AncestryHelper<HeaderFor<T>>>::is_proof_optimal(&proof);
+		};
+	}
+
 	#[benchmark]
 	fn extract_validation_context() {
 		pallet_mmr::UseLocalStorage::<T>::set(true);
diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs
index ef99bc1e9cf11821718085804b50f25b80e5e8f0..c7fcdeff87999798fac8b2f4005725b853aa0e47 100644
--- a/substrate/frame/beefy-mmr/src/lib.rs
+++ b/substrate/frame/beefy-mmr/src/lib.rs
@@ -210,6 +210,18 @@ where
 			.ok()
 	}
 
+	fn is_proof_optimal(proof: &Self::Proof) -> bool {
+		let is_proof_optimal = pallet_mmr::Pallet::<T>::is_ancestry_proof_optimal(proof);
+
+		// We don't check the proof size when running benchmarks, since we use mock proofs
+		// which would cause the test to fail.
+		if cfg!(feature = "runtime-benchmarks") {
+			return true
+		}
+
+		is_proof_optimal
+	}
+
 	fn extract_validation_context(header: HeaderFor<T>) -> Option<Self::ValidationContext> {
 		// Check if the provided header is canonical.
 		let expected_hash = frame_system::Pallet::<T>::block_hash(header.number());
@@ -292,6 +304,10 @@ impl<T: Config> AncestryHelperWeightInfo<HeaderFor<T>> for Pallet<T>
 where
 	T: pallet_mmr::Config<Hashing = sp_consensus_beefy::MmrHashing>,
 {
+	fn is_proof_optimal(proof: &<Self as AncestryHelper<HeaderFor<T>>>::Proof) -> Weight {
+		<T as Config>::WeightInfo::n_leafs_proof_is_optimal(proof.leaf_count.saturated_into())
+	}
+
 	fn extract_validation_context() -> Weight {
 		<T as Config>::WeightInfo::extract_validation_context()
 	}
diff --git a/substrate/frame/beefy-mmr/src/weights.rs b/substrate/frame/beefy-mmr/src/weights.rs
index dcfdb560ee94983ab95f8e74d1433e739403bf9e..5f7f7055311cdecda3f2917141cf1abbad98d7da 100644
--- a/substrate/frame/beefy-mmr/src/weights.rs
+++ b/substrate/frame/beefy-mmr/src/weights.rs
@@ -51,6 +51,7 @@ use core::marker::PhantomData;
 
 /// Weight functions needed for `pallet_beefy_mmr`.
 pub trait WeightInfo {
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight;
 	fn extract_validation_context() -> Weight;
 	fn read_peak() -> Weight;
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight;
@@ -59,25 +60,38 @@ pub trait WeightInfo {
 /// Weights for `pallet_beefy_mmr` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(1_166_954, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 65
+			.saturating_add(Weight::from_parts(1_356, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 6_687_000 picoseconds.
-		Weight::from_parts(6_939_000, 3509)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_272_000 picoseconds.
+		Weight::from_parts(6_452_000, 0)
+			.saturating_add(Weight::from_parts(0, 3509))
+			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::Nodes` (r:1 w:0)
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `386`
+		//  Measured:  `254`
 		//  Estimated: `3505`
-		// Minimum execution time: 10_409_000 picoseconds.
-		Weight::from_parts(10_795_000, 3505)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_576_000 picoseconds.
+		Weight::from_parts(6_760_000, 0)
+			.saturating_add(Weight::from_parts(0, 3505))
+			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::RootHash` (r:1 w:0)
 	/// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
@@ -86,37 +100,51 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `378`
+		//  Measured:  `246`
 		//  Estimated: `1517`
-		// Minimum execution time: 15_459_000 picoseconds.
-		Weight::from_parts(21_963_366, 1517)
-			// Standard Error: 1_528
-			.saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(2_u64))
+		// Minimum execution time: 12_538_000 picoseconds.
+		Weight::from_parts(24_516_023, 0)
+			.saturating_add(Weight::from_parts(0, 1517))
+			// Standard Error: 1_923
+			.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(2))
 	}
 }
 
 // For backwards compatibility and tests.
 impl WeightInfo for () {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(1_166_954, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 65
+			.saturating_add(Weight::from_parts(1_356, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 6_687_000 picoseconds.
-		Weight::from_parts(6_939_000, 3509)
-			.saturating_add(RocksDbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_272_000 picoseconds.
+		Weight::from_parts(6_452_000, 0)
+			.saturating_add(Weight::from_parts(0, 3509))
+			.saturating_add(RocksDbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::Nodes` (r:1 w:0)
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `386`
+		//  Measured:  `254`
 		//  Estimated: `3505`
-		// Minimum execution time: 10_409_000 picoseconds.
-		Weight::from_parts(10_795_000, 3505)
-			.saturating_add(RocksDbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_576_000 picoseconds.
+		Weight::from_parts(6_760_000, 0)
+			.saturating_add(Weight::from_parts(0, 3505))
+			.saturating_add(RocksDbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::RootHash` (r:1 w:0)
 	/// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
@@ -125,12 +153,13 @@ impl WeightInfo for () {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `378`
+		//  Measured:  `246`
 		//  Estimated: `1517`
-		// Minimum execution time: 15_459_000 picoseconds.
-		Weight::from_parts(21_963_366, 1517)
-			// Standard Error: 1_528
-			.saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into()))
-			.saturating_add(RocksDbWeight::get().reads(2_u64))
+		// Minimum execution time: 12_538_000 picoseconds.
+		Weight::from_parts(24_516_023, 0)
+			.saturating_add(Weight::from_parts(0, 1517))
+			// Standard Error: 1_923
+			.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
+			.saturating_add(RocksDbWeight::get().reads(2))
 	}
 }
diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs
index 3a49b9e169ce904d0185408fa8095d933ac730ad..294d64427ef8a62ba02e55ab62e9c0af9f2486d4 100644
--- a/substrate/frame/beefy/src/equivocation.rs
+++ b/substrate/frame/beefy/src/equivocation.rs
@@ -207,11 +207,17 @@ impl<T: Config> EquivocationEvidenceFor<T> {
 					return Err(Error::<T>::InvalidDoubleVotingProof);
 				}
 
-				return Ok(())
+				Ok(())
 			},
 			EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => {
 				let ForkVotingProof { vote, ancestry_proof, header } = equivocation_proof;
 
+				if !<T::AncestryHelper as AncestryHelper<HeaderFor<T>>>::is_proof_optimal(
+					&ancestry_proof,
+				) {
+					return Err(Error::<T>::InvalidForkVotingProof);
+				}
+
 				let maybe_validation_context = <T::AncestryHelper as AncestryHelper<
 					HeaderFor<T>,
 				>>::extract_validation_context(header);
diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs
index cf690a9df339dcd3e93ad3886a84ba3da2085f8c..e57fc0e21bc1eb99d594d3df9a24c1d33e0a82ff 100644
--- a/substrate/frame/beefy/src/lib.rs
+++ b/substrate/frame/beefy/src/lib.rs
@@ -755,7 +755,8 @@ pub(crate) trait WeightInfoExt: WeightInfo {
 		max_nominators_per_validator: u32,
 		ancestry_proof: &<T::AncestryHelper as AncestryHelper<HeaderFor<T>>>::Proof,
 	) -> Weight {
-		let _weight = <T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::extract_validation_context()
+		<T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::is_proof_optimal(&ancestry_proof)
+			.saturating_add(<T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::extract_validation_context())
 			.saturating_add(
 				<T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::is_non_canonical(
 					ancestry_proof,
@@ -765,12 +766,7 @@ pub(crate) trait WeightInfoExt: WeightInfo {
 				1,
 				validator_count,
 				max_nominators_per_validator,
-			));
-
-		// TODO: https://github.com/paritytech/polkadot-sdk/issues/4523 - return `_weight` here.
-		// We return `Weight::MAX` currently in order to disallow this extrinsic for the moment.
-		// We need to check that the proof is optimal.
-		Weight::MAX
+			))
 	}
 
 	fn report_future_block_voting(
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index 38e0cc4cfc26641063af61a33b93cd8e996a2a84..4b5f1d103b506f31ce4b47c2babb77fdee6c0100 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -99,6 +99,7 @@ pub struct MockAncestryProofContext {
 
 #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
 pub struct MockAncestryProof {
+	pub is_optimal: bool,
 	pub is_non_canonical: bool,
 }
 
@@ -128,6 +129,10 @@ impl<Header: HeaderT> AncestryHelper<Header> for MockAncestryHelper {
 		unimplemented!()
 	}
 
+	fn is_proof_optimal(proof: &Self::Proof) -> bool {
+		proof.is_optimal
+	}
+
 	fn extract_validation_context(_header: Header) -> Option<Self::ValidationContext> {
 		AncestryProofContext::get()
 	}
@@ -142,6 +147,10 @@ impl<Header: HeaderT> AncestryHelper<Header> for MockAncestryHelper {
 }
 
 impl<Header: HeaderT> AncestryHelperWeightInfo<Header> for MockAncestryHelper {
+	fn is_proof_optimal(_proof: &<Self as AncestryHelper<HeaderFor<Test>>>::Proof) -> Weight {
+		unimplemented!()
+	}
+
 	fn extract_validation_context() -> Weight {
 		unimplemented!()
 	}
diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs
index 89645d21f6baaa64c08a560668e1ef30f745b4e7..1bd0a72b25ecd72cd4477432e025b6bb5fce3870 100644
--- a/substrate/frame/beefy/src/tests.rs
+++ b/substrate/frame/beefy/src/tests.rs
@@ -799,7 +799,7 @@ fn report_fork_voting(
 	let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]);
 	let equivocation_proof = generate_fork_voting_proof(
 		(block_num, payload, set_id, &equivocation_keyring),
-		MockAncestryProof { is_non_canonical: true },
+		MockAncestryProof { is_optimal: true, is_non_canonical: true },
 		System::finalize(),
 	);
 
@@ -835,6 +835,54 @@ fn report_fork_voting_invalid_key_owner_proof() {
 	report_equivocation_invalid_key_owner_proof(report_fork_voting);
 }
 
+#[test]
+fn report_fork_voting_non_optimal_equivocation_proof() {
+	let authorities = test_authorities();
+
+	let mut ext = ExtBuilder::default().add_authorities(authorities).build();
+
+	let mut era = 1;
+	let (block_num, set_id, equivocation_keyring, key_owner_proof) = ext.execute_with(|| {
+		start_era(era);
+		let block_num = System::block_number();
+
+		let validator_set = Beefy::validator_set().unwrap();
+		let authorities = validator_set.validators();
+		let set_id = validator_set.id();
+
+		let equivocation_authority_index = 0;
+		let equivocation_key = &authorities[equivocation_authority_index];
+		let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap();
+
+		// generate a key ownership proof at set id in era 1
+		let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap();
+
+		era += 1;
+		start_era(era);
+		(block_num, set_id, equivocation_keyring, key_owner_proof)
+	});
+	ext.persist_offchain_overlay();
+
+	ext.execute_with(|| {
+		let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]);
+
+		// Simulate non optimal equivocation proof.
+		let equivocation_proof = generate_fork_voting_proof(
+			(block_num + 1, payload.clone(), set_id, &equivocation_keyring),
+			MockAncestryProof { is_optimal: false, is_non_canonical: true },
+			System::finalize(),
+		);
+		assert_err!(
+			Beefy::report_fork_voting_unsigned(
+				RuntimeOrigin::none(),
+				Box::new(equivocation_proof),
+				key_owner_proof.clone(),
+			),
+			Error::<Test>::InvalidForkVotingProof,
+		);
+	});
+}
+
 #[test]
 fn report_fork_voting_invalid_equivocation_proof() {
 	let authorities = test_authorities();
@@ -869,7 +917,7 @@ fn report_fork_voting_invalid_equivocation_proof() {
 		// vote signed with a key that isn't part of the authority set
 		let equivocation_proof = generate_fork_voting_proof(
 			(block_num, payload.clone(), set_id, &BeefyKeyring::Dave),
-			MockAncestryProof { is_non_canonical: true },
+			MockAncestryProof { is_optimal: true, is_non_canonical: true },
 			System::finalize(),
 		);
 		assert_err!(
@@ -884,7 +932,7 @@ fn report_fork_voting_invalid_equivocation_proof() {
 		// Simulate InvalidForkVotingProof error.
 		let equivocation_proof = generate_fork_voting_proof(
 			(block_num + 1, payload.clone(), set_id, &equivocation_keyring),
-			MockAncestryProof { is_non_canonical: false },
+			MockAncestryProof { is_optimal: true, is_non_canonical: false },
 			System::finalize(),
 		);
 		assert_err!(
@@ -945,7 +993,7 @@ fn report_fork_voting_invalid_context() {
 		// different payload than finalized
 		let equivocation_proof = generate_fork_voting_proof(
 			(block_num, payload, set_id, &equivocation_keyring),
-			MockAncestryProof { is_non_canonical: true },
+			MockAncestryProof { is_optimal: true, is_non_canonical: true },
 			System::finalize(),
 		);
 
diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs
index 76d6c2a1ac76f40678e9ace226b0cdefbcb6996a..cc64dfcb7de88aad56c9a941c5d0a01d04b4d0a1 100644
--- a/substrate/frame/merkle-mountain-range/src/lib.rs
+++ b/substrate/frame/merkle-mountain-range/src/lib.rs
@@ -445,6 +445,12 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		mmr.generate_mock_ancestry_proof()
 	}
 
+	pub fn is_ancestry_proof_optimal(
+		ancestry_proof: &primitives::AncestryProof<HashOf<T, I>>,
+	) -> bool {
+		mmr::is_ancestry_proof_optimal::<HashingOf<T, I>>(ancestry_proof)
+	}
+
 	pub fn verify_ancestry_proof(
 		root: HashOf<T, I>,
 		ancestry_proof: AncestryProof<HashOf<T, I>>,
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
index a9818ba471019762b4d8af42765b4da8171130e7..69a08a8b2d6af916efb8d92838da0da2926fcfe8 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
@@ -63,6 +63,18 @@ where
 		.map_err(|e| Error::Verify.log_debug(e))
 }
 
+pub fn is_ancestry_proof_optimal<H>(ancestry_proof: &AncestryProof<H::Output>) -> bool
+where
+	H: frame::traits::Hash,
+{
+	let prev_mmr_size = NodesUtils::new(ancestry_proof.prev_leaf_count).size();
+	let mmr_size = NodesUtils::new(ancestry_proof.leaf_count).size();
+
+	let expected_proof_size =
+		mmr_lib::ancestry_proof::expected_ancestry_proof_size(prev_mmr_size, mmr_size);
+	ancestry_proof.items.len() == expected_proof_size
+}
+
 pub fn verify_ancestry_proof<H, L>(
 	root: H::Output,
 	ancestry_proof: AncestryProof<H::Output>,
@@ -83,9 +95,9 @@ where
 	);
 
 	let raw_ancestry_proof = mmr_lib::AncestryProof::<Node<H, L>, Hasher<H, L>> {
+		prev_mmr_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1),
 		prev_peaks: ancestry_proof.prev_peaks.into_iter().map(|hash| Node::Hash(hash)).collect(),
-		prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1),
-		proof: prev_peaks_proof,
+		prev_peaks_proof,
 	};
 
 	let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::<Node<H, L>, Hasher<H, L>>(
@@ -248,7 +260,7 @@ where
 			prev_leaf_count,
 			leaf_count: self.leaves,
 			items: raw_ancestry_proof
-				.proof
+				.prev_peaks_proof
 				.proof_items()
 				.iter()
 				.map(|(index, item)| (*index, item.hash()))
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
index 85d00f8a65deec4db315cc5cb278de5b589aa0ff..d3232f23bce1cd3352775fb8019f910babdb8a02 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
@@ -18,7 +18,7 @@
 mod mmr;
 pub mod storage;
 
-pub use self::mmr::{verify_ancestry_proof, verify_leaves_proof, Mmr};
+pub use self::mmr::{is_ancestry_proof_optimal, verify_ancestry_proof, verify_leaves_proof, Mmr};
 use crate::primitives::{mmr_lib, DataOrHash, FullLeaf};
 use frame::traits;
 
diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs
index ae0c58e91aba4263e8161083a41dde035356f83a..03b08e51c32aea952ab21f040e92d3e22b44b160 100644
--- a/substrate/frame/merkle-mountain-range/src/tests.rs
+++ b/substrate/frame/merkle-mountain-range/src/tests.rs
@@ -811,6 +811,7 @@ fn generating_and_verifying_ancestry_proofs_works_correctly() {
 		for prev_block_number in 1usize..=500 {
 			let proof =
 				Pallet::<Test>::generate_ancestry_proof(prev_block_number as u64, None).unwrap();
+			assert!(Pallet::<Test>::is_ancestry_proof_optimal(&proof));
 			assert_eq!(
 				Pallet::<Test>::verify_ancestry_proof(root, proof),
 				Ok(prev_roots[prev_block_number - 1])
diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index e977fb0ea25f6a2bcdd1d7fadb0364332a1f710d..0f57cdfc810420176c9354fd2bb29662f2a198cc 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -449,6 +449,9 @@ pub trait AncestryHelper<Header: HeaderT> {
 		best_known_block_number: Option<Header::Number>,
 	) -> Option<Self::Proof>;
 
+	/// Check if the proof is optimal.
+	fn is_proof_optimal(proof: &Self::Proof) -> bool;
+
 	/// Extract the validation context from the provided header.
 	fn extract_validation_context(header: Header) -> Option<Self::ValidationContext>;
 
@@ -463,6 +466,9 @@ pub trait AncestryHelper<Header: HeaderT> {
 
 /// Weight information for the logic in `AncestryHelper`.
 pub trait AncestryHelperWeightInfo<Header: HeaderT>: AncestryHelper<Header> {
+	/// Weight info for the `AncestryHelper::is_proof_optimal()` method.
+	fn is_proof_optimal(proof: &<Self as AncestryHelper<Header>>::Proof) -> Weight;
+
 	/// Weight info for the `AncestryHelper::extract_validation_context()` method.
 	fn extract_validation_context() -> Weight;
 
diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml
index 5f861ca7acf1ce985aedb6199559f6608e00b8ab..0d8a67da7cad1dc9a2f356ffbca4beb553b36042 100644
--- a/substrate/primitives/merkle-mountain-range/Cargo.toml
+++ b/substrate/primitives/merkle-mountain-range/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { workspace = true }
 log = { workspace = true }
-mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.7.0", default-features = false }
+mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.8.1", default-features = false }
 scale-info = { features = ["derive"], workspace = true }
 serde = { features = ["alloc", "derive"], optional = true, workspace = true }
 sp-api = { workspace = true }