diff --git a/Cargo.lock b/Cargo.lock
index 9c5a2c57da004903f27e05ee6b39ec627f117689..241a2fd3d158737fd56adb9a78c3c3c0051278df 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -544,7 +544,7 @@ dependencies = [
 [[package]]
 name = "ark-secret-scalar"
 version = "0.0.2"
-source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
+source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7"
 dependencies = [
  "ark-ec",
  "ark-ff",
@@ -552,7 +552,7 @@ dependencies = [
  "ark-std",
  "ark-transcript",
  "digest 0.10.7",
- "rand_core 0.6.4",
+ "getrandom_or_panic",
  "zeroize",
 ]
 
@@ -593,7 +593,7 @@ dependencies = [
 [[package]]
 name = "ark-transcript"
 version = "0.0.2"
-source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
+source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7"
 dependencies = [
  "ark-ff",
  "ark-serialize",
@@ -1225,7 +1225,7 @@ dependencies = [
 [[package]]
 name = "bandersnatch_vrfs"
 version = "0.0.4"
-source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
+source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7"
 dependencies = [
  "ark-bls12-381",
  "ark-ec",
@@ -2716,7 +2716,7 @@ dependencies = [
 [[package]]
 name = "common"
 version = "0.1.0"
-source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f"
+source = "git+https://github.com/w3f/ring-proof#61e7b528bc0170d6bf541be32440d569b784425d"
 dependencies = [
  "ark-ec",
  "ark-ff",
@@ -2724,6 +2724,7 @@ dependencies = [
  "ark-serialize",
  "ark-std",
  "fflonk",
+ "getrandom_or_panic",
  "merlin 3.0.0",
  "rand_chacha 0.3.1",
 ]
@@ -4525,7 +4526,7 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632"
 [[package]]
 name = "dleq_vrf"
 version = "0.0.2"
-source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
+source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7"
 dependencies = [
  "ark-ec",
  "ark-ff",
@@ -4535,7 +4536,6 @@ dependencies = [
  "ark-std",
  "ark-transcript",
  "arrayvec 0.7.4",
- "rand_core 0.6.4",
  "zeroize",
 ]
 
@@ -4869,9 +4869,9 @@ dependencies = [
 
 [[package]]
 name = "env_logger"
-version = "0.10.0"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
+checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece"
 dependencies = [
  "humantime",
  "is-terminal",
@@ -5132,7 +5132,7 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "84f2e425d9790201ba4af4630191feac6dcc98765b118d4d18e91d23c2353866"
 dependencies = [
- "env_logger 0.10.0",
+ "env_logger 0.10.1",
  "log",
 ]
 
@@ -5912,6 +5912,16 @@ dependencies = [
  "wasi 0.11.0+wasi-snapshot-preview1",
 ]
 
+[[package]]
+name = "getrandom_or_panic"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9"
+dependencies = [
+ "rand 0.8.5",
+ "rand_core 0.6.4",
+]
+
 [[package]]
 name = "ghash"
 version = "0.4.4"
@@ -10509,6 +10519,24 @@ dependencies = [
  "sp-std 8.0.0",
 ]
 
+[[package]]
+name = "pallet-sassafras"
+version = "0.3.5-dev"
+dependencies = [
+ "array-bytes 6.1.0",
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-consensus-sassafras",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std 8.0.0",
+]
+
 [[package]]
 name = "pallet-scheduler"
 version = "4.0.0-dev"
@@ -14164,7 +14192,7 @@ dependencies = [
 [[package]]
 name = "ring"
 version = "0.1.0"
-source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f"
+source = "git+https://github.com/w3f/ring-proof#61e7b528bc0170d6bf541be32440d569b784425d"
 dependencies = [
  "ark-ec",
  "ark-ff",
diff --git a/Cargo.toml b/Cargo.toml
index 0a7bf912e48a9f479ba79f9caa5a2da95666182d..5fb7c0f2315bccb2e8c295ce64dde0f1b1618762 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -346,6 +346,7 @@ members = [
 	"substrate/frame/root-testing",
 	"substrate/frame/safe-mode",
 	"substrate/frame/salary",
+	"substrate/frame/sassafras",
 	"substrate/frame/scheduler",
 	"substrate/frame/scored-pool",
 	"substrate/frame/session",
diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..7ab2e2e177025fbd2ae4d9c297439b6de6e81338
--- /dev/null
+++ b/substrate/frame/sassafras/Cargo.toml
@@ -0,0 +1,59 @@
+[package]
+name = "pallet-sassafras"
+version = "0.3.5-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+license = "Apache-2.0"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+description = "Consensus extension module for Sassafras consensus."
+readme = "README.md"
+publish = false
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true  }
+frame-support = { path = "../support", default-features = false  }
+frame-system = { path = "../system", default-features = false  }
+log = { version = "0.4.17", default-features = false }
+sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] }
+sp-io = { path = "../../primitives/io", default-features = false  }
+sp-runtime = { path = "../../primitives/runtime", default-features = false }
+sp-std = { path = "../../primitives/std", default-features = false }
+
+[dev-dependencies]
+array-bytes = "6.1"
+sp-core = { path = "../../primitives/core" }
+
+[features]
+default = [ "std" ]
+std = [
+	"frame-benchmarking?/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-codec/std",
+	"scale-info/std",
+	"sp-consensus-sassafras/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]
+# Construct dummy ring context on genesis.
+# Mostly used for testing and development.
+construct-dummy-ring-context = []
diff --git a/substrate/frame/sassafras/README.md b/substrate/frame/sassafras/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0e24a0535578d133d5bbd347ae15ba29379e560
--- /dev/null
+++ b/substrate/frame/sassafras/README.md
@@ -0,0 +1,8 @@
+Runtime module for SASSAFRAS consensus.
+
+- Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41
+- Protocol RFC proposal: https://github.com/polkadot-fellows/RFCs/pull/26
+
+# ⚠️ WARNING ⚠️
+
+The crate interfaces and structures are experimental and may be subject to changes.
diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs
new file mode 100644
index 0000000000000000000000000000000000000000..95a2b4bbce4e5c1ae839db7f344e63aeb7a44ad1
--- /dev/null
+++ b/substrate/frame/sassafras/src/benchmarking.rs
@@ -0,0 +1,272 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Benchmarks for the Sassafras pallet.
+
+use crate::*;
+use sp_consensus_sassafras::{vrf::VrfSignature, EphemeralPublic, EpochConfiguration};
+
+use frame_benchmarking::v2::*;
+use frame_support::traits::Hooks;
+use frame_system::RawOrigin;
+
+const LOG_TARGET: &str = "sassafras::benchmark";
+
+const TICKETS_DATA: &[u8] = include_bytes!("data/25_tickets_100_auths.bin");
+
+fn make_dummy_vrf_signature() -> VrfSignature {
+	// This leverages our knowledge about serialized vrf signature structure.
+	// Mostly to avoid importing all the bandersnatch primitives just for this test.
+	let buf = [
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xb5, 0x5f, 0x8e, 0xc7, 0x68, 0xf5, 0x05, 0x3f, 0xa9,
+		0x18, 0xca, 0x07, 0x13, 0xc7, 0x4b, 0xa3, 0x9a, 0x97, 0xd3, 0x76, 0x8f, 0x0c, 0xbf, 0x2e,
+		0xd4, 0xf9, 0x3a, 0xae, 0xc1, 0x96, 0x2a, 0x64, 0x80,
+	];
+	VrfSignature::decode(&mut &buf[..]).unwrap()
+}
+
+#[benchmarks]
+mod benchmarks {
+	use super::*;
+
+	// For the first block (#1) we do some extra operations.
+	// But it is a one-shot operation, so we don't account for it here.
+	// We use 0, as it will be the path used by all blocks with n != 1.
+	#[benchmark]
+	fn on_initialize() {
+		let block_num = BlockNumberFor::<T>::from(0u32);
+
+		let slot_claim = SlotClaim {
+			authority_idx: 0,
+			slot: Default::default(),
+			vrf_signature: make_dummy_vrf_signature(),
+			ticket_claim: None,
+		};
+		frame_system::Pallet::<T>::deposit_log((&slot_claim).into());
+
+		// We currently don't account for the potential weight added by the `on_finalize`
+		// incremental sorting of the tickets.
+
+		#[block]
+		{
+			// According to `Hooks` trait docs, `on_finalize` `Weight` should be bundled
+			// together with `on_initialize` `Weight`.
+			Pallet::<T>::on_initialize(block_num);
+			Pallet::<T>::on_finalize(block_num)
+		}
+	}
+
+	// Weight for the default internal epoch change trigger.
+	//
+	// Parameters:
+	// - `x`: number of authorities (1:100).
+	// - `y`: epoch length in slots (1000:5000)
+	//
+	// This accounts for the worst case which includes:
+	// - load the full ring context.
+	// - recompute the ring verifier.
+	// - sorting the epoch tickets in one shot
+	//  (here we account for the very unlucky scenario where we haven't done any sort work yet)
+	// - pending epoch change config.
+	//
+	// For this bench we assume a redundancy factor of 2 (suggested value to be used in prod).
+	#[benchmark]
+	fn enact_epoch_change(x: Linear<1, 100>, y: Linear<1000, 5000>) {
+		let authorities_count = x as usize;
+		let epoch_length = y as u32;
+		let redundancy_factor = 2;
+
+		let unsorted_tickets_count = epoch_length * redundancy_factor;
+
+		let mut meta = TicketsMetadata { unsorted_tickets_count, tickets_count: [0, 0] };
+		let config = EpochConfiguration { redundancy_factor, attempts_number: 32 };
+
+		// Triggers ring verifier computation for `x` authorities
+		let mut raw_data = TICKETS_DATA;
+		let (authorities, _): (Vec<AuthorityId>, Vec<TicketEnvelope>) =
+			Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer");
+		let next_authorities: Vec<_> = authorities[..authorities_count].to_vec();
+		let next_authorities = WeakBoundedVec::force_from(next_authorities, None);
+		NextAuthorities::<T>::set(next_authorities);
+
+		// Triggers JIT sorting tickets
+		(0..meta.unsorted_tickets_count)
+			.collect::<Vec<_>>()
+			.chunks(SEGMENT_MAX_SIZE as usize)
+			.enumerate()
+			.for_each(|(segment_id, chunk)| {
+				let segment = chunk
+					.iter()
+					.map(|i| {
+						let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes());
+						TicketId::from_le_bytes(id_bytes)
+					})
+					.collect::<Vec<_>>();
+				UnsortedSegments::<T>::insert(
+					segment_id as u32,
+					BoundedVec::truncate_from(segment),
+				);
+			});
+
+		// Triggers some code related to config change (dummy values)
+		NextEpochConfig::<T>::set(Some(config));
+		PendingEpochConfigChange::<T>::set(Some(config));
+
+		// Triggers the cleanup of the "just elapsed" epoch tickets (i.e. the current one)
+		let epoch_tag = EpochIndex::<T>::get() & 1;
+		meta.tickets_count[epoch_tag as usize] = epoch_length;
+		(0..epoch_length).for_each(|i| {
+			let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes());
+			let id = TicketId::from_le_bytes(id_bytes);
+			TicketsIds::<T>::insert((epoch_tag as u8, i), id);
+			let body = TicketBody {
+				attempt_idx: i,
+				erased_public: EphemeralPublic([i as u8; 32]),
+				revealed_public: EphemeralPublic([i as u8; 32]),
+			};
+			TicketsData::<T>::set(id, Some(body));
+		});
+
+		TicketsMeta::<T>::set(meta);
+
+		#[block]
+		{
+			Pallet::<T>::should_end_epoch(BlockNumberFor::<T>::from(3u32));
+			let next_authorities = Pallet::<T>::next_authorities();
+			// Using a different set of authorities triggers the recomputation of ring verifier.
+			Pallet::<T>::enact_epoch_change(Default::default(), next_authorities);
+		}
+	}
+
+	#[benchmark]
+	fn submit_tickets(x: Linear<1, 25>) {
+		let tickets_count = x as usize;
+
+		let mut raw_data = TICKETS_DATA;
+		let (authorities, tickets): (Vec<AuthorityId>, Vec<TicketEnvelope>) =
+			Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer");
+
+		log::debug!(target: LOG_TARGET, "PreBuiltTickets: {} tickets, {} authorities", tickets.len(), authorities.len());
+
+		// Set `NextRandomness` to the same value used for pre-built tickets
+		// (see `make_tickets_data` test).
+		NextRandomness::<T>::set([0; 32]);
+
+		Pallet::<T>::update_ring_verifier(&authorities);
+
+		// Set next epoch config to accept all the tickets
+		let next_config = EpochConfiguration { attempts_number: 1, redundancy_factor: u32::MAX };
+		NextEpochConfig::<T>::set(Some(next_config));
+
+		// Use the authorities in the pre-built tickets
+		let authorities = WeakBoundedVec::force_from(authorities, None);
+		NextAuthorities::<T>::set(authorities);
+
+		let tickets = tickets[..tickets_count].to_vec();
+		let tickets = BoundedVec::truncate_from(tickets);
+
+		log::debug!(target: LOG_TARGET, "Submitting {} tickets", tickets_count);
+
+		#[extrinsic_call]
+		submit_tickets(RawOrigin::None, tickets);
+	}
+
+	#[benchmark]
+	fn plan_config_change() {
+		let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 10 };
+
+		#[extrinsic_call]
+		plan_config_change(RawOrigin::Root, config);
+	}
+
+	// Construction of ring verifier
+	#[benchmark]
+	fn update_ring_verifier(x: Linear<1, 100>) {
+		let authorities_count = x as usize;
+
+		let mut raw_data = TICKETS_DATA;
+		let (authorities, _): (Vec<AuthorityId>, Vec<TicketEnvelope>) =
+			Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer");
+		let authorities: Vec<_> = authorities[..authorities_count].to_vec();
+
+		#[block]
+		{
+			Pallet::<T>::update_ring_verifier(&authorities);
+		}
+	}
+
+	// Bare loading of ring context.
+	//
+	// It is interesting to see how this compares to 'update_ring_verifier', which
+	// also recomputes and stores the new verifier.
+	#[benchmark]
+	fn load_ring_context() {
+		#[block]
+		{
+			let _ring_ctx = RingContext::<T>::get().unwrap();
+		}
+	}
+
+	// Tickets segments sorting function benchmark.
+	#[benchmark]
+	fn sort_segments(x: Linear<1, 100>) {
+		let segments_count = x as u32;
+		let tickets_count = segments_count * SEGMENT_MAX_SIZE;
+
+		// Construct a bunch of dummy tickets
+		let tickets: Vec<_> = (0..tickets_count)
+			.map(|i| {
+				let body = TicketBody {
+					attempt_idx: i,
+					erased_public: EphemeralPublic([i as u8; 32]),
+					revealed_public: EphemeralPublic([i as u8; 32]),
+				};
+				let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes());
+				let id = TicketId::from_le_bytes(id_bytes);
+				(id, body)
+			})
+			.collect();
+
+		for (chunk_id, chunk) in tickets.chunks(SEGMENT_MAX_SIZE as usize).enumerate() {
+			let segment: Vec<TicketId> = chunk
+				.iter()
+				.map(|(id, body)| {
+					TicketsData::<T>::set(id, Some(body.clone()));
+					*id
+				})
+				.collect();
+			let segment = BoundedVec::truncate_from(segment);
+			UnsortedSegments::<T>::insert(chunk_id as u32, segment);
+		}
+
+		// Update metadata
+		let mut meta = TicketsMeta::<T>::get();
+		meta.unsorted_tickets_count = tickets_count;
+		TicketsMeta::<T>::set(meta.clone());
+
+		log::debug!(target: LOG_TARGET, "Before sort: {:?}", meta);
+		#[block]
+		{
+			Pallet::<T>::sort_segments(u32::MAX, 0, &mut meta);
+		}
+		log::debug!(target: LOG_TARGET, "After sort: {:?}", meta);
+	}
+}
diff --git a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6e81f216455ae9dc61be31a9edef583a652721a8
Binary files /dev/null and b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin differ
diff --git a/substrate/frame/sassafras/src/data/benchmark-results.md b/substrate/frame/sassafras/src/data/benchmark-results.md
new file mode 100644
index 0000000000000000000000000000000000000000..8682f96cbe5a67328b6d494005cf03fff2030178
--- /dev/null
+++ b/substrate/frame/sassafras/src/data/benchmark-results.md
@@ -0,0 +1,99 @@
+# Benchmarks High Level Results
+
+- **Ring size**: the actual number of validators for an epoch
+- **Domain size**: a value which bounds the max size of the ring (max_ring_size = domain_size - 256)
+
+## Verify Submitted Tickets (extrinsic)
+
+`x` = Number of tickets
+
+### Domain=1024, Uncompressed (~ 13 ms + 11·x ms)
+
+    Time ~=    13400
+        + x    11390
+                  µs
+
+### Domain=1024, Compressed (~ 13 ms + 11·x ms)
+
+    Time ~=    13120
+        + x    11370
+                  µs
+
+### Domain=2048, Uncompressed (~ 26 ms + 11·x ms)
+
+    Time ~=    26210
+        + x    11440
+                  µs
+
+### Domain=2048, Compressed (~ 26 ms + 11·x ms)
+
+    Time ~=    26250
+        + x    11460
+                  µs
+
+### Conclusions
+
+- Verification doesn't depend on ring size as verification key is already constructed.
+- The call is fast as far as the max number of tickets which can be submitted in one shot
+  is appropriately bounded.
+- Currently, the bound is set equal to the epoch length, which for Polkadot is 3600.
+  In this case if all the tickets are submitted in one shot timing is expected to be
+  ~39 seconds, which is not acceptable. TODO: find a sensible bound
+
+---
+
+## Recompute Ring Verifier Key (on epoch change)
+
+`x` = Ring size
+
+### Domain=1024, Uncompressed (~ 50 ms)
+
+    Time ~=    54070
+        + x    98.53
+                  µs
+
+### Domain=1024, Compressed (~ 700 ms)
+
+    Time ~=   733700
+        + x    90.49
+                  µs
+
+### Domain=2048, Uncompressed (~ 100 ms)
+
+    Time ~=    107700
+        + x    108.5
+                  µs
+
+### Domain=2048, Compressed (~ 1.5 s)
+
+    Time ~=   1462400
+        + x    65.14
+                  µs
+
+### Conclusions
+
+- Here we load the full ring context data to recompute verification key for the epoch
+- Ring size influence is marginal (e.g. for 1500 validators → ~98 ms to be added to the base time)
+- This step is performed at most once per epoch (if validator set changes).
+- Domain size for the ring context influences the PoV size (see next paragraph)
+- Decompression heavily influences timings (1.5 s vs 100 ms for the same domain size)
+
+---
+
+## Ring Context Data Size
+
+### Domain=1024, Uncompressed
+
+    295412 bytes = ~ 300 KiB
+
+### Domain=1024, Compressed
+
+    147716 bytes = ~ 150 KiB
+
+### Domain=2048, Uncompressed
+
+    590324 bytes = ~ 590 KiB
+
+### Domain=2048, Compressed
+
+    295172 bytes = ~ 300 KiB
diff --git a/substrate/frame/sassafras/src/data/tickets-sort.md b/substrate/frame/sassafras/src/data/tickets-sort.md
new file mode 100644
index 0000000000000000000000000000000000000000..4d96a6825c889b152bbf0471c006e0d85dbed635
--- /dev/null
+++ b/substrate/frame/sassafras/src/data/tickets-sort.md
@@ -0,0 +1,274 @@
+# Segments Incremental Sorting Strategy Empirical Results
+
+Parameters:
+- 128 segments
+- segment max length 128
+- 32767 random tickets ids
+- epoch length 3600 (== max tickets to keep)
+
+The table shows the comparison between the segments left in the unsorted segments buffer
+and the number of new tickets which are added from the last segment to the sorted tickets
+buffer (i.e. how many tickets we retain from the last processed segment)
+
+| Segments Left | Tickets Pushed |
+|-----|-----|
+| 255 | 128 |
+| 254 | 128 |
+| 253 | 128 |
+| 252 | 128 |
+| 251 | 128 |
+| 250 | 128 |
+| 249 | 128 |
+| 248 | 128 |
+| 247 | 128 |
+| 246 | 128 |
+| 245 | 128 |
+| 244 | 128 |
+| 243 | 128 |
+| 242 | 128 |
+| 241 | 128 |
+| 240 | 128 |
+| 239 | 128 |
+| 238 | 128 |
+| 237 | 128 |
+| 236 | 128 |
+| 235 | 128 |
+| 234 | 128 |
+| 233 | 128 |
+| 232 | 128 |
+| 231 | 128 |
+| 230 | 128 |
+| 229 | 128 |
+| 228 | 128 |
+| 227 | 128 |
+| 226 | 126 |
+| 225 | 117 |
+| 224 | 120 |
+| 223 | 110 |
+| 222 | 110 |
+| 221 | 102 |
+| 220 | 107 |
+| 219 | 96 |
+| 218 | 105 |
+| 217 | 92 |
+| 216 | 91 |
+| 215 | 85 |
+| 214 | 84 |
+| 213 | 88 |
+| 212 | 77 |
+| 211 | 86 |
+| 210 | 73 |
+| 209 | 73 |
+| 208 | 81 |
+| 207 | 83 |
+| 206 | 70 |
+| 205 | 84 |
+| 204 | 71 |
+| 203 | 63 |
+| 202 | 60 |
+| 201 | 53 |
+| 200 | 73 |
+| 199 | 55 |
+| 198 | 65 |
+| 197 | 62 |
+| 196 | 55 |
+| 195 | 63 |
+| 194 | 61 |
+| 193 | 48 |
+| 192 | 67 |
+| 191 | 61 |
+| 190 | 55 |
+| 189 | 49 |
+| 188 | 60 |
+| 187 | 49 |
+| 186 | 51 |
+| 185 | 53 |
+| 184 | 47 |
+| 183 | 51 |
+| 182 | 51 |
+| 181 | 53 |
+| 180 | 42 |
+| 179 | 43 |
+| 178 | 48 |
+| 177 | 46 |
+| 176 | 39 |
+| 175 | 54 |
+| 174 | 39 |
+| 173 | 44 |
+| 172 | 51 |
+| 171 | 49 |
+| 170 | 48 |
+| 169 | 48 |
+| 168 | 41 |
+| 167 | 39 |
+| 166 | 41 |
+| 165 | 40 |
+| 164 | 43 |
+| 163 | 53 |
+| 162 | 51 |
+| 161 | 36 |
+| 160 | 45 |
+| 159 | 40 |
+| 158 | 29 |
+| 157 | 37 |
+| 156 | 31 |
+| 155 | 38 |
+| 154 | 31 |
+| 153 | 38 |
+| 152 | 39 |
+| 151 | 30 |
+| 150 | 37 |
+| 149 | 42 |
+| 148 | 35 |
+| 147 | 33 |
+| 146 | 35 |
+| 145 | 37 |
+| 144 | 38 |
+| 143 | 31 |
+| 142 | 38 |
+| 141 | 38 |
+| 140 | 27 |
+| 139 | 31 |
+| 138 | 25 |
+| 137 | 31 |
+| 136 | 26 |
+| 135 | 30 |
+| 134 | 31 |
+| 133 | 37 |
+| 132 | 29 |
+| 131 | 24 |
+| 130 | 31 |
+| 129 | 34 |
+| 128 | 31 |
+| 127 | 28 |
+| 126 | 28 |
+| 125 | 19 |
+| 124 | 27 |
+| 123 | 29 |
+| 122 | 36 |
+| 121 | 32 |
+| 120 | 29 |
+| 119 | 28 |
+| 118 | 33 |
+| 117 | 18 |
+| 116 | 28 |
+| 115 | 27 |
+| 114 | 28 |
+| 113 | 21 |
+| 112 | 23 |
+| 111 | 19 |
+| 110 | 21 |
+| 109 | 20 |
+| 108 | 26 |
+| 107 | 23 |
+| 106 | 30 |
+| 105 | 31 |
+| 104 | 19 |
+| 103 | 25 |
+| 102 | 23 |
+| 101 | 29 |
+| 100 | 18 |
+| 99 | 19 |
+| 98 | 20 |
+| 97 | 21 |
+| 96 | 23 |
+| 95 | 20 |
+| 94 | 27 |
+| 93 | 20 |
+| 92 | 22 |
+| 91 | 23 |
+| 90 | 23 |
+| 89 | 20 |
+| 88 | 15 |
+| 87 | 17 |
+| 86 | 28 |
+| 85 | 25 |
+| 84 | 10 |
+| 83 | 20 |
+| 82 | 23 |
+| 81 | 28 |
+| 80 | 17 |
+| 79 | 23 |
+| 78 | 24 |
+| 77 | 22 |
+| 76 | 18 |
+| 75 | 25 |
+| 74 | 31 |
+| 73 | 27 |
+| 72 | 19 |
+| 71 | 13 |
+| 70 | 17 |
+| 69 | 24 |
+| 68 | 20 |
+| 67 | 12 |
+| 66 | 17 |
+| 65 | 16 |
+| 64 | 26 |
+| 63 | 24 |
+| 62 | 12 |
+| 61 | 19 |
+| 60 | 18 |
+| 59 | 20 |
+| 58 | 18 |
+| 57 | 12 |
+| 56 | 15 |
+| 55 | 17 |
+| 54 | 14 |
+| 53 | 25 |
+| 52 | 22 |
+| 51 | 15 |
+| 50 | 17 |
+| 49 | 15 |
+| 48 | 17 |
+| 47 | 18 |
+| 46 | 17 |
+| 45 | 23 |
+| 44 | 17 |
+| 43 | 13 |
+| 42 | 15 |
+| 41 | 18 |
+| 40 | 11 |
+| 39 | 19 |
+| 38 | 18 |
+| 37 | 12 |
+| 36 | 19 |
+| 35 | 18 |
+| 34 | 15 |
+| 33 | 12 |
+| 32 | 25 |
+| 31 | 20 |
+| 30 | 24 |
+| 29 | 20 |
+| 28 | 10 |
+| 27 | 15 |
+| 26 | 16 |
+| 25 | 15 |
+| 24 | 15 |
+| 23 | 13 |
+| 22 | 12 |
+| 21 | 14 |
+| 20 | 19 |
+| 19 | 17 |
+| 18 | 17 |
+| 17 | 18 |
+| 16 | 15 |
+| 15 | 13 |
+| 14 | 11 |
+| 13 | 16 |
+| 12 | 13 |
+| 11 | 18 |
+| 10 | 19 |
+| 9 | 10 |
+| 8 | 7 |
+| 7 | 15 |
+| 6 | 12 |
+| 5 | 12 |
+| 4 | 17 |
+| 3 | 14 |
+| 2 | 17 |
+| 1 | 9 |
+| 0 | 13 |
+
+# Graph of the same data
+
+![graph](tickets-sort.png)
diff --git a/substrate/frame/sassafras/src/data/tickets-sort.png b/substrate/frame/sassafras/src/data/tickets-sort.png
new file mode 100644
index 0000000000000000000000000000000000000000..b34ce3f37ba9d39aa649cc6d5a216373048c0064
Binary files /dev/null and b/substrate/frame/sassafras/src/data/tickets-sort.png differ
diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b6f405f56549428dfe12684e36013c2489ffd790
--- /dev/null
+++ b/substrate/frame/sassafras/src/lib.rs
@@ -0,0 +1,1081 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Extension module for Sassafras consensus.
+//!
+//! [Sassafras](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS)
+//! is a constant-time block production protocol that aims to ensure that there is
+//! exactly one block produced with constant time intervals rather than multiple or none.
+//!
+//! We run a lottery to distribute block production slots in an epoch and to fix the
+//! order validators produce blocks in, by the beginning of an epoch.
+//!
+//! Each validator signs the same VRF input and publishes the output on-chain. This
+//! value is their lottery ticket that can be validated against their public key.
+//!
+//! We want to keep lottery winners secret, i.e. do not publish their public keys.
+//! At the beginning of the epoch all the validators' tickets are published but not
+//! their public keys.
+//!
+//! A ticket is validated when an honest validator reclaims it on block
+//! production.
+//!
+//! To prevent submission of fake tickets, resulting in empty slots, the validator
+//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's
+//! my VRF output that has been generated using the given VRF input and my secret
+//! key. I'm not telling you my keys, but my public key is among those of the
+//! nominated validators", that is validated before the lottery.
+//!
+//! To anonymously publish the ticket to the chain a validator sends their tickets
+//! to a random validator who later puts it on-chain as a transaction.
+
+#![deny(warnings)]
+#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use log::{debug, error, trace, warn};
+use scale_codec::{Decode, Encode, MaxEncodedLen};
+use scale_info::TypeInfo;
+
+use frame_support::{
+	dispatch::{DispatchResultWithPostInfo, Pays},
+	traits::{Defensive, Get},
+	weights::Weight,
+	BoundedVec, WeakBoundedVec,
+};
+use frame_system::{
+	offchain::{SendTransactionTypes, SubmitTransaction},
+	pallet_prelude::BlockNumberFor,
+};
+use sp_consensus_sassafras::{
+	digests::{ConsensusLog, NextEpochDescriptor, SlotClaim},
+	vrf, AuthorityId, Epoch, EpochConfiguration, Randomness, Slot, TicketBody, TicketEnvelope,
+	TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID,
+};
+use sp_io::hashing;
+use sp_runtime::{
+	generic::DigestItem,
+	traits::{One, Zero},
+	BoundToRuntimeAppPublic,
+};
+use sp_std::prelude::Vec;
+
+#[cfg(feature = "runtime-benchmarks")]
+mod benchmarking;
+#[cfg(all(feature = "std", test))]
+mod mock;
+#[cfg(all(feature = "std", test))]
+mod tests;
+
+pub mod weights;
+pub use weights::WeightInfo;
+
+pub use pallet::*;
+
+const LOG_TARGET: &str = "sassafras::runtime";
+
+// Contextual string used by the VRF to generate per-block randomness.
+const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasOnChainRandomness";
+
+// Max length for segments holding unsorted tickets.
+const SEGMENT_MAX_SIZE: u32 = 128;
+
+/// Authorities bounded vector convenience type.
+pub type AuthoritiesVec<T> = WeakBoundedVec<AuthorityId, <T as Config>::MaxAuthorities>;
+
+/// Epoch length defined by the configuration.
+pub type EpochLengthFor<T> = <T as Config>::EpochLength;
+
+/// Tickets metadata.
+#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)]
+pub struct TicketsMetadata {
+	/// Number of outstanding next epoch tickets requiring to be sorted.
+	///
+	/// These tickets are held by the [`UnsortedSegments`] storage map in segments
+	/// containing at most `SEGMENT_MAX_SIZE` items.
+	pub unsorted_tickets_count: u32,
+
+	/// Number of tickets available for current and next epoch.
+	///
+	/// These tickets are held by the [`TicketsIds`] storage map.
+	///
+	/// The array entry to be used for the current epoch is computed as epoch index modulo 2.
+	pub tickets_count: [u32; 2],
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	/// The Sassafras pallet.
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	/// Configuration parameters.
+	#[pallet::config]
+	pub trait Config: frame_system::Config + SendTransactionTypes<Call<Self>> {
+		/// Amount of slots that each epoch should last.
+		#[pallet::constant]
+		type EpochLength: Get<u32>;
+
+		/// Max number of authorities allowed.
+		#[pallet::constant]
+		type MaxAuthorities: Get<u32>;
+
+		/// Epoch change trigger.
+		///
+		/// Logic to be triggered on every block to query for whether an epoch has ended
+		/// and to perform the transition to the next epoch.
+		type EpochChangeTrigger: EpochChangeTrigger;
+
+		/// Weight information for all calls of this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	/// Sassafras runtime errors.
+	#[pallet::error]
+	pub enum Error<T> {
+		/// Submitted configuration is invalid.
+		InvalidConfiguration,
+	}
+
+	/// Current epoch index.
+	#[pallet::storage]
+	#[pallet::getter(fn epoch_index)]
+	pub type EpochIndex<T> = StorageValue<_, u64, ValueQuery>;
+
+	/// Current epoch authorities.
+	#[pallet::storage]
+	#[pallet::getter(fn authorities)]
+	pub type Authorities<T: Config> = StorageValue<_, AuthoritiesVec<T>, ValueQuery>;
+
+	/// Next epoch authorities.
+	#[pallet::storage]
+	#[pallet::getter(fn next_authorities)]
+	pub type NextAuthorities<T: Config> = StorageValue<_, AuthoritiesVec<T>, ValueQuery>;
+
+	/// First block slot number.
+	///
+	/// As the slots may not be zero-based, we record the slot value for the first block.
+	/// This allows us to always compute relative indices for epochs and slots.
+	#[pallet::storage]
+	#[pallet::getter(fn genesis_slot)]
+	pub type GenesisSlot<T> = StorageValue<_, Slot, ValueQuery>;
+
+	/// Current block slot number.
+	#[pallet::storage]
+	#[pallet::getter(fn current_slot)]
+	pub type CurrentSlot<T> = StorageValue<_, Slot, ValueQuery>;
+
+	/// Current epoch randomness.
+	#[pallet::storage]
+	#[pallet::getter(fn randomness)]
+	pub type CurrentRandomness<T> = StorageValue<_, Randomness, ValueQuery>;
+
+	/// Next epoch randomness.
+	#[pallet::storage]
+	#[pallet::getter(fn next_randomness)]
+	pub type NextRandomness<T> = StorageValue<_, Randomness, ValueQuery>;
+
+	/// Randomness accumulator.
+	///
+	/// Except for the first imported block, its value is updated on block finalization.
+	#[pallet::storage]
+	#[pallet::getter(fn randomness_accumulator)]
+	pub(crate) type RandomnessAccumulator<T> = StorageValue<_, Randomness, ValueQuery>;
+
+	/// The configuration for the current epoch.
+	#[pallet::storage]
+	#[pallet::getter(fn config)]
+	pub type EpochConfig<T> = StorageValue<_, EpochConfiguration, ValueQuery>;
+
+	/// The configuration for the next epoch.
+	#[pallet::storage]
+	#[pallet::getter(fn next_config)]
+	pub type NextEpochConfig<T> = StorageValue<_, EpochConfiguration>;
+
+	/// Pending epoch configuration change that will be set as `NextEpochConfig` when the next
+	/// epoch is enacted.
+	///
+	/// In other words, a configuration change submitted during epoch N will be enacted on epoch
+	/// N+2. This is to maintain coherence for already submitted tickets for epoch N+1 that were
+	/// computed using configuration parameters stored for epoch N+1.
+	#[pallet::storage]
+	pub type PendingEpochConfigChange<T> = StorageValue<_, EpochConfiguration>;
+
+	/// Stored tickets metadata.
+	#[pallet::storage]
+	pub type TicketsMeta<T> = StorageValue<_, TicketsMetadata, ValueQuery>;
+
+	/// Tickets identifiers map.
+	///
+	/// The map holds tickets ids for the current and next epoch.
+	///
+	/// The key is a tuple composed by:
+	/// - `u8` equal to epoch's index modulo 2;
+	/// - `u32` equal to the ticket's index in a sorted list of epoch's tickets.
+	///
+	/// The N-th ticket of epoch X has key (X mod 2, N).
+	///
+	/// Note that the ticket's index doesn't directly correspond to the slot index within the epoch.
+	/// The assignment is computed dynamically using an *outside-in* strategy.
+	///
+	/// Be aware that entries within this map are never removed, only overwritten.
+	/// Last element index should be fetched from the [`TicketsMeta`] value.
+	#[pallet::storage]
+	pub type TicketsIds<T> = StorageMap<_, Identity, (u8, u32), TicketId>;
+
+	/// Tickets to be used for current and next epoch.
+	#[pallet::storage]
+	pub type TicketsData<T> = StorageMap<_, Identity, TicketId, TicketBody>;
+
+	/// Next epoch tickets unsorted segments.
+	///
+	/// Contains lists of tickets where each list represents a batch of tickets
+	/// received via the `submit_tickets` extrinsic.
+	///
+	/// Each segment has max length [`SEGMENT_MAX_SIZE`].
+	#[pallet::storage]
+	pub type UnsortedSegments<T: Config> =
+		StorageMap<_, Identity, u32, BoundedVec<TicketId, ConstU32<SEGMENT_MAX_SIZE>>, ValueQuery>;
+
+	/// The most recent set of tickets which are candidates to become the next
+	/// epoch tickets.
+	#[pallet::storage]
+	pub type SortedCandidates<T> =
+		StorageValue<_, BoundedVec<TicketId, EpochLengthFor<T>>, ValueQuery>;
+
+	/// Parameters used to construct the epoch's ring verifier.
+	///
+	/// In practice: Updatable Universal Reference String and the seed.
+	#[pallet::storage]
+	#[pallet::getter(fn ring_context)]
+	pub type RingContext<T: Config> = StorageValue<_, vrf::RingContext>;
+
+	/// Ring verifier data for the current epoch.
+	#[pallet::storage]
+	pub type RingVerifierData<T: Config> = StorageValue<_, vrf::RingVerifierData>;
+
+	/// Slot claim vrf-preoutput used to generate per-slot randomness.
+	///
+	/// The value is ephemeral and is cleared on block finalization.
+	#[pallet::storage]
+	pub(crate) type ClaimTemporaryData<T> = StorageValue<_, vrf::VrfOutput>;
+
+	/// Genesis configuration for Sassafras protocol.
+	#[pallet::genesis_config]
+	#[derive(frame_support::DefaultNoBound)]
+	pub struct GenesisConfig<T: Config> {
+		/// Genesis authorities.
+		pub authorities: Vec<AuthorityId>,
+		/// Genesis epoch configuration.
+		pub epoch_config: EpochConfiguration,
+		/// Phantom config
+		#[serde(skip)]
+		pub _phantom: sp_std::marker::PhantomData<T>,
+	}
+
+	#[pallet::genesis_build]
+	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
+		fn build(&self) {
+			EpochConfig::<T>::put(self.epoch_config);
+			Pallet::<T>::genesis_authorities_initialize(&self.authorities);
+
+			#[cfg(feature = "construct-dummy-ring-context")]
+			{
+				debug!(target: LOG_TARGET, "Constructing dummy ring context");
+				let ring_ctx = vrf::RingContext::new_testing();
+				RingContext::<T>::put(ring_ctx);
+				Pallet::<T>::update_ring_verifier(&self.authorities);
+			}
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn on_initialize(block_num: BlockNumberFor<T>) -> Weight {
+			debug_assert_eq!(block_num, frame_system::Pallet::<T>::block_number());
+
+			let claim = <frame_system::Pallet<T>>::digest()
+				.logs
+				.iter()
+				.find_map(|item| item.pre_runtime_try_to::<SlotClaim>(&SASSAFRAS_ENGINE_ID))
+				.expect("Valid block must have a slot claim. qed");
+
+			CurrentSlot::<T>::put(claim.slot);
+
+			if block_num == One::one() {
+				Self::post_genesis_initialize(claim.slot);
+			}
+
+			let randomness_output = claim
+				.vrf_signature
+				.outputs
+				.get(0)
+				.expect("Valid claim must have vrf signature; qed");
+			ClaimTemporaryData::<T>::put(randomness_output);
+
+			let trigger_weight = T::EpochChangeTrigger::trigger::<T>(block_num);
+
+			T::WeightInfo::on_initialize() + trigger_weight
+		}
+
+		fn on_finalize(_: BlockNumberFor<T>) {
+			// At the end of the block, we can safely include the current slot randomness
+			// to the accumulator. If we've determined that this block was the first in
+			// a new epoch, the changeover logic has already occurred at this point
+			// (i.e. `enact_epoch_change` has already been called).
+			let randomness_input = vrf::slot_claim_input(
+				&Self::randomness(),
+				CurrentSlot::<T>::get(),
+				EpochIndex::<T>::get(),
+			);
+			let randomness_output = ClaimTemporaryData::<T>::take()
+				.expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed");
+			let randomness = randomness_output
+				.make_bytes::<RANDOMNESS_LENGTH>(RANDOMNESS_VRF_CONTEXT, &randomness_input);
+			Self::deposit_slot_randomness(&randomness);
+
+			// Check if we are in the epoch's second half.
+			// If so, start sorting the next epoch tickets.
+			let epoch_length = T::EpochLength::get();
+			let current_slot_idx = Self::current_slot_index();
+			if current_slot_idx >= epoch_length / 2 {
+				let mut metadata = TicketsMeta::<T>::get();
+				if metadata.unsorted_tickets_count != 0 {
+					let next_epoch_idx = EpochIndex::<T>::get() + 1;
+					let next_epoch_tag = (next_epoch_idx & 1) as u8;
+					let slots_left = epoch_length.checked_sub(current_slot_idx).unwrap_or(1);
+					Self::sort_segments(
+						metadata
+							.unsorted_tickets_count
+							.div_ceil(SEGMENT_MAX_SIZE * slots_left as u32),
+						next_epoch_tag,
+						&mut metadata,
+					);
+					TicketsMeta::<T>::set(metadata);
+				}
+			}
+		}
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Submit next epoch tickets candidates.
+		///
+		/// The number of tickets allowed to be submitted in one call is equal to the epoch length.
+		#[pallet::call_index(0)]
+		#[pallet::weight(T::WeightInfo::submit_tickets(tickets.len() as u32))]
+		pub fn submit_tickets(
+			origin: OriginFor<T>,
+			tickets: BoundedVec<TicketEnvelope, EpochLengthFor<T>>,
+		) -> DispatchResultWithPostInfo {
+			ensure_none(origin)?;
+
+			debug!(target: LOG_TARGET, "Received {} tickets", tickets.len());
+
+			let epoch_length = T::EpochLength::get();
+			let current_slot_idx = Self::current_slot_index();
+			if current_slot_idx > epoch_length / 2 {
+				warn!(target: LOG_TARGET, "Tickets shall be submitted in the first epoch half",);
+				return Err("Tickets shall be submitted in the first epoch half".into())
+			}
+
+			let Some(verifier) = RingVerifierData::<T>::get().map(|v| v.into()) else {
+				warn!(target: LOG_TARGET, "Ring verifier key not initialized");
+				return Err("Ring verifier key not initialized".into())
+			};
+
+			let next_authorities = Self::next_authorities();
+
+			// Compute tickets threshold
+			let next_config = Self::next_config().unwrap_or_else(|| Self::config());
+			let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold(
+				next_config.redundancy_factor,
+				epoch_length as u32,
+				next_config.attempts_number,
+				next_authorities.len() as u32,
+			);
+
+			// Get next epoch params
+			let randomness = NextRandomness::<T>::get();
+			let epoch_idx = EpochIndex::<T>::get() + 1;
+
+			let mut valid_tickets = BoundedVec::with_bounded_capacity(tickets.len());
+
+			for ticket in tickets {
+				debug!(target: LOG_TARGET, "Checking ring proof");
+
+				let Some(ticket_id_output) = ticket.signature.outputs.get(0) else {
+					debug!(target: LOG_TARGET, "Missing ticket vrf output from ring signature");
+					continue
+				};
+				let ticket_id_input =
+					vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx);
+
+				// Check threshold constraint
+				let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output);
+				if ticket_id >= ticket_threshold {
+					debug!(target: LOG_TARGET, "Ignoring ticket over threshold ({:032x} >= {:032x})", ticket_id, ticket_threshold);
+					continue
+				}
+
+				// Check for duplicates
+				if TicketsData::<T>::contains_key(ticket_id) {
+					debug!(target: LOG_TARGET, "Ignoring duplicate ticket ({:032x})", ticket_id);
+					continue
+				}
+
+				// Check ring signature
+				let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input);
+				if !ticket.signature.ring_vrf_verify(&sign_data, &verifier) {
+					debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:032x})", ticket_id);
+					continue
+				}
+
+				if let Ok(_) = valid_tickets.try_push(ticket_id).defensive_proof(
+					"Input segment has same length as bounded destination vector; qed",
+				) {
+					TicketsData::<T>::set(ticket_id, Some(ticket.body));
+				}
+			}
+
+			if !valid_tickets.is_empty() {
+				Self::append_tickets(valid_tickets);
+			}
+
+			Ok(Pays::No.into())
+		}
+
+		/// Plan an epoch configuration change.
+		///
+		/// The epoch configuration change is recorded and will be announced at the beginning
+		/// of the next epoch together with next epoch authorities information.
+		/// In other words, the configuration will be enacted one epoch later.
+		///
+		/// Multiple calls to this method will replace any existing planned config change
+		/// that has not been enacted yet.
+		#[pallet::call_index(1)]
+		#[pallet::weight(T::WeightInfo::plan_config_change())]
+		pub fn plan_config_change(
+			origin: OriginFor<T>,
+			config: EpochConfiguration,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+
+			ensure!(
+				config.redundancy_factor != 0 && config.attempts_number != 0,
+				Error::<T>::InvalidConfiguration
+			);
+			PendingEpochConfigChange::<T>::put(config);
+			Ok(())
+		}
+	}
+
+	#[pallet::validate_unsigned]
+	impl<T: Config> ValidateUnsigned for Pallet<T> {
+		type Call = Call<T>;
+
+		fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+			let Call::submit_tickets { tickets } = call else {
+				return InvalidTransaction::Call.into()
+			};
+
+			// Discard tickets not coming from the local node or that are not included in a block
+			if source == TransactionSource::External {
+				warn!(
+					target: LOG_TARGET,
+					"Rejecting unsigned `submit_tickets` transaction from external source",
+				);
+				return InvalidTransaction::BadSigner.into()
+			}
+
+			// Current slot should be less than half of epoch length.
+			let epoch_length = T::EpochLength::get();
+			let current_slot_idx = Self::current_slot_index();
+			if current_slot_idx > epoch_length / 2 {
+				warn!(target: LOG_TARGET, "Tickets shall be proposed in the first epoch half",);
+				return InvalidTransaction::Stale.into()
+			}
+
+			// This should be set such that it is discarded after the first epoch half
+			let tickets_longevity = epoch_length / 2 - current_slot_idx;
+			let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes));
+
+			ValidTransaction::with_tag_prefix("Sassafras")
+				.priority(TransactionPriority::max_value())
+				.longevity(tickets_longevity as u64)
+				.and_provides(tickets_tag)
+				.propagate(true)
+				.build()
+		}
+	}
+}
+
+// Inherent methods
+impl<T: Config> Pallet<T> {
+	/// Determine whether an epoch change should take place at this block.
+	///
+	/// Assumes that initialization has already taken place.
+	pub(crate) fn should_end_epoch(block_num: BlockNumberFor<T>) -> bool {
+		// The epoch has technically ended during the passage of time between this block and the
+		// last, but we have to "end" the epoch now, since there is no earlier possible block we
+		// could have done it.
+		//
+		// The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having
+		// started at the slot of block 1. We want to use the same randomness and validator set as
+		// signalled in the genesis, so we don't rotate the epoch.
+		block_num > One::one() && Self::current_slot_index() >= T::EpochLength::get()
+	}
+
+	/// Current slot index relative to the current epoch.
+	fn current_slot_index() -> u32 {
+		Self::slot_index(CurrentSlot::<T>::get())
+	}
+
+	/// Slot index relative to the current epoch.
+	fn slot_index(slot: Slot) -> u32 {
+		slot.checked_sub(*Self::current_epoch_start())
+			.and_then(|v| v.try_into().ok())
+			.unwrap_or(u32::MAX)
+	}
+
+	/// Finds the start slot of the current epoch.
+	///
+	/// Only guaranteed to give correct results after `initialize` of the first
+	/// block in the chain (as its result is based off of `GenesisSlot`).
+	fn current_epoch_start() -> Slot {
+		Self::epoch_start(EpochIndex::<T>::get())
+	}
+
+	/// Get the epoch's first slot.
+	fn epoch_start(epoch_index: u64) -> Slot {
+		const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \
+							 if u64 is not enough we should crash for safety; qed.";
+
+		let epoch_start = epoch_index.checked_mul(T::EpochLength::get() as u64).expect(PROOF);
+		GenesisSlot::<T>::get().checked_add(epoch_start).expect(PROOF).into()
+	}
+
+	pub(crate) fn update_ring_verifier(authorities: &[AuthorityId]) {
+		debug!(target: LOG_TARGET, "Loading ring context");
+		let Some(ring_ctx) = RingContext::<T>::get() else {
+			debug!(target: LOG_TARGET, "Ring context not initialized");
+			return
+		};
+
+		let pks: Vec<_> = authorities.iter().map(|auth| *auth.as_ref()).collect();
+
+		debug!(target: LOG_TARGET, "Building ring verifier (ring size: {})", pks.len());
+		let verifier_data = ring_ctx
+			.verifier_data(&pks)
+			.expect("Failed to build ring verifier. This is a bug");
+
+		RingVerifierData::<T>::put(verifier_data);
+	}
+
+	/// Enact an epoch change.
+	///
+	/// WARNING: Should be called on every block once and if and only if [`should_end_epoch`]
+	/// has returned `true`.
+	///
+	/// If we detect one or more skipped epochs the policy is to use the authorities and values
+	/// from the first skipped epoch. The tickets data is invalidated.
+	pub(crate) fn enact_epoch_change(
+		authorities: WeakBoundedVec<AuthorityId, T::MaxAuthorities>,
+		next_authorities: WeakBoundedVec<AuthorityId, T::MaxAuthorities>,
+	) {
+		if next_authorities != authorities {
+			Self::update_ring_verifier(&next_authorities);
+		}
+
+		// Update authorities
+		Authorities::<T>::put(&authorities);
+		NextAuthorities::<T>::put(&next_authorities);
+
+		// Update epoch index
+		let mut epoch_idx = EpochIndex::<T>::get() + 1;
+
+		let slot_idx = CurrentSlot::<T>::get().saturating_sub(Self::epoch_start(epoch_idx));
+		if slot_idx >= T::EpochLength::get() {
+			// Detected one or more skipped epochs, clear tickets data and recompute epoch index.
+			Self::reset_tickets_data();
+			let skipped_epochs = *slot_idx / T::EpochLength::get() as u64;
+			epoch_idx += skipped_epochs;
+			warn!(
+				target: LOG_TARGET,
+				"Detected {} skipped epochs, resuming from epoch {}",
+				skipped_epochs,
+				epoch_idx
+			);
+		}
+
+		let mut metadata = TicketsMeta::<T>::get();
+		let mut metadata_dirty = false;
+
+		EpochIndex::<T>::put(epoch_idx);
+
+		let next_epoch_idx = epoch_idx + 1;
+
+		// Updates current epoch randomness and computes the *next* epoch randomness.
+		let next_randomness = Self::update_epoch_randomness(next_epoch_idx);
+
+		if let Some(config) = NextEpochConfig::<T>::take() {
+			EpochConfig::<T>::put(config);
+		}
+
+		let next_config = PendingEpochConfigChange::<T>::take();
+		if let Some(next_config) = next_config {
+			NextEpochConfig::<T>::put(next_config);
+		}
+
+		// After we update the current epoch, we signal the *next* epoch change
+		// so that nodes can track changes.
+		let next_epoch = NextEpochDescriptor {
+			randomness: next_randomness,
+			authorities: next_authorities.into_inner(),
+			config: next_config,
+		};
+		Self::deposit_next_epoch_descriptor_digest(next_epoch);
+
+		let epoch_tag = (epoch_idx & 1) as u8;
+
+		// Optionally finish sorting
+		if metadata.unsorted_tickets_count != 0 {
+			Self::sort_segments(u32::MAX, epoch_tag, &mut metadata);
+			metadata_dirty = true;
+		}
+
+		// Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies.
+		// Ids are left since are just cyclically overwritten on-the-go.
+		let prev_epoch_tag = epoch_tag ^ 1;
+		let prev_epoch_tickets_count = &mut metadata.tickets_count[prev_epoch_tag as usize];
+		if *prev_epoch_tickets_count != 0 {
+			for idx in 0..*prev_epoch_tickets_count {
+				if let Some(ticket_id) = TicketsIds::<T>::get((prev_epoch_tag, idx)) {
+					TicketsData::<T>::remove(ticket_id);
+				}
+			}
+			*prev_epoch_tickets_count = 0;
+			metadata_dirty = true;
+		}
+
+		if metadata_dirty {
+			TicketsMeta::<T>::set(metadata);
+		}
+	}
+
+	// Call this function on epoch change to enact current epoch randomness.
+	//
+	// Returns the next epoch randomness.
+	fn update_epoch_randomness(next_epoch_index: u64) -> Randomness {
+		let curr_epoch_randomness = NextRandomness::<T>::get();
+		CurrentRandomness::<T>::put(curr_epoch_randomness);
+
+		let accumulator = RandomnessAccumulator::<T>::get();
+
+		let mut buf = [0; RANDOMNESS_LENGTH + 8];
+		buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]);
+		buf[RANDOMNESS_LENGTH..].copy_from_slice(&next_epoch_index.to_le_bytes());
+
+		let next_randomness = hashing::blake2_256(&buf);
+		NextRandomness::<T>::put(&next_randomness);
+
+		next_randomness
+	}
+
+	// Deposit per-slot randomness.
+	fn deposit_slot_randomness(randomness: &Randomness) {
+		let accumulator = RandomnessAccumulator::<T>::get();
+
+		let mut buf = [0; 2 * RANDOMNESS_LENGTH];
+		buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]);
+		buf[RANDOMNESS_LENGTH..].copy_from_slice(&randomness[..]);
+
+		let accumulator = hashing::blake2_256(&buf);
+		RandomnessAccumulator::<T>::put(accumulator);
+	}
+
+	// Deposit next epoch descriptor in the block header digest.
+	fn deposit_next_epoch_descriptor_digest(desc: NextEpochDescriptor) {
+		let item = ConsensusLog::NextEpochData(desc);
+		let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, item.encode());
+		<frame_system::Pallet<T>>::deposit_log(log)
+	}
+
+	// Initialize authorities on genesis phase.
+	//
+	// Genesis authorities may have been initialized via other means (e.g. via session pallet).
+	//
+	// If this function has already been called with some authorities, then the new list
+	// should match the previously set one.
+	fn genesis_authorities_initialize(authorities: &[AuthorityId]) {
+		let prev_authorities = Authorities::<T>::get();
+
+		if !prev_authorities.is_empty() {
+			// This function has already been called.
+			if prev_authorities.as_slice() == authorities {
+				return
+			} else {
+				panic!("Authorities were already initialized");
+			}
+		}
+
+		let authorities = WeakBoundedVec::try_from(authorities.to_vec())
+			.expect("Initial number of authorities should be lower than T::MaxAuthorities");
+		Authorities::<T>::put(&authorities);
+		NextAuthorities::<T>::put(&authorities);
+	}
+
+	// Method to be called on first block `on_initialize` to properly populate some key parameters.
+	fn post_genesis_initialize(slot: Slot) {
+		// Keep track of the actual first slot used (may not be zero based).
+		GenesisSlot::<T>::put(slot);
+
+		// Properly initialize randomness using genesis hash and current slot.
+		// This is important to guarantee that a different set of tickets are produced for:
+		// - different chains which share the same ring parameters and
+		// - same chain started with a different slot base.
+		let genesis_hash = frame_system::Pallet::<T>::parent_hash();
+		let mut buf = genesis_hash.as_ref().to_vec();
+		buf.extend_from_slice(&slot.to_le_bytes());
+		let randomness = hashing::blake2_256(buf.as_slice());
+		RandomnessAccumulator::<T>::put(randomness);
+
+		let next_randoness = Self::update_epoch_randomness(1);
+
+		// Deposit a log as this is the first block in first epoch.
+		let next_epoch = NextEpochDescriptor {
+			randomness: next_randoness,
+			authorities: Self::next_authorities().into_inner(),
+			config: None,
+		};
+		Self::deposit_next_epoch_descriptor_digest(next_epoch);
+	}
+
+	/// Current epoch information.
+	pub fn current_epoch() -> Epoch {
+		let index = EpochIndex::<T>::get();
+		Epoch {
+			index,
+			start: Self::epoch_start(index),
+			length: T::EpochLength::get(),
+			authorities: Self::authorities().into_inner(),
+			randomness: Self::randomness(),
+			config: Self::config(),
+		}
+	}
+
+	/// Next epoch information.
+	pub fn next_epoch() -> Epoch {
+		let index = EpochIndex::<T>::get() + 1;
+		Epoch {
+			index,
+			start: Self::epoch_start(index),
+			length: T::EpochLength::get(),
+			authorities: Self::next_authorities().into_inner(),
+			randomness: Self::next_randomness(),
+			config: Self::next_config().unwrap_or_else(|| Self::config()),
+		}
+	}
+
+	/// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy.
+	///
+	/// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots,
+	/// with n >= k, then the tickets are assigned to the slots according to the following
+	/// strategy:
+	///
+	/// slot-index  : [ 0,  1,  2, ............ , n ]
+	/// tickets     : [ t1, t3, t5, ... , t4, t2, t0 ].
+	///
+	/// With slot-index computed as `slot - epoch_start()`.
+	///
+	/// If `slot` value falls within the current epoch then we fetch tickets from the current epoch
+	/// tickets list.
+	///
+	/// If `slot` value falls within the next epoch then we fetch tickets from the next epoch
+	/// tickets ids list. Note that in this case we may have not finished receiving all the tickets
+	/// for that epoch yet. The next epoch tickets should be considered "stable" only after the
+	/// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`).
+	///
+	/// Returns `None` if, according to the sorting strategy, there is no ticket associated to the
+	/// specified slot-index (happens if a ticket falls in the middle of an epoch and n > k),
+	/// or if the slot falls beyond the next epoch.
+	///
+	/// Before importing the first block this returns `None`.
+	pub fn slot_ticket_id(slot: Slot) -> Option<TicketId> {
+		if frame_system::Pallet::<T>::block_number().is_zero() {
+			return None
+		}
+		let epoch_idx = EpochIndex::<T>::get();
+		let epoch_len = T::EpochLength::get();
+		let mut slot_idx = Self::slot_index(slot);
+		let mut metadata = TicketsMeta::<T>::get();
+
+		let get_ticket_idx = |slot_idx| {
+			let ticket_idx = if slot_idx < epoch_len / 2 {
+				2 * slot_idx + 1
+			} else {
+				2 * (epoch_len - (slot_idx + 1))
+			};
+			debug!(
+				target: LOG_TARGET,
+				"slot-idx {} <-> ticket-idx {}",
+				slot_idx,
+				ticket_idx
+			);
+			ticket_idx as u32
+		};
+
+		let mut epoch_tag = (epoch_idx & 1) as u8;
+
+		if epoch_len <= slot_idx && slot_idx < 2 * epoch_len {
+			// Try to get a ticket for the next epoch. Since its state values were not enacted yet,
+			// we may have to finish sorting the tickets.
+			epoch_tag ^= 1;
+			slot_idx -= epoch_len;
+			if metadata.unsorted_tickets_count != 0 {
+				Self::sort_segments(u32::MAX, epoch_tag, &mut metadata);
+				TicketsMeta::<T>::set(metadata);
+			}
+		} else if slot_idx >= 2 * epoch_len {
+			return None
+		}
+
+		let ticket_idx = get_ticket_idx(slot_idx);
+		if ticket_idx < metadata.tickets_count[epoch_tag as usize] {
+			TicketsIds::<T>::get((epoch_tag, ticket_idx))
+		} else {
+			None
+		}
+	}
+
+	/// Returns ticket id and data associated with the given `slot`.
+	///
+	/// Refer to the `slot_ticket_id` documentation for the slot-ticket association
+	/// criteria.
+	pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> {
+		Self::slot_ticket_id(slot).and_then(|id| TicketsData::<T>::get(id).map(|body| (id, body)))
+	}
+
+	// Sort and truncate candidate tickets, cleanup storage.
+	fn sort_and_truncate(candidates: &mut Vec<u128>, max_tickets: usize) -> u128 {
+		candidates.sort_unstable();
+		candidates.drain(max_tickets..).for_each(TicketsData::<T>::remove);
+		candidates[max_tickets - 1]
+	}
+
+	/// Sort the tickets which belong to the epoch with the specified `epoch_tag`.
+	///
+	/// At most `max_segments` are taken from the `UnsortedSegments` structure.
+	///
+	/// The tickets of the removed segments are merged with the tickets on the `SortedCandidates`
+	/// which is then sorted and truncated to contain at most `MaxTickets` entries.
+	///
+	/// If all the entries in `UnsortedSegments` are consumed, then `SortedCandidates` is elected
+	/// as the next epoch tickets, else it is saved to be used by next calls of this function.
+	pub(crate) fn sort_segments(max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) {
+		let unsorted_segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE);
+		let max_segments = max_segments.min(unsorted_segments_count);
+		let max_tickets = Self::epoch_length() as usize;
+
+		// Fetch the sorted candidates (if any).
+		let mut candidates = SortedCandidates::<T>::take().into_inner();
+
+		// There is an upper bound to check only if we already sorted the max number
+		// of allowed tickets.
+		let mut upper_bound = *candidates.get(max_tickets - 1).unwrap_or(&TicketId::MAX);
+
+		let mut require_sort = false;
+
+		// Consume at most `max_segments` segments.
+		// During the process remove every stale ticket from `TicketsData` storage.
+		for segment_idx in (0..unsorted_segments_count).rev().take(max_segments as usize) {
+			let segment = UnsortedSegments::<T>::take(segment_idx);
+			metadata.unsorted_tickets_count -= segment.len() as u32;
+
+			// Push only ids with a value less than the current `upper_bound`.
+			let prev_len = candidates.len();
+			for ticket_id in segment {
+				if ticket_id < upper_bound {
+					candidates.push(ticket_id);
+				} else {
+					TicketsData::<T>::remove(ticket_id);
+				}
+			}
+			require_sort = candidates.len() != prev_len;
+
+			// As we approach the tail of the segments buffer the `upper_bound` value is expected
+			// to decrease (fast). We thus expect the number of tickets pushed into the
+			// `candidates` vector to follow an exponential drop.
+			//
+			// Given this, sorting and truncating after processing each segment may be an overkill
+			// as we may find pushing few tickets more and more often. It is preferable to perform
+			// the sort and truncation operations only when we reach some bigger threshold
+			// (currently set as twice the capacity of `SortCandidate`).
+			//
+			// The higher the protocol's redundancy factor (i.e. the ratio between tickets allowed
+			// to be submitted and the epoch length), the more relevant this check becomes.
+			if candidates.len() > 2 * max_tickets {
+				upper_bound = Self::sort_and_truncate(&mut candidates, max_tickets);
+				require_sort = false;
+			}
+		}
+
+		if candidates.len() > max_tickets {
+			Self::sort_and_truncate(&mut candidates, max_tickets);
+		} else if require_sort {
+			candidates.sort_unstable();
+		}
+
+		if metadata.unsorted_tickets_count == 0 {
+			// Sorting is over, write to next epoch map.
+			candidates.iter().enumerate().for_each(|(i, id)| {
+				TicketsIds::<T>::insert((epoch_tag, i as u32), id);
+			});
+			metadata.tickets_count[epoch_tag as usize] = candidates.len() as u32;
+		} else {
+			// Keep the partial result for the next calls.
+			SortedCandidates::<T>::set(BoundedVec::truncate_from(candidates));
+		}
+	}
+
+	/// Append a set of tickets to the segments map.
+	pub(crate) fn append_tickets(mut tickets: BoundedVec<TicketId, EpochLengthFor<T>>) {
+		debug!(target: LOG_TARGET, "Appending batch with {} tickets", tickets.len());
+		tickets.iter().for_each(|t| trace!(target: LOG_TARGET, "  + {t:032x}"));
+
+		let mut metadata = TicketsMeta::<T>::get();
+		let mut segment_idx = metadata.unsorted_tickets_count / SEGMENT_MAX_SIZE;
+
+		while !tickets.is_empty() {
+			let rem = metadata.unsorted_tickets_count % SEGMENT_MAX_SIZE;
+			let to_be_added = tickets.len().min((SEGMENT_MAX_SIZE - rem) as usize);
+
+			let mut segment = UnsortedSegments::<T>::get(segment_idx);
+			let _ = segment
+				.try_extend(tickets.drain(..to_be_added))
+				.defensive_proof("We don't add more than `SEGMENT_MAX_SIZE` and this is the maximum bound for the vector.");
+			UnsortedSegments::<T>::insert(segment_idx, segment);
+
+			metadata.unsorted_tickets_count += to_be_added as u32;
+			segment_idx += 1;
+		}
+
+		TicketsMeta::<T>::set(metadata);
+	}
+
+	/// Remove all tickets related data.
+	///
+	/// May not be efficient as the calling places may repeat some of this operations
+	/// but is a very extraordinary operation (hopefully never happens in production)
+	/// and better safe than sorry.
+	fn reset_tickets_data() {
+		let metadata = TicketsMeta::<T>::get();
+
+		// Remove even/odd-epoch data.
+		for epoch_tag in 0..=1 {
+			for idx in 0..metadata.tickets_count[epoch_tag] {
+				if let Some(id) = TicketsIds::<T>::get((epoch_tag as u8, idx)) {
+					TicketsData::<T>::remove(id);
+				}
+			}
+		}
+
+		// Remove all unsorted tickets segments.
+		let segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE);
+		(0..segments_count).for_each(UnsortedSegments::<T>::remove);
+
+		// Reset sorted candidates
+		SortedCandidates::<T>::kill();
+
+		// Reset tickets metadata
+		TicketsMeta::<T>::kill();
+	}
+
+	/// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to
+	/// `submit_unsigned_transaction`.
+	///
+	/// The submitted tickets are added to the next epoch outstanding tickets as long as the
+	/// extrinsic is called within the first half of the epoch. Tickets received during the
+	/// second half are dropped.
+	pub fn submit_tickets_unsigned_extrinsic(tickets: Vec<TicketEnvelope>) -> bool {
+		let tickets = BoundedVec::truncate_from(tickets);
+		let call = Call::submit_tickets { tickets };
+		match SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(call.into()) {
+			Ok(_) => true,
+			Err(e) => {
+				error!(target: LOG_TARGET, "Error submitting tickets {:?}", e);
+				false
+			},
+		}
+	}
+
+	/// Epoch length
+	pub fn epoch_length() -> u32 {
+		T::EpochLength::get()
+	}
+}
+
+/// Trigger an epoch change, if any should take place.
+pub trait EpochChangeTrigger {
+	/// May trigger an epoch change, if any should take place.
+	///
+	/// Returns the weight consumed by the epoch-change logic, or zero if no change took place.
+	///
+	/// This should be called during every block, after initialization is done.
+	fn trigger<T: Config>(_: BlockNumberFor<T>) -> Weight;
+}
+
+/// An `EpochChangeTrigger` which does nothing.
+///
+/// In practice this means that the epoch change logic is left to some external component
+/// (e.g. pallet-session).
+pub struct EpochChangeExternalTrigger;
+
+impl EpochChangeTrigger for EpochChangeExternalTrigger {
+	fn trigger<T: Config>(_: BlockNumberFor<T>) -> Weight {
+		// nothing - trigger is external.
+		Weight::zero()
+	}
+}
+
+/// An `EpochChangeTrigger` which recycles the same authorities set forever.
+///
+/// The internal trigger should only be used when no other module is responsible for
+/// changing authority set.
+pub struct EpochChangeInternalTrigger;
+
+impl EpochChangeTrigger for EpochChangeInternalTrigger {
+	fn trigger<T: Config>(block_num: BlockNumberFor<T>) -> Weight {
+		if Pallet::<T>::should_end_epoch(block_num) {
+			let authorities = Pallet::<T>::next_authorities();
+			let next_authorities = authorities.clone();
+			let len = next_authorities.len() as u32;
+			Pallet::<T>::enact_epoch_change(authorities, next_authorities);
+			T::WeightInfo::enact_epoch_change(len, T::EpochLength::get())
+		} else {
+			Weight::zero()
+		}
+	}
+}
+
+impl<T: Config> BoundToRuntimeAppPublic for Pallet<T> {
+	type Public = AuthorityId;
+}
diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b700207c4991b8d6d51c9295814576f6fd2e57ee
--- /dev/null
+++ b/substrate/frame/sassafras/src/mock.rs
@@ -0,0 +1,343 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test utilities for Sassafras pallet.
+
+use crate::{self as pallet_sassafras, EpochChangeInternalTrigger, *};
+
+use frame_support::{
+	derive_impl,
+	traits::{ConstU32, OnFinalize, OnInitialize},
+};
+use sp_consensus_sassafras::{
+	digests::SlotClaim,
+	vrf::{RingProver, VrfSignature},
+	AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId,
+};
+use sp_core::{
+	crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps},
+	ed25519::Public as EphemeralPublic,
+	H256, U256,
+};
+use sp_runtime::{
+	testing::{Digest, DigestItem, Header, TestXt},
+	BuildStorage,
+};
+
+const LOG_TARGET: &str = "sassafras::tests";
+
+const EPOCH_LENGTH: u32 = 10;
+const MAX_AUTHORITIES: u32 = 100;
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+impl frame_system::Config for Test {
+	type Block = frame_system::mocking::MockBlock<Test>;
+}
+
+impl<C> frame_system::offchain::SendTransactionTypes<C> for Test
+where
+	RuntimeCall: From<C>,
+{
+	type OverarchingCall = RuntimeCall;
+	type Extrinsic = TestXt<RuntimeCall, ()>;
+}
+
+impl pallet_sassafras::Config for Test {
+	type EpochLength = ConstU32<EPOCH_LENGTH>;
+	type MaxAuthorities = ConstU32<MAX_AUTHORITIES>;
+	type EpochChangeTrigger = EpochChangeInternalTrigger;
+	type WeightInfo = ();
+}
+
+frame_support::construct_runtime!(
+	pub enum Test {
+		System: frame_system,
+		Sassafras: pallet_sassafras,
+	}
+);
+
+// Default used for most of the tests.
+//
+// The redundancy factor has been set to max value to accept all submitted
+// tickets without worrying about the threshold.
+pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration =
+	EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 5 };
+
+/// Build and returns test storage externalities
+pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities {
+	new_test_ext_with_pairs(authorities_len, false).1
+}
+
+/// Build and returns test storage externalities and authority set pairs used
+/// by Sassafras genesis configuration.
+pub fn new_test_ext_with_pairs(
+	authorities_len: usize,
+	with_ring_context: bool,
+) -> (Vec<AuthorityPair>, sp_io::TestExternalities) {
+	let pairs = (0..authorities_len)
+		.map(|i| AuthorityPair::from_seed(&U256::from(i).into()))
+		.collect::<Vec<_>>();
+
+	let authorities: Vec<_> = pairs.iter().map(|p| p.public()).collect();
+
+	let mut storage = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
+
+	pallet_sassafras::GenesisConfig::<Test> {
+		authorities: authorities.clone(),
+		epoch_config: TEST_EPOCH_CONFIGURATION,
+		_phantom: sp_std::marker::PhantomData,
+	}
+	.assimilate_storage(&mut storage)
+	.unwrap();
+
+	let mut ext: sp_io::TestExternalities = storage.into();
+
+	if with_ring_context {
+		ext.execute_with(|| {
+			log::debug!(target: LOG_TARGET, "Building testing ring context");
+			let ring_ctx = vrf::RingContext::new_testing();
+			RingContext::<Test>::set(Some(ring_ctx.clone()));
+			Sassafras::update_ring_verifier(&authorities);
+		});
+	}
+
+	(pairs, ext)
+}
+
+fn make_ticket_with_prover(
+	attempt: u32,
+	pair: &AuthorityPair,
+	prover: &RingProver,
+) -> TicketEnvelope {
+	log::debug!("attempt: {}", attempt);
+
+	// Values are referring to the next epoch
+	let epoch = Sassafras::epoch_index() + 1;
+	let randomness = Sassafras::next_randomness();
+
+	// Make a dummy ephemeral public that hopefully is unique within one test instance.
+	// In the tests, the values within the erased public are just used to compare
+	// ticket bodies, so it is not important to be a valid key.
+	let mut raw: [u8; 32] = [0; 32];
+	raw.copy_from_slice(&pair.public().as_slice()[0..32]);
+	let erased_public = EphemeralPublic::unchecked_from(raw);
+	let revealed_public = erased_public;
+
+	let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch);
+
+	let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public };
+	let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input);
+
+	let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover);
+
+	// Ticket-id can be generated via vrf-preout.
+	// We don't care that much about its value here.
+	TicketEnvelope { body, signature }
+}
+
+pub fn make_prover(pair: &AuthorityPair) -> RingProver {
+	let public = pair.public();
+	let mut prover_idx = None;
+
+	let ring_ctx = Sassafras::ring_context().unwrap();
+
+	let pks: Vec<sp_core::bandersnatch::Public> = Sassafras::authorities()
+		.iter()
+		.enumerate()
+		.map(|(idx, auth)| {
+			if public == *auth {
+				prover_idx = Some(idx);
+			}
+			*auth.as_ref()
+		})
+		.collect();
+
+	log::debug!("Building prover. Ring size: {}", pks.len());
+	let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap();
+	log::debug!("Done");
+
+	prover
+}
+
+/// Construct `attempts` ticket envelopes for the next epoch.
+///
+/// All envelopes are signed with the same ring prover built from `pair`.
+pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec<TicketEnvelope> {
+	let prover = make_prover(pair);
+	(0..attempts)
+		.into_iter()
+		.map(|attempt| make_ticket_with_prover(attempt, pair, &prover))
+		.collect()
+}
+
+pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) {
+	// Values are referring to the next epoch
+	let epoch = Sassafras::epoch_index() + 1;
+	let randomness = Sassafras::next_randomness();
+
+	let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch);
+	let ticket_id_output = pair.as_inner_ref().vrf_output(&ticket_id_input);
+
+	let id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output);
+
+	// Make a dummy ephemeral public that hopefully is unique within one test instance.
+	// In the tests, the values within the erased public are just used to compare
+	// ticket bodies, so it is not important to be a valid key.
+	let mut raw: [u8; 32] = [0; 32];
+	raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]);
+	raw[16..].copy_from_slice(&id.to_le_bytes());
+	let erased_public = EphemeralPublic::unchecked_from(raw);
+	let revealed_public = erased_public;
+
+	let body = TicketBody { attempt_idx, erased_public, revealed_public };
+
+	(id, body)
+}
+
+pub fn make_dummy_ticket_body(attempt_idx: u32) -> (TicketId, TicketBody) {
+	let hash = sp_core::hashing::blake2_256(&attempt_idx.to_le_bytes());
+
+	let erased_public = EphemeralPublic::unchecked_from(hash);
+	let revealed_public = erased_public;
+
+	let body = TicketBody { attempt_idx, erased_public, revealed_public };
+
+	let mut bytes = [0u8; 16];
+	bytes.copy_from_slice(&hash[..16]);
+	let id = TicketId::from_le_bytes(bytes);
+
+	(id, body)
+}
+
+pub fn make_ticket_bodies(
+	number: u32,
+	pair: Option<&AuthorityPair>,
+) -> Vec<(TicketId, TicketBody)> {
+	(0..number)
+		.into_iter()
+		.map(|i| match pair {
+			Some(pair) => make_ticket_body(i, pair),
+			None => make_dummy_ticket_body(i),
+		})
+		.collect()
+}
+
+/// Persist the given tickets in the unsorted segments buffer.
+///
+/// This function skips all the checks performed by the `submit_tickets` extrinsic and
+/// directly appends the tickets to the `UnsortedSegments` structure.
+pub fn persist_next_epoch_tickets_as_segments(tickets: &[(TicketId, TicketBody)]) {
+	let mut ids = Vec::with_capacity(tickets.len());
+	tickets.iter().for_each(|(id, body)| {
+		TicketsData::<Test>::set(id, Some(body.clone()));
+		ids.push(*id);
+	});
+	let max_chunk_size = Sassafras::epoch_length() as usize;
+	ids.chunks(max_chunk_size).for_each(|chunk| {
+		Sassafras::append_tickets(BoundedVec::truncate_from(chunk.to_vec()));
+	})
+}
+
+/// Calls the [`persist_next_epoch_tickets_as_segments`] and then proceeds to the
+/// sorting of the candidates.
+///
+/// Only "winning" tickets are left.
+pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) {
+	persist_next_epoch_tickets_as_segments(tickets);
+	// Force sorting of next epoch tickets (enactment) by explicitly querying the first of them.
+	let next_epoch = Sassafras::next_epoch();
+	assert_eq!(TicketsMeta::<Test>::get().unsorted_tickets_count, tickets.len() as u32);
+	Sassafras::slot_ticket(next_epoch.start).unwrap();
+	assert_eq!(TicketsMeta::<Test>::get().unsorted_tickets_count, 0);
+}
+
+fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature {
+	let mut epoch = Sassafras::epoch_index();
+	let mut randomness = Sassafras::randomness();
+
+	// Check if epoch is going to change on initialization.
+	let epoch_start = Sassafras::current_epoch_start();
+	let epoch_length = EPOCH_LENGTH.into();
+	if epoch_start != 0_u64 && slot >= epoch_start + epoch_length {
+		epoch += slot.saturating_sub(epoch_start).saturating_div(epoch_length);
+		randomness = crate::NextRandomness::<Test>::get();
+	}
+
+	let data = vrf::slot_claim_sign_data(&randomness, slot, epoch);
+	pair.as_ref().vrf_sign(&data)
+}
+
+/// Construct a `PreDigest` instance for the given parameters.
+pub fn make_slot_claim(
+	authority_idx: AuthorityIndex,
+	slot: Slot,
+	pair: &AuthorityPair,
+) -> SlotClaim {
+	let vrf_signature = slot_claim_vrf_signature(slot, pair);
+	SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None }
+}
+
+/// Construct a `Digest` with a `SlotClaim` item.
+pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest {
+	let claim = make_slot_claim(authority_idx, slot, pair);
+	Digest { logs: vec![DigestItem::from(&claim)] }
+}
+
+pub fn initialize_block(
+	number: u64,
+	slot: Slot,
+	parent_hash: H256,
+	pair: &AuthorityPair,
+) -> Digest {
+	let digest = make_digest(0, slot, pair);
+	System::reset_events();
+	System::initialize(&number, &parent_hash, &digest);
+	Sassafras::on_initialize(number);
+	digest
+}
+
+pub fn finalize_block(number: u64) -> Header {
+	Sassafras::on_finalize(number);
+	System::finalize()
+}
+
+/// Progress the pallet state up to the given block `number` and `slot`.
+pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest {
+	Sassafras::on_finalize(System::block_number());
+	let parent_hash = System::finalize().hash();
+
+	let digest = make_digest(0, slot, pair);
+
+	System::reset_events();
+	System::initialize(&number, &parent_hash, &digest);
+	Sassafras::on_initialize(number);
+
+	digest
+}
+
+/// Progress the pallet state up to the given block `number`.
+/// Slots will grow linearly with the block numbers.
+pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option<Digest> {
+	let mut slot = Sassafras::current_slot() + 1;
+	let mut digest = None;
+	for i in System::block_number() + 1..=number {
+		let dig = go_to_block(i, slot, pair);
+		digest = Some(dig);
+		slot = slot + 1;
+	}
+	digest
+}
diff --git a/substrate/frame/sassafras/src/tests.rs b/substrate/frame/sassafras/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ec3425cce7bf61e299bb811ac41edda3d9761ae5
--- /dev/null
+++ b/substrate/frame/sassafras/src/tests.rs
@@ -0,0 +1,874 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for Sassafras pallet.
+
+use crate::*;
+use mock::*;
+
+use sp_consensus_sassafras::Slot;
+
+fn h2b<const N: usize>(hex: &str) -> [u8; N] {
+	array_bytes::hex2array_unchecked(hex)
+}
+
+fn b2h<const N: usize>(bytes: [u8; N]) -> String {
+	array_bytes::bytes2hex("", &bytes)
+}
+
+#[test]
+fn genesis_values_assumptions_check() {
+	new_test_ext(3).execute_with(|| {
+		assert_eq!(Sassafras::authorities().len(), 3);
+		assert_eq!(Sassafras::config(), TEST_EPOCH_CONFIGURATION);
+	});
+}
+
+#[test]
+fn post_genesis_randomness_initialization() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(1, false);
+	let pair = &pairs[0];
+
+	ext.execute_with(|| {
+		assert_eq!(Sassafras::randomness(), [0; 32]);
+		assert_eq!(Sassafras::next_randomness(), [0; 32]);
+		assert_eq!(Sassafras::randomness_accumulator(), [0; 32]);
+
+		// Test the values with a zero genesis block hash
+		let _ = initialize_block(1, 123.into(), [0x00; 32].into(), pair);
+
+		assert_eq!(Sassafras::randomness(), [0; 32]);
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("b9497550deeeb4adc134555930de61968a0558f8947041eb515b2f5fa68ffaf7")
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("febcc7fe9539fe17ed29f525831394edfb30b301755dc9bd91584a1f065faf87")
+		);
+		let (id1, _) = make_ticket_bodies(1, Some(pair))[0];
+
+		// Reset what is relevant
+		NextRandomness::<Test>::set([0; 32]);
+		RandomnessAccumulator::<Test>::set([0; 32]);
+
+		// Test the values with a non-zero genesis block hash
+		let _ = initialize_block(1, 123.into(), [0xff; 32].into(), pair);
+
+		assert_eq!(Sassafras::randomness(), [0; 32]);
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("51c1e3b3a73d2043b3cabae98ff27bdd4aad8967c21ecda7b9465afaa0e70f37")
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("466bf3007f2e17bffee0b3c42c90f33d654f5ff61eff28b0cc650825960abd52")
+		);
+		let (id2, _) = make_ticket_bodies(1, Some(pair))[0];
+
+		// Ticket ids should be different when next epoch randomness is different
+		assert_ne!(id1, id2);
+
+		// Reset what is relevant
+		NextRandomness::<Test>::set([0; 32]);
+		RandomnessAccumulator::<Test>::set([0; 32]);
+
+		// Test the values with a non-zero genesis block hash
+		let _ = initialize_block(1, 321.into(), [0x00; 32].into(), pair);
+
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("d85d84a54f79453000eb62e8a17b30149bd728d3232bc2787a89d51dc9a36008")
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("8a035eed02b5b8642b1515ed19752df8df156627aea45c4ef6e3efa88be9a74d")
+		);
+		let (id2, _) = make_ticket_bodies(1, Some(pair))[0];
+
+		// Ticket ids should be different when next epoch randomness is different
+		assert_ne!(id1, id2);
+	});
+}
+
+// Tests if the sorted tickets are assigned to each slot outside-in.
+#[test]
+fn slot_ticket_id_outside_in_fetch() {
+	let genesis_slot = Slot::from(100);
+	let tickets_count = 6;
+
+	// Current epoch tickets
+	let curr_tickets: Vec<TicketId> = (0..tickets_count).map(|i| i as TicketId).collect();
+
+	// Next epoch tickets
+	let next_tickets: Vec<TicketId> =
+		(0..tickets_count - 1).map(|i| (i + tickets_count) as TicketId).collect();
+
+	new_test_ext(0).execute_with(|| {
+		// Some corner cases
+		TicketsIds::<Test>::insert((0, 0_u32), 1_u128);
+
+		// Cleanup
+		(0..3).for_each(|i| TicketsIds::<Test>::remove((0, i as u32)));
+
+		curr_tickets
+			.iter()
+			.enumerate()
+			.for_each(|(i, id)| TicketsIds::<Test>::insert((0, i as u32), id));
+
+		next_tickets
+			.iter()
+			.enumerate()
+			.for_each(|(i, id)| TicketsIds::<Test>::insert((1, i as u32), id));
+
+		TicketsMeta::<Test>::set(TicketsMetadata {
+			tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32],
+			unsorted_tickets_count: 0,
+		});
+
+		// Before importing the first block the pallet always returns `None`
+		// This is a kind of special hardcoded case that should never happen in practice
+		// as the first thing the pallet does is to initialize the genesis slot.
+
+		assert_eq!(Sassafras::slot_ticket_id(0.into()), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), None);
+
+		// Initialize genesis slot..
+		GenesisSlot::<Test>::set(genesis_slot);
+		frame_system::Pallet::<Test>::set_block_number(One::one());
+
+		// Try to fetch a ticket for a slot before current epoch.
+		assert_eq!(Sassafras::slot_ticket_id(0.into()), None);
+
+		// Current epoch tickets.
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0]));
+
+		// Next epoch tickets (note that only 5 tickets are available)
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2]));
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0]));
+
+		// Try to fetch the tickets for slots beyond the next epoch.
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None);
+		assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None);
+	});
+}
+
+// Different outside-in test with more focus on corner-case correctness.
+#[test]
+fn slot_ticket_id_outside_in_fetch_corner_cases() {
+	new_test_ext(0).execute_with(|| {
+		frame_system::Pallet::<Test>::set_block_number(One::one());
+
+		let mut meta = TicketsMetadata { tickets_count: [0, 0], unsorted_tickets_count: 0 };
+		let curr_epoch_idx = EpochIndex::<Test>::get();
+
+		let mut epoch_test = |epoch_idx| {
+			let tag = (epoch_idx & 1) as u8;
+			let epoch_start = Sassafras::epoch_start(epoch_idx);
+
+			// cleanup
+			meta.tickets_count = [0, 0];
+			TicketsMeta::<Test>::set(meta);
+			assert!((0..10).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none()));
+
+			meta.tickets_count[tag as usize] += 1;
+			TicketsMeta::<Test>::set(meta);
+			TicketsIds::<Test>::insert((tag, 0_u32), 1_u128);
+			assert_eq!(Sassafras::slot_ticket_id((epoch_start + 9).into()), Some(1_u128));
+			assert!((0..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none()));
+
+			meta.tickets_count[tag as usize] += 1;
+			TicketsMeta::<Test>::set(meta);
+			TicketsIds::<Test>::insert((tag, 1_u32), 2_u128);
+			assert_eq!(Sassafras::slot_ticket_id((epoch_start + 0).into()), Some(2_u128));
+			assert!((1..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none()));
+
+			meta.tickets_count[tag as usize] += 2;
+			TicketsMeta::<Test>::set(meta);
+			TicketsIds::<Test>::insert((tag, 2_u32), 3_u128);
+			assert_eq!(Sassafras::slot_ticket_id((epoch_start + 8).into()), Some(3_u128));
+			assert!((1..8).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none()));
+		};
+
+		// Even epoch
+		epoch_test(curr_epoch_idx);
+		epoch_test(curr_epoch_idx + 1);
+	});
+}
+
+#[test]
+fn on_first_block_after_genesis() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(100);
+		let start_block = 1;
+
+		let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		let common_assertions = || {
+			assert_eq!(Sassafras::genesis_slot(), start_slot);
+			assert_eq!(Sassafras::current_slot(), start_slot);
+			assert_eq!(Sassafras::epoch_index(), 0);
+			assert_eq!(Sassafras::current_epoch_start(), start_slot);
+			assert_eq!(Sassafras::current_slot_index(), 0);
+			assert_eq!(Sassafras::randomness(), [0; 32]);
+			println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+			assert_eq!(
+				Sassafras::next_randomness(),
+				h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e")
+			);
+		};
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("f0d42f6b7c0d157ecbd788be44847b80a96c290c04b5dfa5d1d40c98aa0c04ed")
+		);
+
+		let header = finalize_block(start_block);
+
+		// Post-finalization status
+
+		assert!(!ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"),
+		);
+
+		// Header data check
+
+		assert_eq!(header.digest.logs.len(), 2);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+
+		// Genesis epoch start deposits consensus
+		let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData(
+			sp_consensus_sassafras::digests::NextEpochDescriptor {
+				authorities: Sassafras::next_authorities().into_inner(),
+				randomness: Sassafras::next_randomness(),
+				config: None,
+			},
+		);
+		let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode());
+		assert_eq!(header.digest.logs[1], consensus_digest)
+	})
+}
+
+#[test]
+fn on_normal_block() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+	let end_block = start_block + 1;
+
+	ext.execute_with(|| {
+		initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		// We don't want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		assert!(epoch_length > end_block);
+
+		// Progress to block 2
+		let digest = progress_to_block(end_block, &pairs[0]).unwrap();
+
+		let common_assertions = || {
+			assert_eq!(Sassafras::genesis_slot(), start_slot);
+			assert_eq!(Sassafras::current_slot(), start_slot + 1);
+			assert_eq!(Sassafras::epoch_index(), 0);
+			assert_eq!(Sassafras::current_epoch_start(), start_slot);
+			assert_eq!(Sassafras::current_slot_index(), 1);
+			assert_eq!(Sassafras::randomness(), [0; 32]);
+			println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+			assert_eq!(
+				Sassafras::next_randomness(),
+				h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e")
+			);
+		};
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"),
+		);
+
+		let header = finalize_block(end_block);
+
+		// Post-finalization status
+
+		assert!(!ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("be9261adb9686dfd3f23f8a276b7acc7f4beb3137070beb64c282ac22d84cbf0"),
+		);
+
+		// Header data check
+
+		assert_eq!(header.digest.logs.len(), 1);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+	});
+}
+
+#[test]
+fn produce_epoch_change_digest_no_config() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(100);
+		let start_block = 1;
+
+		initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		// We want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		let end_block = start_block + epoch_length;
+
+		let digest = progress_to_block(end_block, &pairs[0]).unwrap();
+
+		let common_assertions = || {
+			assert_eq!(Sassafras::genesis_slot(), start_slot);
+			assert_eq!(Sassafras::current_slot(), start_slot + epoch_length);
+			assert_eq!(Sassafras::epoch_index(), 1);
+			assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_length);
+			assert_eq!(Sassafras::current_slot_index(), 0);
+			println!("[DEBUG] {}", b2h(Sassafras::randomness()));
+			assert_eq!(
+				Sassafras::randomness(),
+				h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e")
+			);
+		};
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"),
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("bf0f1228f4ff953c8c1bda2cceb668bf86ea05d7ae93e26d021c9690995d5279"),
+		);
+
+		let header = finalize_block(end_block);
+
+		// Post-finalization status
+
+		assert!(!ClaimTemporaryData::<Test>::exists());
+		common_assertions();
+		println!("[DEBUG] {}", b2h(Sassafras::next_randomness()));
+		assert_eq!(
+			Sassafras::next_randomness(),
+			h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"),
+		);
+		println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator()));
+		assert_eq!(
+			Sassafras::randomness_accumulator(),
+			h2b("8a1ceb346036c386d021264b10912c8b656799668004c4a487222462b394cd89"),
+		);
+
+		// Header data check
+
+		assert_eq!(header.digest.logs.len(), 2);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+		// Deposits consensus log on epoch change
+		let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData(
+			sp_consensus_sassafras::digests::NextEpochDescriptor {
+				authorities: Sassafras::next_authorities().into_inner(),
+				randomness: Sassafras::next_randomness(),
+				config: None,
+			},
+		);
+		let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode());
+		assert_eq!(header.digest.logs[1], consensus_digest)
+	})
+}
+
+#[test]
+fn produce_epoch_change_digest_with_config() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(100);
+		let start_block = 1;
+
+		initialize_block(start_block, start_slot, Default::default(), &pairs[0]);
+
+		let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 };
+		Sassafras::plan_config_change(RuntimeOrigin::root(), config).unwrap();
+
+		// We want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		let end_block = start_block + epoch_length;
+
+		let digest = progress_to_block(end_block, &pairs[0]).unwrap();
+
+		let header = finalize_block(end_block);
+
+		// Header data check.
+		// Skip pallet status checks that were already performed by other tests.
+
+		assert_eq!(header.digest.logs.len(), 2);
+		assert_eq!(header.digest.logs[0], digest.logs[0]);
+		// Deposits consensus log on epoch change
+		let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData(
+			sp_consensus_sassafras::digests::NextEpochDescriptor {
+				authorities: Sassafras::next_authorities().into_inner(),
+				randomness: Sassafras::next_randomness(),
+				config: Some(config),
+			},
+		);
+		let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode());
+		assert_eq!(header.digest.logs[1], consensus_digest)
+	})
+}
+
+#[test]
+fn segments_incremental_sort_works() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(1, false);
+	let pair = &pairs[0];
+	let segments_count = 14;
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+
+	ext.execute_with(|| {
+		let epoch_length = Sassafras::epoch_length() as u64;
+		// -3 just to have the last segment not full...
+		let submitted_tickets_count = segments_count * SEGMENT_MAX_SIZE - 3;
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		// Manually populate the segments to skip the threshold check
+		let mut tickets = make_ticket_bodies(submitted_tickets_count, None);
+		persist_next_epoch_tickets_as_segments(&tickets);
+
+		// Proceed to half of the epoch (sortition should not have been started yet)
+		let half_epoch_block = start_block + epoch_length / 2;
+		progress_to_block(half_epoch_block, pair);
+
+		let mut unsorted_tickets_count = submitted_tickets_count;
+
+		// Check that next epoch tickets sortition is not started yet
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		// Follow the incremental sortition block by block
+
+		progress_to_block(half_epoch_block + 1, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE - 3;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count,);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		progress_to_block(half_epoch_block + 2, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		progress_to_block(half_epoch_block + 3, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		progress_to_block(half_epoch_block + 4, pair);
+		unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE;
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		let header = finalize_block(half_epoch_block + 4);
+
+		// Sort should be finished now.
+		// Check that next epoch tickets count have the correct value.
+		// Bigger ticket ids were discarded during sortition.
+		unsorted_tickets_count -= 2 * SEGMENT_MAX_SIZE;
+		assert_eq!(unsorted_tickets_count, 0);
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count);
+		assert_eq!(meta.tickets_count, [0, epoch_length as u32]);
+		// Epoch change log should have been pushed as well
+		assert_eq!(header.digest.logs.len(), 1);
+		// No tickets for the current epoch
+		assert_eq!(TicketsIds::<Test>::get((0, 0)), None);
+
+		// Check persistence of "winning" tickets
+		tickets.sort_by_key(|t| t.0);
+		(0..epoch_length as usize).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), tickets[i]);
+		});
+		// Check removal of "losing" tickets
+		(epoch_length as usize..tickets.len()).into_iter().for_each(|i| {
+			assert!(TicketsIds::<Test>::get((1, i as u32)).is_none());
+			assert!(TicketsData::<Test>::get(tickets[i].0).is_none());
+		});
+
+		// The next block will be the first produced on the new epoch.
+		// At this point the tickets are found already sorted and ready to be used.
+		let slot = Sassafras::current_slot() + 1;
+		let number = System::block_number() + 1;
+		initialize_block(number, slot, header.hash(), pair);
+		let header = finalize_block(number);
+		// Epoch changes digest is also produced
+		assert_eq!(header.digest.logs.len(), 2);
+	});
+}
+
+#[test]
+fn tickets_fetch_works_after_epoch_change() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let pair = &pairs[0];
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+	let submitted_tickets = 300;
+
+	ext.execute_with(|| {
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		// We don't want to trigger an epoch change in this test.
+		let epoch_length = Sassafras::epoch_length() as u64;
+		assert!(epoch_length > 2);
+		progress_to_block(2, &pairs[0]).unwrap();
+
+		// Persist tickets as three different segments.
+		let tickets = make_ticket_bodies(submitted_tickets, None);
+		persist_next_epoch_tickets_as_segments(&tickets);
+
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, submitted_tickets);
+		assert_eq!(meta.tickets_count, [0, 0]);
+
+		// Progress up to the last epoch slot (do not enact epoch change)
+		progress_to_block(epoch_length, &pairs[0]).unwrap();
+
+		// At this point next epoch tickets should have been sorted and ready to be used
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, 0);
+		assert_eq!(meta.tickets_count, [0, epoch_length as u32]);
+
+		// Compute and sort the tickets ids (aka tickets scores)
+		let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect();
+		expected_ids.sort();
+		expected_ids.truncate(epoch_length as usize);
+
+		// Check if we can fetch next epoch tickets ids (outside-in).
+		let slot = Sassafras::current_slot();
+		assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 4).unwrap(), expected_ids[7]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[6]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]);
+		assert!(Sassafras::slot_ticket_id(slot + 11).is_none());
+
+		// Enact epoch change by progressing one more block
+
+		progress_to_block(epoch_length + 1, &pairs[0]).unwrap();
+
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, 0);
+		assert_eq!(meta.tickets_count, [0, 10]);
+
+		// Check if we can fetch current epoch tickets ids (outside-in).
+		let slot = Sassafras::current_slot();
+		assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[7]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 6).unwrap(), expected_ids[6]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]);
+		assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]);
+		assert!(Sassafras::slot_ticket_id(slot + 10).is_none());
+
+		// Enact another epoch change, for which we don't have any ticket
+		progress_to_block(2 * epoch_length + 1, &pairs[0]).unwrap();
+		let meta = TicketsMeta::<Test>::get();
+		assert_eq!(meta.unsorted_tickets_count, 0);
+		assert_eq!(meta.tickets_count, [0, 0]);
+	});
+}
+
+#[test]
+fn block_allowed_to_skip_epochs() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let pair = &pairs[0];
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+
+	ext.execute_with(|| {
+		let epoch_length = Sassafras::epoch_length() as u64;
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		let tickets = make_ticket_bodies(3, Some(pair));
+		persist_next_epoch_tickets(&tickets);
+
+		let next_random = Sassafras::next_randomness();
+
+		// We want to skip 3 epochs in this test.
+		let offset = 4 * epoch_length;
+		go_to_block(start_block + offset, start_slot + offset, &pairs[0]);
+
+		// Post-initialization status
+
+		assert!(ClaimTemporaryData::<Test>::exists());
+		assert_eq!(Sassafras::genesis_slot(), start_slot);
+		assert_eq!(Sassafras::current_slot(), start_slot + offset);
+		assert_eq!(Sassafras::epoch_index(), 4);
+		assert_eq!(Sassafras::current_epoch_start(), start_slot + offset);
+		assert_eq!(Sassafras::current_slot_index(), 0);
+
+		// Tickets data has been discarded
+		assert_eq!(TicketsMeta::<Test>::get(), TicketsMetadata::default());
+		assert!(tickets.iter().all(|(id, _)| TicketsData::<Test>::get(id).is_none()));
+		assert_eq!(SortedCandidates::<Test>::get().len(), 0);
+
+		// We used the last known next epoch randomness as a fallback
+		assert_eq!(next_random, Sassafras::randomness());
+	});
+}
+
+#[test]
+fn obsolete_tickets_are_removed_on_epoch_change() {
+	let (pairs, mut ext) = new_test_ext_with_pairs(4, false);
+	let pair = &pairs[0];
+	let start_slot = Slot::from(100);
+	let start_block = 1;
+
+	ext.execute_with(|| {
+		let epoch_length = Sassafras::epoch_length() as u64;
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+
+		let tickets = make_ticket_bodies(10, Some(pair));
+		let mut epoch1_tickets = tickets[..4].to_vec();
+		let mut epoch2_tickets = tickets[4..].to_vec();
+
+		// Persist some tickets for next epoch (N)
+		persist_next_epoch_tickets(&epoch1_tickets);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [0, 4]);
+		// Check next epoch tickets presence
+		epoch1_tickets.sort_by_key(|t| t.0);
+		(0..epoch1_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch1_tickets[i]);
+		});
+
+		// Advance one epoch to enact the tickets
+		go_to_block(start_block + epoch_length, start_slot + epoch_length, pair);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [0, 4]);
+
+		// Persist some tickets for next epoch (N+1)
+		persist_next_epoch_tickets(&epoch2_tickets);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [6, 4]);
+		epoch2_tickets.sort_by_key(|t| t.0);
+		// Check for this epoch and next epoch tickets presence
+		(0..epoch1_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch1_tickets[i]);
+		});
+		(0..epoch2_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((0, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch2_tickets[i]);
+		});
+
+		// Advance to epoch 2 and check for cleanup
+
+		go_to_block(start_block + 2 * epoch_length, start_slot + 2 * epoch_length, pair);
+		assert_eq!(TicketsMeta::<Test>::get().tickets_count, [6, 0]);
+
+		(0..epoch1_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((1, i as u32)).unwrap();
+			assert!(TicketsData::<Test>::get(id).is_none());
+		});
+		(0..epoch2_tickets.len()).into_iter().for_each(|i| {
+			let id = TicketsIds::<Test>::get((0, i as u32)).unwrap();
+			let body = TicketsData::<Test>::get(id).unwrap();
+			assert_eq!((id, body), epoch2_tickets[i]);
+		});
+	})
+}
+
+const TICKETS_FILE: &str = "src/data/25_tickets_100_auths.bin";
+
+fn data_read<T: Decode>(filename: &str) -> T {
+	use std::{fs::File, io::Read};
+	let mut file = File::open(filename).unwrap();
+	let mut buf = Vec::new();
+	file.read_to_end(&mut buf).unwrap();
+	T::decode(&mut &buf[..]).unwrap()
+}
+
+fn data_write<T: Encode>(filename: &str, data: T) {
+	use std::{fs::File, io::Write};
+	let mut file = File::create(filename).unwrap();
+	let buf = data.encode();
+	file.write_all(&buf).unwrap();
+}
+
+// We don't want to implement anything secure here.
+// Just a trivial shuffle for the tests.
+fn trivial_fisher_yates_shuffle<T>(vector: &mut Vec<T>, random_seed: u64) {
+	let mut rng = random_seed as usize;
+	for i in (1..vector.len()).rev() {
+		let j = rng % (i + 1);
+		vector.swap(i, j);
+		rng = (rng.wrapping_mul(6364793005) + 1) as usize; // Some random number generation
+	}
+}
+
+// For this test we use a set of pre-constructed tickets from a file.
+// Creating a large set of tickets on the fly takes time, and may be annoying
+// for test execution.
+//
+// A valid ring-context is required for this test since we are passing through the
+// `submit_ticket` call which tests for ticket validity.
+#[test]
+fn submit_tickets_with_ring_proof_check_works() {
+	use sp_core::Pair as _;
+	// env_logger::init();
+
+	let (authorities, mut tickets): (Vec<AuthorityId>, Vec<TicketEnvelope>) =
+		data_read(TICKETS_FILE);
+
+	// Also checks that duplicates are discarded
+	tickets.extend(tickets.clone());
+	trivial_fisher_yates_shuffle(&mut tickets, 321);
+
+	let (pairs, mut ext) = new_test_ext_with_pairs(authorities.len(), true);
+	let pair = &pairs[0];
+	// Check if deserialized data has been generated for the correct set of authorities...
+	assert!(authorities.iter().zip(pairs.iter()).all(|(auth, pair)| auth == &pair.public()));
+
+	ext.execute_with(|| {
+		let start_slot = Slot::from(0);
+		let start_block = 1;
+
+		// Tweak the config to discard ~half of the tickets.
+		let mut config = EpochConfig::<Test>::get();
+		config.redundancy_factor = 25;
+		EpochConfig::<Test>::set(config);
+
+		initialize_block(start_block, start_slot, Default::default(), pair);
+		NextRandomness::<Test>::set([0; 32]);
+
+		// Check state before tickets submission
+		assert_eq!(
+			TicketsMeta::<Test>::get(),
+			TicketsMetadata { unsorted_tickets_count: 0, tickets_count: [0, 0] },
+		);
+
+		// Submit the tickets
+		let max_tickets_per_call = Sassafras::epoch_length() as usize;
+		tickets.chunks(max_tickets_per_call).for_each(|chunk| {
+			let chunk = BoundedVec::truncate_from(chunk.to_vec());
+			Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap();
+		});
+
+		// Check state after submission
+		assert_eq!(
+			TicketsMeta::<Test>::get(),
+			TicketsMetadata { unsorted_tickets_count: 16, tickets_count: [0, 0] },
+		);
+		assert_eq!(UnsortedSegments::<Test>::get(0).len(), 16);
+		assert_eq!(UnsortedSegments::<Test>::get(1).len(), 0);
+
+		finalize_block(start_block);
+	})
+}
+
+#[test]
+#[ignore = "test tickets data generator"]
+fn make_tickets_data() {
+	use super::*;
+	use sp_core::crypto::Pair;
+
+	// Number of authorities who produce tickets (for the sake of this test)
+	let tickets_authors_count = 5;
+	// Total number of authorities (the ring)
+	let authorities_count = 100;
+	let (pairs, mut ext) = new_test_ext_with_pairs(authorities_count, true);
+
+	let authorities: Vec<_> = pairs.iter().map(|sk| sk.public()).collect();
+
+	ext.execute_with(|| {
+		let config = EpochConfig::<Test>::get();
+
+		let tickets_count = tickets_authors_count * config.attempts_number as usize;
+		let mut tickets = Vec::with_capacity(tickets_count);
+
+		// Construct pre-built tickets with a well known `NextRandomness` value.
+		NextRandomness::<Test>::set([0; 32]);
+
+		println!("Constructing {} tickets", tickets_count);
+		pairs.iter().take(tickets_authors_count).enumerate().for_each(|(i, pair)| {
+			let t = make_tickets(config.attempts_number, pair);
+			tickets.extend(t);
+			println!("{:.2}%", 100f32 * ((i + 1) as f32 / tickets_authors_count as f32));
+		});
+
+		data_write(TICKETS_FILE, (authorities, tickets));
+	});
+}
diff --git a/substrate/frame/sassafras/src/weights.rs b/substrate/frame/sassafras/src/weights.rs
new file mode 100644
index 0000000000000000000000000000000000000000..32ea2d29a180b310c944d50014fa4b61f7d7d88b
--- /dev/null
+++ b/substrate/frame/sassafras/src/weights.rs
@@ -0,0 +1,425 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_sassafras`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-11-16, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `behemoth`, CPU: `AMD Ryzen Threadripper 3970X 32-Core Processor`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// ./target/release/node-template
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_sassafras
+// --extrinsic
+// *
+// --steps
+// 20
+// --repeat
+// 3
+// --output
+// weights.rs
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_sassafras`.
+pub trait WeightInfo {
+	fn on_initialize() -> Weight;
+	fn enact_epoch_change(x: u32, y: u32, ) -> Weight;
+	fn submit_tickets(x: u32, ) -> Weight;
+	fn plan_config_change() -> Weight;
+	fn update_ring_verifier(x: u32, ) -> Weight;
+	fn load_ring_context() -> Weight;
+	fn sort_segments(x: u32, ) -> Weight;
+}
+
+/// Weights for `pallet_sassafras` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `System::Digest` (r:1 w:1)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:0)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:0)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1)
+	/// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentSlot` (r:0 w:1)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1)
+	/// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:0 w:1)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	fn on_initialize() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `302`
+		//  Estimated: `4787`
+		// Minimum execution time: 438_039_000 picoseconds.
+		Weight::from_parts(439_302_000, 4787)
+			.saturating_add(T::DbWeight::get().reads(6_u64))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
+	}
+	/// Storage: `Sassafras::CurrentSlot` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:1)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:1 w:0)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:1)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingContext` (r:1 w:0)
+	/// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsMeta` (r:1 w:1)
+	/// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:1)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0)
+	/// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextEpochConfig` (r:1 w:1)
+	/// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1)
+	/// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `System::Digest` (r:1 w:1)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Sassafras::SortedCandidates` (r:1 w:0)
+	/// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::UnsortedSegments` (r:79 w:79)
+	/// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsIds` (r:5000 w:200)
+	/// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::Authorities` (r:0 w:1)
+	/// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsData` (r:0 w:9896)
+	/// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingVerifierData` (r:0 w:1)
+	/// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochConfig` (r:0 w:1)
+	/// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentRandomness` (r:0 w:1)
+	/// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 100]`.
+	/// The range of component `y` is `[1000, 5000]`.
+	fn enact_epoch_change(x: u32, y: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `594909 + x * (33 ±0) + y * (53 ±0)`
+		//  Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)`
+		// Minimum execution time: 121_279_846_000 picoseconds.
+		Weight::from_parts(94_454_851_972, 593350)
+			// Standard Error: 24_177_301
+			.saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into()))
+			// Standard Error: 601_053
+			.saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into()))
+			.saturating_add(T::DbWeight::get().reads(13_u64))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into())))
+			.saturating_add(T::DbWeight::get().writes(112_u64))
+			.saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(y.into())))
+			.saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into()))
+			.saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into()))
+	}
+	/// Storage: `Sassafras::CurrentSlot` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:0)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:1 w:0)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingVerifierData` (r:1 w:0)
+	/// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:0)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextEpochConfig` (r:1 w:0)
+	/// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsData` (r:25 w:25)
+	/// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsMeta` (r:1 w:1)
+	/// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::UnsortedSegments` (r:1 w:1)
+	/// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 25]`.
+	fn submit_tickets(x: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3869`
+		//  Estimated: `5519 + x * (2559 ±0)`
+		// Minimum execution time: 36_904_934_000 picoseconds.
+		Weight::from_parts(25_822_957_295, 5519)
+			// Standard Error: 11_047_832
+			.saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into()))
+			.saturating_add(T::DbWeight::get().reads(9_u64))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into())))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into())))
+			.saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into()))
+	}
+	/// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1)
+	/// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	fn plan_config_change() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 4_038_000 picoseconds.
+		Weight::from_parts(4_499_000, 0)
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Sassafras::RingContext` (r:1 w:0)
+	/// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingVerifierData` (r:0 w:1)
+	/// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 100]`.
+	fn update_ring_verifier(x: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `590485`
+		//  Estimated: `591809`
+		// Minimum execution time: 105_121_424_000 picoseconds.
+		Weight::from_parts(105_527_334_385, 591809)
+			// Standard Error: 2_933_910
+			.saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into()))
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Sassafras::RingContext` (r:1 w:0)
+	/// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`)
+	fn load_ring_context() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `590485`
+		//  Estimated: `591809`
+		// Minimum execution time: 44_005_681_000 picoseconds.
+		Weight::from_parts(44_312_079_000, 591809)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+	}
+	/// Storage: `Sassafras::SortedCandidates` (r:1 w:0)
+	/// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::UnsortedSegments` (r:100 w:100)
+	/// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsIds` (r:0 w:200)
+	/// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsData` (r:0 w:12600)
+	/// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 100]`.
+	fn sort_segments(x: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `222 + x * (2060 ±0)`
+		//  Estimated: `4687 + x * (4529 ±0)`
+		// Minimum execution time: 183_501_000 picoseconds.
+		Weight::from_parts(183_501_000, 4687)
+			// Standard Error: 1_426_363
+			.saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into()))
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into())))
+			.saturating_add(T::DbWeight::get().writes((129_u64).saturating_mul(x.into())))
+			.saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into()))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `System::Digest` (r:1 w:1)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:0)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:0)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1)
+	/// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentSlot` (r:0 w:1)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1)
+	/// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:0 w:1)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	fn on_initialize() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `302`
+		//  Estimated: `4787`
+		// Minimum execution time: 438_039_000 picoseconds.
+		Weight::from_parts(439_302_000, 4787)
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
+	}
+	/// Storage: `Sassafras::CurrentSlot` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:1)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:1 w:0)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:1)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingContext` (r:1 w:0)
+	/// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsMeta` (r:1 w:1)
+	/// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:1)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0)
+	/// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextEpochConfig` (r:1 w:1)
+	/// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1)
+	/// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `System::Digest` (r:1 w:1)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Sassafras::SortedCandidates` (r:1 w:0)
+	/// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::UnsortedSegments` (r:79 w:79)
+	/// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsIds` (r:5000 w:200)
+	/// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::Authorities` (r:0 w:1)
+	/// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsData` (r:0 w:9896)
+	/// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingVerifierData` (r:0 w:1)
+	/// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochConfig` (r:0 w:1)
+	/// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::CurrentRandomness` (r:0 w:1)
+	/// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 100]`.
+	/// The range of component `y` is `[1000, 5000]`.
+	fn enact_epoch_change(x: u32, y: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `594909 + x * (33 ±0) + y * (53 ±0)`
+		//  Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)`
+		// Minimum execution time: 121_279_846_000 picoseconds.
+		Weight::from_parts(94_454_851_972, 593350)
+			// Standard Error: 24_177_301
+			.saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into()))
+			// Standard Error: 601_053
+			.saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into()))
+			.saturating_add(RocksDbWeight::get().reads(13_u64))
+			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into())))
+			.saturating_add(RocksDbWeight::get().writes(112_u64))
+			.saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(y.into())))
+			.saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into()))
+			.saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into()))
+	}
+	/// Storage: `Sassafras::CurrentSlot` (r:1 w:0)
+	/// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::EpochIndex` (r:1 w:0)
+	/// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::GenesisSlot` (r:1 w:0)
+	/// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingVerifierData` (r:1 w:0)
+	/// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextAuthorities` (r:1 w:0)
+	/// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextEpochConfig` (r:1 w:0)
+	/// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::NextRandomness` (r:1 w:0)
+	/// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsData` (r:25 w:25)
+	/// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsMeta` (r:1 w:1)
+	/// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::UnsortedSegments` (r:1 w:1)
+	/// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 25]`.
+	fn submit_tickets(x: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3869`
+		//  Estimated: `5519 + x * (2559 ±0)`
+		// Minimum execution time: 36_904_934_000 picoseconds.
+		Weight::from_parts(25_822_957_295, 5519)
+			// Standard Error: 11_047_832
+			.saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into()))
+			.saturating_add(RocksDbWeight::get().reads(9_u64))
+			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into())))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into())))
+			.saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into()))
+	}
+	/// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1)
+	/// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+	fn plan_config_change() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 4_038_000 picoseconds.
+		Weight::from_parts(4_499_000, 0)
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Sassafras::RingContext` (r:1 w:0)
+	/// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::RingVerifierData` (r:0 w:1)
+	/// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 100]`.
+	fn update_ring_verifier(x: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `590485`
+		//  Estimated: `591809`
+		// Minimum execution time: 105_121_424_000 picoseconds.
+		Weight::from_parts(105_527_334_385, 591809)
+			// Standard Error: 2_933_910
+			.saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into()))
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Sassafras::RingContext` (r:1 w:0)
+	/// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`)
+	fn load_ring_context() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `590485`
+		//  Estimated: `591809`
+		// Minimum execution time: 44_005_681_000 picoseconds.
+		Weight::from_parts(44_312_079_000, 591809)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+	}
+	/// Storage: `Sassafras::SortedCandidates` (r:1 w:0)
+	/// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::UnsortedSegments` (r:100 w:100)
+	/// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsIds` (r:0 w:200)
+	/// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`)
+	/// Storage: `Sassafras::TicketsData` (r:0 w:12600)
+	/// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// The range of component `x` is `[1, 100]`.
+	fn sort_segments(x: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `222 + x * (2060 ±0)`
+		//  Estimated: `4687 + x * (4529 ±0)`
+		// Minimum execution time: 183_501_000 picoseconds.
+		Weight::from_parts(183_501_000, 4687)
+			// Standard Error: 1_426_363
+			.saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into()))
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into())))
+			.saturating_add(RocksDbWeight::get().writes((129_u64).saturating_mul(x.into())))
+			.saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into()))
+	}
+}
diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml
index 67f09e2b904130d74f0343f6834a2aa378d88a7e..e71f82b4382f168b46f0d995831fb6bf87a8faa0 100644
--- a/substrate/primitives/consensus/sassafras/Cargo.toml
+++ b/substrate/primitives/consensus/sassafras/Cargo.toml
@@ -18,12 +18,12 @@ targets = ["x86_64-unknown-linux-gnu"]
 scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true }
-sp-api = { default-features = false, path = "../../api" }
-sp-application-crypto = { default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] }
-sp-consensus-slots = { default-features = false, path = "../slots" }
-sp-core = { default-features = false, path = "../../core", features = ["bandersnatch-experimental"] }
-sp-runtime = { default-features = false, path = "../../runtime" }
-sp-std = { default-features = false, path = "../../std" }
+sp-api = { path = "../../api", default-features = false }
+sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] }
+sp-consensus-slots = { path = "../slots", default-features = false }
+sp-core = { path = "../../core", default-features = false, features = ["bandersnatch-experimental"] }
+sp-runtime = { path = "../../runtime", default-features = false }
+sp-std = { path = "../../std", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/substrate/primitives/consensus/sassafras/README.md b/substrate/primitives/consensus/sassafras/README.md
index b0f3685494e4e3eb8b68bf15318960f532fe22ce..d6251940a496f52ff28c6471582c1981681bb8ec 100644
--- a/substrate/primitives/consensus/sassafras/README.md
+++ b/substrate/primitives/consensus/sassafras/README.md
@@ -1,12 +1,6 @@
 Primitives for SASSAFRAS.
 
-# ⚠️ WARNING ⚠️
+- Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41
+- RFC proposal: https://github.com/polkadot-fellows/RFCs/pull/26
 
-The crate interfaces and structures are highly experimental and may be subject
-to significant changes.
-
-Depends on upstream experimental feature: `bandersnatch-experimental`.
-
-These structs were mostly extracted from the main SASSAFRAS protocol PR: https://github.com/paritytech/polkadot-sdk/pull/1336.
-
-Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41
+Depends on `sp-core` feature: `bandersnatch-experimental`.
diff --git a/substrate/primitives/consensus/sassafras/src/digests.rs b/substrate/primitives/consensus/sassafras/src/digests.rs
index 95a305099de553cbe263fc1acd5605d1670d638b..5274f1309d8251977fe3bf30f2209bf8538f5e19 100644
--- a/substrate/primitives/consensus/sassafras/src/digests.rs
+++ b/substrate/primitives/consensus/sassafras/src/digests.rs
@@ -48,11 +48,11 @@ pub struct SlotClaim {
 /// This is mandatory in the first block of each epoch.
 #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)]
 pub struct NextEpochDescriptor {
+	/// Randomness value.
+	pub randomness: Randomness,
 	/// Authorities list.
 	pub authorities: Vec<AuthorityId>,
-	/// Epoch randomness.
-	pub randomness: Randomness,
-	/// Epoch configurable parameters.
+	/// Epoch configuration.
 	///
 	/// If not present previous epoch parameters are used.
 	pub config: Option<EpochConfiguration>,
diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs
index e421e771d406a197cb30429a43b5c57d7f48b9de..1752f76588635f5a80fddabc86023a439361643c 100644
--- a/substrate/primitives/consensus/sassafras/src/lib.rs
+++ b/substrate/primitives/consensus/sassafras/src/lib.rs
@@ -80,33 +80,43 @@ pub type EquivocationProof<H> = sp_consensus_slots::EquivocationProof<H, Authori
 /// Randomness required by some protocol's operations.
 pub type Randomness = [u8; RANDOMNESS_LENGTH];
 
-/// Configuration data that can be modified on epoch change.
+/// Protocol configuration that can be modified on epoch change.
+///
+/// Mostly tweaks to the ticketing system parameters.
 #[derive(
 	Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default,
 )]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct EpochConfiguration {
-	/// Tickets threshold redundancy factor.
+	/// Tickets redundancy factor.
+	///
+	/// Expected ratio between epoch's slots and the cumulative number of tickets which can
+	/// be submitted by the set of epoch validators.
 	pub redundancy_factor: u32,
-	/// Tickets attempts for each validator.
+	/// Tickets max attempts for each validator.
+	///
+	/// Influences the anonymity of block producers. As all published tickets have a public
+	/// attempt number less than `attempts_number`, if two tickets share an attempt number
+	/// then they must belong to two different validators, which reduces anonymity as we
+	/// approach the epoch tail.
+	///
+	/// This anonymity loss already becomes small when `attempts_number = 64` or `128`.
 	pub attempts_number: u32,
 }
 
 /// Sassafras epoch information
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo)]
 pub struct Epoch {
-	/// The epoch index.
-	pub epoch_idx: u64,
-	/// The starting slot of the epoch.
-	pub start_slot: Slot,
-	/// Slot duration in milliseconds.
-	pub slot_duration: SlotDuration,
-	/// Duration of epoch in slots.
-	pub epoch_duration: u64,
-	/// Authorities for the epoch.
-	pub authorities: Vec<AuthorityId>,
-	/// Randomness for the epoch.
+	/// Epoch index.
+	pub index: u64,
+	/// Starting slot of the epoch.
+	pub start: Slot,
+	/// Number of slots in the epoch.
+	pub length: u32,
+	/// Randomness value.
 	pub randomness: Randomness,
+	/// Authorities list.
+	pub authorities: Vec<AuthorityId>,
 	/// Epoch configuration.
 	pub config: EpochConfiguration,
 }
diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs
index d81770c96d9bbec9dc8a625e048173d705a9f7cb..dc0a61990d3ea7248a4bbbbf69a6eed72942719f 100644
--- a/substrate/primitives/consensus/sassafras/src/ticket.rs
+++ b/substrate/primitives/consensus/sassafras/src/ticket.rs
@@ -62,10 +62,10 @@ pub struct TicketClaim {
 	pub erased_signature: EphemeralSignature,
 }
 
-/// Computes ticket-id maximum allowed value for a given epoch.
+/// Computes a boundary for [`TicketId`] maximum allowed value for a given epoch.
 ///
-/// Only ticket identifiers below this threshold should be considered for slot
-/// assignment.
+/// Only ticket identifiers below this threshold should be considered as candidates
+/// for slot assignment.
 ///
 /// The value is computed as `TicketId::MAX*(redundancy*slots)/(attempts*validators)`
 ///
@@ -76,16 +76,51 @@ pub struct TicketClaim {
 /// - `validators`: number of validators in epoch.
 ///
 /// If `attempts * validators = 0` then we return 0.
+///
+/// For details about the formula and implications refer to
+/// [*probabilities and parameters*](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS#probabilities-and-parameters)
+/// paragraph of the w3f introduction to the protocol.
+// TODO: replace with [RFC-26](https://github.com/polkadot-fellows/RFCs/pull/26)
+// "Tickets Threshold" paragraph once it is merged
 pub fn ticket_id_threshold(
 	redundancy: u32,
 	slots: u32,
 	attempts: u32,
 	validators: u32,
 ) -> TicketId {
-	let den = attempts as u64 * validators as u64;
 	let num = redundancy as u64 * slots as u64;
+	let den = attempts as u64 * validators as u64;
 	TicketId::max_value()
 		.checked_div(den.into())
 		.unwrap_or_default()
 		.saturating_mul(num.into())
 }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	// This is a trivial example/check which better explains the rationale
+	// behind the threshold.
+	//
+	// After reading this, the formula should become obvious.
+	#[test]
+	fn ticket_id_threshold_trivial_check() {
+		// For an epoch with `s` slots we want to accept a number of tickets equal to ~s·r
+		let redundancy = 2;
+		let slots = 1000;
+		let attempts = 100;
+		let validators = 500;
+
+		let threshold = ticket_id_threshold(redundancy, slots, attempts, validators);
+		let threshold = threshold as f64 / TicketId::MAX as f64;
+
+		// We expect that the total number of tickets allowed to be submitted
+		// is slots*redundancy
+		let avt = ((attempts * validators) as f64 * threshold) as u32;
+		assert_eq!(avt, slots * redundancy);
+
+		println!("threshold: {}", threshold);
+		println!("avt = {}", avt);
+	}
+}
diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs
index d25a656f9508f7090755af20cfa0418b5954015e..bdbac0aae03774d589f52054906f56ee6c7056a9 100644
--- a/substrate/primitives/consensus/sassafras/src/vrf.rs
+++ b/substrate/primitives/consensus/sassafras/src/vrf.rs
@@ -23,7 +23,7 @@ use sp_consensus_slots::Slot;
 use sp_std::vec::Vec;
 
 pub use sp_core::bandersnatch::{
-	ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature},
+	ring_vrf::{RingContext, RingProver, RingVerifier, RingVerifierData, RingVrfSignature},
 	vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature},
 };
 
diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml
index 9c556c07736ddc68b81bc89d65c5259b69fa10f3..331d762e0d7b8cc9c44e5de444db6f70488d6a10 100644
--- a/substrate/primitives/core/Cargo.toml
+++ b/substrate/primitives/core/Cargo.toml
@@ -56,7 +56,7 @@ sp-runtime-interface = { path = "../runtime-interface", default-features = false
 # bls crypto
 w3f-bls = { version = "0.1.3", default-features = false, optional = true }
 # bandersnatch crypto
-bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true }
+bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "2019248", default-features = false, features = ["substrate-curves"], optional = true }
 
 [dev-dependencies]
 criterion = "0.4.0"
diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs
index 78b7f12f9ffd4c5ce9faacb6990f80e091994cf7..1d666f13b6275dea2cc88ba8216844a703e26045 100644
--- a/substrate/primitives/core/src/bandersnatch.rs
+++ b/substrate/primitives/core/src/bandersnatch.rs
@@ -20,13 +20,17 @@
 //!
 //! The primitive can operate both as a regular VRF or as an anonymized Ring VRF.
 
-#[cfg(feature = "std")]
+#[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
 use crate::crypto::{
 	ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic,
 };
 #[cfg(feature = "full_crypto")]
 use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret};
+#[cfg(feature = "serde")]
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+#[cfg(all(not(feature = "std"), feature = "serde"))]
+use sp_std::alloc::{format, string::String};
 
 use bandersnatch_vrfs::CanonicalSerialize;
 #[cfg(feature = "full_crypto")]
@@ -44,23 +48,12 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band");
 #[cfg(feature = "full_crypto")]
 pub const SIGNING_CTX: &[u8] = b"BandersnatchSigningContext";
 
-// Max ring domain size.
-const RING_DOMAIN_SIZE: usize = 1024;
-
 #[cfg(feature = "full_crypto")]
-const SEED_SERIALIZED_LEN: usize = 32;
-
-// Short-Weierstrass form serialized sizes.
-const PUBLIC_SERIALIZED_LEN: usize = 33;
-const SIGNATURE_SERIALIZED_LEN: usize = 65;
-const RING_SIGNATURE_SERIALIZED_LEN: usize = 755;
-const PREOUT_SERIALIZED_LEN: usize = 33;
+const SEED_SERIALIZED_SIZE: usize = 32;
 
-// Max size of serialized ring-vrf context params.
-//
-// This size is dependent on the ring domain size and the actual value
-// is equal to the SCALE encoded size of the `KZG` backend.
-const RING_CONTEXT_SERIALIZED_LEN: usize = 147716;
+const PUBLIC_SERIALIZED_SIZE: usize = 33;
+const SIGNATURE_SERIALIZED_SIZE: usize = 65;
+const PREOUT_SERIALIZED_SIZE: usize = 33;
 
 /// Bandersnatch public key.
 #[cfg_attr(feature = "full_crypto", derive(Hash))]
@@ -77,16 +70,16 @@ const RING_CONTEXT_SERIALIZED_LEN: usize = 147716;
 	MaxEncodedLen,
 	TypeInfo,
 )]
-pub struct Public(pub [u8; PUBLIC_SERIALIZED_LEN]);
+pub struct Public(pub [u8; PUBLIC_SERIALIZED_SIZE]);
 
-impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_LEN]> for Public {
-	fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_LEN]) -> Self {
+impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_SIZE]> for Public {
+	fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_SIZE]) -> Self {
 		Public(raw)
 	}
 }
 
-impl AsRef<[u8; PUBLIC_SERIALIZED_LEN]> for Public {
-	fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_LEN] {
+impl AsRef<[u8; PUBLIC_SERIALIZED_SIZE]> for Public {
+	fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_SIZE] {
 		&self.0
 	}
 }
@@ -107,17 +100,17 @@ impl TryFrom<&[u8]> for Public {
 	type Error = ();
 
 	fn try_from(data: &[u8]) -> Result<Self, Self::Error> {
-		if data.len() != PUBLIC_SERIALIZED_LEN {
+		if data.len() != PUBLIC_SERIALIZED_SIZE {
 			return Err(())
 		}
-		let mut r = [0u8; PUBLIC_SERIALIZED_LEN];
+		let mut r = [0u8; PUBLIC_SERIALIZED_SIZE];
 		r.copy_from_slice(data);
 		Ok(Self::unchecked_from(r))
 	}
 }
 
 impl ByteArray for Public {
-	const LEN: usize = PUBLIC_SERIALIZED_LEN;
+	const LEN: usize = PUBLIC_SERIALIZED_SIZE;
 }
 
 impl TraitPublic for Public {}
@@ -142,16 +135,31 @@ impl sp_std::fmt::Debug for Public {
 	}
 }
 
+#[cfg(feature = "serde")]
+impl Serialize for Public {
+	fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+		serializer.serialize_str(&self.to_ss58check())
+	}
+}
+
+#[cfg(feature = "serde")]
+impl<'de> Deserialize<'de> for Public {
+	fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+		Public::from_ss58check(&String::deserialize(deserializer)?)
+			.map_err(|e| de::Error::custom(format!("{:?}", e)))
+	}
+}
+
 /// Bandersnatch signature.
 ///
 /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript
 /// `label`.
 #[cfg_attr(feature = "full_crypto", derive(Hash))]
 #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)]
-pub struct Signature([u8; SIGNATURE_SERIALIZED_LEN]);
+pub struct Signature([u8; SIGNATURE_SERIALIZED_SIZE]);
 
-impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_LEN]> for Signature {
-	fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_LEN]) -> Self {
+impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature {
+	fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Self {
 		Signature(raw)
 	}
 }
@@ -172,17 +180,17 @@ impl TryFrom<&[u8]> for Signature {
 	type Error = ();
 
 	fn try_from(data: &[u8]) -> Result<Self, Self::Error> {
-		if data.len() != SIGNATURE_SERIALIZED_LEN {
+		if data.len() != SIGNATURE_SERIALIZED_SIZE {
 			return Err(())
 		}
-		let mut r = [0u8; SIGNATURE_SERIALIZED_LEN];
+		let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE];
 		r.copy_from_slice(data);
 		Ok(Self::unchecked_from(r))
 	}
 }
 
 impl ByteArray for Signature {
-	const LEN: usize = SIGNATURE_SERIALIZED_LEN;
+	const LEN: usize = SIGNATURE_SERIALIZED_SIZE;
 }
 
 impl CryptoType for Signature {
@@ -204,7 +212,7 @@ impl sp_std::fmt::Debug for Signature {
 
 /// The raw secret seed, which can be used to reconstruct the secret [`Pair`].
 #[cfg(feature = "full_crypto")]
-type Seed = [u8; SEED_SERIALIZED_LEN];
+type Seed = [u8; SEED_SERIALIZED_SIZE];
 
 /// Bandersnatch secret key.
 #[cfg(feature = "full_crypto")]
@@ -232,10 +240,10 @@ impl TraitPair for Pair {
 	///
 	/// The slice must be 32 bytes long or it will return an error.
 	fn from_seed_slice(seed_slice: &[u8]) -> Result<Pair, SecretStringError> {
-		if seed_slice.len() != SEED_SERIALIZED_LEN {
+		if seed_slice.len() != SEED_SERIALIZED_SIZE {
 			return Err(SecretStringError::InvalidSeedLength)
 		}
-		let mut seed = [0; SEED_SERIALIZED_LEN];
+		let mut seed = [0; SEED_SERIALIZED_SIZE];
 		seed.copy_from_slice(seed_slice);
 		let secret = SecretKey::from_seed(&seed);
 		Ok(Pair { secret, seed })
@@ -266,7 +274,7 @@ impl TraitPair for Pair {
 
 	fn public(&self) -> Public {
 		let public = self.secret.to_public();
-		let mut raw = [0; PUBLIC_SERIALIZED_LEN];
+		let mut raw = [0; PUBLIC_SERIALIZED_SIZE];
 		public
 			.serialize_compressed(raw.as_mut_slice())
 			.expect("serialization length is constant and checked by test; qed");
@@ -344,7 +352,7 @@ pub mod vrf {
 
 	impl Encode for VrfOutput {
 		fn encode(&self) -> Vec<u8> {
-			let mut bytes = [0; PREOUT_SERIALIZED_LEN];
+			let mut bytes = [0; PREOUT_SERIALIZED_SIZE];
 			self.0
 				.serialize_compressed(bytes.as_mut_slice())
 				.expect("serialization length is constant and checked by test; qed");
@@ -354,21 +362,24 @@ pub mod vrf {
 
 	impl Decode for VrfOutput {
 		fn decode<R: codec::Input>(i: &mut R) -> Result<Self, codec::Error> {
-			let buf = <[u8; PREOUT_SERIALIZED_LEN]>::decode(i)?;
-			let preout = bandersnatch_vrfs::VrfPreOut::deserialize_compressed(buf.as_slice())
-				.map_err(|_| "vrf-preout decode error: bad preout")?;
+			let buf = <[u8; PREOUT_SERIALIZED_SIZE]>::decode(i)?;
+			let preout =
+				bandersnatch_vrfs::VrfPreOut::deserialize_compressed_unchecked(buf.as_slice())
+					.map_err(|_| "vrf-preout decode error: bad preout")?;
 			Ok(VrfOutput(preout))
 		}
 	}
 
+	impl EncodeLike for VrfOutput {}
+
 	impl MaxEncodedLen for VrfOutput {
 		fn max_encoded_len() -> usize {
-			<[u8; PREOUT_SERIALIZED_LEN]>::max_encoded_len()
+			<[u8; PREOUT_SERIALIZED_SIZE]>::max_encoded_len()
 		}
 	}
 
 	impl TypeInfo for VrfOutput {
-		type Identity = [u8; PREOUT_SERIALIZED_LEN];
+		type Identity = [u8; PREOUT_SERIALIZED_SIZE];
 
 		fn type_info() -> scale_info::Type {
 			Self::Identity::type_info()
@@ -395,10 +406,10 @@ pub mod vrf {
 	///   will contribute to the signature as well.
 	#[derive(Clone)]
 	pub struct VrfSignData {
-		/// VRF inputs to be signed.
-		pub inputs: VrfIosVec<VrfInput>,
 		/// Associated protocol transcript.
 		pub transcript: Transcript,
+		/// VRF inputs to be signed.
+		pub inputs: VrfIosVec<VrfInput>,
 	}
 
 	impl VrfSignData {
@@ -468,10 +479,10 @@ pub mod vrf {
 	/// Refer to [`VrfSignData`] for more details.
 	#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
 	pub struct VrfSignature {
-		/// VRF (pre)outputs.
-		pub outputs: VrfIosVec<VrfOutput>,
 		/// Transcript signature.
 		pub signature: Signature,
+		/// VRF (pre)outputs.
+		pub outputs: VrfIosVec<VrfOutput>,
 	}
 
 	#[cfg(feature = "full_crypto")]
@@ -539,7 +550,7 @@ pub mod vrf {
 			let outputs = VrfIosVec::truncate_from(outputs);
 
 			let mut signature =
-				VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_LEN]), outputs };
+				VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_SIZE]), outputs };
 
 			thin_signature
 				.proof
@@ -567,7 +578,7 @@ pub mod vrf {
 			data: &VrfSignData,
 			signature: &VrfSignature,
 		) -> bool {
-			let Ok(public) = PublicKey::deserialize_compressed(self.as_slice()) else {
+			let Ok(public) = PublicKey::deserialize_compressed_unchecked(self.as_slice()) else {
 				return false
 			};
 
@@ -577,10 +588,10 @@ pub mod vrf {
 			// Deserialize only the proof, the rest has already been deserialized
 			// This is another hack used because backend signature type is generic over
 			// the number of ios.
-			let Ok(proof) =
-				ThinVrfSignature::<0>::deserialize_compressed(signature.signature.as_ref())
-					.map(|s| s.proof)
-			else {
+			let Ok(proof) = ThinVrfSignature::<0>::deserialize_compressed_unchecked(
+				signature.signature.as_ref(),
+			)
+			.map(|s| s.proof) else {
 				return false
 			};
 			let signature = ThinVrfSignature { proof, preouts };
@@ -609,16 +620,100 @@ pub mod vrf {
 pub mod ring_vrf {
 	use super::{vrf::*, *};
 	pub use bandersnatch_vrfs::ring::{RingProof, RingProver, RingVerifier, KZG};
-	use bandersnatch_vrfs::{CanonicalDeserialize, PublicKey};
+	use bandersnatch_vrfs::{ring::VerifierKey, CanonicalDeserialize, PublicKey};
+
+	/// Ring max size (keyset max size).
+	pub const RING_MAX_SIZE: u32 = RING_DOMAIN_MAX_SIZE - RING_DOMAIN_OVERHEAD;
+
+	/// Ring domain max size.
+	pub const RING_DOMAIN_MAX_SIZE: u32 = 2048;
+
+	/// Overhead in the domain size over the max ring size.
+	///
+	/// Some bits of the domain are reserved for the zk proof to work.
+	pub(crate) const RING_DOMAIN_OVERHEAD: u32 = 257;
+
+	// Max size of serialized ring-vrf context params.
+	//
+	// The actual size is dependent on the ring domain size and this value
+	// has been computed for `RING_DOMAIN_MAX_SIZE` with compression disabled
+	// for performance reasons.
+	//
+	// 1024 uncompressed
+	// pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 295412;
+	// 1024 compressed
+	// pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 147716;
+	// 2048 uncompressed
+	pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 590324;
+	// 2048 compressed
+	// pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 295172;
+
+	pub(crate) const RING_VERIFIER_DATA_SERIALIZED_SIZE: usize = 388;
+	pub(crate) const RING_SIGNATURE_SERIALIZED_SIZE: usize = 755;
+
+	/// Remove as soon as serialization is implemented by the backend.
+	pub struct RingVerifierData {
+		/// Domain size.
+		pub domain_size: u32,
+		/// Verifier key.
+		pub verifier_key: VerifierKey,
+	}
+
+	impl From<RingVerifierData> for RingVerifier {
+		fn from(vd: RingVerifierData) -> RingVerifier {
+			bandersnatch_vrfs::ring::make_ring_verifier(vd.verifier_key, vd.domain_size as usize)
+		}
+	}
+
+	impl Encode for RingVerifierData {
+		fn encode(&self) -> Vec<u8> {
+			const ERR_STR: &str = "serialization length is constant and checked by test; qed";
+			let mut buf = [0; RING_VERIFIER_DATA_SERIALIZED_SIZE];
+			self.domain_size.serialize_compressed(&mut buf[..4]).expect(ERR_STR);
+			self.verifier_key.serialize_compressed(&mut buf[4..]).expect(ERR_STR);
+			buf.encode()
+		}
+	}
+
+	impl Decode for RingVerifierData {
+		fn decode<R: codec::Input>(i: &mut R) -> Result<Self, codec::Error> {
+			const ERR_STR: &str = "serialization length is constant and checked by test; qed";
+			let buf = <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::decode(i)?;
+			let domain_size =
+				<u32 as CanonicalDeserialize>::deserialize_compressed_unchecked(&mut &buf[..4])
+					.expect(ERR_STR);
+			let verifier_key = <bandersnatch_vrfs::ring::VerifierKey as CanonicalDeserialize>::deserialize_compressed_unchecked(&mut &buf[4..]).expect(ERR_STR);
+
+			Ok(RingVerifierData { domain_size, verifier_key })
+		}
+	}
 
-	/// Context used to produce ring signatures.
+	impl EncodeLike for RingVerifierData {}
+
+	impl MaxEncodedLen for RingVerifierData {
+		fn max_encoded_len() -> usize {
+			<[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::max_encoded_len()
+		}
+	}
+
+	impl TypeInfo for RingVerifierData {
+		type Identity = [u8; RING_VERIFIER_DATA_SERIALIZED_SIZE];
+
+		fn type_info() -> scale_info::Type {
+			Self::Identity::type_info()
+		}
+	}
+
+	/// Context used to construct ring prover and verifier.
 	#[derive(Clone)]
 	pub struct RingContext(KZG);
 
 	impl RingContext {
-		/// Build an dummy instance used for testing purposes.
+		/// Build a dummy instance for testing purposes.
+		///
+		/// `domain_size` is set to `RING_DOMAIN_MAX_SIZE`.
 		pub fn new_testing() -> Self {
-			Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32))
+			Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_MAX_SIZE))
 		}
 
 		/// Get the keyset max size.
@@ -630,7 +725,7 @@ pub mod ring_vrf {
 		pub fn prover(&self, public_keys: &[Public], public_idx: usize) -> Option<RingProver> {
 			let mut pks = Vec::with_capacity(public_keys.len());
 			for public_key in public_keys {
-				let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?;
+				let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?;
 				pks.push(pk.0.into());
 			}
 
@@ -643,7 +738,7 @@ pub mod ring_vrf {
 		pub fn verifier(&self, public_keys: &[Public]) -> Option<RingVerifier> {
 			let mut pks = Vec::with_capacity(public_keys.len());
 			for public_key in public_keys {
-				let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?;
+				let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?;
 				pks.push(pk.0.into());
 			}
 
@@ -651,13 +746,26 @@ pub mod ring_vrf {
 			let ring_verifier = self.0.init_ring_verifier(verifier_key);
 			Some(ring_verifier)
 		}
+
+		/// Information required for a lazy construction of a ring verifier.
+		pub fn verifier_data(&self, public_keys: &[Public]) -> Option<RingVerifierData> {
+			let mut pks = Vec::with_capacity(public_keys.len());
+			for public_key in public_keys {
+				let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?;
+				pks.push(pk.0.into());
+			}
+			Some(RingVerifierData {
+				verifier_key: self.0.verifier_key(pks),
+				domain_size: self.0.domain_size,
+			})
+		}
 	}
 
 	impl Encode for RingContext {
 		fn encode(&self) -> Vec<u8> {
-			let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_LEN]);
+			let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_MAX_SIZE]);
 			self.0
-				.serialize_compressed(buf.as_mut_slice())
+				.serialize_uncompressed(buf.as_mut_slice())
 				.expect("serialization length is constant and checked by test; qed");
 			buf.encode()
 		}
@@ -665,9 +773,9 @@ pub mod ring_vrf {
 
 	impl Decode for RingContext {
 		fn decode<R: codec::Input>(i: &mut R) -> Result<Self, codec::Error> {
-			let buf = <Box<[u8; RING_CONTEXT_SERIALIZED_LEN]>>::decode(i)?;
-			let kzg =
-				KZG::deserialize_compressed(buf.as_slice()).map_err(|_| "KZG decode error")?;
+			let buf = <Box<[u8; RING_CONTEXT_SERIALIZED_MAX_SIZE]>>::decode(i)?;
+			let kzg = KZG::deserialize_uncompressed_unchecked(buf.as_slice())
+				.map_err(|_| "KZG decode error")?;
 			Ok(RingContext(kzg))
 		}
 	}
@@ -676,12 +784,12 @@ pub mod ring_vrf {
 
 	impl MaxEncodedLen for RingContext {
 		fn max_encoded_len() -> usize {
-			<[u8; RING_CONTEXT_SERIALIZED_LEN]>::max_encoded_len()
+			<[u8; RING_CONTEXT_SERIALIZED_MAX_SIZE]>::max_encoded_len()
 		}
 	}
 
 	impl TypeInfo for RingContext {
-		type Identity = [u8; RING_CONTEXT_SERIALIZED_LEN];
+		type Identity = [u8; RING_CONTEXT_SERIALIZED_MAX_SIZE];
 
 		fn type_info() -> scale_info::Type {
 			Self::Identity::type_info()
@@ -691,10 +799,10 @@ pub mod ring_vrf {
 	/// Ring VRF signature.
 	#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
 	pub struct RingVrfSignature {
+		/// Ring signature.
+		pub signature: [u8; RING_SIGNATURE_SERIALIZED_SIZE],
 		/// VRF (pre)outputs.
 		pub outputs: VrfIosVec<VrfOutput>,
-		/// Ring signature.
-		pub signature: [u8; RING_SIGNATURE_SERIALIZED_LEN],
 	}
 
 	#[cfg(feature = "full_crypto")]
@@ -731,7 +839,7 @@ pub mod ring_vrf {
 			let outputs = VrfIosVec::truncate_from(outputs);
 
 			let mut signature =
-				RingVrfSignature { outputs, signature: [0; RING_SIGNATURE_SERIALIZED_LEN] };
+				RingVrfSignature { outputs, signature: [0; RING_SIGNATURE_SERIALIZED_SIZE] };
 
 			ring_signature
 				.proof
@@ -769,7 +877,7 @@ pub mod ring_vrf {
 			verifier: &RingVerifier,
 		) -> bool {
 			let Ok(vrf_signature) =
-				bandersnatch_vrfs::RingVrfSignature::<0>::deserialize_compressed(
+				bandersnatch_vrfs::RingVrfSignature::<0>::deserialize_compressed_unchecked(
 					self.signature.as_slice(),
 				)
 			else {
@@ -795,7 +903,7 @@ pub mod ring_vrf {
 mod tests {
 	use super::{ring_vrf::*, vrf::*, *};
 	use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE};
-	const DEV_SEED: &[u8; SEED_SERIALIZED_LEN] = &[0xcb; SEED_SERIALIZED_LEN];
+	const DEV_SEED: &[u8; SEED_SERIALIZED_SIZE] = &[0xcb; SEED_SERIALIZED_SIZE];
 
 	#[allow(unused)]
 	fn b2h(bytes: &[u8]) -> String {
@@ -808,9 +916,10 @@ mod tests {
 
 	#[test]
 	fn backend_assumptions_sanity_check() {
-		let kzg = KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32);
-		assert_eq!(kzg.max_keyset_size(), RING_DOMAIN_SIZE - 257);
-		assert_eq!(kzg.compressed_size(), RING_CONTEXT_SERIALIZED_LEN);
+		let kzg = KZG::testing_kzg_setup([0; 32], RING_DOMAIN_MAX_SIZE);
+		assert_eq!(kzg.max_keyset_size() as u32, RING_MAX_SIZE);
+
+		assert_eq!(kzg.uncompressed_size(), RING_CONTEXT_SERIALIZED_MAX_SIZE);
 
 		let pks: Vec<_> = (0..16)
 			.map(|i| SecretKey::from_seed(&[i as u8; 32]).to_public().0.into())
@@ -819,11 +928,14 @@ mod tests {
 		let secret = SecretKey::from_seed(&[0u8; 32]);
 
 		let public = secret.to_public();
-		assert_eq!(public.compressed_size(), PUBLIC_SERIALIZED_LEN);
+		assert_eq!(public.compressed_size(), PUBLIC_SERIALIZED_SIZE);
 
 		let input = VrfInput::new(b"foo", &[]);
 		let preout = secret.vrf_preout(&input.0);
-		assert_eq!(preout.compressed_size(), PREOUT_SERIALIZED_LEN);
+		assert_eq!(preout.compressed_size(), PREOUT_SERIALIZED_SIZE);
+
+		let verifier_key = kzg.verifier_key(pks.clone());
+		assert_eq!(verifier_key.compressed_size() + 4, RING_VERIFIER_DATA_SERIALIZED_SIZE);
 
 		let prover_key = kzg.prover_key(pks);
 		let ring_prover = kzg.init_ring_prover(prover_key, 0);
@@ -832,12 +944,12 @@ mod tests {
 
 		let thin_signature: bandersnatch_vrfs::ThinVrfSignature<0> =
 			secret.sign_thin_vrf(data.transcript.clone(), &[]);
-		assert_eq!(thin_signature.compressed_size(), SIGNATURE_SERIALIZED_LEN);
+		assert_eq!(thin_signature.compressed_size(), SIGNATURE_SERIALIZED_SIZE);
 
 		let ring_signature: bandersnatch_vrfs::RingVrfSignature<0> =
 			bandersnatch_vrfs::RingProver { ring_prover: &ring_prover, secret: &secret }
 				.sign_ring_vrf(data.transcript.clone(), &[]);
-		assert_eq!(ring_signature.compressed_size(), RING_SIGNATURE_SERIALIZED_LEN);
+		assert_eq!(ring_signature.compressed_size(), RING_SIGNATURE_SERIALIZED_SIZE);
 	}
 
 	#[test]
@@ -941,7 +1053,8 @@ mod tests {
 
 		let bytes = expected.encode();
 
-		let expected_len = data.inputs.len() * PREOUT_SERIALIZED_LEN + SIGNATURE_SERIALIZED_LEN + 1;
+		let expected_len =
+			data.inputs.len() * PREOUT_SERIALIZED_SIZE + SIGNATURE_SERIALIZED_SIZE + 1;
 		assert_eq!(bytes.len(), expected_len);
 
 		let decoded = VrfSignature::decode(&mut bytes.as_slice()).unwrap();
@@ -1055,7 +1168,7 @@ mod tests {
 		let bytes = expected.encode();
 
 		let expected_len =
-			data.inputs.len() * PREOUT_SERIALIZED_LEN + RING_SIGNATURE_SERIALIZED_LEN + 1;
+			data.inputs.len() * PREOUT_SERIALIZED_SIZE + RING_SIGNATURE_SERIALIZED_SIZE + 1;
 		assert_eq!(bytes.len(), expected_len);
 
 		let decoded = RingVrfSignature::decode(&mut bytes.as_slice()).unwrap();
@@ -1067,11 +1180,31 @@ mod tests {
 		let ctx1 = RingContext::new_testing();
 		let enc1 = ctx1.encode();
 
-		assert_eq!(enc1.len(), RingContext::max_encoded_len());
+		assert_eq!(enc1.len(), RING_CONTEXT_SERIALIZED_MAX_SIZE);
+		assert_eq!(RingContext::max_encoded_len(), RING_CONTEXT_SERIALIZED_MAX_SIZE);
 
 		let ctx2 = RingContext::decode(&mut enc1.as_slice()).unwrap();
 		let enc2 = ctx2.encode();
 
 		assert_eq!(enc1, enc2);
 	}
+
+	#[test]
+	fn encode_decode_verifier_data() {
+		let ring_ctx = RingContext::new_testing();
+
+		let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect();
+		assert!(pks.len() <= ring_ctx.max_keyset_size());
+
+		let verifier_data = ring_ctx.verifier_data(&pks).unwrap();
+		let enc1 = verifier_data.encode();
+
+		assert_eq!(enc1.len(), RING_VERIFIER_DATA_SERIALIZED_SIZE);
+		assert_eq!(RingVerifierData::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE);
+
+		let vd2 = RingVerifierData::decode(&mut enc1.as_slice()).unwrap();
+		let enc2 = vd2.encode();
+
+		assert_eq!(enc1, enc2);
+	}
 }