From 43be64f2f79d007f2e352c2bc7810a8e0619f210 Mon Sep 17 00:00:00 2001
From: Robert Habermeier <rphmeier@gmail.com>
Date: Fri, 30 Oct 2020 22:07:52 -0500
Subject: [PATCH] Implementer's guide: Approval Voting Subsystem (#1691)

* add storage for approvals module

* basics of approval logic

* fix links

* Session info module

* create an approvals_inherent module

* integrate approvals module with inclusion

* Remove approvals runtime

* tweak mentions of on-chain logic

* add note on finality-grandpa voting rule

* elaborate on node-side components

* stub for availability recovery

* add another note on voting rule

* Beginnings of approval subsystems

* flesh out approval voting now

* logic for checking assignment certs

* initial scheduler logic

* scheduler logic

* adjust tranche taking logic

* approval voting import

* approval work (voting side)

* amend some TODOs

* mark some TODOs

* describe `ApprovedAncestor`

* reference protocol-approval.md

* clarity on bitfield

* remove approvals_inherent

* tweak session_info module according to review

* formatting & nits

Co-authored-by: Robert Habermeier <robert@Roberts-MacBook-Pro.local>
---
 .../roadmap/implementers-guide/src/SUMMARY.md |   8 +
 .../implementers-guide/src/node/README.md     |   9 +-
 .../src/node/approval/README.md               |   7 +
 .../src/node/approval/approval-networking.md  |   7 +
 .../src/node/approval/approval-voting.md      | 247 ++++++++++++++++++
 .../node/approval/dispute-participation.md    |   5 +
 .../availability/availability-recovery.md     |  22 ++
 .../src/protocol-approval.md                  |  16 +-
 .../src/runtime/inclusion.md                  |   1 -
 .../src/runtime/session_info.md               |  55 ++++
 .../implementers-guide/src/types/approval.md  | 128 +++++++++
 .../src/types/overseer-protocol.md            |  74 ++++++
 .../implementers-guide/src/types/runtime.md   |  16 +-
 13 files changed, 586 insertions(+), 9 deletions(-)
 create mode 100644 polkadot/roadmap/implementers-guide/src/node/approval/README.md
 create mode 100644 polkadot/roadmap/implementers-guide/src/node/approval/approval-networking.md
 create mode 100644 polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
 create mode 100644 polkadot/roadmap/implementers-guide/src/node/approval/dispute-participation.md
 create mode 100644 polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
 create mode 100644 polkadot/roadmap/implementers-guide/src/runtime/session_info.md
 create mode 100644 polkadot/roadmap/implementers-guide/src/types/approval.md

diff --git a/polkadot/roadmap/implementers-guide/src/SUMMARY.md b/polkadot/roadmap/implementers-guide/src/SUMMARY.md
index d51f26f71ea..f37fc08f964 100644
--- a/polkadot/roadmap/implementers-guide/src/SUMMARY.md
+++ b/polkadot/roadmap/implementers-guide/src/SUMMARY.md
@@ -8,6 +8,7 @@
 - [Architecture Overview](architecture.md)
   - [Messaging Overview](messaging.md)
 - [Runtime Architecture](runtime/README.md)
+  - [ApprovalsInherent Module](runtime/approvals_inherent.md)
   - [Initializer Module](runtime/initializer.md)
   - [Configuration Module](runtime/configuration.md)
   - [Disputes Module](runtime/disputes.md)
@@ -16,6 +17,7 @@
   - [Inclusion Module](runtime/inclusion.md)
   - [InclusionInherent Module](runtime/inclusioninherent.md)
   - [Router Module](runtime/router.md)
+  - [Session Info Module](runtime/session_info.md)
 - [Runtime APIs](runtime-api/README.md)
   - [Validators](runtime-api/validators.md)
   - [Validator Groups](runtime-api/validator-groups.md)
@@ -39,8 +41,13 @@
     - [PoV Distribution](node/backing/pov-distribution.md)
   - [Availability Subsystems](node/availability/README.md)
     - [Availability Distribution](node/availability/availability-distribution.md)
+    - [Availability Recovery](node/availability/availability-recovery.md)
     - [Bitfield Distribution](node/availability/bitfield-distribution.md)
     - [Bitfield Signing](node/availability/bitfield-signing.md)
+  - [Approval Subsystems](node/approval/README.md)
+    - [Approval Voting](node/approval/approval-voting.md)
+    - [Approval Networking](node/approval/approval-networking.md)
+    - [Dispute Participation](node/approval/dispute-participation.md)
   - [Utility Subsystems](node/utility/README.md)
     - [Availability Store](node/utility/availability-store.md)
     - [Candidate Validation](node/utility/candidate-validation.md)
@@ -59,6 +66,7 @@
     - [Chain](types/chain.md)
     - [Messages](types/messages.md)
     - [Network](types/network.md)
+    - [Approvals](types/approval.md)
 
 [Glossary](glossary.md)
 [Further Reading](further-reading.md)
diff --git a/polkadot/roadmap/implementers-guide/src/node/README.md b/polkadot/roadmap/implementers-guide/src/node/README.md
index f6d7e7a887f..44eeb4bf977 100644
--- a/polkadot/roadmap/implementers-guide/src/node/README.md
+++ b/polkadot/roadmap/implementers-guide/src/node/README.md
@@ -10,7 +10,14 @@ The architecture of the node-side behavior aims to embody the Rust principles of
 
 Many operations that need to be carried out involve the network, which is asynchronous. This asynchrony affects all core subsystems that rely on the network as well. The approach of hierarchical state machines is well-suited to this kind of environment.
 
-We introduce a hierarchy of state machines consisting of an overseer supervising subsystems, where Subsystems can contain their own internal hierarchy of jobs. This is elaborated on in the next section on Subsystems.
+We introduce the components below to manage this complexity.
+
+## Components
+
+The node architecture consists of the following components:
+  * The Overseer (and subsystems): A hierarchy of state machines where an overseer supervises subsystems. Subsystems can contain their own internal hierarchy of jobs. This is elaborated on in the next section on Subsystems.
+  * A block proposer: Logic triggered by the consensus algorithm of the chain when the node should author a block.
+  * A GRANDPA voting rule: A strategy for selecting chains to vote on in the GRANDPA algorithm to ensure that only valid parachain candidates appear in finalized relay-chain blocks.
 
 ## Assumptions
 
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/README.md b/polkadot/roadmap/implementers-guide/src/node/approval/README.md
new file mode 100644
index 00000000000..41ba527f1b6
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/README.md
@@ -0,0 +1,7 @@
+# Approval Subsystems
+
+The approval subsystems implement the node-side of the [Approval Protocol](../../protocol-approval.md).
+
+We make a divide between the [assignment/voting logic](approval-voting.md) and the [networking](approval-networking.md) that distributes assignment certifications and approval votes. The assignment and voting logic also informs the GRANDPA voting rule on how to vote.
+
+This category of subsystems also contains a subsystem for [participating in live disputes](dispute-participation.md), which tracks all observed votes (backing or approval) by all validators on all candidates.
\ No newline at end of file
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-networking.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-networking.md
new file mode 100644
index 00000000000..558d4447c95
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-networking.md
@@ -0,0 +1,7 @@
+# Approval Networking
+
+> TODO: <https://github.com/paritytech/polkadot/issues/1603>
+
+## Protocol
+
+## Functionality
\ No newline at end of file
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
new file mode 100644
index 00000000000..4bafdd9e7ed
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
@@ -0,0 +1,247 @@
+# Approval Voting
+
+Reading the [section on the approval protocol](../../protocol-approval.md) will likely be necessary to understand the aims of this subsystem.
+
+## Protocol
+
+Input:
+  - `ApprovalVotingMessage::CheckAndImportAssignment`
+  - `ApprovalVotingMessage::CheckAndImportApproval`
+  - `ApprovalVotingMessage::ApprovedAncestor`
+
+Output:
+  - `ApprovalNetworkingMessage::DistributeAssignment`
+  - `ApprovalNetworkingMessage::DistributeApproval`
+  - `RuntimeApiMessage::Request`
+  - `ChainApiMessage`
+  - `AvailabilityRecoveryMessage::Recover`
+  - `CandidateExecutionMessage::ValidateFromExhaustive`
+
+## Functionality
+
+The approval voting subsystem is responsible for casting votes and determining the approval of candidates and, as a result, of blocks.
+
+This subsystem wraps a database which is used to store metadata about unfinalized blocks and the candidates within them. Candidates may appear in multiple blocks, and assignment criteria are chosen differently based on the hash of the block they appear in.
+
+## Database Schema
+
+The database schema is designed with the following goals in mind:
+  1. To provide an easy index from unfinalized blocks to candidates
+  1. To provide a lookup from candidate hash to approval status
+  1. To be easy to clear on start-up. What has happened while we were offline is unimportant.
+  1. To be fast to clear entries outdated by finality
+
+Structs:
+
+```rust
+struct TrancheEntry {
+    tranche: DelayTranche,
+    // assigned validators who have not yet approved, and the instant we received
+    // their assignment.
+    assignments: Vec<(ValidatorIndex, Tick)>,
+}
+
+struct OurAssignment {
+  cert: AssignmentCert,
+  tranche: DelayTranche,
+  validator_index: ValidatorIndex,
+  triggered: bool,
+}
+
+struct ApprovalEntry {
+    tranches: Vec<TrancheEntry>, // sorted ascending by tranche number.
+    backing_group: GroupIndex,
+    // When the next wakeup for this entry should occur. This is either to
+    // check a no-show or to check if we need to broadcast an assignment.
+    next_wakeup: Tick,
+    our_assignment: Option<OurAssignment>,
+    assignments: Bitfield, // n_validators bits
+    approved: bool,
+}
+
+struct CandidateEntry {
+    candidate: CandidateReceipt,
+    session: SessionIndex,
+    // Assignments are based on blocks, so we need to track assignments separately
+    // based on the block we are looking at.
+    block_assignments: HashMap<Hash, ApprovalEntry>,
+    approvals: Bitfield, // n_validators bits
+}
+
+struct BlockEntry {
+    block_hash: Hash,
+    session: SessionIndex,
+    slot: SlotNumber,
+    received_late_by: Duration,
+    // random bytes derived from the VRF submitted within the block by the block
+    // author as a credential and used as input to approval assignment criteria.
+    relay_vrf_story: [u8; 32],
+    // The candidates included as-of this block and the index of the core they are
+    // leaving. Sorted ascending by core index.
+    candidates: Vec<(CoreIndex, Hash)>,
+    // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
+    // The i'th bit is `true` iff the candidate has been approved in the context of
+    // this block. The block can be considered approved if all bits are set to 1.
+    approved_bitfield: Bitfield,
+    rotation_offset: GroupIndex,
+    children: Vec<Hash>,
+}
+
+// slot_duration * 2 + DelayTranche gives the number of delay tranches since the
+// unix epoch.
+type Tick = u64;
+
+struct TrackerEntry 
+
+struct StoredBlockRange(BlockNumber, BlockNumber)
+```
+
+In the schema, we map
+
+```
+"StoredBlocks" => StoredBlockRange
+BlockNumber => Vec<BlockHash>
+BlockHash => BlockEntry
+CandidateHash => CandidateEntry
+```
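+
+A minimal sketch of how these keys might be laid out, assuming `BlockNumber` is a `u32`, `Hash` is `AsRef<[u8]>`, and that the prefixes are purely illustrative rather than a prescribed layout:
+
+```rust
+const STORED_BLOCKS_KEY: &[u8] = b"StoredBlocks";
+
+// Key for the `BlockNumber => Vec<BlockHash>` map.
+fn blocks_at_height_key(number: u32) -> Vec<u8> {
+    let mut key = b"blocks_at_height:".to_vec();
+    key.extend_from_slice(&number.to_be_bytes());
+    key
+}
+
+// Key for the `BlockHash => BlockEntry` map.
+fn block_entry_key(block_hash: &Hash) -> Vec<u8> {
+    let mut key = b"block_entry:".to_vec();
+    key.extend_from_slice(block_hash.as_ref());
+    key
+}
+
+// Key for the `CandidateHash => CandidateEntry` map.
+fn candidate_entry_key(candidate_hash: &Hash) -> Vec<u8> {
+    let mut key = b"candidate_entry:".to_vec();
+    key.extend_from_slice(candidate_hash.as_ref());
+    key
+}
+```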
+
+## Logic
+
+In-memory state:
+
+```rust
+struct ApprovalVoteRequest {
+  validator_index: ValidatorIndex,
+  block_hash: Hash,
+  candidate_index: u32,
+}
+
+struct State {
+    earliest_session: SessionIndex,
+    session_info: Vec<SessionInfo>,
+    keystore: KeyStorePtr,
+    wakeups: BTreeMap<Tick, Vec<(Hash, Hash)>>, // Tick -> [(Relay Block, Candidate Hash)]
+    
+    // These are connected to each other.
+    approval_vote_tx: mpsc::Sender<ApprovalVoteRequest>,
+    approval_vote_rx: mpsc::Receiver<ApprovalVoteRequest>,
+}
+```
+
+[`SessionInfo`](../../runtime/session_info.md)
+
+On start-up, we clear everything currently stored by the database. This is done by loading the `StoredBlockRange`, iterating through each block number, iterating through each block hash, and iterating through each candidate referenced by each block. Although this is a triple iteration (block numbers, forks per number, candidates per block), we don't expect to have more than a few unfinalized blocks at any time and, in extreme cases, a few thousand. The clearing operation should be relatively fast as a result.
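+
+A sketch of this clearing pass, assuming a simple key-value `Database` handle with typed load/delete helpers for the schema above, and a half-open `StoredBlockRange` (error handling elided):
+
+```rust
+fn clear_on_startup(db: &mut Database) {
+    // The range of unfinalized block numbers we were tracking before shutdown.
+    let Some(StoredBlockRange(start, end)) = db.load_stored_block_range() else { return };
+
+    for number in start..end {
+        // Every fork we knew of at this height.
+        for block_hash in db.load_blocks_at_height(number) {
+            if let Some(block_entry) = db.load_block_entry(&block_hash) {
+                // Drop every candidate referenced by this block.
+                for (_core, candidate_hash) in block_entry.candidates {
+                    db.delete_candidate_entry(&candidate_hash);
+                }
+            }
+            db.delete_block_entry(&block_hash);
+        }
+        db.delete_blocks_at_height(number);
+    }
+
+    db.delete_stored_block_range();
+}
+```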
+
+Main loop:
+  * Each iteration, select over all of
+    * The next `Tick` in `wakeups`: trigger `process_wakeup` for each `(Hash, Hash)` pair scheduled under the `Tick` and then remove all entries under the `Tick` (see the draining sketch after this list).
+    * The next message from the overseer: handle the message as described in the [Incoming Messages section](#incoming-messages)
+    * The next request from `approval_vote_rx`: handle with `issue_approval`
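+
+A sketch of that draining step, assuming the `State` type above and a `now` tick obtained from the local clock:
+
+```rust
+// Remove and return every (relay block, candidate) pair scheduled at or before `now`.
+fn drain_due_wakeups(state: &mut State, now: Tick) -> Vec<(Hash, Hash)> {
+    // `split_off` keeps everything scheduled strictly after `now` for later.
+    let not_yet_due = state.wakeups.split_off(&(now + 1));
+    let due = std::mem::replace(&mut state.wakeups, not_yet_due);
+    due.into_values().flatten().collect()
+}
+```
+
+Each returned pair is then fed to `process_wakeup`, as described under the utility routines below.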
+
+### Incoming Messages
+
+#### `OverseerSignal::BlockFinalized`
+
+On receiving an `OverseerSignal::BlockFinalized(h)`, we fetch the block number `b` of that block from the ChainApi subsystem. We update our `StoredBlockRange` to begin at `b+1`. Additionally, we remove all block entries and the candidates referenced by them up to and including `b`. Lastly, we prune any forks that conflict with the finalized chain: when we remove a `BlockEntry` at height `b` whose hash is not `h`, we recursively delete all the `BlockEntry`s referenced as its children. For each removed block, we remove its `block_assignments` entry from each referenced `CandidateEntry`, and if `block_assignments` becomes empty, we remove the `CandidateEntry` as well.
+
+
+#### `OverseerSignal::ActiveLeavesUpdate`
+
+On receiving an `OverseerSignal::ActiveLeavesUpdate(update)`:
+  * We determine the set of new blocks that were not in our previous view. This is done by querying the ancestry of all new items in the view and contrasting against the stored `BlockNumber`s. Typically, there will be only one new block. We fetch the headers and information on these blocks from the ChainApi subsystem. 
+  * We update the `StoredBlockRange` and the `BlockNumber` maps. We use the RuntimeApiSubsystem to determine the set of candidates included in these blocks and use BABE logic to determine the slot number and VRF of the blocks. 
+  * We also note how late we appear to have received the block. We create a `BlockEntry` for each block and a `CandidateEntry` for each candidate obtained from `CandidateIncluded` events after making a `RuntimeApiRequest::CandidateEvents` request.
+  * Ensure that the `CandidateEntry` contains a `block_assignments` entry for the block, with the correct backing group set.
+  * If we are a validator in this session, compute and assign `our_assignment` for the `block_assignments` (a sketch follows this list):
+    * Only if we are not a member of the candidate's backing group.
+    * Run `RelayVRFModulo` and `RelayVRFDelay` according to [the approvals protocol section](../../protocol-approval.md#assignment-criteria).
+  * Invoke `process_wakeup(relay_block, candidate)` for each new candidate in each new block - this will automatically broadcast a 0-tranche assignment, kick off approval work, and schedule the next delay.
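+
+A sketch of the self-assignment step, treating `ValidatorIndex` and `GroupIndex` as `u32` newtypes and using hypothetical `compute_relay_vrf_modulo` / `compute_relay_vrf_delay` helpers that implement the criteria from the approvals protocol:
+
+```rust
+// Compute `our_assignment` for one candidate in a newly imported block.
+fn assign_ourselves(
+    session: &SessionInfo,
+    block: &BlockEntry,
+    candidate: &mut CandidateEntry,
+    our_index: ValidatorIndex,
+) {
+    let approval_entry = candidate
+        .block_assignments
+        .get_mut(&block.block_hash)
+        .expect("entry created when the block was imported");
+
+    // Members of the backing group never assign themselves to the same candidate.
+    let backers = &session.validator_groups[approval_entry.backing_group.0 as usize];
+    if backers.contains(&our_index) {
+        return;
+    }
+
+    // Prefer a zero-tranche `RelayVRFModulo` hit on this candidate's core;
+    // otherwise fall back to the `RelayVRFDelay` criterion.
+    approval_entry.our_assignment = compute_relay_vrf_modulo(session, block, our_index)
+        .or_else(|| compute_relay_vrf_delay(session, block, our_index));
+}
+```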
+
+#### `ApprovalVotingMessage::CheckAndImportAssignment`
+
+On receiving an `ApprovalVotingMessage::CheckAndImportAssignment` message, we check the assignment cert against the block entry. The cert itself contains the information necessary to determine which candidate is being assigned to. In detail:
+  * Load the `BlockEntry` for the relay-parent referenced by the message. If there is none, return `VoteCheckResult::Report`.
+  * Fetch the `SessionInfo` for the session of the block
+  * Determine the assignment key of the validator based on that.
+  * Check the assignment cert
+    * If the cert kind is `RelayVRFModulo`, then the certificate is valid as long as `sample < session_info.relay_vrf_samples` and the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ sample.encode()` as described in [the approvals protocol section](../../protocol-approval.md#assignment-criteria). We set `core_index = vrf.make_bytes().to_u32() % session_info.n_cores`. If the `BlockEntry` causes inclusion of a candidate at `core_index`, then this is a valid assignment for the candidate at `core_index` and has delay tranche 0. Otherwise, it can be ignored.
+    * If the cert kind is `RelayVRFDelay`, then we check if the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ cert.core_index.encode()` as described in [the approvals protocol section](../../protocol-approval.md#assignment-criteria). The cert can be ignored if the block did not cause inclusion of a candidate on that core index. Otherwise, this is a valid assignment for the included candidate. The delay tranche for the assignment is computed as `(vrf.make_bytes().to_u64() % (session_info.n_delay_tranches + session_info.zeroth_delay_tranche_width)).saturating_sub(session_info.zeroth_delay_tranche_width)` (both computations are sketched after this list).
+    * `import_checked_assignment`
+    * return the appropriate `VoteCheckResult` on the response channel.
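+
+The two arithmetic steps above, sketched with the VRF output already reduced to an integer (`vrf.make_bytes().to_u32()` / `.to_u64()` in the text) and treating `CoreIndex` as a `u32` newtype:
+
+```rust
+// Core targeted by a `RelayVRFModulo` cert.
+fn relay_vrf_modulo_core(vrf_output: u32, session: &SessionInfo) -> CoreIndex {
+    CoreIndex(vrf_output % session.n_cores)
+}
+
+// Delay tranche implied by a `RelayVRFDelay` cert. Values that land within the
+// zeroth-tranche width all collapse to tranche 0.
+fn relay_vrf_delay_tranche(vrf_output: u64, session: &SessionInfo) -> DelayTranche {
+    let wide_zero = session.zeroth_delay_tranche_width as u64;
+    let modulus = session.n_delay_tranches as u64 + wide_zero;
+    (vrf_output % modulus).saturating_sub(wide_zero) as DelayTranche
+}
+```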
+
+#### `ApprovalVotingMessage::CheckAndImportApproval`
+
+On receiving a `CheckAndImportApproval(indirect_approval_vote, response_channel)` message:
+  * Fetch the `BlockEntry` from the indirect approval vote's `block_hash`. If none, return `VoteCheckResult::Bad`.
+  * Fetch the `CandidateEntry` from the indirect approval vote's `candidate_index`. If the block did not trigger inclusion of enough candidates, return `VoteCheckResult::Bad`.
+  * Construct a `SignedApprovalVote` using the candidate hash and check against the validator's approval key, based on the session info of the block. If invalid or no such validator, return `VoteCheckResult::Bad`.
+  * Send `VoteCheckResult::Accepted` on the response channel.
+  * `import_checked_approval(BlockEntry, CandidateEntry, ValidatorIndex)`
+
+#### `ApprovalVotingMessage::ApprovedAncestor`
+
+On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`:
+  * Iterate over the ancestry of the hash, starting from the provided block hash and walking back to the given block number (a sketch of this walk follows the list).
+  * Keep track of an `all_approved_max: Option<Hash>`.
+  * For each block hash encountered, load the `BlockEntry` associated. If any are not found, return `None` on the response channel and conclude.
+  * If the block entry's `approved_bitfield` has all bits set to 1 and `all_approved_max == None`, set `all_approved_max = Some(current_hash)`.
+  * If the block entry's `approved_bitfield` has any 0 bits, set `all_approved_max = None`.
+  * After iterating all ancestry, return `all_approved_max`.
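+
+A sketch of this walk, assuming helper lookups for a block's number and parent (for example via the ChainApi subsystem), a small `Copy` `Hash` type, and a `Bitfield` with an `all()` check:
+
+```rust
+fn approved_ancestor(db: &Database, mut hash: Hash, lower_bound: BlockNumber) -> Option<Hash> {
+    let mut all_approved_max = None;
+    let mut number = db.block_number(&hash)?;
+
+    // Walk back towards the lower bound; exact bound handling is a detail.
+    while number > lower_bound {
+        // Missing entries mean we cannot answer the query.
+        let entry = db.load_block_entry(&hash)?;
+
+        if entry.approved_bitfield.all() {
+            // Highest fully-approved block seen so far on the way down.
+            all_approved_max.get_or_insert(hash);
+        } else {
+            // An unapproved ancestor invalidates everything above it.
+            all_approved_max = None;
+        }
+
+        hash = db.parent_hash(&hash)?;
+        number -= 1;
+    }
+
+    all_approved_max
+}
+```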
+
+### Utility
+
+#### `import_checked_assignment`
+  * Load the candidate in question and access the `approval_entry` for the block hash the cert references.
+  * Ensure the validator index is not part of the backing group for the candidate.
+  * Ensure the validator index is not present in the approval entry already.
+  * Create a tranche entry for the delay tranche in the approval entry and note the assignment within it.
+  * Note the validator's assignment in the approval entry's `assignments` bitfield.
+
+#### `import_checked_approval(BlockEntry, CandidateEntry, ValidatorIndex)`
+  * Set the corresponding bit of the `approvals` bitfield in the `CandidateEntry` to `1`.
+  * For each `ApprovalEntry` in the `CandidateEntry` (typically only 1), check whether the validator is assigned as a checker.
+    * If so, set `n_tranches = tranches_to_approve(approval_entry)`.
+    * If `check_approval(block_entry, approval_entry, n_tranches)` is true, set the corresponding bit in the `block_entry.approved_bitfield`.
+
+#### `tranches_to_approve(approval_entry) -> tranches`
+  * Determine the number of tranches `n_tranches` our view of the protocol requires of this approval entry (a sketch follows this list).
+    * First, take tranches until we have at least `session_info.needed_approvals` assignments. Call the number of tranches taken `k`.
+    * Then, count no-shows in tranches `0..k`. For each no-show, we require another checker. Take new tranches until each no-show is covered, so now we've taken `l` tranches. e.g. if there are 2 no-shows, we might only need to take 1 additional tranche with >= 2 assignments. Or we might need to take 3 tranches, where one is empty and the other two have 1 assignment each.
+    * Count no-shows in tranches `k..l` and for each of those, take tranches until all no-shows are covered. Repeat until either
+      * We run out of tranches to take, having not received any assignments past a certain point. In this case we set `n_tranches` to a special value `ALL`, which indicates that new assignments are needed.
+      * All no-shows are covered. Set `n_tranches` to the number of tranches taken.
+    * Return `n_tranches`.
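+
+One way to sketch this rule is as a fixed point: every uncovered no-show raises the number of assignments we must gather by one. Here `is_no_show` stands in for the clock check against `no_show_slots`, `approvals` is the candidate's approval bitfield, `ValidatorIndex` is treated as a `u32` newtype, and `RequiredTranches::All` corresponds to the special `ALL` value above:
+
+```rust
+enum RequiredTranches {
+    // We ran out of tranches before covering everything: new assignments are needed.
+    All,
+    // Tranches up to and including this one suffice.
+    UpTo(DelayTranche),
+}
+
+fn tranches_to_approve(
+    tranches: &[TrancheEntry],   // sorted ascending by tranche number
+    approvals: &Bitfield,
+    needed_approvals: usize,
+    is_no_show: impl Fn(ValidatorIndex, Tick) -> bool,
+) -> RequiredTranches {
+    let mut taken = 0;           // tranche entries taken so far
+    let mut assigned = 0;        // assignments within the taken tranches
+    let mut required = needed_approvals;
+
+    loop {
+        // Take tranches until we have gathered at least `required` assignments.
+        while assigned < required {
+            match tranches.get(taken) {
+                Some(t) => {
+                    assigned += t.assignments.len();
+                    taken += 1;
+                }
+                None => return RequiredTranches::All,
+            }
+        }
+
+        // A no-show is an assigned validator who has neither approved nor is
+        // still within their no-show window.
+        let no_shows = tranches[..taken]
+            .iter()
+            .flat_map(|t| t.assignments.iter())
+            .filter(|(v, received)| !approvals[v.0 as usize] && is_no_show(*v, *received))
+            .count();
+
+        if assigned >= needed_approvals + no_shows {
+            // Every no-show is covered by an extra assignment.
+            let highest = tranches[..taken].last().map_or(0, |t| t.tranche);
+            return RequiredTranches::UpTo(highest);
+        }
+
+        // Otherwise raise the requirement and take more tranches.
+        required = needed_approvals + no_shows;
+    }
+}
+```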
+
+#### `check_approval(block_entry, approval_entry, n_tranches) -> bool`
+  * If `n_tranches` is `ALL`, return `false`.
+  * Otherwise, if every assigned validator within the first `n_tranches` tranches has approved, return `true`. If any assigned validator in these tranches has not yet approved but is not yet considered a no-show, return `false`.
+
+#### `process_wakeup(relay_block, candidate_hash)`
+  * Load the `BlockEntry` and `CandidateEntry` from disk. If either is not present, this may have lost a race with finality and can be ignored. Also load the `ApprovalEntry` for the block and candidate.
+  * Set `n_tranches = tranches_to_approve(approval_entry)`
+  * If `OurAssignment` has tranche `<= n_tranches`, the tranche is live according to our local clock (based on the block's slot), and we have not yet triggered the assignment:
+    * Import to `ApprovalEntry`
+    * Broadcast on network with an `ApprovalNetworkingMessage::DistributeAssignment`.
+    * Kick off approval work with `launch_approval`
+  * Schedule another wakeup based on `next_wakeup`
+
+#### `next_wakeup(approval_entry, candidate_entry)`:
+  * Return the earlier of our next no-show timeout or the tranche of our assignment, if not yet triggered
+  * Our next no-show timeout is computed by finding the earliest-received assignment within `n_tranches` for which we have not received an approval and adding `to_ticks(session_info.no_show_slots)` to it.
+
+#### `launch_approval(SessionIndex, CandidateDescriptor, ValidatorIndex, block_hash, candidate_index)`:
+  * Extract the public key of the `ValidatorIndex` from the `SessionInfo` for the session.
+  * Issue an `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session_index, response_sender)`
+  * Load the historical validation code of the parachain (TODO: https://github.com/paritytech/polkadot/issues/1877)
+  * Spawn a background task with a clone of `approval_vote_tx` (sketched after this list):
+    * Wait for the available data
+    * Issue a `CandidateValidationMessage::ValidateFromExhaustive` message
+    * Wait for the result of validation
+    * If valid, issue a message on `approval_vote_tx` detailing the request.
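+
+A sketch of that background task. It owns the receiving end of the `RecoverAvailableData` response channel issued above and a clone of `approval_vote_tx`; the channel to candidate validation and the `validate_candidate_exhaustive` helper (a thin wrapper issuing `CandidateValidationMessage::ValidateFromExhaustive` and awaiting the result) stand in for the overseer plumbing, and trait imports are elided:
+
+```rust
+async fn background_approval_work(
+    available_data_rx: oneshot::Receiver<Option<AvailableData>>,
+    mut to_validation: mpsc::Sender<CandidateValidationMessage>,
+    mut approval_vote_tx: mpsc::Sender<ApprovalVoteRequest>,
+    candidate: CandidateDescriptor,
+    validation_code: ValidationCode,
+    validator_index: ValidatorIndex,
+    block_hash: Hash,
+    candidate_index: u32,
+) {
+    // 1. Wait for the available data to be recovered from the network.
+    let available_data = match available_data_rx.await {
+        Ok(Some(data)) => data,
+        // Unavailable, or the recovery subsystem went away: nothing to approve.
+        _ => return,
+    };
+
+    // 2. Re-execute the candidate against the historical validation code.
+    let valid =
+        validate_candidate_exhaustive(&mut to_validation, &candidate, validation_code, available_data).await;
+
+    // 3. Only a successful validation produces an approval request; an invalid
+    //    result would instead feed into dispute participation.
+    if valid {
+        let _ = approval_vote_tx
+            .send(ApprovalVoteRequest { validator_index, block_hash, candidate_index })
+            .await;
+    }
+}
+```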
+
+#### `issue_approval(request)`:
+  * Fetch the block entry and candidate entry. Ignore if `None` - we've probably just lost a race with finality.
+  * Construct a `SignedApprovalVote` with the validator index for the session.
+  * Transform into an `IndirectSignedApprovalVote` using the `block_hash` and `candidate_index` from the request.
+  * `import_checked_approval(block_entry, candidate_entry, validator_index)`
+  * Dispatch an `ApprovalNetworkingMessage::DistributeApproval` message.
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/dispute-participation.md b/polkadot/roadmap/implementers-guide/src/node/approval/dispute-participation.md
new file mode 100644
index 00000000000..10c278c20df
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/dispute-participation.md
@@ -0,0 +1,5 @@
+# Dispute Participation
+
+## Protocol
+
+## Functionality
\ No newline at end of file
diff --git a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
new file mode 100644
index 00000000000..e4a23e0c00c
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
@@ -0,0 +1,22 @@
+# Availability Recovery
+
+> TODO: <https://github.com/paritytech/polkadot/issues/1597>
+
+This subsystem is the inverse of the [Availability Distribution](availability-distribution.md) subsystem: validators will serve the availability chunks kept in the availability store to nodes that connect to them. This subsystem also implements the other side: the logic for nodes to connect to validators, request availability chunks, and reconstruct the `AvailableData`. A rough sketch of the requesting side is given under Functionality below.
+
+## Protocol
+
+`PeerSet`: `Validation`
+
+Input:
+
+- NetworkBridgeUpdateV1(update)
+- TODO: input message to request a fetch.
+
+Output:
+
+- NetworkBridge::SendValidationMessage
+- NetworkBridge::ReportPeer
+- AvailabilityStore::QueryChunk
+
+## Functionality
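+
+A rough sketch of the requesting side, where `request_chunk` and `reconstruct_from_chunks` are assumed helpers and the actual strategy (ordering, timeouts, local-store queries) is part of the TODO above:
+
+```rust
+async fn recover_available_data(
+    validators: &[ValidatorId],
+    candidate_hash: Hash,
+    threshold: usize, // minimum number of chunks needed to reconstruct
+) -> Option<AvailableData> {
+    let mut chunks = Vec::new();
+
+    for (chunk_index, validator) in validators.iter().enumerate() {
+        if let Some(chunk) = request_chunk(validator, &candidate_hash, chunk_index as u32).await {
+            chunks.push(chunk);
+        }
+
+        if chunks.len() >= threshold {
+            // Reconstruction can still fail if some chunks are malformed.
+            return reconstruct_from_chunks(&chunks).ok();
+        }
+    }
+
+    None
+}
+```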
diff --git a/polkadot/roadmap/implementers-guide/src/protocol-approval.md b/polkadot/roadmap/implementers-guide/src/protocol-approval.md
index 189a2d8e8c3..828724916a3 100644
--- a/polkadot/roadmap/implementers-guide/src/protocol-approval.md
+++ b/polkadot/roadmap/implementers-guide/src/protocol-approval.md
@@ -28,7 +28,7 @@ Approval has roughly two parts:
 
 - **Approval checks** listens to the assignments subsystem for outgoing assignment notices that we shall check specific candidates.  It then performs these checks by first invoking the reconstruction subsystem to obtain the candidate, second invoking the candidate validity utility subsystem upon the candidate, and finally sending out an approval vote, or perhaps initiating a dispute.
 
-These both run first as off-chain consensus protocols using messages gossiped among all validators, and second as an on-chain record of this off-chain protocols' progress after the fact.  We need the on-chain protocol to provide rewards for the on-chain protocol, and doing an on-chain protocol simplify interaction with GRANDPA.  
+These both run first as off-chain consensus protocols using messages gossiped among all validators, and second as an on-chain record of these off-chain protocols' progress after the fact.  We need the on-chain protocol to provide rewards for the off-chain protocol.
 
 Approval requires two gossiped message types, assignment notices created by its assignments subsystem, and approval votes sent by our approval checks subsystem when authorized by the candidate validity utility subsystem.  
 
@@ -102,11 +102,11 @@ Assignment criteria come in three flavors, `RelayVRFModulo`, `RelayVRFDelay` and
 
 Among these, we have two distinct VRF output computations:
 
-`RelayVRFModulo` runs several distinct samples whose VRF input is the `RelayVRFStory` and the sample number.  It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "core", reduces this number modulo the number of availability cores, and outputs the candidate just declared available by, and included by aka leaving, that availability core.  We drop any samples that return no candidate because no candidate was leaving the sampled availability core in this relay chain block.  We choose three samples initially, but we could make polkadot more secure and efficient by increasing this to four or five, and reducing the backing checks accordingly.  All successful `RelayVRFModulo` samples are assigned delay tranche zero.
+`RelayVRFModulo` runs several distinct samples whose VRF input is the `RelayVRFStory` and the sample number.  It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Core", reduces this number modulo the number of availability cores, and outputs the candidate just declared available by, and included by aka leaving, that availability core.  We drop any samples that return no candidate because no candidate was leaving the sampled availability core in this relay chain block.  We choose three samples initially, but we could make polkadot more secure and efficient by increasing this to four or five, and reducing the backing checks accordingly.  All successful `RelayVRFModulo` samples are assigned delay tranche zero.
 
 There is no sampling process for `RelayVRFDelay` and `RelayEquivocation`.  We instead run them on specific candidates and they compute a delay from their VRF output.  `RelayVRFDelay` runs for all candidates included under, aka declared available by, a relay chain block, and inputs the associated VRF output via `RelayVRFStory`.  `RelayEquivocation` runs only on candidate block equivocations, and inputs their block hashes via the `RelayEquivocation` story.
 
-`RelayVRFDelay` and `RelayEquivocation` both compute their output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "tranche" and reduce the result modulo `num_delay_tranches + zeroth_delay_tranche_width`, and consolidate results 0 through `zeroth_delay_tranche_width` to be 0.  In this way, they ensure the zeroth delay tranche has `zeroth_delay_tranche_width+1` times as many assignments as any other tranche.
+`RelayVRFDelay` and `RelayEquivocation` both compute their output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Tranche" and reduce the result modulo `num_delay_tranches + zeroth_delay_tranche_width`, and consolidate results 0 through `zeroth_delay_tranche_width` to be 0.  In this way, they ensure the zeroth delay tranche has `zeroth_delay_tranche_width+1` times as many assignments as any other tranche.
 
 As future work (or TODO?), we should merge assignment notices with the same delay and story using `vrf_merge`.  We cannot merge those with the same delay and different stories because `RelayEquivocationStory`s could change but `RelayVRFStory` never changes. 
 
@@ -152,7 +152,7 @@ TODO: When?  Is this optimal for the network?  etc.
 
 ## On-chain verification
 
-We should verify approval on-chain to reward approval checkers and to simplify integration with GRANDPA.  We therefore require the "no show" timeout to be longer than a relay chain slot so that we can witness "no shows" on-chain, which helps with both these goals.
+We should verify approval on-chain to reward approval checkers. We therefore require the "no show" timeout to be longer than a relay chain slot so that we can witness "no shows" on-chain, which helps with this goal. The major challenge with an on-chain record of the off-chain process is adversarial block producers who may either censor votes or publish votes to the chain which cause other votes to be ignored and unrewarded (reward stealing).
 
 In principle, all validators have some "tranche" at which they're assigned to the parachain candidate, which ensures we reach enough validators eventually.  As noted above, we often retract "no shows" when the slow validator eventually shows up, so witnessing their initially being a "no show" helps manage rewards.
 
@@ -186,6 +186,14 @@ Any validator could send their assignment notices and/or approval votes too earl
 
 Assignment notices being gossiped too early might create a denial of service vector.  If so, we might exploit the relative time scheme that synchronises our clocks, which conceivably permits just dropping excessively early assignments. 
 
+## Finality GRANDPA Voting Rule
+
+The relay-chain requires validators to participate in GRANDPA. In GRANDPA, validators submit off-chain votes on what they believe to be the best block of the chain, and GRANDPA determines the common block contained by a supermajority of sub-chains. There are also additional constraints on what can be submitted based on results of previous rounds of voting.
+
+In order to avoid finalizing anything which has not received enough approval votes or is disputed, we will pair the approval protocol with an alteration to the GRANDPA voting strategy for honest nodes which causes them to vote only on chains where every parachain candidate within has been approved.  Furthermore, the voting rule prevents voting for chains where there is any live dispute or where any dispute has resolved to a candidate being invalid.
+
+Thus, the finalized relay-chain should contain only relay-chain blocks where a majority believe that every parachain candidate within has been sufficiently approved.
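+
+A minimal sketch of the shape of such a rule. This is not the actual `sc-finality-grandpa` voting-rule interface; `approved_ancestor` mirrors the `ApprovedAncestor` query of the approval-voting subsystem and `has_blocking_dispute` is a placeholder for the dispute tracker:
+
+```rust
+// Never vote beyond the highest ancestor of our currently-best block that is
+// fully approved and free of blocking disputes.
+fn restrict_vote(
+    current_target: (Hash, BlockNumber),
+    best_finalized_number: BlockNumber,
+    approved_ancestor: impl Fn(Hash, BlockNumber) -> Option<(Hash, BlockNumber)>,
+    has_blocking_dispute: impl Fn(&Hash) -> bool,
+) -> Option<(Hash, BlockNumber)> {
+    // Highest ancestor (possibly the target itself) where everything up to and
+    // including it has been approved.
+    let (hash, number) = approved_ancestor(current_target.0, best_finalized_number)?;
+
+    // If that chain contains a live dispute or a dispute that concluded against a
+    // candidate, fall back to not voting beyond what is already final.
+    if has_blocking_dispute(&hash) {
+        return None;
+    }
+
+    Some((hash, number))
+}
+```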
+
 ### Future work
 
 We could consider additional gossip messages with which nodes claims "slow availability" and/or "slow candidate" to fine tune the assignments "no show" system, but long enough "no show" delays suffice probably.
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md
index 17dbdc94cc0..46984e276d7 100644
--- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md
+++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md
@@ -56,7 +56,6 @@ All failed checks should lead to an unrecoverable error making the block invalid
   1. apply each bit of bitfield to the corresponding pending candidate. looking up parathread cores using the `core_lookup`. Disregard bitfields that have a `1` bit for any free cores.
   1. For each applied bit of each availability-bitfield, set the bit for the validator in the `CandidatePendingAvailability`'s `availability_votes` bitfield. Track all candidates that now have >2/3 of bits set in their `availability_votes`. These candidates are now available and can be enacted.
   1. For all now-available candidates, invoke the `enact_candidate` routine with the candidate and relay-parent number.
-  1. > TODO: pass it onwards to `Validity` module.
   1. Return a list of freed cores consisting of the cores where candidates have become available.
 * `process_candidates(BackedCandidates, scheduled: Vec<CoreAssignment>, group_validators: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>)`:
   1. check that each candidate corresponds to a scheduled core and that they are ordered in the same order the cores appear in assignments in `scheduled`.
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/session_info.md b/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
new file mode 100644
index 00000000000..697e79fab51
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
@@ -0,0 +1,55 @@
+# Session Info
+
+For disputes and approvals, we need access to information about validator sets from prior sessions. We also often want easy access to the same information about the current session's validator set. This module aggregates and stores this information in a rolling window while providing easy APIs for access.
+
+## Storage
+
+Helper structs:
+
+```rust
+struct SessionInfo {
+    // validators in canonical ordering.
+    validators: Vec<ValidatorId>,
+    // validators' authority discovery keys for the session in canonical ordering.
+    discovery_keys: Vec<DiscoveryId>,
+    // The assignment and approval keys for validators.
+    approval_keys: Vec<(AssignmentId, ApprovalId)>,
+    // validators in shuffled ordering - these are the validator groups as produced
+    // by the `Scheduler` module for the session and are typically referred to by
+    // `GroupIndex`.
+    validator_groups: Vec<Vec<ValidatorIndex>>,
+    // The number of availability cores used by the protocol during this session.
+    n_cores: u32,
+    // the zeroth delay tranche width.
+    zeroth_delay_tranche_width: u32,
+    // The number of samples we do of relay_vrf_modulo.
+    relay_vrf_modulo_samples: u32,
+    // The number of delay tranches in total.
+    n_delay_tranches: u32,
+    // How many slots (BABE / SASSAFRAS) must pass before an assignment is considered a
+    // no-show.
+    no_show_slots: u32,
+    // The number of validators needed to approve a block.
+    needed_approvals: u32,
+}
+```
+
+Storage Layout: 
+
+```rust
+/// The earliest session for which previous session info is stored.
+EarliestStoredSession: SessionIndex,
+/// Session information. Should have an entry from `EarliestStoredSession..=CurrentSessionIndex`
+Sessions: map SessionIndex => Option<SessionInfo>,
+```
+
+## Session Change
+
+1. Update the `CurrentSessionIndex`.
+1. Update `EarliestStoredSession` based on `config.dispute_period` and remove all entries from `Sessions` from the previous value up to the new value (see the sketch after this list).
+1. Create a new entry in `Sessions` with information about the current session.
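+
+A sketch of this rolling-window update as runtime pseudocode, written against the storage layout above with `dispute_period` drawn from the `HostConfiguration`:
+
+```rust
+fn on_new_session(new_session: SessionIndex, dispute_period: SessionIndex, info: SessionInfo) {
+    let old_earliest = EarliestStoredSession::get();
+    let new_earliest = new_session.saturating_sub(dispute_period);
+
+    // Drop everything that has fallen out of the dispute window.
+    for session in old_earliest..new_earliest {
+        Sessions::remove(&session);
+    }
+
+    EarliestStoredSession::put(new_earliest);
+    Sessions::insert(&new_session, info);
+}
+```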
+
+## Routines
+
+* `earliest_stored_session() -> SessionIndex`: Yields the earliest session for which we have information stored.
+* `session_info(session: SessionIndex) -> Option<SessionInfo>`: Yields the session info for the given session, if stored.
\ No newline at end of file
diff --git a/polkadot/roadmap/implementers-guide/src/types/approval.md b/polkadot/roadmap/implementers-guide/src/types/approval.md
new file mode 100644
index 00000000000..1f44dc43ffd
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/types/approval.md
@@ -0,0 +1,128 @@
+# Approval Types
+
+## ApprovalId
+
+The public key of a keypair used by a validator for approval voting.
+
+## AssignmentId
+
+The public key of a keypair used by a validator for determining assignments in the approval protocol.
+
+## AssignmentCert
+
+An `AssignmentCert`, short for Assignment Certificate, is a piece of data provided by a validator to prove that they have been selected to perform secondary approval checks on an included candidate.
+
+These certificates can be checked in the context of a specific block, candidate, and validator assignment VRF key. The block state will also provide further context about the availability core states at that block.
+
+```rust
+enum AssignmentCertKind {
+    RelayVRFModulo {
+        relay_vrf: (VRFInOut, VRFProof),
+        sample: u32,
+    },
+    RelayVRFDelay {
+        relay_vrf: (VRFInOut, VRFProof),
+        core_index: CoreIndex,
+    }
+}
+
+struct AssignmentCert {
+    // The criterion which is claimed to be met by this cert.
+    kind: AssignmentCertKind,
+    // The VRF showing the criterion is met.
+    vrf: VRFInOut,
+}
+```
+
+> TODO: RelayEquivocation cert. Probably can only be broadcast to chains that have handled an equivocation report.
+
+## ApprovalVote
+
+A vote of approval on a candidate.
+
+```rust
+struct ApprovalVote(Hash);
+```
+
+## SignedApprovalVote
+
+```rust
+struct SignedApprovalVote {
+    vote: ApprovalVote,
+    validator: ValidatorIndex,
+    signature: ApprovalSignature,
+}
+```
+
+## IndirectSignedApprovalVote
+
+A signed approval vote which references the candidate indirectly via the block. If there exists a look-up to the candidate hash from the block hash and candidate index, then this can be transformed into a `SignedApprovalVote`.
+
+Although this vote references the candidate by a specific block hash and candidate index, the vote actually applies to the candidate hash itself: the same candidate may be included in multiple blocks, and an approval of the candidate in one context counts for it in any other. A sketch of the conversion follows the struct below.
+
+```rust
+struct IndirectSignedApprovalVote {
+    // A block hash where the candidate appears.
+    block_hash: Hash,
+    // The index of the candidate in the list of candidates fully included as-of the block.
+    candidate_index: u32,
+    validator: ValidatorIndex,
+    signature: ApprovalSignature,
+}
+```
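+
+A sketch of resolving an indirect vote, given a lookup from (block hash, candidate index) to the candidate hash, for example the `candidates` list kept in the approval-voting subsystem's `BlockEntry`; `candidate_hash_at` here is an assumed closure, not a fixed API:
+
+```rust
+fn resolve_indirect_vote(
+    indirect: IndirectSignedApprovalVote,
+    candidate_hash_at: impl Fn(&Hash, u32) -> Option<Hash>,
+) -> Option<SignedApprovalVote> {
+    let candidate_hash = candidate_hash_at(&indirect.block_hash, indirect.candidate_index)?;
+
+    // The underlying vote is on the candidate hash, so the existing signature carries over.
+    Some(SignedApprovalVote {
+        vote: ApprovalVote(candidate_hash),
+        validator: indirect.validator,
+        signature: indirect.signature,
+    })
+}
+```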
+
+## CheckedAssignmentCert
+
+An assignment cert for which both the VRF and the validity of the implied assignment according to the selection criteria rules of the protocol have been checked. This type should be declared in such a way as to be instantiable only when the checks have actually been done. Fields should be accessible via getters, not direct struct access.
+
+```rust
+struct CheckedAssignmentCert {
+    cert: AssignmentCert,
+    validator: ValidatorIndex,
+    relay_block: Hash,
+    candidate_hash: Hash,
+    delay_tranche: DelayTranche,
+}
+```
+
+## DelayTranche
+
+```rust
+type DelayTranche = u32;
+```
+
+## RelayVRFStory
+
+Assignment criteria are based on possible stories about the relay-chain block that included the candidate. More information on stories is available in [the informational page on approvals](../protocol-approval.md#stories).
+
+```rust
+/// A story based on the VRF that authorized the relay-chain block where the candidate was
+/// included.
+///
+/// VRF Context is "A&V RC-VRF"
+struct RelayVRFStory(VRFInOut);
+```
+
+## RelayEquivocationStory
+
+```rust
+/// A story based on the candidate hash itself. Should be used when a candidate is an
+/// equivocation: when there are two relay-chain blocks with the same RelayVRFStory, but only
+/// one contains the candidate.
+///
+/// VRF Context is "A&V RC-EQUIV"
+struct RelayEquivocationStory(Hash);
+```
+
+## ExecutionTimePair
+
+```rust
+struct ExecutionTimePair {
+    // The absolute time in milliseconds that the validator claims to have taken
+    // with the block.
+    absolute: u32,
+    // The validator's believed ratio in execution time to the average, expressed as a fixed-point
+    // 16-bit unsigned integer with 8 bits before and after the point.
+    ratio: FixedU16,
+}
+```
\ No newline at end of file
diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
index d25553e03aa..a6561ca661d 100644
--- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
+++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
@@ -10,6 +10,8 @@ Signals from the overseer to a subsystem to request change in execution that has
 enum OverseerSignal {
   /// Signal about a change in active leaves.
   ActiveLeavesUpdate(ActiveLeavesUpdate),
+  /// Signal about a new best finalized block.
+  BlockFinalized(Hash),
   /// Conclude all operation.
   Conclude,
 }
@@ -33,6 +35,63 @@ struct ActiveLeavesUpdate {
 }
 ```
 
+## Approval Voting
+
+Messages received by the approval voting subsystem.
+
+```rust
+enum VoteCheckResult {
+	// The vote was accepted and should be propagated onwards.
+	Accepted,
+	// The vote was bad and should be ignored, reporting the peer who propagated it.
+	Bad,
+	// We do not have enough information to evaluate the vote. Ignore but don't report.
+	// This should occur primarily on startup.
+	Ignore,
+}
+
+enum ApprovalVotingMessage {
+	/// Check if the assignment is valid and can be accepted by our view of the protocol.
+	/// Should not be sent unless the block hash is known.
+	CheckAndImportAssignment(
+		Hash, 
+		AssignmentCert, 
+		ValidatorIndex,
+		ResponseChannel<VoteCheckResult>,
+	),
+	/// Check if the approval vote is valid and can be accepted by our view of the
+	/// protocol.
+	///
+	/// Should not be sent unless the block hash within the indirect vote is known.
+	CheckAndImportApproval(
+		IndirectSignedApprovalVote,
+		ResponseChannel<VoteCheckResult>,
+	),
+	/// Returns the highest possible ancestor hash of the provided block hash which is
+	/// acceptable to vote on finality for. 
+	/// The `BlockNumber` provided is the number of the block's ancestor which is the
+	/// earliest possible vote.
+	/// 
+	/// It can also return the same block hash, if that is acceptable to vote upon. 
+	/// Return `None` if the input hash is unrecognized.
+	ApprovedAncestor(Hash, BlockNumber, ResponseChannel<Option<Hash>>),
+}
+```
+
+## Approval Networking
+
+Messages received by the approval networking subsystem.
+
+```rust
+enum ApprovalNetworkingMessage {
+	/// Distribute an assignment cert from the local validator. The cert is assumed
+	/// to be valid for the given relay-parent and validator index.
+	DistributeAssignment(Hash, AssignmentCert, ValidatorIndex),
+	/// Distribute an approval vote for the local validator.
+	DistributeApproval(IndirectSignedApprovalVote),
+}
+```
+
 ## All Messages
 
 > TODO (now)
@@ -55,6 +114,21 @@ enum AvailabilityDistributionMessage {
 }
 ```
 
+## Availability Recovery Message
+
+Messages received by the availability recovery subsystem.
+
+```rust
+enum AvailabilityRecoveryMessage {
+	/// Recover available data from validators on the network.
+	RecoverAvailableData(
+		CandidateDescriptor, 
+		SessionIndex, 
+		ResponseChannel<Option<AvailableData>>,
+	),
+}
+```
+
 ## Availability Store Message
 
 Messages to and from the availability store.
diff --git a/polkadot/roadmap/implementers-guide/src/types/runtime.md b/polkadot/roadmap/implementers-guide/src/types/runtime.md
index 9bcecb7aa5b..6fe51fa2f06 100644
--- a/polkadot/roadmap/implementers-guide/src/types/runtime.md
+++ b/polkadot/roadmap/implementers-guide/src/types/runtime.md
@@ -12,9 +12,6 @@ struct HostConfiguration {
 	pub validation_upgrade_frequency: BlockNumber,
 	/// The delay, in blocks, before a validation upgrade is applied.
 	pub validation_upgrade_delay: BlockNumber,
-	/// The acceptance period, in blocks. This is the amount of blocks after availability that validators
-	/// and fishermen have to perform secondary approval checks or issue reports.
-	pub acceptance_period: BlockNumber,
 	/// The maximum validation code size, in bytes.
 	pub max_code_size: u32,
 	/// The maximum head-data size, in bytes.
@@ -34,6 +31,19 @@ struct HostConfiguration {
 	pub thread_availability_period: BlockNumber,
 	/// The amount of blocks ahead to schedule parathreads.
 	pub scheduling_lookahead: u32,
+	/// The number of sessions to keep for disputes.
+	pub dispute_period: SessionIndex,
+	/// The number of consensus slots that must pass between submitting an assignment and
+	/// submitting an approval vote before a validator is considered a no-show.
+	/// Must be at least 1.
+	pub no_show_slots: u32,
+	/// The width of the zeroth delay tranche for approval assignments. This many delay tranches
+	/// beyond 0 are all consolidated to form a wide 0 tranche.
+	pub zeroth_delay_tranche_width: u32,
+	/// The number of validators needed to approve a block.
+	pub needed_approvals: u32,
+	/// The number of samples to do of the RelayVRFModulo approval assignment criterion.
+	pub relay_vrf_modulo_samples: u32,
 	/// Total number of individual messages allowed in the parachain -> relay-chain message queue.
 	pub max_upward_queue_count: u32,
 	/// Total size of messages allowed in the parachain -> relay-chain message queue before which
-- 
GitLab