Unverified Commit 2dbfac31 authored by Peter Goodspeed-Niklaus's avatar Peter Goodspeed-Niklaus Committed by GitHub
Browse files

Message and Data Type Flow Visualizations (#2286)

* start graphing message flow between subsystems

This commit includes messages originating from the first three subsystems

* use standard arrowhead names

* add messages from bitfield distribution && use circo layout

* finish adding all subsystem message traffic to graph

* reduce line spam

* start work on graphing v1 types

* finish graphing candidate-related structs

* show hash relations, format

* show provenance of ValidityAttestation

* add the rest of the v1 polkadot primitives

* add type diagram from polkadot::parachain::primitives

* start work digramming the path to backing

* diagram message flow from collation generation -> candidate selection

* document flow through CandidateBacking, CandidateValidation

* graph data flow through StatementDistribution

* trace data flow through PoVDistribution to Backing

* finish documenting node side subsystem data flow
parent ab606e14
Pipeline #121742 passed with stages
in 25 minutes and 23 seconds
......@@ -7,6 +7,10 @@ title = "The Polkadot Parachain Host Implementers' Guide"
command = "mdbook-graphviz"
command = "mdbook-mermaid"
additional-js = ["mermaid.min.js", "mermaid-init.js"]
This diff is collapsed.
......@@ -11,3 +11,427 @@ Since this goal of determining when to start and conclude work relative to a spe
The work that subsystems spawn to be done on a specific relay-parent is known as a job. Subsystems should set up and tear down jobs according to the signals received from the overseer. Subsystems may share or cache state between jobs.
Subsystems must be robust to spurious exits. The outputs of the set of subsystems as a whole comprise signed messages and data committed to disk. Care must be taken to avoid issuing messages that are not substantiated. Since subsystems need to be safe under spurious exits, it is the expected behavior that an `OverseerSignal::Conclude` can just lead to breaking the loop and exiting directly as opposed to waiting for everything to shut down gracefully.
## Subsystem Message Traffic
Which subsystems send messages to which other subsystems.
**Note**: This diagram omits the overseer for simplicity. In fact, all messages are relayed via the overseer.
**Note**: Messages with a filled diamond arrowhead ("♦") include a `oneshot::Sender` which communicates a response from the recipient.
Messages with an open triangle arrowhead ("Δ") do not include a return sender.
```dot process
digraph {
node [shape = oval];
concentrate = true;
av_store [label = "Availability Store"]
avail_dist [label = "Availability Distribution"]
avail_rcov [label = "Availability Recovery"]
bitf_dist [label = "Bitfield Distribution"]
bitf_sign [label = "Bitfield Signing"]
cand_back [label = "Candidate Backing"]
cand_sel [label = "Candidate Selection"]
cand_val [label = "Candidate Validation"]
chn_api [label = "Chain API"]
coll_gen [label = "Collation Generation"]
coll_prot [label = "Collator Protocol"]
net_brdg [label = "Network Bridge"]
pov_dist [label = "PoV Distribution"]
provisioner [label = "Provisioner"]
runt_api [label = "Runtime API"]
stmt_dist [label = "Statement Distribution"]
av_store -> runt_api [arrowhead = "diamond", label = "Request::CandidateEvents"]
av_store -> chn_api [arrowhead = "diamond", label = "BlockNumber"]
av_store -> chn_api [arrowhead = "diamond", label = "BlockHeader"]
av_store -> runt_api [arrowhead = "diamond", label = "Request::Validators"]
av_store -> chn_api [arrowhead = "diamond", label = "FinalizedBlockHash"]
avail_dist -> net_brdg [arrowhead = "onormal", label = "Request::SendValidationMessages"]
avail_dist -> runt_api [arrowhead = "diamond", label = "Request::AvailabilityCores"]
avail_dist -> net_brdg [arrowhead = "onormal", label = "ReportPeer"]
avail_dist -> av_store [arrowhead = "diamond", label = "QueryDataAvailability"]
avail_dist -> av_store [arrowhead = "diamond", label = "QueryChunk"]
avail_dist -> av_store [arrowhead = "diamond", label = "StoreChunk"]
avail_dist -> runt_api [arrowhead = "diamond", label = "Request::Validators"]
avail_dist -> chn_api [arrowhead = "diamond", label = "Ancestors"]
avail_dist -> runt_api [arrowhead = "diamond", label = "Request::SessionIndexForChild"]
avail_rcov -> net_brdg [arrowhead = "onormal", label = "ReportPeer"]
avail_rcov -> av_store [arrowhead = "diamond", label = "QueryChunk"]
avail_rcov -> net_brdg [arrowhead = "diamond", label = "ConnectToValidators"]
avail_rcov -> net_brdg [arrowhead = "onormal", label = "SendValidationMessage::Chunk"]
avail_rcov -> net_brdg [arrowhead = "onormal", label = "SendValidationMessage::RequestChunk"]
bitf_dist -> net_brdg [arrowhead = "onormal", label = "ReportPeer"]
bitf_dist -> provisioner [arrowhead = "onormal", label = "ProvisionableData::Bitfield"]
bitf_dist -> net_brdg [arrowhead = "onormal", label = "SendValidationMessage"]
bitf_dist -> net_brdg [arrowhead = "onormal", label = "SendValidationMessage"]
bitf_dist -> runt_api [arrowhead = "diamond", label = "Request::Validators"]
bitf_dist -> runt_api [arrowhead = "diamond", label = "Request::SessionIndexForChild"]
bitf_sign -> av_store [arrowhead = "diamond", label = "QueryChunkAvailability"]
bitf_sign -> runt_api [arrowhead = "diamond", label = "Request::AvailabilityCores"]
bitf_sign -> bitf_dist [arrowhead = "onormal", label = "DistributeBitfield"]
cand_back -> av_store [arrowhead = "diamond", label = "StoreAvailableData"]
cand_back -> pov_dist [arrowhead = "diamond", label = "FetchPoV"]
cand_back -> cand_val [arrowhead = "diamond", label = "ValidateFromChainState"]
cand_back -> cand_sel [arrowhead = "onormal", label = "Invalid"]
cand_back -> provisioner [arrowhead = "onormal", label = "ProvisionableData::MisbehaviorReport"]
cand_back -> provisioner [arrowhead = "onormal", label = "ProvisionableData::BackedCandidate"]
cand_back -> pov_dist [arrowhead = "onormal", label = "DistributePoV"]
cand_back -> stmt_dist [arrowhead = "onormal", label = "Share"]
cand_sel -> coll_prot [arrowhead = "diamond", label = "FetchCollation"]
cand_sel -> cand_back [arrowhead = "onormal", label = "Second"]
cand_sel -> coll_prot [arrowhead = "onormal", label = "ReportCollator"]
cand_val -> runt_api [arrowhead = "diamond", label = "Request::PersistedValidationData"]
cand_val -> runt_api [arrowhead = "diamond", label = "Request::ValidationCode"]
cand_val -> runt_api [arrowhead = "diamond", label = "Request::CheckValidationOutputs"]
coll_gen -> coll_prot [arrowhead = "onormal", label = "DistributeCollation"]
coll_prot -> net_brdg [arrowhead = "onormal", label = "ReportPeer"]
coll_prot -> net_brdg [arrowhead = "onormal", label = "Declare"]
coll_prot -> net_brdg [arrowhead = "onormal", label = "AdvertiseCollation"]
coll_prot -> net_brdg [arrowhead = "onormal", label = "Collation"]
coll_prot -> net_brdg [arrowhead = "onormal", label = "RequestCollation"]
coll_prot -> cand_sel [arrowhead = "onormal", label = "Collation"]
net_brdg -> avail_dist [arrowhead = "onormal", label = "NetworkBridgeUpdateV1"]
net_brdg -> bitf_dist [arrowhead = "onormal", label = "NetworkBridgeUpdateV1"]
net_brdg -> pov_dist [arrowhead = "onormal", label = "NetworkBridgeUpdateV1"]
net_brdg -> stmt_dist [arrowhead = "onormal", label = "NetworkBridgeUpdateV1"]
net_brdg -> coll_prot [arrowhead = "onormal", label = "NetworkBridgeUpdateV1"]
pov_dist -> net_brdg [arrowhead = "onormal", label = "SendValidationMessage"]
pov_dist -> net_brdg [arrowhead = "onormal", label = "ReportPeer"]
provisioner -> cand_back [arrowhead = "diamond", label = "GetBackedCandidates"]
provisioner -> chn_api [arrowhead = "diamond", label = "BlockNumber"]
stmt_dist -> net_brdg [arrowhead = "onormal", label = "SendValidationMessage"]
stmt_dist -> net_brdg [arrowhead = "onormal", label = "ReportPeer"]
stmt_dist -> cand_back [arrowhead = "onormal", label = "Statement"]
stmt_dist -> runt_api [arrowhead = "diamond", label = "Request::Validators"]
stmt_dist -> runt_api [arrowhead = "diamond", label = "Request::SessionIndexForChild"]
## The Path to Inclusion (Node Side)
Let's contextualize that diagram a bit by following a parachain block from its creation through finalization.
Parachains can use completely arbitrary processes to generate blocks. The relay chain doesn't know or care about
the details; each parachain just needs to provide a [collator](collators/collation-generation.md).
**Note**: Inter-subsystem communications are relayed via the overseer, but that step is omitted here for brevity.
**Note**: Dashed lines indicate a request/response cycle, where the response is communicated asynchronously via
a oneshot channel. Adjacent dashed lines may be processed in parallel.
participant Overseer
participant CollationGeneration
participant RuntimeApi
participant CollatorProtocol
Overseer ->> CollationGeneration: ActiveLeavesUpdate
loop for each activated head
CollationGeneration -->> RuntimeApi: Request availability cores
CollationGeneration -->> RuntimeApi: Request validators
Note over CollationGeneration: Determine an appropriate ScheduledCore <br/>and OccupiedCoreAssumption
CollationGeneration -->> RuntimeApi: Request full validation data
Note over CollationGeneration: Build the collation
CollationGeneration ->> CollatorProtocol: DistributeCollation
The `DistributeCollation` message that `CollationGeneration` sends to the `CollatorProtocol` contains
two items: a `CandidateReceipt` and `PoV`. The `CollatorProtocol` is then responsible for distributing
that collation to interested validators. However, not all potential collations are of interest. The
`CandidateSelection` subsystem is responsible for determining which collations are interesting, before
`CollatorProtocol` actually fetches the collation.
participant CollationGeneration
participant CS as CollatorProtocol::CollatorSide
participant NB as NetworkBridge
participant VS as CollatorProtocol::ValidatorSide
participant CandidateSelection
CollationGeneration ->> CS: DistributeCollation
CS -->> NB: ConnectToValidators
Note over CS,NB: This connects to multiple validators.
CS ->> NB: Declare
NB ->> VS: Declare
Note over CS: Ensure that the connected validator is among<br/>the para's validator set. Otherwise, skip it.
CS ->> NB: AdvertiseCollation
NB ->> VS: AdvertiseCollation
VS ->> CandidateSelection: Collation
Note over CandidateSelection: Lots of other machinery in play here,<br/>but there are only three outcomes from the<br/>perspective of the `CollatorProtocol`:
alt happy path
CandidateSelection -->> VS: FetchCollation
Activate VS
VS ->> NB: RequestCollation
NB ->> CS: RequestCollation
CS ->> NB: Collation
NB ->> VS: Collation
Deactivate VS
else collation invalid or unexpected
CandidateSelection ->> VS: ReportCollator
VS ->> NB: ReportPeer
else CandidateSelection already selected a different candidate
Note over CandidateSelection: silently drop
Assuming we hit the happy path, flow continues with `CandidateSelection` receiving a `(candidate_receipt, pov)` as
the return value from its
`FetchCollation` request. The only time `CandidateSelection` actively requests a collation is when
it hasn't yet seconded one for some `relay_parent`, and is ready to second.
participant CS as CandidateSelection
participant CB as CandidateBacking
participant CV as CandidateValidation
participant PV as Provisioner
participant SD as StatementDistribution
participant PD as PoVDistribution
CS ->> CB: Second
% fn validate_and_make_available
CB -->> CV: ValidateFromChainState
Note over CB,CV: There's some complication in the source, as<br/>candidates are actually validated in a separate task.
alt valid
Note over CB: This is where we transform the CandidateReceipt into a CommittedCandidateReceipt
% CandidateBackingJob::sign_import_and_distribute_statement
% CandidateBackingJob::import_statement
CB ->> PV: ProvisionableData::BackedCandidate
% CandidateBackingJob::issue_new_misbehaviors
opt if there is misbehavior to report
CB ->> PV: ProvisionableData::MisbehaviorReport
% CandidateBackingJob::distribute_signed_statement
CB ->> SD: Share
% CandidateBackingJob::distribute_pov
CB ->> PD: DistributePoV
else invalid
CB ->> CS: Invalid
At this point, you'll see that control flows in two directions: to `StatementDistribution` to distribute
the `SignedStatement`, and to `PoVDistribution` to distribute the `PoV`. However, that's largely a mirage:
while the initial implementation distributes `PoV`s by gossip, that's inefficient, and will be replaced
with a system which fetches `PoV`s only when actually necessary.
> TODO: figure out more precisely the current status and plans; write them up
Therefore, we'll follow the `SignedStatement`. The `StatementDistribution` subsystem is largely concerned
with implementing a gossip protocol:
participant SD as StatementDistribution
participant NB as NetworkBridge
participant Listener
alt On receipt of a<br/>SignedStatement from CandidateBacking
% fn circulate_statement_and_dependents
SD ->> NB: SendValidationMessage
Note right of NB: Bridge sends validation message to all appropriate peers
else On initialization, from other subsystems:
Listener ->> SD: RegisterStatementListener
else On receipt of peer validation message
NB ->> SD: NetworkBridgeUpdateV1
% fn handle_incoming_message
alt if we aren't already aware of the relay parent for this statement
SD ->> NB: ReportPeer
else the statement corresponds to our View
Note over SD,Listener: Forward the statement to each registered listener
SD ->> Listener: SignedFullStatement
% fn circulate_statement
opt if we know of peers who haven't seen this message, gossip it
SD ->> NB: SendValidationMessage
But who are these `Listener`s who've asked to be notified about incoming `SignedStatement`s?
Nobody, as yet.
Let's pick back up with the PoV Distribution subsystem.
participant CB as CandidateBacking
participant PD as PoVDistribution
participant Listener
participant NB as NetworkBridge
CB ->> PD: DistributePoV
Note over PD,Listener: Various subsystems can register listeners for when PoVs arrive
loop for each Listener
PD ->> Listener: Arc<PoV>
Note over PD: Gossip to connected peers
PD ->> NB: SendPoV
Note over PD,NB: On receipt of a network PoV, PovDistribution forwards it to each Listener.<br/>It also penalizes bad gossipers.
Unlike in the case of `StatementDistribution`, there is another subsystem which in various circumstances
already registers a listener to be notified when a new `PoV` arrives: `CandidateBacking`. Note that this
is the second time that `CandidateBacking` has gotten involved. The first instance was from the perspective
of the validator choosing to second a candidate via its `CandidateSelection` subsystem. This time, it's
from the perspective of some other validator, being informed that this foreign `PoV` has been received.
participant SD as StatementDistribution
participant CB as CandidateBacking
participant PD as PoVDistribution
participant AS as AvailabilityStore
SD ->> CB: Statement
% CB::maybe_validate_and_import => CB::kick_off_validation_work
CB -->> PD: FetchPoV
Note over CB,PD: This call creates the Listener from the previous diagram
CB ->> AS: StoreAvailableData
At this point, things have gone a bit nonlinear. Let's pick up the thread again with `BitfieldSigning`. As
the `Overseer` activates each relay parent, it starts a `BitfieldSigningJob` which operates on an extremely
simple metric: after creation, it immediately goes to sleep for 1.5 seconds. On waking, it records the state
of the world pertaining to availability at that moment.
participant OS as Overseer
participant BS as BitfieldSigning
participant RA as RuntimeApi
participant AS as AvailabilityStore
participant BD as BitfieldDistribution
OS ->> BS: ActiveLeavesUpdate
loop for each activated relay parent
Note over BS: Wait 1.5 seconds
BS -->> RA: Request::AvailabilityCores
loop for each availability core
BS -->> AS: QueryChunkAvailability
BS ->> BD: DistributeBitfield
`BitfieldDistribution` is, like the other `*Distribution` subsystems, primarily interested in implementing
a peer-to-peer gossip network propagating its particular messages. However, it also serves as an essential
relay passing the message along.
participant BS as BitfieldSigning
participant BD as BitfieldDistribution
participant NB as NetworkBridge
participant PV as Provisioner
BS ->> BD: DistributeBitfield
BD ->> PV: ProvisionableData::Bitfield
BD ->> NB: SendValidationMessage::BitfieldDistribution::Bitfield
We've now seen the message flow to the `Provisioner`: both `CandidateBacking` and `BitfieldDistribution`
contribute provisionable data. Now, let's look at that subsystem.
Much like the `BitfieldSigning` subsystem, the `Provisioner` creates a new job for each newly-activated
leaf, and starts a timer. Unlike `BitfieldSigning`, we won't depict that part of the process, because
the `Provisioner` also has other things going on.
participant A as Arbitrary
participant PV as Provisioner
participant CB as CandidateBacking
participant BD as BitfieldDistribution
participant RA as RuntimeApi
participant PO as Proposer
alt receive request to forward block authorship data
A ->> PV: RequestBlockAuthorshipData
Note over A,PV: This request contains a mpsc::Sender, which the Provisioner keeps
else receive provisionable data
CB ->> PV: ProvisionableData
BD ->> PV: ProvisionableData
loop over stored Senders
PV ->> A: ProvisionableData
Note over PV: store bitfields and backed candidates
else receive request for inherent data
PO ->> PV: RequestInherentData
alt we have already constructed the inherent data
PV ->> PO: send the inherent data
else we have not yet constructed the inherent data
Note over PV,PO: Store the return sender without sending immediately
else timer times out
note over PV: Waited 2 seconds
PV -->> RA: RuntimeApiRequest::AvailabilityCores
Note over PV: construct and store the inherent data
loop over stored inherent data requests
PV ->> PO: (SignedAvailabilityBitfields, BackedCandidates)
In principle, any arbitrary subsystem could send a `RequestInherentData` to the `Provisioner`. In practice,
only the `Proposer` does so. Likewise, any arbitrary subsystem could send a `RequestBlockAuthorshipData`; the
distinction is that no subsystem currently does so.
The proposer is an atypical subsystem in that, unlike most of them, it is not primarily driven by
the `Overseer`, but instead by the `sp_consensus::Environment` and `sp_consensus::Proposer` traits
from Substrate. It doesn't make much sense to diagram this flow because it's very linear:
- Substrate creates a `Proposer` from the `ProposerFactory` once per upcoming block, using the `parent_header: Header`.
- At some later point, it calls `Proposer::propose(self, ...)`, consuming the proposer to generate a proposal
- `Proposer::propose` sends a `RequestInherentData` to the `Provisioner`. This has a fixed timeout of
2.5 seconds, meaning that the provisioner has approximately 0.5 seconds to generate and send the data.
The tuple `(SignedAvailabilityBitfields, BackedCandidates, ParentHeader)` is injected by the `Proposer`
into the inherent data. From that point on, control passes from the node to the runtime.
# Type Definitions
This section of the guide provides type definitions of various categories.
## V1 Overview
Diagrams are rendered in high resolution; open them in a separate tab to see full scale.
These data types are defined in `polkadot/primitives/src/v1.rs`:
```dot process
digraph {
rankdir = LR;
node [shape = plain]
CandidateDescriptor [label = <
<tr><td border="0" colspan="2" port="name">CandidateDescriptor&lt;H = Hash&gt;</td></tr>
<tr><td>para_id</td><td port="para_id">Id</td></tr>
<tr><td>relay_parent</td><td port="relay_parent">H</td></tr>
<tr><td>collator</td><td port="collator">CollatorId</td></tr>
<tr><td>persisted_validation_data_hash</td><td port="persisted_validation_data_hash">Hash</td></tr>
<tr><td>pov_hash</td><td port="pov_hash">Hash</td></tr>
<tr><td>erasure_root</td><td port="erasure_root">Hash</td></tr>
<tr><td>signature</td><td port="signature">CollatorSignature</td></tr>
CandidateDescriptor:para_id -> Id:w
CandidateDescriptor:pov_hash -> PoVHash
CandidateDescriptor:collator -> CollatorId:w
CandidateDescriptor:persisted_validation_data_hash -> PersistedValidationDataHash
Id [label="polkadot_parachain::primitives::Id"]
CollatorId [label="polkadot_primitives::v0::CollatorId"]
PoVHash [label = "Hash", shape="doublecircle", fill="gray90"]
PoVHash -> PoV:name
CandidateReceipt [label = <
<tr><td border="0" colspan="2" port="name">CandidateReceipt&lt;H = Hash&gt;</td></tr>
<tr><td>descriptor</td><td port="descriptor">CandidateDescriptor&lt;H&gt;</td></tr>
<tr><td>commitments_hash</td><td port="commitments_hash">Hash</td></tr>
CandidateReceipt:descriptor -> CandidateDescriptor:name
CandidateReceipt:commitments_hash -> CandidateCommitmentsHash
CandidateHash [label = "Hash", shape="doublecircle", fill="gray90"]
CandidateHash -> CandidateReceipt:name
CandidateCommitmentsHash [label = "Hash", shape="doublecircle", fill="gray90"]
CandidateCommitmentsHash -> CandidateCommitments:name
FullCandidateReceipt [label = <
<tr><td border="0" colspan="2" port="name">FullCandidateReceipt&lt;H = Hash, N = BlockNumber&gt;</td></tr>
<tr><td>inner</td><td port="inner">CandidateReceipt&lt;H&gt;</td></tr>
<tr><td>validation_data</td><td port="validation_data">ValidationData&lt;N&gt;</td></tr>
FullCandidateReceipt:inner -> CandidateReceipt:name
FullCandidateReceipt:validation_data -> ValidationData:name
CommittedCandidateReceipt [label = <
<tr><td border="0" colspan="2" port="name">CommittedCandidateReceipt&lt;H = Hash&gt;</td></tr>
<tr><td>descriptor</td><td port="descriptor">CandidateDescriptor&lt;H&gt;</td></tr>
<tr><td>commitments</td><td port="commitments">CandidateCommitments</td></tr>
CommittedCandidateReceipt:descriptor -> CandidateDescriptor:name
CommittedCandidateReceipt:commitments -> CandidateCommitments:name
ValidationData [label = <
<tr><td border="0" colspan="2" port="name">ValidationData&lt;N = BlockNumber&gt;</td></tr>
<tr><td>persisted</td><td port="persisted">PersistedValidationData&lt;N&gt;</td></tr>
<tr><td>transient</td><td port="transient">TransientValidationData&lt;N&gt;</td></tr>
ValidationData:persisted -> PersistedValidationData:name
ValidationData:transient -> TransientValidationData:name
PersistedValidationData [label = <
<tr><td border="0" colspan="2" port="name">PersistedValidationData&lt;N = BlockNumber&gt;</td></tr>
<tr><td>parent_head</td><td port="parent_head">HeadData</td></tr>
<tr><td>block_number</td><td port="block_number">N</td></tr>
<tr><td>relay_storage_root</td><td port="relay_storage_root">Hash</td></tr>
<tr><td>hrmp_mqc_heads</td><td port="hrmp_mqc_heads">Vec&lt;(Id, Hash)&gt;</td></tr>
<tr><td>dmq_mqc_head</td><td port="dmq_mqc_head">Hash</td></tr>
<tr><td>max_pov_size</td><td port="max_pov_size">u32</td></tr>
PersistedValidationData:parent_head -> HeadData:w
PersistedValidationData:hrmp_mqc_heads -> Id:w
PersistedValidationData:hrmp_mqc_heads -> MQCHash
PersistedValidationData:dmq_mqc_head -> MQCHash
PersistedValidationDataHash [label = "Hash", shape="doublecircle", fill="gray90"]
PersistedValidationDataHash -> PersistedValidationData:name
TransientValidationData [label = <
<tr><td border="0" colspan="2" port="name">TransientValidationData&lt;N = BlockNumber&gt;</td></tr>
<tr><td>max_code_size</td><td port="max_code_size">u32</td></tr>
<tr><td>max_head_data_size</td><td port="max_head_data_size">u32</td></tr>
<tr><td>balance</td><td port="balance">Balance</td></tr>
<tr><td>code_upgrade_allowed</td><td port="code_upgrade_allowed">Option&lt;N&gt;</td></tr>
<tr><td>dmq_length</td><td port="dmq_length">u32</td></tr>
TransientValidationData:balance -> "polkadot_core_primitives::v1::Balance":w
CandidateCommitments [label = <
<tr><td border="0" colspan="2" port="name">CandidateCommitments&lt;N = BlockNumber&gt;</td></tr>
<tr><td>upward_messages</td><td port="upward_messages">Vec&lt;UpwardMessage&gt;</td></tr>
<tr><td>horizontal_messages</td><td port="horizontal_messages">Vec&lt;OutboundHrmpMessage&lt;Id&gt;&gt;</td></tr>
<tr><td>new_validation_code</td><td port="new_validation_code">Option&lt;ValidationCode&gt;</td></tr>
<tr><td>head_data</td><td port="head_data">HeadData</td></tr>
<tr><td>processed_downward_messages</td><td port="processed_downward_messages">u32</td></tr>
<tr><td>hrmp_watermark</td><td port="hrmp_watermark">N</td></tr>
CandidateCommitments:upward_messages -> "polkadot_parachain::primitives::UpwardMessage":w
CandidateCommitments:horizontal_messages -> "polkadot_core_primitives::v1::OutboundHrmpMessage":w
CandidateCommitments:head_data -> HeadData:w
CandidateCommitments:horizontal_messages -> "polkadot_parachain::primitives::Id":w
CandidateCommitments:new_validation_code -> "polkadot_parachain::primitives::ValidationCode":w
PoV [label = <
<tr><td border="0" colspan="2" port="name">PoV</td></tr>
<tr><td>block_data</td><td port="block_data">BlockData</td></tr>
PoV:block_data -> "polkadot_parachain::primitives::BlockData":w
BackedCandidate [label = <
<tr><td border="0" colspan="2" port="name">BackedCandidate&lt;H = Hash&gt;</td></tr>
<tr><td>candidate</td><td port="candidate">CommittedCandidateReceipt&lt;H&gt;</td></tr>
<tr><td>validity_votes</td><td port="validity_votes">Vec&lt;ValidityAttestation&gt;</td></tr>
<tr><td>validator_indices</td><td port="validator_indices">BitVec</td></tr>
BackedCandidate:candidate -> CommittedCandidateReceipt:name
BackedCandidate:validity_votes -> "polkadot_primitives::v0::ValidityAttestation":w
HeadData [label = "polkadot_parachain::primitives::HeadData"]
CoreIndex [label = <