rx.await.map_err(|e| Error::QueryChunkResponseChannel(e))
}
async fn store_chunk<Context>(
ctx: &mut Context,
candidate_hash: CandidateHash,
validator_index: ValidatorIndex,
erasure_chunk: ErasureChunk,
) -> Result<std::result::Result<(), ()>>
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
let (tx, rx) = oneshot::channel();
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreChunk {
candidate_hash,
relay_parent,
validator_index,
chunk: erasure_chunk,
tx,
}
)).await
.map_err(|e| Error::StoreChunkSendQuery(e))?;
rx.await.map_err(|e| Error::StoreChunkResponseChannel(e))
}
/// Query the candidate pending availability for a particular para.
///
/// (The previous doc comment claimed this requested "head data"; the runtime
/// API call is `CandidatePendingAvailability`, so the comment was wrong.)
///
/// Returns `Ok(None)` when the runtime reports no candidate pending
/// availability for `para` at `relay_parent`.
async fn query_pending_availability<Context>(
	ctx: &mut Context,
	relay_parent: Hash,
	para: ParaId,
) -> Result<Option<CommittedCandidateReceipt>>
where
	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
	let (tx, rx) = oneshot::channel();
	ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
		relay_parent,
		RuntimeApiRequest::CandidatePendingAvailability(para, tx),
	)))
	.await
	.map_err(|e| Error::QueryPendingAvailabilitySendQuery(e))?;
	// First `?` handles a dropped response channel, the second unwraps the
	// runtime API's own error.
	rx.await
		.map_err(|e| Error::QueryPendingAvailabilityResponseChannel(e))?
		.map_err(|e| Error::QueryPendingAvailability(e))
}
/// Query the validator set active at `relay_parent`.
async fn query_validators<Context>(
	ctx: &mut Context,
	relay_parent: Hash,
) -> Result<Vec<ValidatorId>>
where
	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
	let (tx, rx) = oneshot::channel();
	// Fire the runtime API request and wait for the answer on `rx`.
	ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
		relay_parent,
		RuntimeApiRequest::Validators(tx),
	)))
	.await
	.map_err(|e| Error::QueryValidatorsSendQuery(e))?;
	// Outer error: response channel dropped; inner error: runtime API failure.
	rx.await
		.map_err(|e| Error::QueryValidatorsResponseChannel(e))?
		.map_err(|e| Error::QueryValidators(e))
}
/// Query the hashes of the `k` ancestors of `relay_parent` from the chain API.
async fn query_k_ancestors<Context>(
	ctx: &mut Context,
	relay_parent: Hash,
	k: usize,
) -> Result<Vec<Hash>>
where
	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
	let (tx, rx) = oneshot::channel();
	// Ask the chain API for up to `k` ancestors, answered via `tx`.
	ctx.send_message(AllMessages::ChainApi(ChainApiMessage::Ancestors {
		hash: relay_parent,
		k,
		response_channel: tx,
	}))
	.await
	.map_err(|e| Error::QueryAncestorsSendQuery(e))?;
	// Outer error: response channel dropped; inner error: chain API failure.
	rx.await
		.map_err(|e| Error::QueryAncestorsResponseChannel(e))?
		.map_err(|e| Error::QueryAncestors(e))
}
/// Query the session index of a relay parent
async fn query_session_index_for_child<Context>(
ctx: &mut Context,
relay_parent: Hash,
) -> Result<SessionIndex>
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
let (tx, rx) = oneshot::channel();
let query_session_idx_for_child = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionIndexForChild(tx),
));
ctx.send_message(query_session_idx_for_child)
.await
.map_err(|e| Error::QuerySessionSendQuery(e))?;
rx.await
.map_err(|e| Error::QuerySessionResponseChannel(e))?
.map_err(|e| Error::QuerySession(e))
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
}
/// Queries up to `k` ancestors of `relay_parent`, stopping at the first
/// ancestor whose session differs from `relay_parent`'s session.
async fn query_up_to_k_ancestors_in_same_session<Context>(
	ctx: &mut Context,
	relay_parent: Hash,
	k: usize,
) -> Result<Vec<Hash>>
where
	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
	// Fetch one extra ancestor: the session of ancestor `i` is obtained by
	// asking for the session index "for child" of ancestor `i + 1` (its
	// parent). Ordering is youngest first: [parent, grandparent, ...].
	let ancestors = query_k_ancestors(ctx, relay_parent, k + 1).await?;
	let desired_session = query_session_index_for_child(ctx, relay_parent).await?;

	// Capacity `ancestors.len()` leaves one spare slot so a consumer can push
	// `relay_parent` itself without a re-alloc; only `len() - 1` are needed.
	let mut matching = Vec::with_capacity(ancestors.len());

	// Walk consecutive (ancestor, parent) pairs from youngest to oldest. The
	// oldest fetched ancestor never appears as `pair[0]` — it has no parent in
	// our list (genesis reached or blocks pruned), mirroring the original's
	// break on an exhausted peek.
	for pair in ancestors.windows(2) {
		let (ancestor, ancestor_parent) = (pair[0], pair[1]);
		let session = query_session_index_for_child(ctx, ancestor_parent).await?;
		if session != desired_session {
			break;
		}
		matching.push(ancestor);
	}

	debug_assert!(matching.len() <= k);
	Ok(matching)
}
// Registered Prometheus handles; only constructed when a registry is available
// (see `Metrics::try_register`).
#[derive(Clone)]
struct MetricsInner {
// Total count of availability chunks gossipped to other peers; incremented
// by `Metrics::on_chunk_distributed`.
gossipped_availability_chunks: prometheus::Counter<prometheus::U64>,
}
/// Availability Distribution metrics.
///
/// Wraps `Option<MetricsInner>` so that a `Default`-constructed instance
/// (`Metrics(None)`) makes every metrics call a no-op when no Prometheus
/// registry was provided.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
	/// Record that one availability chunk was gossipped to a peer.
	/// No-op when metrics are not registered.
	fn on_chunk_distributed(&self) {
		match &self.0 {
			Some(inner) => inner.gossipped_availability_chunks.inc(),
			None => {}
		}
	}
}
impl metrics::Metrics for Metrics {
	/// Create the counters and attach them to `registry`.
	fn try_register(
		registry: &prometheus::Registry,
	) -> std::result::Result<Self, prometheus::PrometheusError> {
		// Build the counter first, then register it; either step can fail
		// with a `PrometheusError`.
		let counter = prometheus::Counter::new(
			"parachain_gossipped_availability_chunks_total",
			"Number of availability chunks gossipped to other peers.",
		)?;
		let gossipped_availability_chunks = prometheus::register(counter, registry)?;
		Ok(Metrics(Some(MetricsInner { gossipped_availability_chunks })))
	}
}