Unverified Commit 96af6ead authored by asynchronous rob's avatar asynchronous rob Committed by GitHub
Browse files

Refactor primitives (#1383)

* create a v1 primitives module

* Improve guide on availability types

* punctuate

* new parachains runtime uses new primitives

* tests of new runtime now use new primitives

* add ErasureChunk to guide

* export erasure chunk from v1 primitives

* subsystem crate uses v1 primitives

* node-primitives uses new v1 primitives

* port overseer to new primitives

* new-proposer uses v1 primitives (no ParachainHost anymore)

* fix no-std compilation for primitives

* service-new uses v1 primitives

* network-bridge uses new primitives

* statement distribution uses v1 primitives

* PoV distribution uses v1 primitives; add PoV::hash fn

* move parachain to v0

* remove inclusion_inherent module and place into v1

* remove everything from primitives crate root

* remove some unused old types from v0 primitives

* point everything else at primitives::v0

* squanch some warns up

* add RuntimeDebug import to no-std as well

* port over statement-table and validation

* fix final errors in validation and node-primitives

* add dummy Ord impl to committed candidate receipt

* guide: update CandidateValidationMessage

* add primitive for validationoutputs

* expand CandidateValidationMessage further

* bikeshed

* add some impls to omitted-validation-data and available-data

* expand CandidateValidationMessage

* make erasure-coding generic over v1/v0

* update usages of erasure-coding

* implement commitments.hash()

* use Arc<Pov> for CandidateValidation

* improve new erasure-coding method names

* fix up candidate backing

* update docs a bit

* fix most tests and add short-circuiting to make_pov_available

* fix remainder of candidate backing tests

* squanching warns

* squanch it up

* some fallout

* overseer fallout

* free from polkadot-test-service hell
parent 8845df22
Pipeline #100069 passed with stages
in 25 minutes and 41 seconds
......@@ -25,12 +25,10 @@
use futures::prelude::*;
use futures::channel::{mpsc, oneshot};
use keystore::KeyStorePtr;
use polkadot_primitives::{
use polkadot_primitives::v0::{
Hash, Block,
parachain::{
PoVBlock, AbridgedCandidateReceipt, ErasureChunk,
ParachainHost, AvailableData, OmittedValidationData,
},
PoVBlock, AbridgedCandidateReceipt, ErasureChunk,
ParachainHost, AvailableData, OmittedValidationData,
};
use sp_runtime::traits::HashFor;
use sp_blockchain::Result as ClientResult;
......
......@@ -19,11 +19,8 @@ use kvdb_rocksdb::{Database, DatabaseConfig};
use kvdb::{KeyValueDB, DBTransaction};
use codec::{Encode, Decode};
use polkadot_erasure_coding as erasure;
use polkadot_primitives::{
Hash,
parachain::{
ErasureChunk, AvailableData, AbridgedCandidateReceipt,
},
use polkadot_primitives::v0::{
Hash, ErasureChunk, AvailableData, AbridgedCandidateReceipt,
};
use parking_lot::Mutex;
......@@ -273,7 +270,7 @@ impl Store {
// If there are no block data in the store at this point,
// check that they can be reconstructed now and add them to store if they can.
if self.execution_data(&candidate_hash).is_none() {
if let Ok(available_data) = erasure::reconstruct(
if let Ok(available_data) = erasure::reconstruct_v0(
n_validators as usize,
v.iter().map(|chunk| (chunk.chunk.as_ref(), chunk.index as usize)),
)
......@@ -390,7 +387,7 @@ impl Store {
mod tests {
use super::*;
use polkadot_erasure_coding::{self as erasure};
use polkadot_primitives::parachain::{
use polkadot_primitives::v0::{
Id as ParaId, BlockData, AvailableData, PoVBlock, OmittedValidationData,
};
......@@ -489,7 +486,7 @@ mod tests {
let available_data = available_data(&[42; 8]);
let n_validators = 5;
let erasure_chunks = erasure::obtain_chunks(
let erasure_chunks = erasure::obtain_chunks_v0(
n_validators,
&available_data,
).unwrap();
......
......@@ -33,8 +33,8 @@ use consensus_common::{
import_queue::CacheKeyId,
};
use sp_core::traits::SpawnNamed;
use polkadot_primitives::{Block, BlockId, Hash};
use polkadot_primitives::parachain::{
use polkadot_primitives::v0::{
Block, BlockId, Hash,
ParachainHost, ValidatorId, AbridgedCandidateReceipt, AvailableData,
ValidatorPair, ErasureChunk,
};
......
......@@ -55,12 +55,11 @@ use log::warn;
use sc_client_api::{StateBackend, BlockchainEvents};
use sp_blockchain::HeaderBackend;
use sp_core::Pair;
use polkadot_primitives::{
use polkadot_primitives::v0::{
BlockId, Hash, Block, DownwardMessage,
parachain::{
self, BlockData, DutyRoster, HeadData, Id as ParaId,
PoVBlock, ValidatorId, CollatorPair, LocalValidationData, GlobalValidationSchedule,
}
BlockData, DutyRoster, HeadData, Id as ParaId,
PoVBlock, ValidatorId, CollatorPair, LocalValidationData, GlobalValidationSchedule,
Collation, CollationInfo, collator_signature_payload,
};
use polkadot_cli::{
ProvideRuntimeApi, ParachainHost, IdentifyVariant,
......@@ -69,7 +68,7 @@ use polkadot_cli::{
pub use polkadot_cli::service::Configuration;
pub use polkadot_cli::Cli;
pub use polkadot_validation::SignedStatement;
pub use polkadot_primitives::parachain::CollatorId;
pub use polkadot_primitives::v0::CollatorId;
pub use sc_network::PeerId;
pub use service::RuntimeApiCollection;
pub use sc_cli::SubstrateCli;
......@@ -164,7 +163,7 @@ pub async fn collate<P>(
downward_messages: Vec<DownwardMessage>,
mut para_context: P,
key: Arc<CollatorPair>,
) -> Option<parachain::Collation>
) -> Option<Collation>
where
P: ParachainContext,
P::ProduceCandidate: Send,
......@@ -181,13 +180,13 @@ pub async fn collate<P>(
};
let pov_block_hash = pov_block.hash();
let signature = key.sign(&parachain::collator_signature_payload(
let signature = key.sign(&collator_signature_payload(
&relay_parent,
&local_id,
&pov_block_hash,
));
let info = parachain::CollationInfo {
let info = CollationInfo {
parachain_index: local_id,
relay_parent,
collator: key.public(),
......@@ -196,7 +195,7 @@ pub async fn collate<P>(
pov_block_hash,
};
let collation = parachain::Collation {
let collation = Collation {
info,
pov: pov_block,
};
......@@ -456,7 +455,7 @@ where
#[cfg(not(feature = "service-rewr"))]
fn compute_targets(para_id: ParaId, session_keys: &[ValidatorId], roster: DutyRoster) -> HashSet<ValidatorId> {
use polkadot_primitives::parachain::Chain;
use polkadot_primitives::v0::Chain;
roster.validator_duty.iter().enumerate()
.filter(|&(_, c)| c == &Chain::Parachain(para_id))
......
......@@ -94,3 +94,8 @@ pub enum DownwardMessage<AccountId = crate::AccountId> {
/// XCMP message for the Parachain.
XCMPMessage(sp_std::vec::Vec<u8>),
}
/// V1 primitives.
///
/// Currently a plain re-export of every item from the crate root, so the
/// `v1` path exists even while the v1 types are identical to the parent's.
pub mod v1 {
	pub use super::*;
}
......@@ -26,8 +26,8 @@
use codec::{Encode, Decode};
use reed_solomon::galois_16::{self, ReedSolomon};
use primitives::{Hash as H256, BlakeTwo256, HashT};
use primitives::parachain::AvailableData;
use primitives::v0::{self, Hash as H256, BlakeTwo256, HashT};
use primitives::v1;
use sp_core::Blake2Hasher;
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
......@@ -124,14 +124,32 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
})
}
/// Obtain erasure-coded chunks for v0 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v0(n_validators: usize, data: &v0::AvailableData)
-> Result<Vec<Vec<u8>>, Error>
{
obtain_chunks(n_validators, data)
}
/// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v1(n_validators: usize, data: &v1::AvailableData)
-> Result<Vec<Vec<u8>>, Error>
{
obtain_chunks(n_validators, data)
}
/// Obtain erasure-coded chunks, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks(n_validators: usize, available_data: &AvailableData)
fn obtain_chunks<T: Encode>(n_validators: usize, data: &T)
-> Result<Vec<Vec<u8>>, Error>
{
let params = code_params(n_validators)?;
let encoded = available_data.encode();
let encoded = data.encode();
if encoded.is_empty() {
return Err(Error::BadPayload);
......@@ -145,15 +163,42 @@ pub fn obtain_chunks(n_validators: usize, available_data: &AvailableData)
Ok(shards.into_iter().map(|w| w.into_inner()).collect())
}
/// Reconstruct the block data from a set of chunks.
/// Reconstruct the v0 available data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I)
-> Result<v0::AvailableData, Error>
where I: IntoIterator<Item=(&'a [u8], usize)>
{
reconstruct(n_validators, chunks)
}
/// Reconstruct the v1 available data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I)
-> Result<v1::AvailableData, Error>
where I: IntoIterator<Item=(&'a [u8], usize)>
{
reconstruct(n_validators, chunks)
}
/// Reconstruct decodable data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I)
-> Result<AvailableData, Error>
fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result<T, Error>
where I: IntoIterator<Item=(&'a [u8], usize)>
{
let params = code_params(n_validators)?;
......@@ -343,7 +388,7 @@ impl<'a, I: Iterator<Item=&'a [u8]>> codec::Input for ShardInput<'a, I> {
#[cfg(test)]
mod tests {
use super::*;
use primitives::parachain::{BlockData, PoVBlock};
use primitives::v0::{AvailableData, BlockData, PoVBlock};
#[test]
fn field_order_is_right_size() {
......@@ -420,7 +465,7 @@ mod tests {
assert_eq!(chunks.len(), 10);
// any 4 chunks should work.
let reconstructed = reconstruct(
let reconstructed: AvailableData = reconstruct(
10,
[
(&*chunks[1], 1),
......
......@@ -17,8 +17,7 @@
//! Bridge between the network and consensus service for getting collations to it.
use codec::{Encode, Decode};
use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{CollatorId, Id as ParaId, Collation};
use polkadot_primitives::v0::{Hash, CollatorId, Id as ParaId, Collation};
use sc_network::PeerId;
use futures::channel::oneshot;
......@@ -236,7 +235,7 @@ impl CollatorPool {
mod tests {
use super::*;
use sp_core::crypto::UncheckedInto;
use polkadot_primitives::parachain::{CollationInfo, BlockData, PoVBlock};
use polkadot_primitives::v0::{CollationInfo, BlockData, PoVBlock};
use futures::executor::block_on;
fn make_pov(block_data: Vec<u8>) -> PoVBlock {
......
......@@ -33,7 +33,7 @@
use sc_network_gossip::{ValidationResult as GossipValidationResult};
use sc_network::ReputationChange;
use polkadot_validation::GenericStatement;
use polkadot_primitives::Hash;
use polkadot_primitives::v0::Hash;
use std::collections::HashMap;
......
......@@ -58,8 +58,8 @@ use sc_network_gossip::{
ValidatorContext, MessageIntent,
};
use polkadot_validation::{SignedStatement};
use polkadot_primitives::{Block, Hash};
use polkadot_primitives::parachain::{
use polkadot_primitives::v0::{
Block, Hash,
ParachainHost, ValidatorId, ErasureChunk as PrimitiveChunk, SigningContext, PoVBlock,
};
use polkadot_erasure_coding::{self as erasure};
......@@ -755,7 +755,7 @@ mod tests {
use sc_network_gossip::Validator as ValidatorT;
use std::sync::mpsc;
use parking_lot::Mutex;
use polkadot_primitives::parachain::{AbridgedCandidateReceipt, BlockData};
use polkadot_primitives::v0::{AbridgedCandidateReceipt, BlockData};
use sp_core::sr25519::Signature as Sr25519Signature;
use polkadot_validation::GenericStatement;
......
......@@ -19,7 +19,7 @@
//! Collations are attempted to be repropagated when a new validator connects,
//! a validator changes his session key, or when they are generated.
use polkadot_primitives::{Hash, parachain::{ValidatorId}};
use polkadot_primitives::v0::{Hash, ValidatorId};
use crate::legacy::collator_pool::Role;
use std::collections::{HashMap, HashSet};
use std::time::Duration;
......@@ -144,7 +144,7 @@ impl<C: Clone> LocalCollations<C> {
mod tests {
use super::*;
use sp_core::crypto::UncheckedInto;
use polkadot_primitives::parachain::ValidatorId;
use polkadot_primitives::v0::ValidatorId;
#[test]
fn add_validator_with_ready_collation() {
......
......@@ -25,7 +25,7 @@ pub mod gossip;
use codec::Decode;
use futures::prelude::*;
use polkadot_primitives::Hash;
use polkadot_primitives::v0::Hash;
use sc_network::PeerId;
use sc_network_gossip::TopicNotification;
use log::debug;
......
......@@ -21,7 +21,7 @@
#![recursion_limit="256"]
use polkadot_primitives::{Block, Hash, BlakeTwo256, HashT};
use polkadot_primitives::v0::{Block, Hash, BlakeTwo256, HashT};
pub mod legacy;
pub mod protocol;
......
......@@ -30,12 +30,10 @@ use futures::task::{Context, Poll};
use futures::stream::{FuturesUnordered, StreamFuture};
use log::{debug, trace};
use polkadot_primitives::{
use polkadot_primitives::v0::{
Hash, Block,
parachain::{
PoVBlock, ValidatorId, ValidatorIndex, Collation, AbridgedCandidateReceipt,
ErasureChunk, ParachainHost, Id as ParaId, CollatorId,
},
PoVBlock, ValidatorId, ValidatorIndex, Collation, AbridgedCandidateReceipt,
ErasureChunk, ParachainHost, Id as ParaId, CollatorId,
};
use polkadot_validation::{
SharedTable, TableRouter, Network as ParachainNetwork, Validated, GenericStatement, Collators,
......
......@@ -17,8 +17,8 @@ use super::*;
use crate::legacy::gossip::GossipPoVBlock;
use parking_lot::Mutex;
use polkadot_primitives::Block;
use polkadot_primitives::parachain::{
use polkadot_primitives::v0::{
Block,
Id as ParaId, Chain, DutyRoster, ParachainHost, ValidatorId,
Retriable, CollatorId, AbridgedCandidateReceipt,
GlobalValidationSchedule, LocalValidationData, ErasureChunk, SigningContext,
......@@ -198,7 +198,7 @@ sp_api::mock_impl_runtime_apis! {
parent_hash: Default::default(),
}
}
fn downward_messages(_: ParaId) -> Vec<polkadot_primitives::DownwardMessage> {
fn downward_messages(_: ParaId) -> Vec<polkadot_primitives::v0::DownwardMessage> {
Vec::new()
}
}
......
......@@ -21,6 +21,7 @@
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use bitvec::vec::BitVec;
......@@ -36,17 +37,15 @@ use streamunordered::{StreamUnordered, StreamYield};
use primitives::Pair;
use keystore::KeyStorePtr;
use polkadot_primitives::{
Hash,
parachain::{
AbridgedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorPair, ValidatorId,
ValidatorIndex, HeadData, SigningContext, PoVBlock, OmittedValidationData,
CandidateDescriptor, LocalValidationData, GlobalValidationSchedule, AvailableData,
ErasureChunk,
},
use polkadot_primitives::v1::{
CommittedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorPair, ValidatorId,
ValidatorIndex, SigningContext, PoV, OmittedValidationData,
CandidateDescriptor, AvailableData, ErasureChunk, ValidatorSignature, Hash, CandidateReceipt,
CandidateCommitments,
};
use polkadot_node_primitives::{
FromTableMisbehavior, Statement, SignedFullStatement, MisbehaviorReport, ValidationResult,
ValidationOutputs,
};
use polkadot_subsystem::{
FromOverseer, OverseerSignal, Subsystem, SubsystemContext, SpawnedSubsystem,
......@@ -59,8 +58,12 @@ use polkadot_subsystem::messages::{
};
use statement_table::{
generic::AttestedCandidate as TableAttestedCandidate,
Table, Context as TableContextTrait, Statement as TableStatement,
SignedStatement as TableSignedStatement, Summary as TableSummary,
Context as TableContextTrait,
Table,
v1::{
Statement as TableStatement,
SignedStatement as TableSignedStatement, Summary as TableSummary,
},
};
#[derive(Debug, derive_more::From)]
......@@ -90,8 +93,6 @@ struct CandidateBackingJob {
/// Outbound message channel sending part.
tx_from: mpsc::Sender<FromJob>,
/// `HeadData`s of the parachains that this validator is assigned to.
head_data: HeadData,
/// The `ParaId`s assigned to this validator.
assignment: ParaId,
/// We issued `Valid` or `Invalid` statements on about these candidates.
......@@ -118,8 +119,22 @@ struct TableContext {
}
impl TableContextTrait for TableContext {
fn is_member_of(&self, authority: ValidatorIndex, group: &ParaId) -> bool {
self.groups.get(group).map_or(false, |g| g.iter().position(|&a| a == authority).is_some())
type AuthorityId = ValidatorIndex;
type Digest = Hash;
type GroupId = ParaId;
type Signature = ValidatorSignature;
type Candidate = CommittedCandidateReceipt;
/// The digest identifying a candidate in the table is its hash.
fn candidate_digest(candidate: &CommittedCandidateReceipt) -> Hash {
	candidate.hash()
}
/// Candidates are grouped by the para they are for, taken from the
/// candidate's descriptor.
fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId {
	candidate.descriptor().para_id
}
/// Whether the given validator index is a member of the validator group
/// assigned to `group`. Unknown groups have no members.
fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool {
	// `any` expresses the membership test directly; `position(..).is_some()`
	// is the Clippy `search_is_some` anti-idiom.
	self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority))
}
fn requisite_votes(&self, group: &ParaId) -> usize {
......@@ -221,7 +236,7 @@ impl CandidateBackingJob {
async fn issue_candidate_invalid_message(
&mut self,
candidate: AbridgedCandidateReceipt,
candidate: CandidateReceipt,
) -> Result<(), Error> {
self.tx_from.send(FromJob::CandidateSelection(
CandidateSelectionMessage::Invalid(self.parent, candidate)
......@@ -231,34 +246,69 @@ impl CandidateBackingJob {
}
/// Validate the candidate that is requested to be `Second`ed and distribute validation result.
///
/// Returns `Ok(true)` if we issued a `Seconded` statement about this candidate.
async fn validate_and_second(
&mut self,
candidate: AbridgedCandidateReceipt,
pov: PoVBlock,
) -> Result<ValidationResult, Error> {
let valid = self.request_candidate_validation(candidate.clone(), pov.clone()).await?;
let statement = match valid.0 {
ValidationResult::Valid => {
candidate: &CandidateReceipt,
pov: PoV,
) -> Result<bool, Error> {
let valid = self.request_candidate_validation(
candidate.descriptor().clone(),
Arc::new(pov.clone()),
).await?;
let candidate_hash = candidate.hash();
let statement = match valid {
ValidationResult::Valid(outputs) => {
// make PoV available for later distribution. Send data to the availability
// store to keep. Sign and dispatch `valid` statement to network if we
// have not seconded the given candidate.
self.make_pov_available(pov, valid.1, valid.2).await?;
self.issued_statements.insert(candidate.hash());
Statement::Seconded(candidate)
//
// If the commitments hash produced by validation is not the same as given by
// the collator, do not make available and report the collator.
let commitments_check = self.make_pov_available(
pov,
outputs,
|commitments| if commitments.hash() == candidate.commitments_hash {
Ok(CommittedCandidateReceipt {
descriptor: candidate.descriptor().clone(),
commitments,
})
} else {
Err(())
},
).await?;
match commitments_check {
Ok(candidate) => {
self.issued_statements.insert(candidate_hash);
Some(Statement::Seconded(candidate))
}
Err(()) => {
self.issue_candidate_invalid_message(candidate.clone()).await?;
None
}
}
}
ValidationResult::Invalid => {
let candidate_hash = candidate.hash();
self.issue_candidate_invalid_message(candidate).await?;
Statement::Invalid(candidate_hash)
// no need to issue a statement about this if we aren't seconding it.
//
// there's an infinite amount of garbage out there. no need to acknowledge
// all of it.
self.issue_candidate_invalid_message(candidate.clone()).await?;
None
}
};
if let Some(signed_statement) = self.sign_statement(statement) {
let issued_statement = statement.is_some();
if let Some(signed_statement) = statement.and_then(|s| self.sign_statement(s)) {
self.import_statement(&signed_statement).await?;
self.distribute_signed_statement(signed_statement).await?;
}
Ok(valid.0)
Ok(issued_statement)
}
fn get_backed(&self) -> Vec<NewBackedCandidate> {
......@@ -303,7 +353,7 @@ impl CandidateBackingJob {
}
/// Check whether any new misbehaviors have occurred and issue the necessary messages.
///
///
/// TODO: Report multiple misbehaviors (https://github.com/paritytech/polkadot/issues/1387)
async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> {
let mut reports = Vec::new();
......@@ -354,7 +404,7 @@ impl CandidateBackingJob {
match msg {
CandidateBackingMessage::Second(_, candidate, pov) => {
// Sanity check that candidate is from our assignment.
if candidate.parachain_index != self.assignment {
if candidate.descriptor().para_id != self.assignment {
return Ok(());
}
......@@ -367,8 +417,8 @@ impl CandidateBackingJob {
let candidate_hash = candidate.hash();
if !self.issued_statements.contains(&candidate_hash) {
if let Ok(ValidationResult::Valid) = self.validate_and_second(
candidate,
if let Ok(true) = self.validate_and_second(
&candidate,
pov,
).await {
self.seconded = Some(candidate_hash);
......@@ -397,17 +447,40 @@ impl CandidateBackingJob {
async fn kick_off_validation_work(
&mut self,
summary: TableSummary,
) -> Result<ValidationResult, Error> {
let candidate = self.table.get_candidate(&summary.candidate).ok_or(Error::CandidateNotFound)?;
let candidate = candidate.clone();
let descriptor = candidate.to_descriptor();
let candidate_hash = candidate.hash();
let pov = self.request_pov_from_distribution(descriptor).await?;
let v = self.request_candidate_validation(candidate, pov).await?;
) -> Result<(), Error> {
let candidate_hash = summary.candidate.clone();
if self.issued_statements.contains(&candidate_hash) {
return Ok(())
}
// We clone the commitments here because there are borrowck
// errors relating to this being a struct and methods borrowing the entirety of self
// and not just those things that the function uses.
let candidate = self.table.get_candidate(&candidate_hash).ok_or(Error::CandidateNotFound)?;
let expected_commitments = candidate.commitments.clone();
let descriptor = candidate.descriptor().clone();
let pov = self.request_pov_from_distribution(descriptor.clone()).await?;
let v = self.request_candidate_validation(descriptor, pov.clone()).await?;