(gossip: PGM) -> Self
where PGM: ProvideGossipMessages + Send + Sync + Clone + 'static
{
let inner = InnerStore::new_in_memory();
let worker = Arc::new(Worker::start(inner.clone(), gossip));
let to_worker = worker.to_worker().clone();
Self {
inner,
worker,
to_worker,
}
}
/// Obtain a [`BlockImport`] implementation to import blocks into this store.
///
/// This block import will act upon all newly imported blocks sending information
/// about parachain heads included in them to this `Store`'s background worker.
/// The user may create multiple instances of [`BlockImport`]s with this call.
///
/// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html
// NOTE(review): the generic parameters of this signature appear stripped — `Arc`
// has no type argument, `ClientResult<(AvailabilityBlockImport)>` is missing its
// parameters, and `I`/`P` are used without being declared on the function. This
// looks like extraction damage; restore the `<I, P>` parameter list and the full
// trait bounds (`BlockchainEvents<Block>`, `ParachainHost<Block>`, ...) from the
// original source before building. — TODO confirm
pub fn block_import(
&self,
wrapped_block_import: I,
client: Arc,
thread_pool: TaskExecutor,
keystore: KeyStorePtr,
) -> ClientResult<(AvailabilityBlockImport)>
where
P: ProvideRuntimeApi + BlockchainEvents + BlockBody + Send + Sync + 'static,
P::Api: ParachainHost,
P::Api: ApiExt,
{
// A clone of the worker channel lets the returned block import notify the
// background worker independently of this `Store` handle.
let to_worker = self.to_worker.clone();
let import = AvailabilityBlockImport::new(
self.inner.clone(),
client,
wrapped_block_import,
thread_pool,
keystore,
to_worker,
);
Ok(import)
}
/// Make some data available provisionally.
///
/// Validators with the responsibility of maintaining availability
/// for a block or collators collating a block will call this function
/// in order to persist that data to disk and so it can be queried and provided
/// to other nodes in the network.
///
/// The message data of `Data` is optional but is expected
/// to be present with the exception of the case where there is no message data
/// due to the block's invalidity. Determination of invalidity is beyond the
/// scope of this function.
///
/// This method will send the `Data` to the background worker, allowing the caller
/// to asynchronously wait for the result.
pub async fn make_available(&self, data: Data) -> io::Result<()> {
    // One-shot channel over which the worker reports the outcome.
    let (sender, receiver) = oneshot::channel();
    let msg = WorkerMsg::MakeAvailable(MakeAvailable {
        data,
        result: sender,
    });

    // If the worker has shut down the send fails, the sender is dropped,
    // and `receiver.await` below resolves to an error.
    let _ = self.to_worker.unbounded_send(msg);

    match receiver.await {
        Ok(Ok(())) => Ok(()),
        // Covers both a worker-side failure and a dropped result channel.
        // (Previously this reported "adding erasure chunks failed" — a
        // copy-paste from the chunk path — which misled log readers.)
        _ => Err(io::Error::new(io::ErrorKind::Other, "making data available failed")),
    }
}
/// Get a set of all chunks we are waiting for grouped by
/// `(relay_parent, erasure_root, candidate_hash, our_id)`.
// NOTE(review): the return type reads `Option>` — the inner type argument was
// lost, presumably to extraction damage; restore it from the original source
// (the doc comment suggests a collection of awaited-frontier entries). — TODO confirm
pub fn awaited_chunks(&self) -> Option> {
// Pure read-through to the underlying store; no worker round-trip.
self.inner.awaited_chunks()
}
/// Query which candidates were included in the relay chain block by block's parent.
// NOTE(review): the return type reads `Option>` — the inner type argument was
// lost, presumably to extraction damage; restore it from the original source. — TODO confirm
pub fn get_candidates_in_relay_block(&self, relay_block: &Hash) -> Option> {
// Pure read-through to the underlying store; no worker round-trip.
self.inner.get_candidates_in_relay_block(relay_block)
}
/// Record, for a relay parent, our validator index together with the total
/// number of validators.
///
/// This must happen before `add_candidates_in_relay_block` is invoked, because
/// that call builds the awaited frontier of chunks from this information.
/// In the current implementation it is performed inside `get_or_instantiate`
/// when the parachain agreement process starts on top of some parent hash.
pub fn add_validator_index_and_n_validators(
    &self,
    relay_parent: &Hash,
    validator_index: u32,
    n_validators: u32,
) -> io::Result<()> {
    // Synchronous write straight into the underlying store.
    let store = &self.inner;
    store.add_validator_index_and_n_validators(relay_parent, validator_index, n_validators)
}
/// Look up, by relay parent, our validator index and the total validator count.
///
/// Returns `None` when nothing was recorded for this relay parent.
pub fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
    // Synchronous read straight from the underlying store.
    let store = &self.inner;
    store.get_validator_index_and_n_validators(relay_parent)
}
/// Adds a single erasure chunk to storage.
///
/// The chunk should be checked for validity against the root of encoding
/// and its proof prior to calling this.
///
/// The chunk is forwarded to the background worker; the caller can
/// asynchronously await the outcome.
pub async fn add_erasure_chunk(
    &self,
    relay_parent: Hash,
    receipt: CandidateReceipt,
    chunk: ErasureChunk,
) -> io::Result<()> {
    // A single chunk is just the one-element case of the batch API.
    let chunks = vec![chunk];
    self.add_erasure_chunks(relay_parent, receipt, chunks).await
}
/// Adds a set of erasure chunks to storage.
///
/// The chunks should be checked for validity against the root of encoding
/// and it's proof prior to calling this.
///
/// This method will send the chunks to the background worker, allowing caller to
/// asynchrounously waiting for the result.
pub async fn add_erasure_chunks(
&self,
relay_parent: Hash,
receipt: CandidateReceipt,
chunks: I,
) -> io::Result<()>
where I: IntoIterator-
{
self.add_candidate(relay_parent, receipt.clone()).await?;
let (s, r) = oneshot::channel();
let chunks = chunks.into_iter().collect();
let candidate_hash = receipt.hash();
let msg = WorkerMsg::Chunks(Chunks {
relay_parent,
candidate_hash,
chunks,
result: s,
});
let _ = self.to_worker.unbounded_send(msg);
if let Ok(Ok(())) = r.await {
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, format!("adding erasure chunks failed")))
}
}
/// Queries an erasure chunk by its block's parent and hash and index.
pub fn get_erasure_chunk(
&self,
relay_parent: &Hash,
block_data_hash: Hash,
index: usize,
) -> Option {
self.inner.get_erasure_chunk(relay_parent, block_data_hash, index)
}
/// Stores a candidate receipt.
///
/// The receipt is sent to the background worker as a `ParachainBlocks` message
/// with no block data attached; the caller can asynchronously await the result.
pub async fn add_candidate(
    &self,
    relay_parent: Hash,
    receipt: CandidateReceipt,
) -> io::Result<()> {
    // One-shot channel over which the worker reports the outcome.
    let (sender, receiver) = oneshot::channel();
    let msg = WorkerMsg::ParachainBlocks(ParachainBlocks {
        relay_parent,
        // `None` block data: only the receipt is being registered here.
        blocks: vec![(receipt, None)],
        result: sender,
    });

    // If the worker has shut down the send fails, the sender is dropped,
    // and `receiver.await` below resolves to an error.
    let _ = self.to_worker.unbounded_send(msg);

    match receiver.await {
        Ok(Ok(())) => Ok(()),
        // Covers both a worker-side failure and a dropped result channel.
        // (Previously this reported "adding erasure chunks failed" — a
        // copy-paste from the chunk path — which misled log readers.)
        _ => Err(io::Error::new(io::ErrorKind::Other, "adding candidate receipt failed")),
    }
}
/// Queries a candidate receipt by it's hash.
pub fn get_candidate(&self, candidate_hash: &Hash) -> Option {
self.inner.get_candidate(candidate_hash)
}
/// Query block data.
// NOTE(review): the return type reads a bare `Option` — its type argument was
// lost, presumably to extraction damage; restore it from the original source
// (presumably the block-data type keyed by `block_data_hash`). — TODO confirm
pub fn block_data(&self, relay_parent: Hash, block_data_hash: Hash) -> Option {
// Pure read-through to the underlying store; no worker round-trip.
self.inner.block_data(relay_parent, block_data_hash)
}
/// Query block data by corresponding candidate receipt's hash.
// NOTE(review): the return type reads a bare `Option` — its type argument was
// lost, presumably to extraction damage; it should match the return type of
// `block_data` above. Restore from the original source. — TODO confirm
pub fn block_data_by_candidate(&self, relay_parent: Hash, candidate_hash: Hash)
-> Option
{
// Pure read-through to the underlying store; no worker round-trip.
self.inner.block_data_by_candidate(relay_parent, candidate_hash)
}
/// Query message queue data by message queue root hash.
// NOTE(review): the return type reads `Option>` — the inner type argument was
// lost, presumably to extraction damage; restore it from the original source
// (presumably a collection of queued messages). — TODO confirm
pub fn queue_by_root(&self, queue_root: &Hash) -> Option> {
// Pure read-through to the underlying store; no worker round-trip.
self.inner.queue_by_root(queue_root)
}
}