Skip to content
Snippets Groups Projects
Commit 66931028 authored by Jon Häggblad's avatar Jon Häggblad Committed by GitHub
Browse files

Don't repeatedly lookup keys in `babe_epochAuthorship` rpc function (#5962)

* babe: don't repeatedly lookup keys in authorship rpc function

Expose a new function `claim_slot_using_key_pairs` in Babe so that the `babe_epochAuthorship` RPC call can
look up authorship for all slots in the epoch without repeatedly looking up keys in the keystore.

Time to run the `babe_epochAuthorship` RPC call goes from 7s to 25ms on a local dev chain on my
machine.

* babe: pass reference to slice instead of ref to Vec

* babe: fix bunch of clippy warnings
parent b21b70f8
No related merge requests found
......@@ -116,9 +116,20 @@ impl<B, C, SC> BabeApi for BabeRPCHandler<B, C, SC>
let mut claims: HashMap<AuthorityId, EpochAuthorship> = HashMap::new();
let key_pairs = {
let keystore = keystore.read();
epoch.authorities.iter().enumerate()
.flat_map(|(i, a)| {
keystore.key_pair::<sp_consensus_babe::AuthorityPair>(&a.0).ok().map(|kp| (kp, i))
})
.collect::<Vec<_>>()
};
for slot_number in epoch_start..epoch_end {
let epoch = epoch_data(&shared_epoch, &client, &babe_config, slot_number, &select_chain)?;
if let Some((claim, key)) = authorship::claim_slot(slot_number, &epoch, &keystore) {
if let Some((claim, key)) =
authorship::claim_slot_using_key_pairs(slot_number, &epoch, &key_pairs)
{
match claim {
PreDigest::Primary { .. } => {
claims.entry(key.public()).or_default().primary.push(slot_number);
......@@ -163,7 +174,7 @@ pub enum Error {
impl From<Error> for jsonrpc_core::Error {
fn from(error: Error) -> Self {
jsonrpc_core::Error {
message: format!("{}", error).into(),
message: format!("{}", error),
code: jsonrpc_core::ErrorCode::ServerError(1234),
data: None,
}
......
......@@ -124,7 +124,7 @@ pub(super) fn secondary_slot_author(
fn claim_secondary_slot(
slot_number: SlotNumber,
epoch: &Epoch,
keystore: &KeyStorePtr,
key_pairs: &[(AuthorityPair, usize)],
author_secondary_vrf: bool,
) -> Option<(PreDigest, AuthorityPair)> {
let Epoch { authorities, randomness, epoch_index, .. } = epoch;
......@@ -139,14 +139,7 @@ fn claim_secondary_slot(
*randomness,
)?;
let keystore = keystore.read();
for (pair, authority_index) in authorities.iter()
.enumerate()
.flat_map(|(i, a)| {
keystore.key_pair::<AuthorityPair>(&a.0).ok().map(|kp| (kp, i))
})
{
for (pair, authority_index) in key_pairs {
if pair.public() == *expected_author {
let pre_digest = if author_secondary_vrf {
let transcript = super::authorship::make_transcript(
......@@ -161,16 +154,16 @@ fn claim_secondary_slot(
slot_number,
vrf_output: VRFOutput(s.0.to_output()),
vrf_proof: VRFProof(s.1),
authority_index: authority_index as u32,
authority_index: *authority_index as u32,
})
} else {
PreDigest::SecondaryPlain(SecondaryPlainPreDigest {
slot_number,
authority_index: authority_index as u32,
authority_index: *authority_index as u32,
})
};
return Some((pre_digest, pair));
return Some((pre_digest, pair.clone()));
}
}
......@@ -186,7 +179,26 @@ pub fn claim_slot(
epoch: &Epoch,
keystore: &KeyStorePtr,
) -> Option<(PreDigest, AuthorityPair)> {
claim_primary_slot(slot_number, epoch, epoch.config.c, keystore)
let key_pairs = {
let keystore = keystore.read();
epoch.authorities.iter()
.enumerate()
.flat_map(|(i, a)| {
keystore.key_pair::<AuthorityPair>(&a.0).ok().map(|kp| (kp, i))
})
.collect::<Vec<_>>()
};
claim_slot_using_key_pairs(slot_number, epoch, &key_pairs)
}
/// Like `claim_slot`, but allows passing an explicit set of key pairs. Useful if we intend
/// to make repeated calls for different slots using the same key pairs.
pub fn claim_slot_using_key_pairs(
slot_number: SlotNumber,
epoch: &Epoch,
key_pairs: &[(AuthorityPair, usize)],
) -> Option<(PreDigest, AuthorityPair)> {
claim_primary_slot(slot_number, epoch, epoch.config.c, &key_pairs)
.or_else(|| {
if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() ||
epoch.config.allowed_slots.is_secondary_vrf_slots_allowed()
......@@ -194,7 +206,7 @@ pub fn claim_slot(
claim_secondary_slot(
slot_number,
&epoch,
keystore,
&key_pairs,
epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(),
)
} else {
......@@ -216,39 +228,33 @@ fn claim_primary_slot(
slot_number: SlotNumber,
epoch: &Epoch,
c: (u64, u64),
keystore: &KeyStorePtr,
key_pairs: &[(AuthorityPair, usize)],
) -> Option<(PreDigest, AuthorityPair)> {
let Epoch { authorities, randomness, epoch_index, .. } = epoch;
let keystore = keystore.read();
for (pair, authority_index) in authorities.iter()
.enumerate()
.flat_map(|(i, a)| {
keystore.key_pair::<AuthorityPair>(&a.0).ok().map(|kp| (kp, i))
})
{
for (pair, authority_index) in key_pairs {
let transcript = super::authorship::make_transcript(randomness, slot_number, *epoch_index);
// Compute the threshold we will use.
//
// We already checked that authorities contains `key.public()`, so it can't
// be empty. Therefore, this division in `calculate_threshold` is safe.
let threshold = super::authorship::calculate_primary_threshold(c, authorities, authority_index);
let threshold = super::authorship::calculate_primary_threshold(c, authorities, *authority_index);
let pre_digest = get_keypair(&pair)
let pre_digest = get_keypair(pair)
.vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold))
.map(|s| {
PreDigest::Primary(PrimaryPreDigest {
slot_number,
vrf_output: VRFOutput(s.0.to_output()),
vrf_proof: VRFProof(s.1),
authority_index: authority_index as u32,
authority_index: *authority_index as u32,
})
});
// early exit on first successful claim
if let Some(pre_digest) = pre_digest {
return Some((pre_digest, pair));
return Some((pre_digest, pair.clone()));
}
}
......
......@@ -112,7 +112,7 @@ pub(crate) fn write_epoch_changes<Block: BlockT, F, R>(
/// Write the cumulative chain-weight of a block ot aux storage.
pub(crate) fn write_block_weight<H: Encode, F, R>(
block_hash: H,
block_weight: &BabeBlockWeight,
block_weight: BabeBlockWeight,
write_aux: F,
) -> R where
F: FnOnce(&[(Vec<u8>, &[u8])]) -> R,
......
......@@ -78,7 +78,6 @@ use std::{
collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration},
any::Any, borrow::Cow
};
use sp_consensus_babe;
use sp_consensus::{ImportResult, CanAuthorWith};
use sp_consensus::import_queue::{
BoxJustificationImport, BoxFinalityProofImport,
......@@ -186,7 +185,7 @@ impl Epoch {
start_slot: slot_number,
duration: genesis_config.epoch_length,
authorities: genesis_config.genesis_authorities.clone(),
randomness: genesis_config.randomness.clone(),
randomness: genesis_config.randomness,
config: BabeEpochConfiguration {
c: genesis_config.c,
allowed_slots: genesis_config.allowed_slots,
......@@ -399,7 +398,7 @@ pub fn start_babe<B, C, SC, E, I, SO, CAW, Error>(BabeParams {
register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?;
sc_consensus_uncles::register_uncles_inherent_data_provider(
client.clone(),
client,
select_chain.clone(),
&inherent_data_providers,
)?;
......@@ -494,7 +493,7 @@ impl<B, C, E, I, Error, SO> sc_consensus_slots::SimpleSlotWorker<B> for BabeWork
&self.keystore,
);
if let Some(_) = s {
if s.is_some() {
debug!(target: "babe", "Claimed slot {}", slot_number);
}
......@@ -796,7 +795,7 @@ impl<Block, Client> Verifier<Block> for BabeVerifier<Block, Client> where
// FIXME #1019 in the future, alter this queue to allow deferring of headers
let v_params = verification::VerificationParams {
header: header.clone(),
pre_digest: Some(pre_digest.clone()),
pre_digest: Some(pre_digest),
slot_now: slot_now + 1,
epoch: viable_epoch.as_ref(),
};
......@@ -952,7 +951,7 @@ impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client,
new_cache: HashMap<CacheKeyId, Vec<u8>>,
) -> Result<ImportResult, Self::Error> {
let hash = block.post_hash();
let number = block.header.number().clone();
let number = *block.header.number();
// early exit if block already in chain, otherwise the check for
// epoch changes will error when trying to re-import an epoch change
......@@ -1133,7 +1132,7 @@ impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client,
aux_schema::write_block_weight(
hash,
&total_weight,
total_weight,
|values| block.auxiliary.extend(
values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
),
......@@ -1153,7 +1152,7 @@ impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client,
aux_schema::load_block_weight(&*self.client, last_best)
.map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))?
.ok_or_else(
|| ConsensusError::ChainLookup(format!("No block weight for parent header."))
|| ConsensusError::ChainLookup("No block weight for parent header.".to_string())
)?
};
......@@ -1170,7 +1169,7 @@ impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client,
// revert to the original epoch changes in case there's an error
// importing the block
if let Err(_) = import_result {
if import_result.is_err() {
if let Some(old_epoch_changes) = old_epoch_changes {
*epoch_changes = old_epoch_changes;
}
......@@ -1283,7 +1282,7 @@ pub fn import_queue<Block: BlockT, Client, Inner>(
register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?;
let verifier = BabeVerifier {
client: client.clone(),
client,
inherent_data_providers,
config: babe_link.config,
epoch_changes: babe_link.epoch_changes,
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment