> + Send + 'static,
{
let worker = AuraWorker {
client,
@@ -171,6 +174,7 @@ pub fn start_aura(
keystore,
sync_oracle: sync_oracle.clone(),
force_authoring,
+ backoff_authoring_blocks,
_key_type: PhantomData::<P>,
};
register_aura_inherent_data_provider(
@@ -188,20 +192,22 @@ pub fn start_aura(
))
}
-struct AuraWorker<C, E, I, P, SO> {
+struct AuraWorker<C, E, I, P, SO, BS> {
client: Arc<C>,
block_import: Arc<Mutex<I>>,
env: E,
keystore: SyncCryptoStorePtr,
sync_oracle: SO,
force_authoring: bool,
+ backoff_authoring_blocks: Option<BS>,
_key_type: PhantomData<P>,
}
-impl sc_consensus_slots::SimpleSlotWorker for AuraWorker
+impl sc_consensus_slots::SimpleSlotWorker
+ for AuraWorker
where
B: BlockT,
- C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync,
+ C: ProvideRuntimeApi + BlockOf + ProvideCache + HeaderBackend + Sync,
C::Api: AuraApi>,
E: Environment,
E::Proposer: Proposer>,
@@ -210,6 +216,7 @@ where
P::Public: AppPublic + Public + Member + Encode + Decode + Hash,
P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug,
SO: SyncOracle + Send + Clone,
+ BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Send + 'static,
Error: std::error::Error + Send + From + 'static,
{
type BlockImport = I;
@@ -316,6 +323,21 @@ where
self.force_authoring
}
+ fn should_backoff(&self, slot_number: u64, chain_head: &B::Header) -> bool {
+ if let Some(ref strategy) = self.backoff_authoring_blocks {
+ if let Ok(chain_head_slot) = find_pre_digest::<B, P::Signature>(chain_head) {
+ return strategy.should_backoff(
+ *chain_head.number(),
+ chain_head_slot,
+ self.client.info().finalized_number,
+ slot_number,
+ self.logging_target(),
+ );
+ }
+ }
+ false
+ }
+
fn sync_oracle(&mut self) -> &mut Self::SyncOracle {
&mut self.sync_oracle
}
@@ -814,7 +836,6 @@ pub fn import_queue(
slot_duration: SlotDuration,
block_import: I,
justification_import: Option>,
- finality_proof_import: Option>,
client: Arc,
inherent_data_providers: InherentDataProviders,
spawner: &S,
@@ -846,7 +867,6 @@ pub fn import_queue(
verifier,
Box::new(block_import),
justification_import,
- finality_proof_import,
spawner,
registry,
))
@@ -863,7 +883,7 @@ mod tests {
use sp_keyring::sr25519::Keyring;
use sc_client_api::BlockchainEvents;
use sp_consensus_aura::sr25519::AuthorityPair;
- use sc_consensus_slots::SimpleSlotWorker;
+ use sc_consensus_slots::{SimpleSlotWorker, BackoffAuthoringOnFinalizedHeadLagging};
use std::task::Poll;
use sc_block_builder::BlockBuilderProvider;
use sp_runtime::traits::Header as _;
@@ -1012,7 +1032,7 @@ mod tests {
&inherent_data_providers, slot_duration.get()
).expect("Registers aura inherent data provider");
- aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>(
+ aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _, _>(
slot_duration,
client.clone(),
select_chain,
@@ -1021,6 +1041,7 @@ mod tests {
DummyOracle,
inherent_data_providers,
false,
+ Some(BackoffAuthoringOnFinalizedHeadLagging::default()),
keystore,
sp_consensus::AlwaysCanAuthor,
).expect("Starts aura"));
@@ -1081,6 +1102,7 @@ mod tests {
keystore: keystore.into(),
sync_oracle: DummyOracle.clone(),
force_authoring: false,
+ backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()),
_key_type: PhantomData::,
};
diff --git a/client/consensus/babe/README.md b/client/consensus/babe/README.md
index faba3948ed71583bb04d4f36962ec92c806c1634..a404d2ea447064e083d05aac59752a8c41d97096 100644
--- a/client/consensus/babe/README.md
+++ b/client/consensus/babe/README.md
@@ -43,6 +43,6 @@ primary blocks in the chain. We will pick the heaviest chain (more primary
blocks) and will go with the longest one in case of a tie.
An in-depth description and analysis of the protocol can be found here:
-<https://research.web3.foundation/en/latest/polkadot/BABE/Babe/>
+<https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html>
License: GPL-3.0-or-later WITH Classpath-exception-2.0
\ No newline at end of file
diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml
index 5b3169e600a98e0730fca62b782fdcac59225da7..8a376e6c95b9a043c6d8884ff959709e3c66ee53 100644
--- a/client/consensus/babe/rpc/Cargo.toml
+++ b/client/consensus/babe/rpc/Cargo.toml
@@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
sc-consensus-babe = { version = "0.8.0", path = "../" }
sc-rpc-api = { version = "0.8.0", path = "../../../rpc-api" }
-jsonrpc-core = "15.0.0"
-jsonrpc-core-client = "15.0.0"
-jsonrpc-derive = "15.0.0"
+jsonrpc-core = "15.1.0"
+jsonrpc-core-client = "15.1.0"
+jsonrpc-derive = "15.1.0"
sp-consensus-babe = { version = "0.8.0", path = "../../../../primitives/consensus/babe" }
serde = { version = "1.0.104", features=["derive"] }
sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" }
diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs
index 4705381c2b918ba12c090a8a3bfbdd4e8c8f0b90..3f2a583482afb5d7486f44e0f6c45b7e0f5d6111 100644
--- a/client/consensus/babe/src/lib.rs
+++ b/client/consensus/babe/src/lib.rs
@@ -59,7 +59,7 @@
//! blocks) and will go with the longest one in case of a tie.
//!
//! An in-depth description and analysis of the protocol can be found here:
-//! <https://research.web3.foundation/en/latest/polkadot/BABE/Babe/>
+//! <https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html>
#![forbid(unsafe_code)]
#![warn(missing_docs)]
@@ -79,9 +79,7 @@ use std::{
any::Any, borrow::Cow, convert::TryInto,
};
use sp_consensus::{ImportResult, CanAuthorWith};
-use sp_consensus::import_queue::{
- BoxJustificationImport, BoxFinalityProofImport,
-};
+use sp_consensus::import_queue::BoxJustificationImport;
use sp_core::crypto::Public;
use sp_application_crypto::AppKey;
use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
@@ -114,6 +112,7 @@ use log::{debug, info, log, trace, warn};
use prometheus_endpoint::Registry;
use sc_consensus_slots::{
SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation,
+ BackoffAuthoringBlocksStrategy,
};
use sc_consensus_epochs::{
descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor,
@@ -199,58 +198,86 @@ impl Epoch {
}
}
+/// Errors encountered by the babe authorship task.
#[derive(derive_more::Display, Debug)]
-enum Error {
+pub enum Error {
+ /// Multiple BABE pre-runtime digests
#[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")]
MultiplePreRuntimeDigests,
+ /// No BABE pre-runtime digest found
#[display(fmt = "No BABE pre-runtime digest found")]
NoPreRuntimeDigest,
+ /// Multiple BABE epoch change digests
#[display(fmt = "Multiple BABE epoch change digests, rejecting!")]
MultipleEpochChangeDigests,
+ /// Multiple BABE config change digests
#[display(fmt = "Multiple BABE config change digests, rejecting!")]
MultipleConfigChangeDigests,
+ /// Could not extract timestamp and slot
#[display(fmt = "Could not extract timestamp and slot: {:?}", _0)]
Extraction(sp_consensus::Error),
+ /// Could not fetch epoch
#[display(fmt = "Could not fetch epoch at {:?}", _0)]
FetchEpoch(B::Hash),
+ /// Header rejected: too far in the future
#[display(fmt = "Header {:?} rejected: too far in the future", _0)]
TooFarInFuture(B::Hash),
+ /// Parent unavailable. Cannot import
#[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)]
ParentUnavailable(B::Hash, B::Hash),
+ /// Slot number must increase
#[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)]
SlotNumberMustIncrease(u64, u64),
+ /// Header has a bad seal
#[display(fmt = "Header {:?} has a bad seal", _0)]
HeaderBadSeal(B::Hash),
+ /// Header is unsealed
#[display(fmt = "Header {:?} is unsealed", _0)]
HeaderUnsealed(B::Hash),
+ /// Slot author not found
#[display(fmt = "Slot author not found")]
SlotAuthorNotFound,
+ /// Secondary slot assignments are disabled for the current epoch.
#[display(fmt = "Secondary slot assignments are disabled for the current epoch.")]
SecondarySlotAssignmentsDisabled,
+ /// Bad signature
#[display(fmt = "Bad signature on {:?}", _0)]
BadSignature(B::Hash),
+ /// Invalid author: Expected secondary author
#[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)]
InvalidAuthor(AuthorityId, AuthorityId),
+ /// No secondary author expected.
#[display(fmt = "No secondary author expected.")]
NoSecondaryAuthorExpected,
+ /// VRF verification of block by author failed
#[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)]
VRFVerificationOfBlockFailed(AuthorityId, u128),
+ /// VRF verification failed
#[display(fmt = "VRF verification failed: {:?}", _0)]
VRFVerificationFailed(SignatureError),
+ /// Could not fetch parent header
#[display(fmt = "Could not fetch parent header: {:?}", _0)]
FetchParentHeader(sp_blockchain::Error),
+ /// Expected epoch change to happen.
#[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)]
ExpectedEpochChange(B::Hash, u64),
+ /// Unexpected config change.
#[display(fmt = "Unexpected config change")]
UnexpectedConfigChange,
+ /// Unexpected epoch change
#[display(fmt = "Unexpected epoch change")]
UnexpectedEpochChange,
+ /// Parent block has no associated weight
#[display(fmt = "Parent block of {} has no associated weight", _0)]
ParentBlockNoAssociatedWeight(B::Hash),
+ /// Check Inherents error
#[display(fmt = "Checking inherents failed: {}", _0)]
CheckInherents(String),
+ /// Client error
Client(sp_blockchain::Error),
+ /// Runtime error
Runtime(sp_inherents::Error),
+ /// Fork tree error
ForkTree(Box>),
}
@@ -326,7 +353,7 @@ impl std::ops::Deref for Config {
}
/// Parameters for BABE.
-pub struct BabeParams {
+pub struct BabeParams {
/// The keystore that manages the keys of the node.
pub keystore: SyncCryptoStorePtr,
@@ -353,6 +380,9 @@ pub struct BabeParams {
/// Force authoring of blocks even if we are offline
pub force_authoring: bool,
+ /// Strategy and parameters for backing off block production.
+ pub backoff_authoring_blocks: Option<BS>,
+
/// The source of timestamps for relative slots
pub babe_link: BabeLink,
@@ -361,7 +391,7 @@ pub struct BabeParams {
}
/// Start the babe worker.
-pub fn start_babe(BabeParams {
+pub fn start_babe(BabeParams {
keystore,
client,
select_chain,
@@ -370,9 +400,10 @@ pub fn start_babe(BabeParams {
sync_oracle,
inherent_data_providers,
force_authoring,
+ backoff_authoring_blocks,
babe_link,
can_author_with,
-}: BabeParams) -> Result<
+}: BabeParams) -> Result<
BabeWorker,
sp_consensus::Error,
> where
@@ -388,6 +419,7 @@ pub fn start_babe