diff --git a/substrate/core/client/src/genesis.rs b/substrate/core/client/src/genesis.rs index d7d398a4e908b74aec1f1f75510d5a0e771b288e..098c7a35e9bfd62607fa8bdae7233dd9507ca68b 100644 --- a/substrate/core/client/src/genesis.rs +++ b/substrate/core/client/src/genesis.rs @@ -70,7 +70,7 @@ mod tests { let signature = Pair::from(Keyring::from_public(Public::from_raw(tx.from.to_fixed_bytes())).unwrap()) .sign(&tx.encode()).into(); - Extrinsic { transfer: tx, signature } + Extrinsic::Transfer(tx, signature) }).collect::<Vec<_>>(); let extrinsics_root = ordered_trie_root::<Blake2Hasher, _, _>(transactions.iter().map(Encode::encode)).into(); diff --git a/substrate/core/finality-grandpa/primitives/src/lib.rs b/substrate/core/finality-grandpa/primitives/src/lib.rs index 15e4a027f6c24ffb0b3a8603c460d056d8e5e636..8776c262a078b2d2d48ebfaa80084ef575f23f66 100644 --- a/substrate/core/finality-grandpa/primitives/src/lib.rs +++ b/substrate/core/finality-grandpa/primitives/src/lib.rs @@ -97,6 +97,10 @@ decl_runtime_apis! { /// Get the current GRANDPA authorities and weights. This should not change except /// for when changes are scheduled and the corresponding delay has passed. + /// + /// When called at block B, it will return the set of authorities that should be + /// used to finalize descendants of this block (B+1, B+2, ...). The block B itself + /// is finalized by the authorities from block B-1. fn grandpa_authorities() -> Vec<(Ed25519AuthorityId, u64)>; } } diff --git a/substrate/core/finality-grandpa/src/finality_proof.rs b/substrate/core/finality-grandpa/src/finality_proof.rs new file mode 100644 index 0000000000000000000000000000000000000000..c927aa68a08e95997453644b20807e1bb90dedb8 --- /dev/null +++ b/substrate/core/finality-grandpa/src/finality_proof.rs @@ -0,0 +1,428 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see <http://www.gnu.org/licenses/>. + +//! GRANDPA block finality proof generation and check. +//! +//! Finality of block B is proved by providing: +//! 1) valid headers sub-chain from the block B to the block F; +//! 2) valid (with respect to proved authorities) GRANDPA justification of the block F; +//! 3) proof-of-execution of the `grandpa_authorities` call at the block F. +//! +//! Since earliest possible justification is returned, the GRANDPA authorities set +//! at the block F is guaranteed to be the same as in the block B (this is because block +//! that enacts new GRANDPA authorities set always comes with justification). It also +//! means that the `set_id` is the same at blocks B and F. +//! +//! The caller should track the `set_id`. The most straightforward way is to fetch finality +//! proofs ONLY for blocks on the tip of the chain and track the latest known `set_id`. 
+ +use std::collections::HashMap; + +use client::{ + blockchain::Backend as BlockchainBackend, + error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}, + light::fetcher::RemoteCallRequest, +}; +use codec::{Encode, Decode}; +use grandpa::BlockNumberOps; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{ + NumberFor, Block as BlockT, Header as HeaderT, One, +}; +use substrate_primitives::{Ed25519AuthorityId, H256}; + +use GrandpaJustification; + +/// Prepare proof-of-finality for the given block. +/// +/// The proof is the serialized `FinalityProof` constructed using the earliest known +/// justification of the block. None is returned if there's no known justification at the moment. +pub fn prove_finality<Block: BlockT, B, G>( + blockchain: &B, + generate_execution_proof: G, + block: Block::Hash, +) -> ::client::error::Result<Option<Vec<u8>>> + where + B: BlockchainBackend<Block>, + G: Fn(&BlockId<Block>, &str, &[u8]) -> ClientResult<Vec<Vec<u8>>>, +{ + let block_id = BlockId::Hash(block); + let mut block_number = blockchain.expect_block_number_from_id(&block_id)?; + + // early-return if we are sure that the block isn't finalized yet + let info = blockchain.info()?; + if info.finalized_number < block_number { + return Ok(None); + } + + // early-return if we are sure that the block is NOT a part of the canonical chain + let canonical_block = blockchain.expect_block_hash_from_id(&BlockId::Number(block_number))?; + if block != canonical_block { + return Err(ClientErrorKind::Backend( + "Cannot generate finality proof for non-canonical block".into() + ).into()); + } + + // now that we know that the block is finalized, we can generate the finalization proof + + // we need to prove the grandpa authorities set that has generated the justification + // BUT since the `GrandpaApi::grandpa_authorities` call returns the set that becomes active + // at the next block, the proof-of-execution is generated using the parent block's state + // (this will fail if we're trying to prove genesis finality, but such a call is redundant anyway) + let mut current_header = blockchain.expect_header(BlockId::Hash(block))?; + let parent_block_id = BlockId::Hash(*current_header.parent_hash()); + let authorities_proof = generate_execution_proof( + &parent_block_id, + "GrandpaApi_grandpa_authorities", + &[], + )?; + + // search for the earliest post-block (inclusive) justification + let mut finalization_path = Vec::new(); + loop { + finalization_path.push(current_header); + + match blockchain.justification(BlockId::Number(block_number))? { + Some(justification) => return Ok(Some(FinalityProof { + finalization_path, + justification, + authorities_proof, + }.encode())), + None if block_number == info.finalized_number => break, + None => { + block_number = block_number + One::one(); + current_header = blockchain.expect_header(BlockId::Number(block_number))?; + }, + } + } + + Err(ClientErrorKind::Backend( + "cannot find justification for finalized block".into() + ).into()) +} + +/// Check proof-of-finality for the given block. +/// +/// Returns the vector of headers (including the `block` header, ordered by ascending block number) that MUST be +/// validated + imported at once (i.e. within a single db transaction). If at least one of those headers +/// is invalid, all others MUST be considered invalid.
+pub fn check_finality_proof<Block: BlockT<Hash=H256>, C>( + check_execution_proof: C, + parent_header: Block::Header, + block: (NumberFor<Block>, Block::Hash), + set_id: u64, + remote_proof: Vec<u8>, +) -> ClientResult<Vec<Block::Header>> + where + NumberFor<Block>: grandpa::BlockNumberOps, + C: Fn(&RemoteCallRequest<Block::Header>) -> ClientResult<Vec<u8>>, +{ + do_check_finality_proof::<Block, C, GrandpaJustification<Block>>( + check_execution_proof, + parent_header, + block, + set_id, + remote_proof, + ) +} + +/// Check proof-of-finality using the given justification type. +fn do_check_finality_proof<Block: BlockT<Hash=H256>, C, J>( + check_execution_proof: C, + parent_header: Block::Header, + block: (NumberFor<Block>, Block::Hash), + set_id: u64, + remote_proof: Vec<u8>, +) -> ClientResult<Vec<Block::Header>> + where + NumberFor<Block>: grandpa::BlockNumberOps, + C: Fn(&RemoteCallRequest<Block::Header>) -> ClientResult<Vec<u8>>, + J: ProvableJustification<Block::Header>, +{ + // decode finality proof + let proof = FinalityProof::<Block::Header, J>::decode(&mut &remote_proof[..]) + .ok_or_else(|| ClientErrorKind::BadJustification("failed to decode finality proof".into()))?; + + // check that the first header in the finalization path is the block itself + { + let finalized_header = proof.finalization_path.first() + .ok_or_else(|| ClientError::from(ClientErrorKind::BadJustification( + "finality proof: finalized path is empty".into() + )))?; + if *finalized_header.number() != block.0 || finalized_header.hash() != block.1 { + return Err(ClientErrorKind::BadJustification( + "finality proof: block is not a part of finalized path".into() + ).into()); + } + } + + // check that the last header in the finalization path is the justification target block + let just_block = proof.justification.target_block(); + { + let finalized_header = proof.finalization_path.last() + .expect("checked above that proof.finalization_path is not empty; qed"); + if *finalized_header.number() != just_block.0 || finalized_header.hash() != just_block.1 { + return Err(ClientErrorKind::BadJustification( + "finality proof: target justification block is not a part of finalized path".into() + ).into()); + } + } + + // check the authorities set proof && get the grandpa authorities that should have signed the justification + let grandpa_authorities = check_execution_proof(&RemoteCallRequest { + block: just_block.1, + header: parent_header, + method: "GrandpaApi_grandpa_authorities".into(), + call_data: vec![], + retry_count: None, + })?; + let grandpa_authorities: Vec<(Ed25519AuthorityId, u64)> = Decode::decode(&mut &grandpa_authorities[..]) + .ok_or_else(|| ClientErrorKind::BadJustification("failed to decode GRANDPA authorities set proof".into()))?; + + // and now check the justification + proof.justification.verify(set_id, &grandpa_authorities.into_iter().collect())?; + + Ok(proof.finalization_path) +} + +/// Proof of finality. +/// +/// Finality of block B is proved by providing: +/// 1) valid headers sub-chain from the block B to the block F; +/// 2) proof of `GrandpaApi::grandpa_authorities()` call at the block F; +/// 3) valid (with respect to proved authorities) GRANDPA justification of the block F. +#[derive(Debug, PartialEq, Encode, Decode)] +struct FinalityProof<Header, Justification> { + /// Headers-path (ordered by block number, ascending) from the block we're gathering proof for + /// (inclusive) to the target block of the justification (inclusive).
+ pub finalization_path: Vec<Header>, + /// Justification (finalization) of the last block from the `finalization_path`. + pub justification: Justification, + /// Proof of `GrandpaApi::grandpa_authorities` call execution at the + /// justification' target block. + pub authorities_proof: Vec<Vec<u8>>, +} + +/// Justification used to prove block finality. +trait ProvableJustification<Header: HeaderT>: Encode + Decode { + /// Get target block of this justification. + fn target_block(&self) -> (Header::Number, Header::Hash); + + /// Verify justification with respect to authorities set and authorities set id. + fn verify(&self, set_id: u64, authorities: &HashMap<Ed25519AuthorityId, u64>) -> ClientResult<()>; +} + +impl<Block: BlockT<Hash=H256>> ProvableJustification<Block::Header> for GrandpaJustification<Block> + where + NumberFor<Block>: BlockNumberOps, +{ + fn target_block(&self) -> (NumberFor<Block>, Block::Hash) { + (self.commit.target_number, self.commit.target_hash) + } + + fn verify(&self, set_id: u64, authorities: &HashMap<Ed25519AuthorityId, u64>) -> ClientResult<()> { + GrandpaJustification::verify(self, set_id, authorities) + } +} + +#[cfg(test)] +mod tests { + use test_client::runtime::{Block, Header}; + use test_client::client::backend::NewBlockState; + use test_client::client::in_mem::Blockchain as InMemoryBlockchain; + use super::*; + + type FinalityProof = super::FinalityProof<Header, Vec<u8>>; + + #[derive(Encode, Decode)] + struct ValidFinalityProof(Vec<u8>); + + impl ProvableJustification<Header> for ValidFinalityProof { + fn target_block(&self) -> (u64, H256) { (3, header(3).hash()) } + + fn verify(&self, set_id: u64, authorities: &HashMap<Ed25519AuthorityId, u64>) -> ClientResult<()> { + assert_eq!(set_id, 1); + assert_eq!(authorities, &vec![ + (Ed25519AuthorityId([1u8; 32]), 1), + (Ed25519AuthorityId([2u8; 32]), 2), + (Ed25519AuthorityId([3u8; 32]), 3), + ].into_iter().collect()); + Ok(()) + } + } + + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new(number, 0.into(), 0.into(), parent_hash, Default::default()) + } + + fn side_header(number: u64) -> Header { + Header::new(number, 0.into(), 1.into(), header(number - 1).hash(), Default::default()) + } + + fn test_blockchain() -> InMemoryBlockchain<Block> { + let blockchain = InMemoryBlockchain::<Block>::new(); + blockchain.insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final).unwrap(); + blockchain.insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final).unwrap(); + blockchain.insert(header(2).hash(), header(2), None, None, NewBlockState::Best).unwrap(); + blockchain.insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final).unwrap(); + blockchain + } + + #[test] + fn finality_proof_is_not_generated_for_non_final_block() { + let blockchain = test_blockchain(); + blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); + + // when asking for finality of block 4, None is returned + let proof_of_4 = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(4).hash()) + .unwrap(); + assert_eq!(proof_of_4, None); + } + + #[test] + fn finality_proof_fails_for_non_canonical_block() { + let blockchain = test_blockchain(); + blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); + blockchain.insert(side_header(4).hash(), side_header(4), None, None, NewBlockState::Best).unwrap(); + 
blockchain.insert(header(5).hash(), header(5), Some(vec![5]), None, NewBlockState::Final).unwrap(); + + // when asking for finality of a non-canonical block, an error is returned + let proof_of_side_4_fails = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), 42.into()).is_err(); + assert_eq!(proof_of_side_4_fails, true); + } + + #[test] + fn finality_proof_fails_if_no_justification_known() { + let blockchain = test_blockchain(); + blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); + + // when asking for finality of block 4, the justification lookup fails and an error is returned + let proof_of_4_fails = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), 42.into()).is_err(); + assert_eq!(proof_of_4_fails, true); + } + + #[test] + fn prove_finality_is_generated() { + let blockchain = test_blockchain(); + + // when asking for finality of block 2, justification of 3 is returned + let proof_of_2: FinalityProof = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(2).hash()) + .unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); + assert_eq!(proof_of_2, FinalityProof { + finalization_path: vec![header(2), header(3)], + justification: vec![3], + authorities_proof: vec![vec![42]], + }); + + // when asking for finality of block 3, justification of 3 is returned + let proof_of_3: FinalityProof = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(3).hash()) + .unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); + assert_eq!(proof_of_3, FinalityProof { + finalization_path: vec![header(3)], + justification: vec![3], + authorities_proof: vec![vec![42]], + }); + } + + #[test] + fn finality_proof_check_fails_when_block_is_not_included() { + let mut proof_of_2: FinalityProof = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); + proof_of_2.finalization_path.remove(0); + + // block for which we're trying to request finality proof is missing from finalization_path + assert_eq!(do_check_finality_proof::<Block, _, ValidFinalityProof>( + |_| Ok(Vec::<u8>::new().encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2.encode(), + ).is_err(), true); + } + + #[test] + fn finality_proof_check_fails_when_justified_block_is_not_included() { + let mut proof_of_2: FinalityProof = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); + proof_of_2.finalization_path.remove(1); + + // justified block is missing from finalization_path + assert_eq!(do_check_finality_proof::<Block, _, ValidFinalityProof>( + |_| Ok(Vec::<u8>::new().encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2.encode(), + ).is_err(), true); + } + + #[test] + fn finality_proof_check_fails_when_justification_verification_fails() { + #[derive(Encode, Decode)] + struct InvalidFinalityProof(Vec<u8>); + + impl ProvableJustification<Header> for InvalidFinalityProof { + fn target_block(&self) -> (u64, H256) { (3, header(3).hash()) } + + fn verify(&self, _set_id: u64, _authorities: &HashMap<Ed25519AuthorityId, u64>) -> ClientResult<()> { + Err(ClientErrorKind::Backend("test error".into()).into()) + } + } + + let mut proof_of_2: FinalityProof = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); + proof_of_2.finalization_path.remove(1); + + // justification is not valid +
assert_eq!(do_check_finality_proof::<Block, _, InvalidFinalityProof>( + |_| Ok(Vec::<u8>::new().encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2.encode(), + ).is_err(), true); + } + + #[test] + fn finality_proof_check_works() { + let proof_of_2 = prove_finality(&test_blockchain(), |_, _, _| Ok(vec![vec![42]]), header(2).hash()) + .unwrap().unwrap(); + assert_eq!(do_check_finality_proof::<Block, _, ValidFinalityProof>( + |_| Ok(vec![ + (Ed25519AuthorityId([1u8; 32]), 1u64), + (Ed25519AuthorityId([2u8; 32]), 2u64), + (Ed25519AuthorityId([3u8; 32]), 3u64), + ].encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2, + ).unwrap(), vec![header(2), header(3)]); + } +} diff --git a/substrate/core/finality-grandpa/src/lib.rs b/substrate/core/finality-grandpa/src/lib.rs index 00ea6734af56d58218a47cbf5e959204cf3c366f..aacee8fe7d78a4f45f5640d7e8be5917d76e91dd 100644 --- a/substrate/core/finality-grandpa/src/lib.rs +++ b/substrate/core/finality-grandpa/src/lib.rs @@ -92,7 +92,7 @@ use codec::{Encode, Decode}; use consensus_common::{BlockImport, Error as ConsensusError, ErrorKind as ConsensusErrorKind, ImportBlock, ImportResult, Authorities}; use runtime_primitives::traits::{ NumberFor, Block as BlockT, Header as HeaderT, DigestFor, ProvideRuntimeApi, Hash as HashT, - DigestItemFor, DigestItem, + DigestItemFor, DigestItem, As, Zero, }; use fg_primitives::GrandpaApi; use runtime_primitives::generic::BlockId; @@ -116,6 +116,7 @@ pub use fg_primitives::ScheduledChange; mod authorities; mod communication; +mod finality_proof; mod until_imported; #[cfg(feature="service-integration")] @@ -123,11 +124,14 @@ mod service_integration; #[cfg(feature="service-integration")] pub use service_integration::{LinkHalfForService, BlockImportForService}; +pub use finality_proof::{prove_finality, check_finality_proof}; + #[cfg(test)] mod tests; const LAST_COMPLETED_KEY: &[u8] = b"grandpa_completed_round"; const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; +const CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; /// round-number, round-state type LastCompleted<H, N> = (u64, RoundState<H, N>); @@ -165,6 +169,10 @@ pub type CompactCommit<Block> = grandpa::CompactCommit< pub struct Config { /// The expected duration for a message to be gossiped across the network. pub gossip_duration: Duration, + /// Justification generation period (in blocks). GRANDPA will try to generate justifications + /// at least every justification_period blocks. There are some other events which might cause + /// justification generation. + pub justification_period: u64, /// The local signing key. pub local_key: Option<Arc<ed25519::Pair>>, /// Some local identifier of the voter. @@ -304,12 +312,65 @@ impl<B, E, Block: BlockT<Hash=H256>, RA> BlockStatus<Block> for Arc<Client<B, E, } } +/// Consensus-related data changes tracker. +#[derive(Debug, Encode, Decode)] +struct ConsensusChanges<H, N> { + pending_changes: Vec<(N, H)>, +} + +impl<H: Copy + PartialEq, N: Copy + Ord> ConsensusChanges<H, N> { + /// Create empty consensus changes. + pub fn empty() -> Self { + ConsensusChanges { pending_changes: Vec::new(), } + } + + /// Note unfinalized change of consensus-related data. + pub fn note_change(&mut self, at: (N, H)) { + let idx = self.pending_changes + .binary_search_by_key(&at.0, |change| change.0) + .unwrap_or_else(|i| i); + self.pending_changes.insert(idx, at); + } + + /// Finalize all pending consensus changes that are finalized by the given block. + /// Returns whether the set of pending changes was altered and whether any pending change was actually finalized by the given block.
+ pub fn finalize<F: Fn(N) -> ::client::error::Result<Option<H>>>( + &mut self, + block: (N, H), + canonical_at_height: F, + ) -> ::client::error::Result<(bool, bool)> { + let (split_idx, has_finalized_changes) = self.pending_changes.iter() + .enumerate() + .take_while(|(_, &(at_height, _))| at_height <= block.0) + .fold((None, Ok(false)), |(_, has_finalized_changes), (idx, ref at)| + ( + Some(idx), + has_finalized_changes + .and_then(|has_finalized_changes| if has_finalized_changes { + Ok(has_finalized_changes) + } else { + canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash) + }), + )); + + let altered_changes = split_idx.is_some(); + if let Some(split_idx) = split_idx { + self.pending_changes = self.pending_changes.split_off(split_idx + 1); + } + has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes)) + } +} + +/// Thread-safe consensus changes tracker reference. +type SharedConsensusChanges<H, N> = Arc<parking_lot::Mutex<ConsensusChanges<H, N>>>; + /// The environment we run GRANDPA in. struct Environment<B, E, Block: BlockT, N: Network, RA> { inner: Arc<Client<B, E, Block, RA>>, voters: Arc<HashMap<Ed25519AuthorityId, u64>>, config: Config, authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>, + consensus_changes: SharedConsensusChanges<Block::Hash, NumberFor<Block>>, network: N, set_id: u64, } @@ -515,7 +576,15 @@ impl<B, E, Block: BlockT<Hash=H256>, N, RA> voter::Environment<Block::Hash, Numb } fn finalize_block(&self, hash: Block::Hash, number: NumberFor<Block>, round: u64, commit: Commit<Block>) -> Result<(), Self::Error> { - finalize_block(&*self.inner, &self.authority_set, hash, number, (round, commit).into()) + finalize_block( + &*self.inner, + &self.authority_set, + &self.consensus_changes, + Some(As::sa(self.config.justification_period)), + hash, + number, + (round, commit).into(), + ) } fn round_commit_timer(&self) -> Self::Timer { @@ -616,20 +685,23 @@ impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> { ) -> Result<GrandpaJustification<Block>, ClientError> where NumberFor<Block>: grandpa::BlockNumberOps, { - use grandpa::Chain; + GrandpaJustification::<Block>::decode(&mut &*encoded).ok_or_else(|| { + let msg = "failed to decode grandpa justification".to_string(); + ClientErrorKind::BadJustification(msg).into() + }).and_then(|just| just.verify(set_id, voters).map(|_| just)) + } - let justification = match GrandpaJustification::decode(&mut &*encoded) { - Some(justification) => justification, - _ => { - let msg = "failed to decode grandpa justification".to_string(); - return Err(ClientErrorKind::BadJustification(msg).into()); - } - }; + /// Validate the commit and the votes' ancestry proofs. 
+ fn verify(&self, set_id: u64, voters: &HashMap<Ed25519AuthorityId, u64>) -> Result<(), ClientError> + where + NumberFor<Block>: grandpa::BlockNumberOps, + { + use grandpa::Chain; - let ancestry_chain = AncestryChain::<Block>::new(&justification.votes_ancestries); + let ancestry_chain = AncestryChain::<Block>::new(&self.votes_ancestries); match grandpa::validate_commit( - &justification.commit, + &self.commit, voters, None, &ancestry_chain, @@ -642,23 +714,23 @@ impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> { } let mut visited_hashes = HashSet::new(); - for signed in justification.commit.precommits.iter() { + for signed in self.commit.precommits.iter() { if let Err(_) = communication::check_message_sig::<Block>( &grandpa::Message::Precommit(signed.precommit.clone()), &signed.id, &signed.signature, - justification.round, + self.round, set_id, ) { return Err(ClientErrorKind::BadJustification( "invalid signature for precommit in grandpa justification".to_string()).into()); } - if justification.commit.target_hash == signed.precommit.target_hash { + if self.commit.target_hash == signed.precommit.target_hash { continue; } - match ancestry_chain.ancestry(justification.commit.target_hash, signed.precommit.target_hash) { + match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { Ok(route) => { // ancestry starts from parent hash but the precommit target hash has been visited visited_hashes.insert(signed.precommit.target_hash); @@ -673,7 +745,7 @@ impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> { } } - let ancestry_hashes = justification.votes_ancestries + let ancestry_hashes = self.votes_ancestries .iter() .map(|h: &Block::Header| h.hash()) .collect(); @@ -683,7 +755,7 @@ impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> { "invalid precommit ancestries in grandpa justification with unused headers".to_string()).into()); } - Ok(justification) + Ok(()) } } @@ -710,6 +782,8 @@ impl<Block: BlockT> From<GrandpaJustification<Block>> for JustificationOrCommit< fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>( client: &Client<B, E, Block, RA>, authority_set: &SharedAuthoritySet<Block::Hash, NumberFor<Block>>, + consensus_changes: &SharedConsensusChanges<Block::Hash, NumberFor<Block>>, + justification_period: Option<NumberFor<Block>>, hash: Block::Hash, number: NumberFor<Block>, justification_or_commit: JustificationOrCommit<Block>, @@ -720,6 +794,7 @@ fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>( { // lock must be held through writing to DB to avoid race let mut authority_set = authority_set.inner().write(); + let mut consensus_changes = consensus_changes.lock(); let status = authority_set.apply_changes(number, |canon_number| { canonical_at_height(client, (hash, number), canon_number) })?; @@ -756,6 +831,20 @@ fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>( } } + // check if this is the first finalization of some consensus changes + let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes + .finalize((number, hash), |at_height| canonical_at_height(client, (hash, number), at_height))?; + if alters_consensus_changes { + let encoded = consensus_changes.encode(); + let write_result = Backend::insert_aux(&**client.backend(), &[(CONSENSUS_CHANGES_KEY, &encoded[..])], &[]); + if let Err(e) = write_result { + warn!(target: "finality", "Failed to write updated consensus changes to disk.
Bailing."); + warn!(target: "finality", "Node is in a potentially inconsistent state."); + + return Err(e.into()); + } + } + // NOTE: this code assumes that honest voters will never vote past a // transition block, thus we don't have to worry about the case where // we have a transition with `effective_block = N`, but we finalize @@ -764,8 +853,26 @@ fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>( // syncing clients. let justification = match justification_or_commit { JustificationOrCommit::Justification(justification) => Some(justification.encode()), - JustificationOrCommit::Commit((round_number, commit)) => - if status.new_set_block.is_some() { + JustificationOrCommit::Commit((round_number, commit)) => { + let mut justification_required = + // justification is always required when block that enacts new authorities + // set is finalized + status.new_set_block.is_some() || + // justification is required when consensus changes are finalized + finalizes_consensus_changes; + + // justification is required every N blocks to be able to prove blocks + // finalization to remote nodes + if !justification_required { + if let Some(justification_period) = justification_period { + let last_finalized_number = client.info()?.chain.finalized_number; + justification_required = (!last_finalized_number.is_zero() || + number - last_finalized_number == justification_period) && + (last_finalized_number / justification_period != number / justification_period); + } + } + + if justification_required { let justification = GrandpaJustification::from_commit( client, round_number, @@ -775,7 +882,8 @@ fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>( Some(justification.encode()) } else { None - }, + } + }, }; debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); @@ -822,6 +930,7 @@ pub struct GrandpaBlockImport<B, E, Block: BlockT<Hash=H256>, RA, PRA> { inner: Arc<Client<B, E, Block, RA>>, authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>, authority_set_change: mpsc::UnboundedSender<NewAuthoritySet<Block::Hash, NumberFor<Block>>>, + consensus_changes: SharedConsensusChanges<Block::Hash, NumberFor<Block>>, api: Arc<PRA>, } @@ -909,6 +1018,7 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block> // we don't want to finalize on `inner.import_block` let justification = block.justification.take(); + let enacts_consensus_change = new_authorities.is_some(); let import_result = self.inner.import_block(block, new_authorities).map_err(|e| { if let Some((old_set, mut authorities)) = just_in_case { debug!(target: "afg", "Restoring old set after block import error: {:?}", e); @@ -918,22 +1028,17 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block> }); let import_result = match import_result { - Ok(ImportResult::Queued) => ImportResult::Queued, - Ok(r) => return Ok(r), - Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + Ok(ImportResult::Queued) => ImportResult::Queued, + Ok(r) => return Ok(r), + Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), }; let enacts_change = self.authority_set.inner().read().enacts_change(number, |canon_number| { canonical_at_height(&self.inner, (hash, number), canon_number) - }); + }).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; - match enacts_change { - Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), - Ok(enacted) => { - if !enacted { - return Ok(import_result); - } - } + if !enacts_change && 
!enacts_consensus_change { + return Ok(import_result); } match justification { @@ -952,6 +1057,8 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block> let result = finalize_block( &*self.inner, &self.authority_set, + &self.consensus_changes, + None, hash, number, justification.into(), @@ -959,11 +1066,14 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block> match result { Ok(_) => { - unreachable!("returns Ok when no authority set change should be enacted; \ - verified previously that finalizing the current block enacts a change; \ - qed;"); + assert!(!enacts_change, "returns Ok when no authority set change should be enacted; qed;"); }, Err(ExitOrError::AuthoritiesChanged(new)) => { + assert!( + enacts_change, + "returns AuthoritiesChanged when authority set change should be enacted; qed;" + ); + debug!(target: "finality", "Imported justified block #{} that enacts authority set change, signalling voter.", number); if let Err(e) = self.authority_set_change.unbounded_send(new) { return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); @@ -981,8 +1091,20 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block> } }, None => { - trace!(target: "finality", "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", number); - } + if enacts_change { + trace!( + target: "finality", + "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", + number, + ); + } + + // we have imported block with consensus data changes, but without justification + // => remember to create justification when next block will be finalized + if enacts_consensus_change { + self.consensus_changes.lock().note_change((number, hash)); + } + }, } Ok(import_result) @@ -1060,6 +1182,7 @@ pub struct LinkHalf<B, E, Block: BlockT<Hash=H256>, RA> { client: Arc<Client<B, E, Block, RA>>, authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>, authority_set_change: mpsc::UnboundedReceiver<NewAuthoritySet<Block::Hash, NumberFor<Block>>>, + consensus_changes: SharedConsensusChanges<Block::Hash, NumberFor<Block>>, } struct AncestryChain<Block: BlockT> { @@ -1142,6 +1265,15 @@ pub fn block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>( .into(), }; + let consensus_changes = Backend::get_aux(&**client.backend(), CONSENSUS_CHANGES_KEY)?; + let consensus_changes = Arc::new(parking_lot::Mutex::new(match consensus_changes { + Some(raw) => ConsensusChanges::decode(&mut &raw[..]) + .ok_or_else(|| ::client::error::ErrorKind::Backend( + format!("GRANDPA consensus changes kept in invalid format") + ))?, + None => ConsensusChanges::empty(), + })); + let (authority_set_change_tx, authority_set_change_rx) = mpsc::unbounded(); Ok(( @@ -1149,12 +1281,14 @@ pub fn block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>( inner: client.clone(), authority_set: authority_set.clone(), authority_set_change: authority_set_change_tx, + consensus_changes: consensus_changes.clone(), api }, LinkHalf { client, authority_set, authority_set_change: authority_set_change_rx, + consensus_changes, }, )) } @@ -1231,6 +1365,7 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>( client, authority_set, authority_set_change, + consensus_changes, } = link; let chain_info = client.info()?; @@ -1253,6 +1388,7 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>( network: network.clone(), set_id: authority_set.set_id(), authority_set: authority_set.clone(), + consensus_changes: consensus_changes.clone(), }); let 
initial_state = (initial_environment, last_round_number, last_state, authority_set_change.into_future()); @@ -1291,6 +1427,7 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>( let config = config.clone(); let network = network.clone(); let authority_set = authority_set.clone(); + let consensus_changes = consensus_changes.clone(); let trigger_authority_set_change = |new: NewAuthoritySet<_, _>, authority_set_change| { let env = Arc::new(Environment { @@ -1300,6 +1437,7 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>( set_id: new.set_id, network, authority_set, + consensus_changes, }); // start the new authority set using the block where the diff --git a/substrate/core/finality-grandpa/src/tests.rs b/substrate/core/finality-grandpa/src/tests.rs index 18bfaef1a5ff2baff8ae877d707e913bcc052e97..39924263adf4a1596bfddb5d7f6977fcd78f6794 100644 --- a/substrate/core/finality-grandpa/src/tests.rs +++ b/substrate/core/finality-grandpa/src/tests.rs @@ -25,6 +25,7 @@ use tokio::runtime::current_thread; use keyring::Keyring; use client::{ BlockchainEvents, error::Result, + blockchain::Backend as BlockchainBackend, runtime_api::{Core, RuntimeVersion, ApiExt, ConstructRuntimeApi, CallRuntimeAt}, }; use test_client::{self, runtime::BlockNumber}; @@ -332,22 +333,7 @@ fn make_ids(keys: &[Keyring]) -> Vec<(Ed25519AuthorityId, u64)> { .collect() } -#[test] -fn finalize_3_voters_no_observers() { - let peers = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(20, false); - net.sync(); - - for i in 0..3 { - assert_eq!(net.peer(i).client().info().unwrap().chain.best_number, 20, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - +fn run_to_completion(blocks: u64, net: Arc<Mutex<GrandpaTestNet>>, peers: &[Keyring]) { let mut finality_notifications = Vec::new(); let mut runtime = current_thread::Runtime::new().unwrap(); @@ -363,7 +349,7 @@ fn finalize_3_voters_no_observers() { }; finality_notifications.push( client.finality_notification_stream() - .take_while(|n| Ok(n.header.number() < &20)) + .take_while(|n| Ok(n.header.number() < &blocks)) .for_each(|_| Ok(())) ); fn assert_send<T: Send>(_: &T) { } @@ -371,6 +357,7 @@ fn finalize_3_voters_no_observers() { let voter = run_grandpa( Config { gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, local_key: Some(Arc::new(key.clone().into())), name: Some(format!("peer#{}", peer_id)), }, @@ -397,6 +384,28 @@ fn finalize_3_voters_no_observers() { runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); } +#[test] +fn finalize_3_voters_no_observers() { + let peers = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(20, false); + net.sync(); + + for i in 0..3 { + assert_eq!(net.peer(i).client().info().unwrap().chain.best_number, 20, + "Peer #{} failed to sync", i); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(20, net.clone(), peers); + + // normally there's no justification for finalized blocks + assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(20)).unwrap().is_none(), + "Extra justification for block#1"); +} + #[test] fn finalize_3_voters_1_observer() { let peers = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; @@ -432,6 +441,7 @@ fn finalize_3_voters_1_observer() { let voter = 
run_grandpa( Config { gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, local_key, name: Some(format!("peer#{}", peer_id)), }, @@ -589,6 +599,7 @@ fn transition_3_voters_twice_1_observer() { let voter = run_grandpa( Config { gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, local_key, name: Some(format!("peer#{}", peer_id)), }, @@ -616,3 +627,59 @@ fn transition_3_voters_twice_1_observer() { runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); } + +#[test] +fn justification_is_emitted_when_consensus_data_changes() { + let peers = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); + + // import block#1 WITH consensus data change + let new_authorities = vec![Ed25519AuthorityId::from([42; 32])]; + net.peer(0).push_authorities_change_block(new_authorities); + net.sync(); + let net = Arc::new(Mutex::new(net)); + run_to_completion(1, net.clone(), peers); + + // ... and check that the justification for block#1 has been generated + assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(1)).unwrap().is_some(), + "Missing justification for block#1"); +} + +#[test] +fn justification_is_generated_periodically() { + let peers = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(32, false); + net.sync(); + + let net = Arc::new(Mutex::new(net)); + run_to_completion(32, net.clone(), peers); + + // when block#32 (justification_period) is finalized, justification + // is required => generated + for i in 0..3 { + assert!(net.lock().peer(i).client().backend().blockchain() + .justification(BlockId::Number(32)).unwrap().is_some()); + } +} + +#[test] +fn consensus_changes_works() { + let mut changes = ConsensusChanges::<H256, u64>::empty(); + + // pending changes are not finalized + changes.note_change((10, 1.into())); + assert_eq!(changes.finalize((5, 5.into()), |_| Ok(None)).unwrap(), (false, false)); + + // no change is selected from competing pending changes + changes.note_change((1, 1.into())); + changes.note_change((1, 101.into())); + assert_eq!(changes.finalize((10, 10.into()), |_| Ok(Some(1001.into()))).unwrap(), (true, false)); + + // change is selected from competing pending changes + changes.note_change((1, 1.into())); + changes.note_change((1, 101.into())); + assert_eq!(changes.finalize((10, 10.into()), |_| Ok(Some(1.into()))).unwrap(), (true, true)); +} \ No newline at end of file diff --git a/substrate/core/network/src/test/mod.rs b/substrate/core/network/src/test/mod.rs index 48b401b21ae1af3ecb9b5b33d923f8f1871ae025..952381601549970e73df7fe339a6e822bc2e5cab 100644 --- a/substrate/core/network/src/test/mod.rs +++ b/substrate/core/network/src/test/mod.rs @@ -27,9 +27,10 @@ use std::sync::Arc; use parking_lot::RwLock; use client; use client::block_builder::BlockBuilder; +use primitives::Ed25519AuthorityId; use runtime_primitives::Justification; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Block as BlockT, Zero, AuthorityIdFor}; +use runtime_primitives::traits::{Block as BlockT, Zero, Header, Digest, DigestItem, AuthorityIdFor}; use io::SyncIo; use protocol::{Context, Protocol, ProtocolContext}; use config::ProtocolConfig; @@ -92,6 +93,9 @@ impl<B: BlockT> Verifier<B> for PassThroughVerifier { justification: Option<Justification>, body: Option<Vec<B::Extrinsic>> ) -> Result<(ImportBlock<B>,
Option<Vec<AuthorityIdFor<B>>>), String> { + let new_authorities = header.digest().log(DigestItem::as_authorities_change) + .map(|auth| auth.iter().cloned().collect()); + Ok((ImportBlock { origin, header, @@ -101,7 +105,7 @@ impl<B: BlockT> Verifier<B> for PassThroughVerifier { post_digests: vec![], auxiliary: Vec::new(), fork_choice: ForkChoiceStrategy::LongestChain, - }, None)) + }, new_authorities)) } } @@ -414,7 +418,7 @@ impl<V: 'static + Verifier<Block>, D> Peer<V, D> { nonce, }; let signature = Keyring::from_raw_public(transfer.from.to_fixed_bytes()).unwrap().sign(&transfer.encode()).into(); - builder.push(Extrinsic { transfer, signature }).unwrap(); + builder.push(Extrinsic::Transfer(transfer, signature)).unwrap(); nonce = nonce + 1; builder.bake().unwrap() }); @@ -423,6 +427,13 @@ impl<V: 'static + Verifier<Block>, D> Peer<V, D> { } } + pub fn push_authorities_change_block(&self, new_authorities: Vec<Ed25519AuthorityId>) { + self.generate_blocks(1, BlockOrigin::File, |mut builder| { + builder.push(Extrinsic::AuthoritiesChange(new_authorities.clone())).unwrap(); + builder.bake().unwrap() + }); + } + /// Execute a function with specialization for this peer. pub fn with_spec<F, U>(&self, f: F) -> U where F: FnOnce(&mut DummySpecialization, &mut Context<Block>) -> U diff --git a/substrate/core/rpc/src/author/tests.rs b/substrate/core/rpc/src/author/tests.rs index 82f5d371626ad590c1bcd2c37af137cbe8d10f4a..d84fa72225d17dc4851be66b79b242b89c22f317 100644 --- a/substrate/core/rpc/src/author/tests.rs +++ b/substrate/core/rpc/src/author/tests.rs @@ -36,7 +36,7 @@ fn uxt(sender: Keyring, nonce: u64) -> Extrinsic { to: Default::default(), }; let signature = Keyring::from_raw_public(tx.from.to_fixed_bytes()).unwrap().sign(&tx.encode()).into(); - Extrinsic { transfer: tx, signature } + Extrinsic::Transfer(tx, signature) } #[test] @@ -48,7 +48,7 @@ fn submit_transaction_should_not_cause_error() { pool: Arc::new(Pool::new(Default::default(), ChainApi::new(client))), subscriptions: Subscriptions::new(runtime.executor()), }; - let h: H256 = hex!("e10ad66bce51ef3e2a1167934ce3740d2d8c703810f9b314e89f2e783f75e826").into(); + let h: H256 = hex!("81897a4890fb7554e7f77c533a865846a11583a56a8ad5e307543188d55e64f1").into(); assert_matches!( AuthorApi::submit_extrinsic(&p, uxt(Keyring::Alice, 1).encode().into()), @@ -68,7 +68,7 @@ fn submit_rich_transaction_should_not_cause_error() { pool: Arc::new(Pool::new(Default::default(), ChainApi::new(client.clone()))), subscriptions: Subscriptions::new(runtime.executor()), }; - let h: H256 = hex!("fccc48291473c53746cd267cf848449edd7711ee6511fba96919d5f9f4859e4f").into(); + let h: H256 = hex!("9ec8469b5dcfe29cc274ac1d07ad73d80be57566ace0fcdbe51ebcf4b51e925b").into(); assert_matches!( AuthorApi::submit_extrinsic(&p, uxt(Keyring::Alice, 0).encode().into()), @@ -106,7 +106,7 @@ fn should_watch_extrinsic() { to: Default::default(), }; let signature = Keyring::from_raw_public(tx.from.to_fixed_bytes()).unwrap().sign(&tx.encode()).into(); - Extrinsic { transfer: tx, signature } + Extrinsic::Transfer(tx, signature) }; AuthorApi::submit_extrinsic(&p, replacement.encode().into()).unwrap(); let (res, data) = runtime.block_on(data.into_future()).unwrap(); @@ -116,7 +116,7 @@ fn should_watch_extrinsic() { ); assert_eq!( runtime.block_on(data.into_future()).unwrap().0, - Some(r#"{"jsonrpc":"2.0","method":"test","params":{"result":{"usurped":"0xed454dcee51431679c2559403187a56567fded1fc50b6ae3aada87c1d412df5c"},"subscription":1}}"#.into()) + 
Some(r#"{"jsonrpc":"2.0","method":"test","params":{"result":{"usurped":"0x53daed816610aa6b22dedbcee43aba44a7ca7155cc71f2919c5e79ebbc7de58c"},"subscription":1}}"#.into()) ); } diff --git a/substrate/core/service/src/components.rs b/substrate/core/service/src/components.rs index 397e3d7a89dd6468fddb8f456a4ef8763893d358..dc116873b6e39afbeaf27391a4347e48aaa80c18 100644 --- a/substrate/core/service/src/components.rs +++ b/substrate/core/service/src/components.rs @@ -561,7 +561,7 @@ mod tests { to: Default::default(), }; let signature = Keyring::from_raw_public(transfer.from.to_fixed_bytes()).unwrap().sign(&transfer.encode()).into(); - Extrinsic { transfer, signature } + Extrinsic::Transfer(transfer, signature) }; // store the transaction in the pool pool.submit_one(&BlockId::hash(client.best_block_header().unwrap().hash()), transaction.clone()).unwrap(); diff --git a/substrate/core/sr-primitives/src/traits.rs b/substrate/core/sr-primitives/src/traits.rs index 67e441c792447bbf4c20ad113fae78f1a76982f3..ac15df0881a0c8c5bfb42f35b1ff64578818d83c 100644 --- a/substrate/core/sr-primitives/src/traits.rs +++ b/substrate/core/sr-primitives/src/traits.rs @@ -574,7 +574,7 @@ pub trait Digest: Member + MaybeSerializeDebugButNotDeserialize + Default { fn pop(&mut self) -> Option<Self::Item>; /// Get reference to the first digest item that matches the passed predicate. - fn log<T, F: Fn(&Self::Item) -> Option<&T>>(&self, predicate: F) -> Option<&T> { + fn log<T: ?Sized, F: Fn(&Self::Item) -> Option<&T>>(&self, predicate: F) -> Option<&T> { self.logs().iter() .filter_map(predicate) .next() diff --git a/substrate/core/test-client/src/block_builder_ext.rs b/substrate/core/test-client/src/block_builder_ext.rs index 651559114af88fb22bf692cf9709f59125f65b95..5803c5303d4e5af4b9ec2a04916289d0845d7fda 100644 --- a/substrate/core/test-client/src/block_builder_ext.rs +++ b/substrate/core/test-client/src/block_builder_ext.rs @@ -40,5 +40,5 @@ impl<'a, A> BlockBuilderExt for client::block_builder::BlockBuilder<'a, runtime: fn sign_tx(transfer: runtime::Transfer) -> runtime::Extrinsic { let signature = keyring::Keyring::from_raw_public(transfer.from.to_fixed_bytes()).unwrap().sign(&codec::Encode::encode(&transfer)).into(); - runtime::Extrinsic { transfer, signature } + runtime::Extrinsic::Transfer(transfer, signature) } diff --git a/substrate/core/test-runtime/src/lib.rs b/substrate/core/test-runtime/src/lib.rs index eb5efdf684afb4769dc49e473ea5e9a02fcbd703..3d2e9b5ca8a7d64e322ca9a5aee4598bd67e2f5a 100644 --- a/substrate/core/test-runtime/src/lib.rs +++ b/substrate/core/test-runtime/src/lib.rs @@ -104,9 +104,9 @@ pub struct Transfer { /// Extrinsic for test-runtime. 
#[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] -pub struct Extrinsic { - pub transfer: Transfer, - pub signature: Ed25519Signature, +pub enum Extrinsic { + AuthoritiesChange(Vec<Ed25519AuthorityId>), + Transfer(Transfer, Ed25519Signature), } #[cfg(feature = "std")] @@ -121,10 +121,15 @@ impl BlindCheckable for Extrinsic { type Checked = Self; fn check(self) -> Result<Self, &'static str> { - if ::runtime_primitives::verify_encoded_lazy(&self.signature, &self.transfer, &self.transfer.from) { - Ok(self) - } else { - Err("bad signature") + match self { + Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), + Extrinsic::Transfer(transfer, signature) => { + if ::runtime_primitives::verify_encoded_lazy(&signature, &transfer, &transfer.from) { + Ok(Extrinsic::Transfer(transfer, signature)) + } else { + Err("bad signature") + } + }, } } } @@ -135,6 +140,15 @@ impl ExtrinsicT for Extrinsic { } } +impl Extrinsic { + pub fn transfer(&self) -> &Transfer { + match self { + Extrinsic::Transfer(ref transfer, _) => transfer, + _ => panic!("cannot convert to transfer ref"), + } + } +} + /// An identifier for an account on this system. pub type AccountId = H256; /// A simple hash type for all our hashing. diff --git a/substrate/core/test-runtime/src/system.rs b/substrate/core/test-runtime/src/system.rs index 19739b21cb2a0030b9152dd5e09efa1056241957..d4559b1eb7fad50ca9341bc596454992510e1e65 100644 --- a/substrate/core/test-runtime/src/system.rs +++ b/substrate/core/test-runtime/src/system.rs @@ -24,7 +24,7 @@ use runtime_primitives::traits::{Hash as HashT, BlakeTwo256, Digest as DigestT}; use runtime_primitives::generic; use runtime_primitives::{ApplyError, ApplyOutcome, ApplyResult, transaction_validity::TransactionValidity}; use codec::{KeyedVec, Encode}; -use super::{AccountId, BlockNumber, Extrinsic, H256 as Hash, Block, Header, Digest}; +use super::{AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest}; use primitives::{Ed25519AuthorityId, Blake2Hasher}; use primitives::storage::well_known_keys; @@ -36,6 +36,7 @@ storage_items! { // The current block number being processed. Set by `execute_block`. Number: b"sys:num" => required BlockNumber; ParentHash: b"sys:pha" => required Hash; + NewAuthorities: b"sys:new_auth" => Vec<Ed25519AuthorityId>; } pub fn balance_of_key(who: AccountId) -> Vec<u8> { @@ -96,17 +97,20 @@ pub fn execute_block(block: Block) { if let Some(storage_changes_root) = storage_changes_root(header.parent_hash.into(), header.number - 1) { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root.into())); } + if let Some(new_authorities) = <NewAuthorities>::take() { + digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); + } assert!(digest == header.digest, "Header digest items must match that calculated."); } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { - let tx = match check_signature(&utx) { - Ok(tx) => tx, - Err(_) => return TransactionValidity::Invalid, - }; + if check_signature(&utx).is_err() { + return TransactionValidity::Invalid; + } + let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::get_or(&nonce_key, 0); if tx.nonce < expected_nonce { @@ -166,6 +170,9 @@ pub fn finalise_block() -> Header { if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } + if let Some(new_authorities) = <NewAuthorities>::take() { + digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); + } Header { number, @@ -177,21 +184,21 @@ pub fn finalise_block() -> Header { } #[inline(always)] -fn check_signature(utx: &Extrinsic) -> Result<::Transfer, ApplyError> { +fn check_signature(utx: &Extrinsic) -> Result<(), ApplyError> { use runtime_primitives::traits::BlindCheckable; - - let utx = match utx.clone().check() { - Ok(tx) => tx, - Err(_) => return Err(ApplyError::BadSignature), - }; - - Ok(utx.transfer) + utx.clone().check().map_err(|_| ApplyError::BadSignature)?; + Ok(()) } fn execute_transaction_backend(utx: &Extrinsic) -> ApplyResult { - // check signature - let tx = check_signature(utx)?; + check_signature(utx)?; + match utx { + Extrinsic::Transfer(ref transfer, _) => execute_transfer_backend(transfer), + Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), + } +} +fn execute_transfer_backend(tx: &Transfer) -> ApplyResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::get_or(&nonce_key, 0); @@ -217,6 +224,12 @@ fn execute_transaction_backend(utx: &Extrinsic) -> ApplyResult { Ok(ApplyOutcome::Success) } +fn execute_new_authorities_backend(new_authorities: &[Ed25519AuthorityId]) -> ApplyResult { + let new_authorities: Vec<Ed25519AuthorityId> = new_authorities.iter().cloned().collect(); + <NewAuthorities>::put(new_authorities); + Ok(ApplyOutcome::Success) +} + #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use primitives::hexdisplay::HexDisplay; @@ -266,7 +279,7 @@ mod tests { fn construct_signed_tx(tx: Transfer) -> Extrinsic { let signature = Keyring::from_raw_public(tx.from.to_fixed_bytes()).unwrap().sign(&tx.encode()).into(); - Extrinsic { transfer: tx, signature } + Extrinsic::Transfer(tx, signature) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities<Blake2Hasher>) { @@ -318,7 +331,7 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root: hex!("c3d2cc317b5897af4c7f65d76b028971ce9fad745678732ff6d42301b4245a9c").into(), - extrinsics_root: hex!("4e689a607609f69df099af82577ae6c5969c44f1afe33a43cd7af926eba42272").into(), + extrinsics_root: hex!("198205cb7729fec8ccdc2e58571a4858586a4f305898078e0e8bee1dddea7e4b").into(), digest: Digest { logs: vec![], }, }, extrinsics: vec![ @@ -343,7 +356,7 @@ mod tests { parent_hash: b.header.hash(), number: 2, state_root: hex!("2c822d948bb68d7f7a1976d4f827a276a95a3ba1c4c15dbfab3bafbeb85f2b4d").into(), - extrinsics_root: hex!("009268a854b21f339c53d3c7a6619a27f564703311d91f11f61573a7fed5ca1c").into(), + extrinsics_root: hex!("041fa8971dda28745967179a9f39e3ca1a595c510682105df1cff74ae6f05e0d").into(), digest: Digest { logs: vec![], }, }, extrinsics: vec![ diff --git 
a/substrate/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm b/substrate/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm index 451cac0489f4632dad587a0e8fb8ecc07753ba98..3b32eaae5b4fceef8dc3368b0ffa18a0fd59d501 100644 Binary files a/substrate/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm and b/substrate/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm differ diff --git a/substrate/core/transaction-pool/graph/src/pool.rs b/substrate/core/transaction-pool/graph/src/pool.rs index a16db8206ef3f5a5c8360ce6214be7b8996fc90f..e96d320046d3735e94b60cde723e7f0dc4ee8ad4 100644 --- a/substrate/core/transaction-pool/graph/src/pool.rs +++ b/substrate/core/transaction-pool/graph/src/pool.rs @@ -327,7 +327,7 @@ mod tests { /// Verify extrinsic at given block. fn validate_transaction(&self, at: &BlockId<Self::Block>, uxt: &ExtrinsicFor<Self>) -> Result<TransactionValidity, Self::Error> { let block_number = self.block_id_to_number(at)?.unwrap(); - let nonce = uxt.transfer.nonce; + let nonce = uxt.transfer().nonce; if nonce < block_number { Ok(TransactionValidity::Invalid) @@ -359,15 +359,12 @@ mod tests { /// Hash the extrinsic. fn hash(&self, uxt: &ExtrinsicFor<Self>) -> Self::Hash { - (uxt.transfer.from.to_low_u64_be() << 5) + uxt.transfer.nonce + (uxt.transfer().from.to_low_u64_be() << 5) + uxt.transfer().nonce } } fn uxt(transfer: Transfer) -> Extrinsic { - Extrinsic { - transfer, - signature: Default::default(), - } + Extrinsic::Transfer(transfer, Default::default()) } fn pool() -> Pool<TestApi> { diff --git a/substrate/core/transaction-pool/src/tests.rs b/substrate/core/transaction-pool/src/tests.rs index e02bf054014b427ee7008b19c88f07d34e2fb7b3..c79ca5605be77a4700a1b7b6f8ef0e77f4bcb887 100644 --- a/substrate/core/transaction-pool/src/tests.rs +++ b/substrate/core/transaction-pool/src/tests.rs @@ -42,12 +42,12 @@ impl txpool::ChainApi for TestApi { fn validate_transaction(&self, at: &BlockId<Self::Block>, uxt: &txpool::ExtrinsicFor<Self>) -> error::Result<TransactionValidity> { let expected = index(at); - let requires = if expected == uxt.transfer.nonce { + let requires = if expected == uxt.transfer().nonce { vec![] } else { - vec![vec![uxt.transfer.nonce as u8 - 1]] + vec![vec![uxt.transfer().nonce as u8 - 1]] }; - let provides = vec![vec![uxt.transfer.nonce as u8]]; + let provides = vec![vec![uxt.transfer().nonce as u8]]; Ok(TransactionValidity::Valid { priority: 1, @@ -93,10 +93,7 @@ fn uxt(who: Keyring, nonce: Index) -> Extrinsic { amount: 1, }; let signature = transfer.using_encoded(|e| who.sign(e)); - Extrinsic { - transfer, - signature: signature.into(), - } + Extrinsic::Transfer(transfer, signature.into()) } fn pool() -> Pool<TestApi> { @@ -109,7 +106,7 @@ fn submission_should_work() { assert_eq!(209, index(&BlockId::number(0))); pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209]); } @@ -119,7 +116,7 @@ fn multiple_submission_should_work() { pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| 
a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209, 210]); } @@ -128,7 +125,7 @@ fn early_nonce_should_be_culled() { let pool = pool(); pool.submit_one(&BlockId::number(0), uxt(Alice, 208)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, Vec::<Index>::new()); } @@ -137,11 +134,11 @@ fn late_nonce_should_be_queued() { let pool = pool(); pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, Vec::<Index>::new()); pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209, 210]); } @@ -151,12 +148,12 @@ fn prune_tags_should_work() { pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209, 210]); pool.prune_tags(&BlockId::number(1), vec![vec![209]]).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![210]); } @@ -169,7 +166,7 @@ fn should_ban_invalid_transactions() { pool.submit_one(&BlockId::number(0), uxt.clone()).unwrap_err(); // when - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, Vec::<Index>::new()); // then diff --git a/substrate/node/cli/src/service.rs b/substrate/node/cli/src/service.rs index 89dbd02319d008cc3560271984201bc46d10fa09..38f34aedf5a0dd3d86a7f0160cd3952ba6258e28 100644 --- a/substrate/node/cli/src/service.rs +++ b/substrate/node/cli/src/service.rs @@ -100,6 +100,7 @@ construct_service_factory! { grandpa::Config { local_key, gossip_duration: Duration::new(4, 0), // FIXME: make this available through chainspec? 
+ justification_period: 4096, name: Some(service.config.name.clone()) }, link_half, diff --git a/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm b/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm index 4319fbc77cfdf1650ac987dbf7b073a9b58dce47..f0e4c77f77998fbfadaaeca112c6b3c6f6e871b7 100644 Binary files a/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm and b/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm differ diff --git a/substrate/srml/consensus/src/lib.rs b/substrate/srml/consensus/src/lib.rs index 2a55ac9e1968f728835d4a343f20d5ae8993bbc0..e8ef08e4b9ccbf2d79c9fa68f7bc1e3c471c854e 100644 --- a/substrate/srml/consensus/src/lib.rs +++ b/substrate/srml/consensus/src/lib.rs @@ -50,6 +50,9 @@ use primitives::traits::{ use substrate_primitives::storage::well_known_keys; use system::{ensure_signed, ensure_inherent}; +#[cfg(any(feature = "std", test))] +use substrate_primitives::Ed25519AuthorityId; + mod mock; mod tests; @@ -143,7 +146,7 @@ impl<SessionKey: Member> RawLog<SessionKey> { // Implementation for tests outside of this crate. #[cfg(any(feature = "std", test))] -impl<N> From<RawLog<N>> for primitives::testing::DigestItem where N: Into<substrate_primitives::Ed25519AuthorityId> { +impl<N> From<RawLog<N>> for primitives::testing::DigestItem where N: Into<Ed25519AuthorityId> { fn from(log: RawLog<N>) -> primitives::testing::DigestItem { match log { RawLog::AuthoritiesChange(authorities) =>