......@@ -341,7 +341,7 @@ impl<'a> TransactionEval<'a> {
};
let signature_version = match params.fork {
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => SignatureVersion::ForkId,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => SignatureVersion::Base,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => SignatureVersion::Base,
};
let verify_checksequence = deployments.csv();
......@@ -557,6 +557,7 @@ mod tests {
.into_bytes(),
}],
lock_time: 0xffffffff,
joint_split: None,
}.into();
assert_eq!(transaction.raw.outputs[0].script_pubkey.len(), 46 + 2);
......
......@@ -38,7 +38,7 @@ impl BackwardsCompatibleChainVerifier {
let current_time = ::time::get_time().sec as u32;
// first run pre-verification
let chain_verifier = ChainVerifier::new(block, self.consensus.network, current_time);
let chain_verifier = ChainVerifier::new(block, &self.consensus, current_time);
chain_verifier.check()?;
assert_eq!(Some(self.store.best_block().hash), self.store.block_hash(self.store.best_block().number));
......@@ -95,7 +95,7 @@ impl BackwardsCompatibleChainVerifier {
// TODO: full verification
let current_time = ::time::get_time().sec as u32;
let header = IndexedBlockHeader::new(hash.clone(), header.clone());
let header_verifier = HeaderVerifier::new(&header, self.consensus.network, current_time);
let header_verifier = HeaderVerifier::new(&header, &self.consensus, current_time);
header_verifier.check()
}
......
......@@ -293,7 +293,9 @@ mod tests {
merkle_root_hash: Default::default(),
time: time,
bits: 0.into(),
nonce: height,
nonce: height.into(),
hash_final_sapling_root: None,
equihash_solution: None,
};
previous_header_hash = header.hash();
......
// https://github.com/zcash/zcash/commit/fdda3c5085199d2c2170887aa064fc42afdb0360
use blake2_rfc::blake2b::Blake2b;
use byteorder::{BigEndian, LittleEndian, ByteOrder, WriteBytesExt};
use chain::BlockHeader;
#[allow(non_snake_case)]
pub struct EquihashParams {
pub N: u32,
pub K: u32,
}
impl EquihashParams {
pub fn indices_per_hash_output(&self) -> usize {
(512 / self.N) as usize
}
pub fn hash_output(&self) -> usize {
(self.indices_per_hash_output() * self.N as usize / 8usize) as usize
}
pub fn collision_bit_length(&self) -> usize {
(self.N / (self.K + 1)) as usize
}
pub fn collision_byte_length(&self) -> usize {
(self.collision_bit_length() + 7) / 8
}
pub fn final_full_width(&self) -> usize {
2 * self.collision_byte_length() + 4 * (1 << self.K)
}
pub fn solution_size(&self) -> usize {
((1usize << self.K) * (self.collision_bit_length() + 1) / 8) as usize
}
pub fn hash_length(&self) -> usize {
(self.K as usize + 1) * self.collision_byte_length()
}
}
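// For the ZCash mainnet parameters (N, K) = (200, 9) the formulas above give:
// indices_per_hash_output = 512 / 200 = 2, hash_output = 50 bytes,
// collision_bit_length = 20, collision_byte_length = 3, hash_length = 30 and
// solution_size = 512 * 21 / 8 = 1344 bytes -- the familiar length of the
// solution field in a ZCash block header.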
pub fn verify_block_equihash_solution(params: &EquihashParams, header: &BlockHeader) -> bool {
let equihash_solution = match header.equihash_solution.as_ref() {
Some(equihash_solution) => equihash_solution,
None => return false,
};
let input = header.equihash_input();
verify_equihash_solution(params, &input, &equihash_solution.0)
}
pub fn verify_equihash_solution(params: &EquihashParams, input: &[u8], solution: &[u8]) -> bool {
if solution.len() != params.solution_size() {
return false;
}
let mut context = new_blake2(params);
context.update(input);
// pure equihash
let collision_bit_length = params.collision_bit_length();
let indices = get_indices_from_minimal(solution, collision_bit_length);
let mut rows = Vec::new();
for idx in indices {
let hash = generate_hash(&context, (idx as usize / params.indices_per_hash_output()) as u32);
let hash_begin = (idx as usize % params.indices_per_hash_output()) * params.N as usize / 8;
let hash_end = hash_begin + params.N as usize / 8;
let mut row = vec![0; params.final_full_width()];
let expanded_hash = expand_array(
&hash[hash_begin..hash_end],
params.collision_bit_length(),
0);
row[0..expanded_hash.len()].clone_from_slice(&expanded_hash);
row[params.hash_length()..params.hash_length() + 4].clone_from_slice(&to_big_endian(idx));
rows.push(row);
}
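// Collapse the 2^K rows pairwise, K times: each step XORs the remaining hash
// bytes of a colliding pair, requires the pair to agree on the next
// collision_byte_length bytes, keeps the index lists ordered and distinct,
// and concatenates them. A valid solution ends with a single row whose
// remaining hash bytes are all zero.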
let mut hash_len = params.hash_length();
let mut indices_len = 4;
while rows.len() > 1 {
let mut rows_check = Vec::new();
for i in 0..rows.len() / 2 {
let row1 = &rows[i * 2];
let row2 = &rows[i * 2 + 1];
if !has_collision(row1, row2, params.collision_byte_length()) {
return false;
}
if indices_before(row2, row1, hash_len, indices_len) {
return false;
}
if !distinct_indices(row1, row2, hash_len, indices_len) {
return false;
}
rows_check.push(merge_rows(row1, row2, hash_len, indices_len, params.collision_byte_length()));
}
rows = rows_check;
hash_len -= params.collision_byte_length();
indices_len *= 2;
}
rows[0].iter().take(hash_len).all(|x| *x == 0)
}
fn merge_rows(row1: &[u8], row2: &[u8], len: usize, indices_len: usize, trim: usize) -> Vec<u8> {
let mut row = row1.to_vec();
for i in trim..len {
row[i - trim] = row1[i] ^ row2[i];
}
if indices_before(row1, row2, len, indices_len) {
row[len - trim..len - trim + indices_len]
.clone_from_slice(&row1[len..len + indices_len]);
row[len - trim + indices_len..len - trim + indices_len + indices_len]
.clone_from_slice(&row2[len..len + indices_len]);
} else {
row[len - trim..len - trim + indices_len]
.clone_from_slice(&row2[len..len + indices_len]);
row[len - trim + indices_len..len - trim + indices_len + indices_len]
.clone_from_slice(&row1[len..len + indices_len]);
}
row
}
fn distinct_indices(row1: &[u8], row2: &[u8], len: usize, indices_len: usize) -> bool {
// compare every index of row1 against every index of row2; `j` must restart
// for each `i`, otherwise only the first index of row1 is ever checked
let mut i = 0;
while i < indices_len {
let mut j = 0;
while j < indices_len {
if row1[len + i..len + i + 4] == row2[len + j..len + j + 4] {
return false;
}
j += 4;
}
i += 4;
}
true
}
fn has_collision(row1: &[u8], row2: &[u8], collision_byte_length: usize) -> bool {
for i in 0..collision_byte_length {
if row1[i] != row2[i] {
return false;
}
}
true
}
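// Byte-wise (big-endian) lexicographic comparison of the index lists stored
// right after the hash bytes; used to enforce the canonical ordering of
// sibling subtrees in a minimal solution.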
fn indices_before(row1: &[u8], row2: &[u8], len: usize, indices_len: usize) -> bool {
for i in 0..indices_len {
if row1[len + i] < row2[len + i] {
return true;
} else if row1[len + i] > row2[len + i] {
return false;
}
}
false
}
fn generate_hash(context: &Blake2b, g: u32) -> Vec<u8> {
let mut context = context.clone();
context.update(&to_little_endian(g));
context.finalize().as_bytes().to_vec()
}
fn get_indices_from_minimal(solution: &[u8], collision_bit_length: usize) -> Vec<u32> {
let indices_len = 8 * 4 * solution.len() / (collision_bit_length + 1);
let byte_pad = 4 - ((collision_bit_length + 1 + 7) / 8);
let array = expand_array(solution, collision_bit_length + 1, byte_pad);
let mut ret = Vec::new();
for i in 0..indices_len / 4 {
ret.push(array_to_eh_index(&array[i*4..i*4 + 4]));
}
ret
}
fn get_minimal_from_indices(indices: &[u32], collision_bit_length: usize) -> Vec<u8> {
let indices_len = indices.len() * 4;
let min_len = (collision_bit_length + 1) * indices_len / (8 * 4);
let byte_pad = 4 - ((collision_bit_length + 1) + 7) / 8;
let mut array = Vec::new();
for i in 0..indices.len() {
let mut be_index = Vec::new();
be_index.write_u32::<BigEndian>(indices[i]).unwrap();
array.extend(be_index);
}
let mut ret = vec![0u8; min_len];
compress_array(&array, &mut ret, collision_bit_length + 1, byte_pad);
ret
}
fn array_to_eh_index(data: &[u8]) -> u32 {
BigEndian::read_u32(data)
}
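// Expands a packed bit stream into byte-padded groups: every bit_len input
// bits become byte_pad zero bytes followed by ceil(bit_len / 8) data bytes,
// big-endian. E.g. for the minimal solution encoding with bit_len = 21 and
// byte_pad = 1, each 21-bit index expands to a 4-byte big-endian word.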
fn expand_array(data: &[u8], bit_len: usize, byte_pad: usize) -> Vec<u8> {
let mut array = Vec::new();
let out_width = (bit_len + 7) / 8 + byte_pad;
let bit_len_mask = (1u32 << bit_len) - 1;
// The acc_bits least-significant bits of acc_value represent a bit sequence
// in big-endian order.
let mut acc_bits = 0usize;
let mut acc_value = 0u32;
for i in 0usize..data.len() {
acc_value = (acc_value << 8) | (data[i] as u32);
acc_bits += 8;
// When we have bit_len or more bits in the accumulator, write the next
// output element.
if acc_bits >= bit_len {
acc_bits -= bit_len;
for _ in 0..byte_pad {
array.push(0);
}
for x in byte_pad..out_width {
array.push((
// Big-endian
(acc_value >> (acc_bits + (8 * (out_width - x - 1)))) as u8
) & (
// Apply bit_len_mask across byte boundaries
((bit_len_mask >> (8 * (out_width - x - 1))) & 0xFF) as u8
));
}
}
}
array
}
fn compress_array(data: &[u8], array: &mut Vec<u8>, bit_len: usize, byte_pad: usize) {
let in_width = (bit_len + 7) / 8 + byte_pad;
let bit_len_mask = (1u32 << bit_len) - 1;
// The acc_bits least-significant bits of acc_value represent a bit sequence
// in big-endian order.
let mut acc_bits = 0usize;
let mut acc_value = 0u32;
let mut j = 0usize;
for i in 0usize..array.len() {
// When we have fewer than 8 bits left in the accumulator, read the next
// input element.
if acc_bits < 8 {
acc_value = acc_value << bit_len;
for x in byte_pad..in_width {
acc_value = acc_value | ((
data[j + x] & (((bit_len_mask >> (8 * (in_width - x - 1))) & 0xFF) as u8)
) as u32) << (8 * (in_width - x - 1));
}
j += in_width;
acc_bits += bit_len;
}
acc_bits -= 8;
array[i] = ((acc_value >> acc_bits) & 0xFF) as u8;
}
}
fn new_blake2(params: &EquihashParams) -> Blake2b {
let mut personalization = [0u8; 16];
personalization[0..8].clone_from_slice(b"ZcashPoW");
personalization[8..12].clone_from_slice(&to_little_endian(params.N));
personalization[12..16].clone_from_slice(&to_little_endian(params.K));
Blake2b::with_params(params.hash_output(), &[], &[], &personalization)
}
fn to_little_endian(num: u32) -> [u8; 4] {
let mut le_num = [0u8; 4];
LittleEndian::write_u32(&mut le_num[..], num);
le_num
}
fn to_big_endian(num: u32) -> [u8; 4] {
let mut be_num = [0u8; 4];
BigEndian::write_u32(&mut be_num[..], num);
be_num
}
#[cfg(test)]
mod tests {
use primitives::bigint::{Uint, U256};
use super::*;
fn test_equihash_verifier(n: u32, k: u32, input: &[u8], nonce: U256, solution: &[u32]) -> bool {
let solution = get_minimal_from_indices(solution, (n / (k + 1)) as usize);
let mut le_nonce = vec![0; 32];
nonce.to_little_endian(&mut le_nonce);
let mut input = input.to_vec();
input.extend(le_nonce);
let params = EquihashParams { N: n, K: k };
verify_equihash_solution(&params, &input, &solution)
}
#[test]
fn verify_equihash_solution_works() {
assert!(test_equihash_verifier(
96, 5, b"Equihash is an asymmetric PoW based on the Generalised Birthday problem.",
U256::one(), &vec![
2261, 15185, 36112, 104243, 23779, 118390, 118332, 130041, 32642, 69878, 76925, 80080, 45858, 116805, 92842, 111026, 15972, 115059, 85191, 90330, 68190, 122819, 81830, 91132, 23460, 49807, 52426, 80391, 69567, 114474, 104973, 122568,
],
));
}
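// An extra round-trip sketch (not part of the original change): converting
// indices to the minimal wire form and back is lossless whenever
// indices.len() * (collision_bit_length + 1) is a multiple of 8, as it is
// for real solutions (2^K indices of collision_bit_length + 1 bits each).
#[test]
fn minimal_indices_roundtrip_works() {
let indices: Vec<u32> = (0u32..8).map(|i| i * 1000 + 7).collect();
let minimal = get_minimal_from_indices(&indices, 20);
assert_eq!(minimal.len(), 21); // 8 indices * 21 bits = 168 bits
assert_eq!(get_indices_from_minimal(&minimal, 20), indices);
}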
}
......@@ -61,6 +61,7 @@ pub enum Error {
NonCanonicalTransactionOrdering,
/// Database error
Database(DBError),
/// Invalid equihash solution
InvalidEquihashSolution,
}
impl From<DBError> for Error {
......
......@@ -58,6 +58,10 @@ extern crate lazy_static;
extern crate log;
extern crate parking_lot;
extern crate rayon;
extern crate blake2_rfc;
extern crate byteorder;
#[cfg(test)]
extern crate rand;
extern crate storage;
extern crate chain;
......@@ -73,11 +77,13 @@ pub mod constants;
mod canon;
mod deployments;
mod duplex_store;
mod equihash;
mod error;
mod sigops;
mod timestamp;
mod work;
mod work_bch;
mod work_zcash;
// pre-verification
mod verify_block;
......
......@@ -23,5 +23,6 @@ pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeade
}
timestamps.sort();
timestamps[timestamps.len() / 2]
}
......
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use chain::IndexedBlock;
use network::Network;
use network::ConsensusParams;
use error::Error;
use verify_block::BlockVerifier;
use verify_header::HeaderVerifier;
......@@ -13,11 +13,11 @@ pub struct ChainVerifier<'a> {
}
impl<'a> ChainVerifier<'a> {
pub fn new(block: &'a IndexedBlock, network: Network, current_time: u32) -> Self {
pub fn new(block: &'a IndexedBlock, consensus: &ConsensusParams, current_time: u32) -> Self {
trace!(target: "verification", "Block pre-verification {}", block.hash().to_reversed_str());
ChainVerifier {
block: BlockVerifier::new(block),
header: HeaderVerifier::new(&block.header, network, current_time),
header: HeaderVerifier::new(&block.header, consensus, current_time),
transactions: block.transactions.iter().map(TransactionVerifier::new).collect(),
}
}
......
use primitives::compact::Compact;
use chain::IndexedBlockHeader;
use network::Network;
use network::ConsensusParams;
use work::is_valid_proof_of_work;
use error::Error;
use constants::BLOCK_MAX_FUTURE;
......@@ -11,9 +11,9 @@ pub struct HeaderVerifier<'a> {
}
impl<'a> HeaderVerifier<'a> {
pub fn new(header: &'a IndexedBlockHeader, network: Network, current_time: u32) -> Self {
pub fn new(header: &'a IndexedBlockHeader, consensus: &ConsensusParams, current_time: u32) -> Self {
HeaderVerifier {
proof_of_work: HeaderProofOfWork::new(header, network),
proof_of_work: HeaderProofOfWork::new(header, consensus),
timestamp: HeaderTimestamp::new(header, current_time, BLOCK_MAX_FUTURE as u32),
}
}
......@@ -31,10 +31,10 @@ pub struct HeaderProofOfWork<'a> {
}
impl<'a> HeaderProofOfWork<'a> {
fn new(header: &'a IndexedBlockHeader, network: Network) -> Self {
fn new(header: &'a IndexedBlockHeader, consensus: &ConsensusParams) -> Self {
HeaderProofOfWork {
header: header,
max_work_bits: network.max_bits().into(),
max_work_bits: consensus.network.max_bits(&consensus.fork).into(),
}
}
......
......@@ -6,6 +6,7 @@ use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, ConsensusFork};
use storage::{BlockHeaderProvider, BlockRef};
use work_bch::work_required_bitcoin_cash;
use work_zcash::work_required_zcash;
use constants::{
DOUBLE_SPACING_SECONDS, TARGET_TIMESPAN_SECONDS,
......@@ -58,7 +59,7 @@ pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
/// Returns work required for given header
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
let max_bits = consensus.network.max_bits().into();
let max_bits = consensus.network.max_bits(&consensus.fork).into();
if height == 0 {
return max_bits;
}
......@@ -66,6 +67,11 @@ pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHea
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
match consensus.fork {
ConsensusFork::ZCash(ref fork) =>
return work_required_zcash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, store, fork, max_bits),
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height =>
return work_required_bitcoin_cash(IndexedBlockHeader {
hash: parent_hash,
......@@ -79,13 +85,13 @@ pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHea
}
if consensus.network == Network::Testnet {
return work_required_testnet(parent_hash, time, height, store, Network::Testnet)
return work_required_testnet(parent_hash, time, height, store, consensus)
}
parent_header.bits
}
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Network) -> Compact {
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
assert!(height != 0, "cannot calculate required work for genesis block");
let mut bits = Vec::new();
......@@ -93,7 +99,7 @@ pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &
let parent_header = store.block_header(block_ref.clone()).expect("height != 0; qed");
let max_time_gap = parent_header.time + DOUBLE_SPACING_SECONDS;
let max_bits = network.max_bits().into();
let max_bits = consensus.network.max_bits(&consensus.fork).into();
if time > max_time_gap {
return max_bits;
}
......@@ -152,7 +158,7 @@ pub fn block_reward_satoshi(block_height: u32) -> u64 {
mod tests {
use primitives::hash::H256;
use primitives::compact::Compact;
use network::Network;
use network::{Network, ConsensusFork};
use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work, block_reward_satoshi};
fn is_valid_pow(max: Compact, bits: u32, hash: &'static str) -> bool {
......@@ -163,14 +169,14 @@ mod tests {
#[test]
fn test_is_valid_proof_of_work() {
// block 2
assert!(is_valid_pow(Network::Mainnet.max_bits().into(), 486604799u32, "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"));
assert!(is_valid_pow(Network::Mainnet.max_bits(&ConsensusFork::BitcoinCore).into(), 486604799u32, "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"));
// block 400_000
assert!(is_valid_pow(Network::Mainnet.max_bits().into(), 403093919u32, "000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f"));
assert!(is_valid_pow(Network::Mainnet.max_bits(&ConsensusFork::BitcoinCore).into(), 403093919u32, "000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f"));
// other random tests
assert!(is_valid_pow(Network::Regtest.max_bits().into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000000"));
assert!(!is_valid_pow(Network::Regtest.max_bits().into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000001"));
assert!(!is_valid_pow(Network::Regtest.max_bits().into(), 0x181bc330u32, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
assert!(is_valid_pow(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore).into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000000"));
assert!(!is_valid_pow(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore).into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000001"));
assert!(!is_valid_pow(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore).into(), 0x181bc330u32, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
}
#[test]
......
......@@ -24,7 +24,7 @@ pub fn work_required_bitcoin_cash(parent_header: IndexedBlockHeader, time: u32,
}
if consensus.network == Network::Testnet {
return work_required_testnet(parent_header.hash, time, height, store, Network::Testnet)
return work_required_testnet(parent_header.hash, time, height, store, consensus)
}
if parent_header.raw.bits == max_bits {
......@@ -140,7 +140,7 @@ fn work_required_bitcoin_cash_adjusted(parent_header: IndexedBlockHeader, time:
// Special difficulty rule for testnet:
// If the new block's timestamp is more than 2 * 10 minutes then allow
// mining of a min-difficulty block.
let max_bits = consensus.network.max_bits();
let max_bits = consensus.network.max_bits(&consensus.fork);
if consensus.network == Network::Testnet || consensus.network == Network::Unitest {
let max_time_gap = parent_header.raw.time + DOUBLE_SPACING_SECONDS;
if time > max_time_gap {
......@@ -163,7 +163,7 @@ fn work_required_bitcoin_cash_adjusted(parent_header: IndexedBlockHeader, time:
// Compute the target based on time and work done during the interval.
let next_target = compute_target(first_header, last_header, store);
let max_bits = consensus.network.max_bits();
let max_bits = consensus.network.max_bits(&consensus.fork);
if next_target > max_bits {
return max_bits.into();
}
......@@ -172,7 +172,7 @@ fn work_required_bitcoin_cash_adjusted(parent_header: IndexedBlockHeader, time:
}
#[cfg(test)]
mod tests {
pub mod tests {
use std::collections::HashMap;
use primitives::bytes::Bytes;
use primitives::hash::H256;
......@@ -184,16 +184,27 @@ mod tests {
use super::work_required_bitcoin_cash_adjusted;
#[derive(Default)]
struct MemoryBlockHeaderProvider {
pub struct MemoryBlockHeaderProvider {
pub by_height: Vec<BlockHeader>,
pub by_hash: HashMap<H256, usize>,
}
impl MemoryBlockHeaderProvider {
pub fn last(&self) -> &BlockHeader {
self.by_height.last().unwrap()
}
pub fn insert(&mut self, header: BlockHeader) {
self.by_hash.insert(header.hash(), self.by_height.len());
self.by_height.push(header);
}
pub fn replace_last(&mut self, header: BlockHeader) {
let idx = self.by_height.len() - 1;
self.by_hash.remove(&self.by_height[idx].hash());
self.by_hash.insert(header.hash(), idx);
self.by_height[idx] = header;
}
}
impl BlockHeaderProvider for MemoryBlockHeaderProvider {
......@@ -227,7 +238,9 @@ mod tests {
merkle_root_hash: 0.into(),
time: 1269211443,
bits: 0x207fffff.into(),
nonce: 0,
nonce: 0.into(),
hash_final_sapling_root: None,
equihash_solution: None,
});
// create x100 pre-HF blocks
......@@ -276,7 +289,7 @@ mod tests {
}));
let limit_bits = uahf_consensus.network.max_bits();
let limit_bits = uahf_consensus.network.max_bits(&ConsensusFork::BitcoinCore);
let initial_bits = limit_bits >> 4;
let mut header_provider = MemoryBlockHeaderProvider::default();
......@@ -287,7 +300,9 @@ mod tests {
merkle_root_hash: 0.into(),
time: 1269211443,
bits: initial_bits.into(),
nonce: 0,
nonce: 0.into(),
hash_final_sapling_root: None,
equihash_solution: None,
});
// Pile up some blocks every 10 mins to establish some history.
......
use primitives::compact::Compact;
use primitives::bigint::{Uint, U256};
use chain::IndexedBlockHeader;
use network::ZCashConsensusParams;
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
/// Returns work required for given header for the ZCash block
pub fn work_required_zcash(parent_header: IndexedBlockHeader, store: &BlockHeaderProvider, fork: &ZCashConsensusParams, max_bits: Compact) -> Compact {
// Find the first block in the averaging interval
let parent_hash = parent_header.hash.clone();
let mut oldest_hash = parent_header.raw.previous_header_hash;
let mut bits_total: U256 = parent_header.raw.bits.into();
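// Walk pow_averaging_window headers back from the parent (inclusive),
// accumulating their expanded targets; if the chain is shorter than the
// window, difficulty stays at the minimum (max_bits).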
for _ in 1..fork.pow_averaging_window {
let previous_header = match store.block_header(oldest_hash.into()) {
Some(previous_header) => previous_header,
None => return max_bits,
};
bits_total = bits_total + previous_header.bits.into();
oldest_hash = previous_header.previous_header_hash;
}
let bits_avg = bits_total / fork.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_hash, store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, fork, max_bits)
}
fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, fork: &ZCashConsensusParams, max_bits: Compact) -> Compact {
// Limit adjustment step
// Use medians to prevent time-warp attacks
let actual_timespan = parent_mtp - oldest_mtp;
let mut actual_timespan = fork.averaging_window_timespan() as i64 +
(actual_timespan as i64 - fork.averaging_window_timespan() as i64) / 4;
if actual_timespan < fork.min_actual_timespan() as i64 {
actual_timespan = fork.min_actual_timespan() as i64;
}
if actual_timespan > fork.max_actual_timespan() as i64 {
actual_timespan = fork.max_actual_timespan() as i64;
}
// Retarget
let actual_timespan = actual_timespan as u32;
let mut bits_new = bits_avg / fork.averaging_window_timespan().into();
bits_new = bits_new * actual_timespan.into();
if bits_new > max_bits.into() {
return max_bits;
}
bits_new.into()
}
#[cfg(test)]
mod tests {
use primitives::compact::Compact;
use primitives::bigint::U256;
use network::{Network, ZCashConsensusParams, ConsensusFork};
use chain::BlockHeader;
use timestamp::median_timestamp_inclusive;
use work_bch::tests::MemoryBlockHeaderProvider;
use super::{work_required_zcash, calculate_work_required};
// original test link:
// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
#[test]
fn zcash_work_required_works() {
let fork = ZCashConsensusParams::new(Network::Mainnet);
let max_bits = Network::Mainnet.max_bits(&ConsensusFork::ZCash(fork.clone()));
let last_block = 2 * fork.pow_averaging_window;
let first_block = last_block - fork.pow_averaging_window;
// insert genesis block
let mut header_provider = MemoryBlockHeaderProvider::default();
header_provider.insert(BlockHeader {
time: 1269211443,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
nonce: 0.into(),
hash_final_sapling_root: None,
equihash_solution: None,
});
// Start with blocks evenly-spaced and equal difficulty
for i in 1..last_block+1 {
let header = BlockHeader {
time: header_provider.last().time + fork.pow_target_spacing,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: header_provider.by_height[i as usize - 1].hash(),
merkle_root_hash: 0.into(),
nonce: 0.into(),
hash_final_sapling_root: None,
equihash_solution: None,
};
header_provider.insert(header);
}
// Result should be the same as if the last difficulty was used
let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
assert_eq!(actual, expected);
// Result should be unchanged, modulo integer division precision loss
let mut bits_expected: U256 = Compact::new(0x1e7fffff).into();
bits_expected = bits_expected / fork.averaging_window_timespan().into();
bits_expected = bits_expected * fork.averaging_window_timespan().into();
assert_eq!(work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into()),
bits_expected.into());
// Randomise the final block time (plus 1 to ensure it is always different)
use rand::{thread_rng, Rng};
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.time += thread_rng().gen_range(1, fork.pow_target_spacing / 2);
header_provider.replace_last(last_header);
// Result should be the same as if the last difficulty was used
let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
assert_eq!(actual, expected);
// Result should no longer match the original difficulty
let bits_expected = Compact::new(0x1e7fffff);
assert!(work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into()) != bits_expected);
// Change the final block difficulty
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.bits = Compact::new(0x1e0fffff);
header_provider.replace_last(last_header);
// Result should not be the same as if the last difficulty was used
let bits_avg = header_provider.by_height[last_block as usize].bits;
let expected = calculate_work_required(bits_avg.into(),
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
assert!(actual != expected);
// Result should be the same as if the average difficulty was used
let bits_avg = "0000796968696969696969696969696969696969696969696969696969696969".parse().unwrap();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
assert_eq!(actual, expected);
}
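// An illustrative sketch (not part of the original change) of the timespan
// damping and clamping performed by `calculate_work_required`, assuming the
// ZCash mainnet constants: a 17-block averaging window, 150s target spacing
// and 16% / 32% maximum adjustment up / down.
#[test]
fn zcash_timespan_damping_works() {
let averaging_window_timespan: i64 = 17 * 150; // 2550s
let actual: i64 = 4000; // observed median-time-past delta over the window
let damped = averaging_window_timespan + (actual - averaging_window_timespan) / 4;
assert_eq!(damped, 2912); // 2550 + 1450 / 4
// the damped value is clamped to [windowspan * 84%, windowspan * 132%]
let min_actual = averaging_window_timespan * (100 - 16) / 100;
let max_actual = averaging_window_timespan * (100 + 32) / 100;
assert_eq!((min_actual, max_actual), (2142, 3366));
assert!(damped >= min_actual && damped <= max_actual);
}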
}
\ No newline at end of file