Pruning proof minor improvements #590

Merged · 9 commits · Nov 10, 2024
4 changes: 2 additions & 2 deletions consensus/core/src/api/mod.rs
@@ -17,7 +17,7 @@ use crate::{
tx::TxResult,
},
header::Header,
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList},
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata},
trusted::{ExternalGhostdagData, TrustedBlock},
tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
BlockHashSet, BlueWorkType, ChainPath,
@@ -203,7 +203,7 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}

fn validate_pruning_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> {
fn validate_pruning_proof(&self, proof: &PruningPointProof, proof_metadata: &PruningProofMetadata) -> PruningImportResult<()> {
unimplemented!()
}

3 changes: 3 additions & 0 deletions consensus/core/src/errors/pruning.rs
@@ -59,6 +59,9 @@ pub enum PruningImportError {

#[error("process exit was initiated while validating pruning point proof")]
PruningValidationInterrupted,

#[error("block {0} at level {1} has invalid proof of work for level")]
ProofOfWorkFailed(Hash, BlockLevel),
}

pub type PruningImportResult<T> = std::result::Result<T, PruningImportError>;
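For context, a hedged sketch of where the new variant is raised (the helper name and exact rejection condition are illustrative assumptions; only the error shape is from this diff): during proof validation, a header that fails pow, or whose computed level falls below the level it appears at in the proof, is rejected.

```rust
use kaspa_consensus_core::{
    errors::pruning::{PruningImportError, PruningImportResult},
    header::Header,
    BlockLevel,
};
use kaspa_pow::calc_block_level_check_pow;

// Hypothetical helper illustrating the intended use of ProofOfWorkFailed.
fn check_proof_header_pow(header: &Header, claimed_level: BlockLevel, max_block_level: BlockLevel) -> PruningImportResult<()> {
    let (block_level, passed) = calc_block_level_check_pow(header, max_block_level);
    if !passed || block_level < claimed_level {
        return Err(PruningImportError::ProofOfWorkFailed(header.hash, claimed_level));
    }
    Ok(())
}
```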
4 changes: 4 additions & 0 deletions consensus/core/src/lib.rs
@@ -41,6 +41,10 @@ pub mod utxo;
/// overall blocks, so 2^192 is definitely a justified upper-bound.
pub type BlueWorkType = kaspa_math::Uint192;

/// This extends directly from the expectation above about having no more than
/// 2^128 work in a single block
pub const MAX_WORK_LEVEL: BlockLevel = 128;
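Why 128: `BlueWorkType` is `Uint192`, which the comment above justifies by bounding total accumulated work at 2^192. A quick sanity-check sketch (not part of the diff): with at most ~2^64 blocks overall and each block capped at 2^128 work, the sum stays within 2^64 * 2^128 = 2^192.

```rust
use kaspa_consensus_core::{BlueWorkType, MAX_WORK_LEVEL};

fn main() {
    // The largest work any single block may claim once level work is capped
    // (see level_work further down in this PR).
    let max_single_block_work = BlueWorkType::from_u64(1) << (MAX_WORK_LEVEL as u32);
    assert_eq!(max_single_block_work.bits(), 129); // i.e., exactly 2^128
    // 128 per-block bits + 64 block-count bits = the 192 bits of BlueWorkType.
    assert_eq!(MAX_WORK_LEVEL as u32 + 64, 192);
}
```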

/// The type used to represent the GHOSTDAG K parameter
pub type KType = u16;

13 changes: 13 additions & 0 deletions consensus/core/src/pruning.rs
@@ -1,6 +1,7 @@
use crate::{
header::Header,
trusted::{TrustedGhostdagData, TrustedHeader},
BlueWorkType,
};
use kaspa_hashes::Hash;
use std::sync::Arc;
@@ -19,3 +20,15 @@ pub struct PruningPointTrustedData {
/// Union of GHOSTDAG data required to verify blocks in the future of the pruning point
pub ghostdag_blocks: Vec<TrustedGhostdagData>,
}

#[derive(Clone, Copy)]
pub struct PruningProofMetadata {
/// The claimed work of the initial relay block (from the prover)
pub relay_block_blue_work: BlueWorkType,
}

impl PruningProofMetadata {
pub fn new(relay_block_blue_work: BlueWorkType) -> Self {
Self { relay_block_blue_work }
}
}
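A hedged sketch of how the new metadata reaches the updated `ConsensusApi` method (the wiring function below is illustrative; the real call site in the IBD flow is outside this diff): the syncee records the blue work claimed by the relay block that triggered the sync and passes it alongside the proof.

```rust
use kaspa_consensus_core::{
    api::ConsensusApi,
    errors::pruning::PruningImportResult,
    header::Header,
    pruning::{PruningPointProof, PruningProofMetadata},
};

// Illustrative wiring: validate a received proof against the relay block's claimed work.
fn validate_with_relay_work(
    consensus: &dyn ConsensusApi,
    proof: &PruningPointProof,
    relay_header: &Header,
) -> PruningImportResult<()> {
    let metadata = PruningProofMetadata::new(relay_header.blue_work);
    consensus.validate_pruning_proof(proof, &metadata)
}
```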
14 changes: 12 additions & 2 deletions consensus/pow/src/lib.rs
@@ -54,12 +54,22 @@ impl State {
}

pub fn calc_block_level(header: &Header, max_block_level: BlockLevel) -> BlockLevel {
let (block_level, _) = calc_block_level_check_pow(header, max_block_level);
block_level
}

pub fn calc_block_level_check_pow(header: &Header, max_block_level: BlockLevel) -> (BlockLevel, bool) {
if header.parents_by_level.is_empty() {
return max_block_level; // Genesis has the max block level
return (max_block_level, true); // Genesis has the max block level
}

let state = State::new(header);
let (_, pow) = state.check_pow(header.nonce);
let (passed, pow) = state.check_pow(header.nonce);
let block_level = calc_level_from_pow(pow, max_block_level);
(block_level, passed)
}

pub fn calc_level_from_pow(pow: Uint256, max_block_level: BlockLevel) -> BlockLevel {
let signed_block_level = max_block_level as i64 - pow.bits() as i64;
max(signed_block_level, 0) as BlockLevel
}
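A worked example of the extracted helper (a sketch assuming the mainnet value `max_block_level = 225`): a pow value with 200 significant bits maps to level 225 - 200 = 25, while a maximal pow value yields a negative signed level that clamps to 0.

```rust
use kaspa_consensus_core::BlockLevel;
use kaspa_math::Uint256;
use kaspa_pow::calc_level_from_pow;

fn main() {
    let max_block_level: BlockLevel = 225; // mainnet value
    let pow = Uint256::from_u64(1) << 199u32; // exactly 200 significant bits
    assert_eq!(pow.bits(), 200);
    assert_eq!(calc_level_from_pow(pow, max_block_level), 25); // 225 - 200
    let huge = !Uint256::from_u64(0); // all 256 bits set
    assert_eq!(calc_level_from_pow(huge, max_block_level), 0); // clamped to zero
}
```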
10 changes: 7 additions & 3 deletions consensus/src/consensus/mod.rs
@@ -64,7 +64,7 @@ use kaspa_consensus_core::{
merkle::calc_hash_merkle_root,
muhash::MuHashExtensions,
network::NetworkType,
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList},
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata},
trusted::{ExternalGhostdagData, TrustedBlock},
tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher,
@@ -757,8 +757,12 @@ impl ConsensusApi for Consensus {
calc_hash_merkle_root(txs.iter(), storage_mass_activated)
}

fn validate_pruning_proof(&self, proof: &PruningPointProof) -> Result<(), PruningImportError> {
self.services.pruning_proof_manager.validate_pruning_point_proof(proof)
fn validate_pruning_proof(
&self,
proof: &PruningPointProof,
proof_metadata: &PruningProofMetadata,
) -> Result<(), PruningImportError> {
self.services.pruning_proof_manager.validate_pruning_point_proof(proof, proof_metadata)
}

fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> {
1 change: 0 additions & 1 deletion consensus/src/consensus/services.rs
@@ -118,7 +118,6 @@ impl ConsensusServices {
relations_services[0].clone(),
storage.headers_store.clone(),
reachability_service.clone(),
false,
);

let coinbase_manager = CoinbaseManager::new(
@@ -9,7 +9,7 @@ use kaspa_consensus_core::header::Header;
use kaspa_consensus_core::BlockLevel;
use kaspa_core::time::unix_now;
use kaspa_database::prelude::StoreResultExtensions;
use std::cmp::max;
use kaspa_pow::calc_level_from_pow;

impl HeaderProcessor {
/// Validates the header in isolation including pow check against header declared bits.
@@ -102,8 +102,7 @@ impl HeaderProcessor {
let state = kaspa_pow::State::new(header);
let (passed, pow) = state.check_pow(header.nonce);
if passed || self.skip_proof_of_work {
let signed_block_level = self.max_block_level as i64 - pow.bits() as i64;
Ok(max(signed_block_level, 0) as BlockLevel)
Ok(calc_level_from_pow(pow, self.max_block_level))
} else {
Err(RuleError::InvalidPoW)
}
64 changes: 63 additions & 1 deletion consensus/src/processes/difficulty.rs
@@ -6,7 +6,7 @@ use crate::model::stores::{
use kaspa_consensus_core::{
config::params::MIN_DIFFICULTY_WINDOW_LEN,
errors::difficulty::{DifficultyError, DifficultyResult},
BlockHashSet, BlueWorkType,
BlockHashSet, BlueWorkType, MAX_WORK_LEVEL,
};
use kaspa_math::{Uint256, Uint320};
use std::{
@@ -282,6 +282,16 @@ pub fn calc_work(bits: u32) -> BlueWorkType {
res.try_into().expect("Work should not exceed 2**192")
}

pub fn level_work(level: u8, max_block_level: u8) -> BlueWorkType {
// Special-case level 0 to ensure the true (difficulty-derived) work is always used
if level == 0 {
return 0.into();
}
// We use 256 here so the result corresponds to the work at the level from calc_level_from_pow
let exp = (level as u32) + 256 - (max_block_level as u32);
BlueWorkType::from_u64(1) << exp.min(MAX_WORK_LEVEL as u32)
}
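The 256 in the exponent follows from the work definition: `calc_work` computes roughly 2^256 / (target + 1), and a block at `level` has a pow value of at most 2^(max_block_level - level) - 1, so its work is at least 2^(level + 256 - max_block_level). A sketch restating the exponent arithmetic under the mainnet assumption `max_block_level = 225` (values illustrative, not from the diff):

```rust
use kaspa_consensus_core::{BlockLevel, BlueWorkType, MAX_WORK_LEVEL};

// Standalone restatement of level_work's exponent, for illustration only.
fn level_work_exponent(level: BlockLevel, max_block_level: BlockLevel) -> u32 {
    ((level as u32) + 256 - (max_block_level as u32)).min(MAX_WORK_LEVEL as u32)
}

fn main() {
    let mbl: BlockLevel = 225;
    assert_eq!(level_work_exponent(1, mbl), 32); // lowest non-zero level => 2^32 work
    assert_eq!(level_work_exponent(30, mbl), 61); // 30 + 256 - 225
    assert_eq!(level_work_exponent(97, mbl), 128); // first level to hit the cap
    assert_eq!(level_work_exponent(225, mbl), 128); // capped at MAX_WORK_LEVEL
    let w = BlueWorkType::from_u64(1) << level_work_exponent(30, mbl);
    assert_eq!(w.bits(), 62); // i.e., 2^61
}
```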

#[derive(Eq)]
struct DifficultyBlock {
timestamp: u64,
@@ -307,3 +317,55 @@ impl Ord for DifficultyBlock {
self.timestamp.cmp(&other.timestamp).then_with(|| self.sortable_block.cmp(&other.sortable_block))
}
}

#[cfg(test)]
mod tests {
use kaspa_consensus_core::{BlockLevel, BlueWorkType, MAX_WORK_LEVEL};
use kaspa_math::{Uint256, Uint320};
use kaspa_pow::calc_level_from_pow;

use crate::processes::difficulty::{calc_work, level_work};
use kaspa_utils::hex::ToHex;

#[test]
fn test_target_levels() {
let max_block_level: BlockLevel = 225;
for level in 1..=max_block_level {
// required pow for level
let level_target = (Uint320::from_u64(1) << (max_block_level - level).max(MAX_WORK_LEVEL) as u32) - Uint320::from_u64(1);
let level_target = Uint256::from_be_bytes(level_target.to_be_bytes()[8..40].try_into().unwrap());
let calculated_level = calc_level_from_pow(level_target, max_block_level);

let true_level_work = calc_work(level_target.compact_target_bits());
let calc_level_work = level_work(level, max_block_level);

// A "good enough" estimate of level work is within 1% diff from work with actual level target
// It's hard to calculate percentages with these large numbers, so to get around using floats
// we multiply the difference by 100. if the result is <= the calc_level_work it means
// difference must have been less than 1%
let (percent_diff, overflowed) = (true_level_work - calc_level_work).overflowing_mul(BlueWorkType::from_u64(100));
let is_good_enough = percent_diff <= calc_level_work;

println!("Level {}:", level);
println!(
" data | {} | {} | {} / {} |",
level_target.compact_target_bits(),
level_target.bits(),
calculated_level,
max_block_level
);
println!(" pow | {}", level_target.to_hex());
println!(" work | 0000000000000000{}", true_level_work.to_hex());
println!(" lvwork | 0000000000000000{}", calc_level_work.to_hex());
println!(" diff<1% | {}", !overflowed && (is_good_enough));

assert!(is_good_enough);
}
}

#[test]
fn test_base_level_work() {
// Expect that at level 0, the level work is always 0
assert_eq!(BlueWorkType::from(0), level_work(0, 255));
}
}
64 changes: 47 additions & 17 deletions consensus/src/processes/ghostdag/protocol.rs
@@ -2,7 +2,7 @@ use std::sync::Arc;

use kaspa_consensus_core::{
blockhash::{self, BlockHashExtensions, BlockHashes},
BlockHashMap, BlueWorkType, HashMapCustomHasher,
BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher,
};
use kaspa_hashes::Hash;
use kaspa_utils::refs::Refs;
@@ -16,7 +16,7 @@ use crate::{
relations::RelationsStoreReader,
},
},
processes::difficulty::calc_work,
processes::difficulty::{calc_work, level_work},
};

use super::ordering::*;
@@ -29,7 +29,15 @@ pub struct GhostdagManager<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> {
pub(super) relations_store: S,
pub(super) headers_store: Arc<V>,
pub(super) reachability_service: U,
use_score_as_work: bool,

/// Level work is a lower bound on the amount of work represented by each block.
/// When running GD for higher-level sub-DAGs, this value should be set to the
/// work represented by that level, and it is then used as a lower bound
/// for the work calculated from header bits (which depends on current difficulty).
/// For instance, assuming level 80 (i.e., the pow hash has at least 80 leading zeros)
/// is always stricter than the difficulty target, all blocks at that level should
/// represent the same amount of work regardless of whether the current difficulty
/// requires 20 zeros or 25 zeros.
level_work: BlueWorkType,
}

impl<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> GhostdagManager<T, S, U, V> {
@@ -40,9 +48,30 @@ impl<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> GhostdagManager<T, S, U, V> {
relations_store: S,
headers_store: Arc<V>,
reachability_service: U,
use_score_as_work: bool,
) -> Self {
Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work }
// For ordinary GD, always keep level_work=0 so the lower bound is ineffective
Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, level_work: 0.into() }
}

pub fn with_level(
genesis_hash: Hash,
k: KType,
ghostdag_store: Arc<T>,
relations_store: S,
headers_store: Arc<V>,
reachability_service: U,
level: BlockLevel,
max_block_level: BlockLevel,
) -> Self {
Self {
genesis_hash,
k,
ghostdag_store,
relations_store,
reachability_service,
headers_store,
level_work: level_work(level, max_block_level),
}
}

pub fn genesis_ghostdag_data(&self) -> GhostdagData {
@@ -115,20 +144,21 @@ impl<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> GhostdagManager<T, S, U, V> {
}
}

// Handle the special case of origin children first
if selected_parent.is_origin() {
// ORIGIN is always a single parent so both blue score and work should remain zero
return new_block_data;
}

let blue_score = self.ghostdag_store.get_blue_score(selected_parent).unwrap() + new_block_data.mergeset_blues.len() as u64;

let blue_work: BlueWorkType = if self.use_score_as_work {
blue_score.into()
} else {
let added_blue_work: BlueWorkType = new_block_data
.mergeset_blues
.iter()
.cloned()
.map(|hash| if hash.is_origin() { 0.into() } else { calc_work(self.headers_store.get_bits(hash).unwrap()) })
.sum();

self.ghostdag_store.get_blue_work(selected_parent).unwrap() + added_blue_work
};
let added_blue_work: BlueWorkType = new_block_data
.mergeset_blues
.iter()
.cloned()
.map(|hash| calc_work(self.headers_store.get_bits(hash).unwrap()).max(self.level_work))
.sum();
let blue_work: BlueWorkType = self.ghostdag_store.get_blue_work(selected_parent).unwrap() + added_blue_work;

new_block_data.finalize_score_and_work(blue_score, blue_work);
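To make the new lower bound concrete (illustrative numbers, assuming mainnet `max_block_level = 225`): at proof level 40, `level_work` is 2^(40 + 256 - 225) = 2^71, so a blue block whose header bits encode less work than that is counted as 2^71, while a blue whose difficulty-derived work already exceeds the bound keeps its true work. At level 0 the bound is 0 and `max` reduces to plain `calc_work`, leaving ordinary GD untouched.

```rust
use kaspa_consensus_core::BlueWorkType;

fn main() {
    let level_work = BlueWorkType::from_u64(1) << 71u32; // level 40, max_block_level 225
    let weak = BlueWorkType::from_u64(1) << 60u32; // work implied by header bits
    let strong = BlueWorkType::from_u64(1) << 80u32;
    assert_eq!(weak.max(level_work), level_work); // bounded up to the level work
    assert_eq!(strong.max(level_work), strong); // true work kept when larger
    assert_eq!(strong.max(BlueWorkType::from_u64(0)), strong); // level 0: bound is a no-op
}
```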

2 changes: 1 addition & 1 deletion consensus/src/processes/pruning_proof/apply.rs
@@ -148,7 +148,7 @@ impl PruningProofManager {
let mut up_heap = BinaryHeap::with_capacity(capacity_estimate);
for header in proof.iter().flatten().cloned() {
if let Vacant(e) = dag.entry(header.hash) {
// TODO: Check if pow passes
// pow passing has already been checked during validation
let block_level = calc_block_level(&header, self.max_block_level);
self.headers_store.insert(header.hash, header.clone(), block_level).unwrap();

23 changes: 9 additions & 14 deletions consensus/src/processes/pruning_proof/build.rs
@@ -2,7 +2,7 @@ use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc};

use itertools::Itertools;
use kaspa_consensus_core::{
blockhash::{BlockHashExtensions, BlockHashes, ORIGIN},
blockhash::{BlockHashExtensions, BlockHashes},
header::Header,
pruning::PruningPointProof,
BlockHashSet, BlockLevel, HashMapCustomHasher,
@@ -331,27 +331,25 @@ impl PruningProofManager {
reachability_service: self.reachability_service.clone(),
root,
};
let gd_manager = GhostdagManager::new(
let gd_manager = GhostdagManager::with_level(
root,
self.ghostdag_k,
ghostdag_store.clone(),
relations_service.clone(),
self.headers_store.clone(),
self.reachability_service.clone(),
level != 0,
level,
self.max_block_level,
);

// Note there is no need to initialize origin since we have a single root
ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap();
ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap();

let mut topological_heap: BinaryHeap<_> = Default::default();
let mut visited = BlockHashSet::new();
for child in relations_service.get_children(root).unwrap().read().iter().copied() {
topological_heap.push(Reverse(SortableBlock {
hash: child,
// It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology
blue_work: self.headers_store.get_header(child).unwrap().blue_work,
}));
topological_heap
.push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work }));
}

let mut has_required_block = required_block.is_some_and(|required_block| root == required_block);
@@ -378,11 +376,8 @@
ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists();

for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() {
topological_heap.push(Reverse(SortableBlock {
hash: child,
// It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology
blue_work: self.headers_store.get_header(child).unwrap().blue_work,
}));
topological_heap
.push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work }));
}
}
