Skip to content

Commit 1243a04

Browse files
authored
Retain index data up to retention period root (#657)
* test: add test for getting populated tx
  - Includes retention_period_days support in simpa
* Retain index up to retention root - instead of pruning point
* Use get_daa_score instead of compact_header
* Use sink timestamp as "now" in all cases for retention
1 parent ac677a0 commit 1243a04

File tree

3 files changed

+47
-15
lines changed

3 files changed

+47
-15
lines changed

consensus/src/pipeline/pruning_processor/processor.rs

+5-3
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ use kaspa_consensus_core::{
3535
BlockHashMap, BlockHashSet, BlockLevel,
3636
};
3737
use kaspa_consensusmanager::SessionLock;
38-
use kaspa_core::{debug, info, time::unix_now, trace, warn};
38+
use kaspa_core::{debug, info, trace, warn};
3939
use kaspa_database::prelude::{BatchDbWriter, MemoryWriter, StoreResultExtensions, DB};
4040
use kaspa_hashes::Hash;
4141
use kaspa_muhash::MuHash;
@@ -383,7 +383,7 @@ impl PruningProcessor {
383383

384384
// Prune the selected chain index below the pruning point
385385
let mut selected_chain_write = self.selected_chain_store.write();
386-
selected_chain_write.prune_below_pruning_point(BatchDbWriter::new(&mut batch), new_pruning_point).unwrap();
386+
selected_chain_write.prune_below_pruning_point(BatchDbWriter::new(&mut batch), retention_period_root).unwrap();
387387

388388
// Flush the batch to the DB
389389
self.db.write(batch).unwrap();
@@ -563,7 +563,9 @@ impl PruningProcessor {
563563
let retention_period_ms = (retention_period_days * 86400.0 * 1000.0).ceil() as u64;
564564

565565
// The target timestamp we would like to find a point below
566-
let retention_period_root_ts_target = unix_now().saturating_sub(retention_period_ms);
566+
let sink_timestamp_as_current_time =
567+
self.headers_store.get_timestamp(self.lkg_virtual_state.load().ghostdag_data.selected_parent).unwrap();
568+
let retention_period_root_ts_target = sink_timestamp_as_current_time.saturating_sub(retention_period_ms);
567569

568570
// Iterate from the new pruning point to the prev retention root and search for the first point with enough days above it.
569571
// Note that prev retention root is always a past pruning point, so we can iterate via pruning samples until we reach it.

consensus/src/pipeline/virtual_processor/utxo_inquirer.rs

+7-11
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,7 @@ impl VirtualStateProcessor {
2828
) -> Result<SignableTransaction, UtxoInquirerError> {
2929
let retention_period_root_daa_score = self
3030
.headers_store
31-
.get_compact_header_data(retention_period_root_hash)
32-
.map(|compact_header| compact_header.daa_score)
31+
.get_daa_score(retention_period_root_hash)
3332
.map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(retention_period_root_hash))?;
3433

3534
if accepting_block_daa_score < retention_period_root_daa_score {
@@ -94,11 +93,8 @@ impl VirtualStateProcessor {
9493
.get_by_hash(retention_period_root_hash)
9594
.map_err(|_| UtxoInquirerError::MissingIndexForHash(retention_period_root_hash))?;
9695
let (tip_index, tip_hash) = sc_read.get_tip().map_err(|_| UtxoInquirerError::MissingTipData)?;
97-
let tip_daa_score = self
98-
.headers_store
99-
.get_compact_header_data(tip_hash)
100-
.map(|tip| tip.daa_score)
101-
.map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(tip_hash))?;
96+
let tip_daa_score =
97+
self.headers_store.get_daa_score(tip_hash).map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(tip_hash))?;
10298

10399
// For a chain segment it holds that len(segment) <= daa_score(segment end) - daa_score(segment start). This is true
104100
// because each chain block increases the daa score by at least one. Hence we can lower bound our search by high index
@@ -117,14 +113,14 @@ impl VirtualStateProcessor {
117113
UtxoInquirerError::MissingHashAtIndex(mid)
118114
})?;
119115

120-
// 2. Get the compact header so we have access to the daa_score. Error if we cannot find the header
121-
let compact_header = self.headers_store.get_compact_header_data(hash).map_err(|_| {
122-
trace!("Did not find a compact header with hash {}", hash);
116+
// 2. Get the daa_score. Error if the header is not found
117+
let daa_score = self.headers_store.get_daa_score(hash).map_err(|_| {
118+
trace!("Did not find a header with hash {}", hash);
123119
UtxoInquirerError::MissingCompactHeaderForBlockHash(hash)
124120
})?;
125121

126122
// 3. Compare block daa score to our target
127-
match compact_header.daa_score.cmp(&target_daa_score) {
123+
match daa_score.cmp(&target_daa_score) {
128124
cmp::Ordering::Equal => {
129125
// We found the chain block we need
130126
break hash;

simpa/src/main.rs

+35-1
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,8 @@ struct Args {
124124
rocksdb_mem_budget: Option<usize>,
125125
#[arg(long, default_value_t = false)]
126126
long_payload: bool,
127+
#[arg(long)]
128+
retention_period_days: Option<f64>,
127129
}
128130

129131
#[cfg(feature = "heap")]
@@ -198,7 +200,10 @@ fn main_impl(mut args: Args) {
198200
.apply_args(|config| apply_args_to_consensus_params(&args, &mut config.params))
199201
.apply_args(|config| apply_args_to_perf_params(&args, &mut config.perf))
200202
.adjust_perf_params_to_consensus_params()
201-
.apply_args(|config| config.ram_scale = args.ram_scale)
203+
.apply_args(|config| {
204+
config.ram_scale = args.ram_scale;
205+
config.retention_period_days = args.retention_period_days;
206+
})
202207
.skip_proof_of_work()
203208
.enable_sanity_checks();
204209
if !args.test_pruning {
@@ -260,7 +265,36 @@ fn main_impl(mut args: Args) {
260265
let num_blocks = hashes.len();
261266
let num_txs = print_stats(&consensus, &hashes, args.delay, args.bps, config.ghostdag_k().before());
262267
info!("There are {num_blocks} blocks with {num_txs} transactions overall above the current pruning point");
268+
269+
if args.retention_period_days.is_some() {
270+
let hashes_retention = topologically_ordered_hashes(&consensus, consensus.get_retention_period_root());
271+
info!("There are {} blocks above the retention period root", hashes_retention.len());
272+
}
273+
263274
consensus.validate_pruning_points(consensus.get_sink()).unwrap();
275+
276+
// Test whether we can still retrieve a populated transaction given a txid and the accepting block daa score.
277+
for hash in hashes.iter().cloned() {
278+
if !consensus.is_chain_block(hash).unwrap() {
279+
// only chain blocks are worth checking the acceptance data of
280+
continue;
281+
}
282+
283+
if let Ok(block_acceptance_data) = consensus.get_block_acceptance_data(hash) {
284+
block_acceptance_data.iter().for_each(|cbad| {
285+
let block = consensus.get_block(hash).unwrap();
286+
cbad.accepted_transactions.iter().for_each(|ate| {
287+
assert!(
288+
consensus.get_populated_transaction(ate.transaction_id, block.header.daa_score).is_ok(),
289+
"Expected to find find tx {} at accepted daa {} via get_populated_transaction",
290+
ate.transaction_id,
291+
block.header.daa_score
292+
);
293+
});
294+
});
295+
}
296+
}
297+
264298
drop(consensus);
265299
return;
266300
}

0 commit comments

Comments (0)