From 8aa10732eb7d8226d40951fb9fe3b71d3cf13b9a Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Fri, 24 Jan 2025 19:53:30 +0200 Subject: [PATCH 1/7] Instant time instead of SystemTime Changed functions to use std::time::Instant which is monotonic, to avoid Rust panics with SystemTime. Replaced some unwraps with an expect. Removed redundant brackets, and secp256k1:: --- rothschild/src/main.rs | 45 ++++++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 9baeaa04e7..d9cef361ab 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -21,7 +21,7 @@ use secp256k1::{ rand::{thread_rng, Rng}, Keypair, }; -use tokio::time::{interval, MissedTickBehavior}; +use tokio::time::{Instant, interval, MissedTickBehavior}; const DEFAULT_SEND_AMOUNT: u64 = 10 * SOMPI_PER_KASPA; const FEE_RATE: u64 = 10; @@ -161,14 +161,16 @@ async fn main() { Default::default(), ) .await - .unwrap(); + .expect("Critical error: failed to connect to the RPC server."); + info!("Connected to RPC"); - let mut pending = HashMap::new(); + + let mut pending: HashMap = HashMap::new(); let schnorr_key = if let Some(private_key_hex) = args.private_key { let mut private_key_bytes = [0u8; 32]; faster_hex::hex_decode(private_key_hex.as_bytes(), &mut private_key_bytes).unwrap(); - secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, &private_key_bytes).unwrap() + Keypair::from_seckey_slice(secp256k1::SECP256K1, &private_key_bytes).unwrap() } else { let (sk, pk) = &secp256k1::generate_keypair(&mut thread_rng()); let kaspa_addr = Address::new(ADDRESS_PREFIX, ADDRESS_VERSION, &pk.x_only_public_key().0.serialize()); @@ -208,7 +210,10 @@ async fn main() { } info!("{}", log_message); - let info = rpc_client.get_block_dag_info().await.unwrap(); + let info = rpc_client.get_block_dag_info() + .await + .expect("Failed to get block dag info."); + let 
coinbase_maturity = match info.network.suffix { Some(11) => TESTNET11_PARAMS.coinbase_maturity, None | Some(_) => TESTNET_PARAMS.coinbase_maturity, @@ -251,7 +256,7 @@ async fn main() { info!( "Tx rate: {:.1}/sec, avg UTXO amount: {}, avg UTXOs per tx: {}, avg outs per tx: {}, estimated available UTXOs: {}", 1000f64 * (stats.num_txs as f64) / (time_past as f64), - (stats.utxos_amount / stats.num_utxos as u64), + stats.utxos_amount / stats.num_utxos as u64, stats.num_utxos / stats.num_txs, stats.num_outs / stats.num_txs, utxos_len.saturating_sub(pending_len), @@ -332,7 +337,7 @@ async fn main() { fn should_maximize_inputs( old_value: bool, utxos: &[(TransactionOutpoint, UtxoEntry)], - pending: &HashMap, + pending: &HashMap, ) -> bool { let estimated_utxos = if utxos.len() > pending.len() { utxos.len() - pending.len() } else { 0 }; if !old_value && estimated_utxos > 1_000_000 { @@ -362,7 +367,7 @@ async fn pause_if_mempool_is_full(rpc_client: &GrpcClient) { async fn refresh_utxos( rpc_client: &GrpcClient, kaspa_addr: Address, - pending: &mut HashMap, + pending: &mut HashMap, coinbase_maturity: u64, ) -> Vec<(TransactionOutpoint, UtxoEntry)> { populate_pending_outpoints_from_mempool(rpc_client, kaspa_addr.clone(), pending).await; @@ -372,10 +377,11 @@ async fn refresh_utxos( async fn populate_pending_outpoints_from_mempool( rpc_client: &GrpcClient, kaspa_addr: Address, - pending_outpoints: &mut HashMap, + pending_outpoints: &mut HashMap, ) { let entries = rpc_client.get_mempool_entries_by_addresses(vec![kaspa_addr], true, false).await.unwrap(); - let now = unix_now(); + let now = Instant::now(); + for entry in entries { for entry in entry.sending { for input in entry.transaction.inputs { @@ -389,7 +395,7 @@ async fn fetch_spendable_utxos( rpc_client: &GrpcClient, kaspa_addr: Address, coinbase_maturity: u64, - pending: &mut HashMap, + pending: &mut HashMap, ) -> Vec<(TransactionOutpoint, UtxoEntry)> { let resp = 
rpc_client.get_utxos_by_addresses(vec![kaspa_addr]).await.unwrap(); let dag_info = rpc_client.get_block_dag_info().await.unwrap(); @@ -420,7 +426,7 @@ async fn maybe_send_tx( tx_sender: &async_channel::Sender, kaspa_addr: Address, utxos: &mut [(TransactionOutpoint, UtxoEntry)], - pending: &mut HashMap, + pending: &mut HashMap, schnorr_key: Keypair, stats: Arc>, maximize_inputs: bool, @@ -443,7 +449,7 @@ async fn maybe_send_tx( // have funds in this tick has_fund = true; - let now = unix_now(); + let now = Instant::now(); for input in selected_utxos.iter() { pending.insert(input.0, now); } @@ -486,11 +492,16 @@ async fn maybe_send_tx( true } -fn clean_old_pending_outpoints(pending: &mut HashMap) { - let now = unix_now(); - let old_keys = pending.iter().filter(|(_, time)| now - *time > 3600 * 1000).map(|(op, _)| *op).collect_vec(); +fn clean_old_pending_outpoints(pending: &mut HashMap) { + let now = Instant::now(); + + let old_keys: Vec<_> = pending + .iter() + .filter(|(_, &time)| now.duration_since(time) > Duration::from_secs(3600)) + .map(|(op, _)| *op) + .collect(); for key in old_keys { - pending.remove(&key).unwrap(); + pending.remove(&key); } } From 58ba334f837648020821c7654ed37c906e9c16b2 Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Sat, 25 Jan 2025 08:48:14 +0200 Subject: [PATCH 2/7] fn clean_old_pending_outpoints + lint Changed fn clean_old_pending_outpoints to retain keys that are younger than an hour, instead of collecting older than an hour ones as a vector, and then using a new for loop to delete them.
linting with cargo fmt --- rothschild/src/main.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index d9cef361ab..495e681e9a 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -21,7 +21,7 @@ use secp256k1::{ rand::{thread_rng, Rng}, Keypair, }; -use tokio::time::{Instant, interval, MissedTickBehavior}; +use tokio::time::{interval, Instant, MissedTickBehavior}; const DEFAULT_SEND_AMOUNT: u64 = 10 * SOMPI_PER_KASPA; const FEE_RATE: u64 = 10; @@ -210,9 +210,7 @@ async fn main() { } info!("{}", log_message); - let info = rpc_client.get_block_dag_info() - .await - .expect("Failed to get block dag info."); + let info = rpc_client.get_block_dag_info().await.expect("Failed to get block dag info."); let coinbase_maturity = match info.network.suffix { Some(11) => TESTNET11_PARAMS.coinbase_maturity, @@ -494,15 +492,7 @@ async fn maybe_send_tx( fn clean_old_pending_outpoints(pending: &mut HashMap) { let now = Instant::now(); - - let old_keys: Vec<_> = pending - .iter() - .filter(|(_, &time)| now.duration_since(time) > Duration::from_secs(3600)) - .map(|(op, _)| *op) - .collect(); - for key in old_keys { - pending.remove(&key); - } + pending.retain(|_, &mut time| now.duration_since(time) <= Duration::from_secs(3600)); } fn required_fee(num_utxos: usize, num_outs: u64) -> u64 { From 0bffb60b4317b23618a2a635b9e4db9f0e610ba2 Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Fri, 7 Feb 2025 17:22:46 +0200 Subject: [PATCH 3/7] optimization: flow.rs; fn sync_missing_relay_past_headers # Optimization flow.rs ; async sync_missing_relay_past_headers # Fix - Save memory; changed jobs to pass an iterator to try_join_all instead of collecting a vector and then passing it. 
- Changed error check to return an explicit error, instead of implicit --- protocol/flows/src/v5/ibd/flow.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs index 0dd7fe64f1..c34242aaf8 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/v5/ibd/flow.rs @@ -445,20 +445,21 @@ impl IbdFlow { let msg = dequeue_with_timeout!(self.incoming_route, Payload::BlockHeaders)?; let chunk: HeadersChunk = msg.try_into()?; - let jobs: Vec = - chunk.into_iter().map(|h| consensus.validate_and_insert_block(Block::from_header_arc(h)).virtual_state_task).collect(); + + let jobs = + chunk.into_iter().map(|header| consensus.validate_and_insert_block(Block::from_header_arc(header)).virtual_state_task); + try_join_all(jobs).await?; dequeue_with_timeout!(self.incoming_route, Payload::DoneHeaders)?; if consensus.async_get_block_status(relay_block_hash).await.is_none() { // If the relay block has still not been received, the peer is misbehaving - Err(ProtocolError::OtherOwned(format!( + return Err(ProtocolError::OtherOwned(format!( "did not receive relay block {} from peer {} during block download", relay_block_hash, self.router - ))) - } else { - Ok(()) + ))); } + Ok(()) } async fn validate_staging_timestamps( From c1a7c048c8bcc39f585be8a37d9a011c6a9a013b Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Wed, 12 Feb 2025 09:13:51 +0200 Subject: [PATCH 4/7] Revert "optimization: flow.rs; fn sync_missing_relay_past_headers" This reverts commit 0bffb60b4317b23618a2a635b9e4db9f0e610ba2. 
--- protocol/flows/src/v5/ibd/flow.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs index c34242aaf8..0dd7fe64f1 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/v5/ibd/flow.rs @@ -445,21 +445,20 @@ impl IbdFlow { let msg = dequeue_with_timeout!(self.incoming_route, Payload::BlockHeaders)?; let chunk: HeadersChunk = msg.try_into()?; - - let jobs = - chunk.into_iter().map(|header| consensus.validate_and_insert_block(Block::from_header_arc(header)).virtual_state_task); - + let jobs: Vec = + chunk.into_iter().map(|h| consensus.validate_and_insert_block(Block::from_header_arc(h)).virtual_state_task).collect(); try_join_all(jobs).await?; dequeue_with_timeout!(self.incoming_route, Payload::DoneHeaders)?; if consensus.async_get_block_status(relay_block_hash).await.is_none() { // If the relay block has still not been received, the peer is misbehaving - return Err(ProtocolError::OtherOwned(format!( + Err(ProtocolError::OtherOwned(format!( "did not receive relay block {} from peer {} during block download", relay_block_hash, self.router - ))); + ))) + } else { + Ok(()) } - Ok(()) } async fn validate_staging_timestamps( From 0b6d2f56ef339956da08a375afc1fa40c8bd2387 Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Thu, 13 Feb 2025 17:36:52 +0200 Subject: [PATCH 5/7] workflow testing --- .github/workflows/ci.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9b1e99a1cb..9b5422b47d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -113,6 +113,9 @@ jobs: target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + - name: Run cargo tests non--release + run: cargo test --no-fail-fast + - name: Run cargo build with devnet-prealloc feature run: cargo build --release --features devnet-prealloc --workspace --all 
--tests --benches From 9a5de304d87ccd66da7225d1f64e4fdf41b58fed Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Tue, 25 Mar 2025 13:05:13 +0200 Subject: [PATCH 6/7] fix: u128 panics bench.rs Since benches are meant to measure performance not correctness, the following u128 benches have added wrapping in order to prevent panics. The other Uint128 tests did not panic, so they were left as is; e.g. Benchmarking u128/right shift: Warming up for 3.0000 sthread 'main' panicked at math/benches/bench.rs:50:54: attempt to shift right with overflow stack backtrace: 0: 0x613015e7a3aa - 1: 0x613015ce9323 - 2: 0x613015e76f13 - 3: 0x613015e7a202 - 4: 0x613015e7aff7 - 5: 0x613015e7ae5d - 6: 0x613015e7b5a7 - 7: 0x613015e7b416 - 8: 0x613015e7a859 - 9: 0x613015e7b0ec - 10: 0x613015c1ad20 - 11: 0x613015c1b3d7 - 12: 0x613015c830d9 - 13: 0x613015c266ba - 14: 0x613015c48bf5 - 15: 0x613015cadfa4 - 16: 0x613015c7769f - 17: 0x613015ca018d - 18: 0x613015ca18e3 - 19: 0x613015ca4ac9 - 20: 0x613015e72e9d - 21: 0x613015ca1385 - 22: 0x72338162a1ca - __libc_start_call_main at ./csu/../sysdeps/nptl/libc_start_call_main.h:58:16 23: 0x72338162a28b - __libc_start_main_impl at ./csu/../csu/libc-start.c:360:3 24: 0x613015c21ff5 - 25: 0x0 - error: bench failed, to rerun pass `-p kaspa-math --bench bench` --- math/benches/bench.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/math/benches/bench.rs b/math/benches/bench.rs index 34bcd3ece5..deaea4b266 100644 --- a/math/benches/bench.rs +++ b/math/benches/bench.rs @@ -40,14 +40,14 @@ fn bench_uint128(c: &mut Criterion) { let mut u128_c = c.benchmark_group("u128"); - bench_op(&mut u128_c, &u128_one, &u128_two, |a, b| a + b, "add"); - bench_op(&mut u128_c, &u128_one, &u64s, |a, b| a + (b as u128), "addition u64"); - bench_op(&mut u128_c, &u128_one, &u128_two, |a, b| a * b, "multiplication"); - bench_op(&mut u128_c, &u128_one, &u64s, |a, b| a * (b as u128), "multiplication 
u64"); - bench_op(&mut u128_c, &u128_one, &u128_two, |a, b| a / b, "division"); - bench_op(&mut u128_c, &u128_one, &u64s, |a, b| a / (b as u128), "u64 division"); - bench_op(&mut u128_c, &u128_one, &shifts, |a, b| a << b, "left shift"); - bench_op(&mut u128_c, &u128_one, &shifts, |a, b| a >> b, "right shift"); + bench_op(&mut u128_c, &u128_one, &u128_two, |a, b| a.wrapping_add(b), "add"); + bench_op(&mut u128_c, &u128_one, &u64s, |a, b| a.wrapping_add(b as u128), "addition u64"); + bench_op(&mut u128_c, &u128_one, &u128_two, |a, b| a.wrapping_mul(b), "multiplication"); + bench_op(&mut u128_c, &u128_one, &u64s, |a, b| a.wrapping_mul(b as u128), "multiplication u64"); + bench_op(&mut u128_c, &u128_one, &u128_two, |a, b| a.wrapping_div(b), "division"); + bench_op(&mut u128_c, &u128_one, &u64s, |a, b| a.wrapping_div(b as u128), "u64 division"); + bench_op(&mut u128_c, &u128_one, &shifts, |a, b| a.wrapping_shl(b), "left shift"); + bench_op(&mut u128_c, &u128_one, &shifts, |a, b| a.wrapping_shr(b), "right shift"); u128_c.finish(); let mut uint128_c = c.benchmark_group("Uint128"); From 532ce00babdba4363ddaddf59ce3dfb4dc0f1c96 Mon Sep 17 00:00:00 2001 From: supr <98590479+miningexperiments@users.noreply.github.com> Date: Tue, 25 Mar 2025 13:31:47 +0200 Subject: [PATCH 7/7] revert ci.yaml to match kaspanet/rusty-kaspa master --- .github/workflows/ci.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9b5422b47d..9b1e99a1cb 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -113,9 +113,6 @@ jobs: target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - name: Run cargo tests non--release - run: cargo test --no-fail-fast - - name: Run cargo build with devnet-prealloc feature run: cargo build --release --features devnet-prealloc --workspace --all --tests --benches