From d2fed4e0007d108aff5b7d352490e46f23377037 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Mon, 16 Mar 2026 02:21:28 +0700 Subject: [PATCH 1/3] feat: add mempool support See the changes in `ARCHITECTURE.md` for a detailed description. - Add `MempoolManager` that activates after initial sync to monitor unconfirmed transactions via BIP37 bloom filters or local address matching. Includes peer relay management, dedup tracking, IS lock handling, and auto-rebuilding filters on address pool changes. - Extend `WalletInterface` with `MempoolTransactionResult` return type and `watched_outpoints()` for bloom filter construction. Wire mempool manager into `SyncCoordinator` and propagate `confirmed_txids` through `BlockProcessed` events for mempool eviction. - Add FFI bindings, dashd integration tests, and wallet unit tests. --- dash-spv-ffi/FFI_API.md | 29 +- dash-spv-ffi/include/dash_spv_ffi.h | 24 + dash-spv-ffi/src/bin/ffi_cli.rs | 8 +- dash-spv-ffi/src/callbacks.rs | 9 + dash-spv-ffi/src/types.rs | 51 +- dash-spv-ffi/tests/dashd_sync/callbacks.rs | 9 +- dash-spv/ARCHITECTURE.md | 154 +- dash-spv/src/client/lifecycle.rs | 16 +- dash-spv/src/network/manager.rs | 3 + dash-spv/src/network/mod.rs | 16 + dash-spv/src/sync/blocks/manager.rs | 4 + dash-spv/src/sync/events.rs | 7 +- dash-spv/src/sync/identifier.rs | 3 + dash-spv/src/sync/mempool/filter.rs | 163 ++ dash-spv/src/sync/mempool/manager.rs | 1550 ++++++++++++++++++++ dash-spv/src/sync/mempool/mod.rs | 11 + dash-spv/src/sync/mempool/progress.rs | 171 +++ dash-spv/src/sync/mempool/sync_manager.rs | 616 ++++++++ dash-spv/src/sync/mod.rs | 3 + dash-spv/src/sync/progress.rs | 21 +- dash-spv/src/sync/sync_coordinator.rs | 10 +- dash-spv/src/sync/sync_manager.rs | 6 +- dash-spv/src/test_utils/node.rs | 106 +- dash-spv/src/types.rs | 5 +- dash-spv/tests/dashd_sync/helpers.rs | 174 ++- dash-spv/tests/dashd_sync/main.rs | 1 + dash-spv/tests/dashd_sync/setup.rs | 39 + dash-spv/tests/dashd_sync/tests_mempool.rs | 514 +++++++ 
key-wallet/src/manager/event_tests.rs | 255 ++++ key-wallet/src/manager/mod.rs | 28 +- key-wallet/src/manager/process_block.rs | 175 ++- key-wallet/src/manager/wallet_interface.rs | 31 +- key-wallet/src/test_utils/wallet.rs | 86 +- 33 files changed, 4252 insertions(+), 46 deletions(-) create mode 100644 dash-spv/src/sync/mempool/filter.rs create mode 100644 dash-spv/src/sync/mempool/manager.rs create mode 100644 dash-spv/src/sync/mempool/mod.rs create mode 100644 dash-spv/src/sync/mempool/progress.rs create mode 100644 dash-spv/src/sync/mempool/sync_manager.rs create mode 100644 dash-spv/tests/dashd_sync/tests_mempool.rs diff --git a/dash-spv-ffi/FFI_API.md b/dash-spv-ffi/FFI_API.md index 8f4606063..4b1d4db22 100644 --- a/dash-spv-ffi/FFI_API.md +++ b/dash-spv-ffi/FFI_API.md @@ -4,7 +4,7 @@ This document provides a comprehensive reference for all FFI (Foreign Function I **Auto-generated**: This documentation is automatically generated from the source code. Do not edit manually. -**Total Functions**: 49 +**Total Functions**: 50 ## Table of Contents @@ -13,6 +13,7 @@ This document provides a comprehensive reference for all FFI (Foreign Function I - [Synchronization](#synchronization) - [Wallet Operations](#wallet-operations) - [Transaction Management](#transaction-management) +- [Mempool Operations](#mempool-operations) - [Platform Integration](#platform-integration) - [Event Callbacks](#event-callbacks) - [Error Handling](#error-handling) @@ -82,6 +83,14 @@ Functions: 1 |----------|-------------|--------| | `dash_spv_ffi_client_broadcast_transaction` | Broadcasts a transaction to the Dash network via connected peers | client | +### Mempool Operations + +Functions: 1 + +| Function | Description | Module | +|----------|-------------|--------| +| `dash_spv_ffi_mempool_progress_destroy` | Destroy an `FFIMempoolProgress` object | types | + ### Platform Integration Functions: 2 @@ -558,6 +567,24 @@ Broadcasts a transaction to the Dash network via connected peers. 
# Safety - ` --- +### Mempool Operations - Detailed + +#### `dash_spv_ffi_mempool_progress_destroy` + +```c +dash_spv_ffi_mempool_progress_destroy(progress: *mut FFIMempoolProgress) -> () +``` + +**Description:** +Destroy an `FFIMempoolProgress` object. # Safety - `progress` must be a pointer returned from this crate, or null. + +**Safety:** +- `progress` must be a pointer returned from this crate, or null. + +**Module:** `types` + +--- + ### Platform Integration - Detailed #### `ffi_dash_spv_get_platform_activation_height` diff --git a/dash-spv-ffi/include/dash_spv_ffi.h b/dash-spv-ffi/include/dash_spv_ffi.h index c47776cdd..d032f829c 100644 --- a/dash-spv-ffi/include/dash_spv_ffi.h +++ b/dash-spv-ffi/include/dash_spv_ffi.h @@ -38,6 +38,7 @@ typedef enum FFIManagerId { Masternodes = 4, ChainLocks = 5, InstantSend = 6, + Mempool = 7, } FFIManagerId; typedef enum FFIMempoolStrategy { @@ -144,6 +145,18 @@ typedef struct FFIInstantSendProgress { uint64_t last_activity; } FFIInstantSendProgress; +/** + * Progress for mempool transaction monitoring. + */ +typedef struct FFIMempoolProgress { + enum FFISyncState state; + uint32_t received; + uint32_t relevant; + uint32_t tracked; + uint32_t removed; + uint64_t last_activity; +} FFIMempoolProgress; + /** * Aggregate progress for all sync managers. * Provides a complete view of the parallel sync system's state. 
@@ -162,6 +175,7 @@ typedef struct FFISyncProgress { struct FFIMasternodesProgress *masternodes; struct FFIChainLockProgress *chainlocks; struct FFIInstantSendProgress *instantsend; + struct FFIMempoolProgress *mempool; } FFISyncProgress; /** @@ -249,6 +263,8 @@ typedef void (*OnBlocksNeededCallback)(const struct FFIBlockNeeded *blocks, typedef void (*OnBlockProcessedCallback)(uint32_t height, const uint8_t (*hash)[32], uint32_t new_address_count, + const uint8_t (*confirmed_txids)[32], + uint32_t confirmed_txid_count, void *user_data); /** @@ -976,6 +992,14 @@ struct FFIResult ffi_dash_spv_get_platform_activation_height(struct FFIDashSpvCl */ void dash_spv_ffi_instantsend_progress_destroy(struct FFIInstantSendProgress *progress) ; +/** + * Destroy an `FFIMempoolProgress` object. + * + * # Safety + * - `progress` must be a pointer returned from this crate, or null. + */ + void dash_spv_ffi_mempool_progress_destroy(struct FFIMempoolProgress *progress) ; + /** * Destroy an `FFISyncProgress` object and all its nested pointers. 
* diff --git a/dash-spv-ffi/src/bin/ffi_cli.rs b/dash-spv-ffi/src/bin/ffi_cli.rs index 6d56ae4e5..d20eeeeb2 100644 --- a/dash-spv-ffi/src/bin/ffi_cli.rs +++ b/dash-spv-ffi/src/bin/ffi_cli.rs @@ -29,6 +29,7 @@ extern "C" fn on_sync_start(manager_id: FFIManagerId, _user_data: *mut c_void) { FFIManagerId::Masternodes => "Masternodes", FFIManagerId::ChainLocks => "ChainLocks", FFIManagerId::InstantSend => "InstantSend", + FFIManagerId::Mempool => "Mempool", }; println!("[Sync] Manager started: {}", manager_name); } @@ -76,9 +77,14 @@ extern "C" fn on_block_processed( height: u32, _hash: *const [u8; 32], new_address_count: u32, + _confirmed_txids: *const [u8; 32], + confirmed_txid_count: u32, _user_data: *mut c_void, ) { - println!("[Sync] Block processed: height={}, new_addresses={}", height, new_address_count); + println!( + "[Sync] Block processed: height={}, new_addresses={}, confirmed_txs={}", + height, new_address_count, confirmed_txid_count + ); } extern "C" fn on_masternode_state_updated(height: u32, _user_data: *mut c_void) { diff --git a/dash-spv-ffi/src/callbacks.rs b/dash-spv-ffi/src/callbacks.rs index c30a4e725..721909709 100644 --- a/dash-spv-ffi/src/callbacks.rs +++ b/dash-spv-ffi/src/callbacks.rs @@ -28,6 +28,7 @@ pub enum FFIManagerId { Masternodes = 4, ChainLocks = 5, InstantSend = 6, + Mempool = 7, } impl From for FFIManagerId { @@ -40,6 +41,7 @@ impl From for FFIManagerId { dash_spv::sync::ManagerIdentifier::Masternode => FFIManagerId::Masternodes, dash_spv::sync::ManagerIdentifier::ChainLock => FFIManagerId::ChainLocks, dash_spv::sync::ManagerIdentifier::InstantSend => FFIManagerId::InstantSend, + dash_spv::sync::ManagerIdentifier::Mempool => FFIManagerId::Mempool, } } } @@ -163,6 +165,8 @@ pub type OnBlockProcessedCallback = Option< height: u32, hash: *const [u8; 32], new_address_count: u32, + confirmed_txids: *const [u8; 32], + confirmed_txid_count: u32, user_data: *mut c_void, ), >; @@ -351,13 +355,18 @@ impl FFISyncEventCallbacks { block_hash, 
height, new_addresses, + confirmed_txids, } => { if let Some(cb) = self.on_block_processed { let hash_bytes = block_hash.as_byte_array(); + let txid_bytes: Vec<[u8; 32]> = + confirmed_txids.iter().map(|txid| *txid.as_byte_array()).collect(); cb( *height, hash_bytes as *const [u8; 32], new_addresses.len() as u32, + txid_bytes.as_ptr(), + txid_bytes.len() as u32, self.user_data, ); } diff --git a/dash-spv-ffi/src/types.rs b/dash-spv-ffi/src/types.rs index e53a2a9b1..2fb791637 100644 --- a/dash-spv-ffi/src/types.rs +++ b/dash-spv-ffi/src/types.rs @@ -1,8 +1,8 @@ use dash_spv::client::config::MempoolStrategy; use dash_spv::sync::{ BlockHeadersProgress, BlocksProgress, ChainLockProgress, FilterHeadersProgress, - FiltersProgress, InstantSendProgress, MasternodesProgress, ProgressPercentage, SyncProgress, - SyncState, + FiltersProgress, InstantSendProgress, MasternodesProgress, MempoolProgress, ProgressPercentage, + SyncProgress, SyncState, }; use dash_spv::types::MempoolRemovalReason; use std::ffi::{CStr, CString}; @@ -259,6 +259,31 @@ impl From<&InstantSendProgress> for FFIInstantSendProgress { } } +/// Progress for mempool transaction monitoring. +#[repr(C)] +#[derive(Debug, Clone, Default)] +pub struct FFIMempoolProgress { + pub state: FFISyncState, + pub received: u32, + pub relevant: u32, + pub tracked: u32, + pub removed: u32, + pub last_activity: u64, +} + +impl From<&MempoolProgress> for FFIMempoolProgress { + fn from(progress: &MempoolProgress) -> Self { + FFIMempoolProgress { + state: progress.state().into(), + received: progress.received(), + relevant: progress.relevant(), + tracked: progress.tracked(), + removed: progress.removed(), + last_activity: progress.last_activity().elapsed().as_secs(), + } + } +} + /// Aggregate progress for all sync managers. /// Provides a complete view of the parallel sync system's state. 
#[repr(C)] @@ -274,6 +299,7 @@ pub struct FFISyncProgress { pub masternodes: *mut FFIMasternodesProgress, pub chainlocks: *mut FFIChainLockProgress, pub instantsend: *mut FFIInstantSendProgress, + pub mempool: *mut FFIMempoolProgress, } impl From for FFISyncProgress { @@ -320,6 +346,12 @@ impl From for FFISyncProgress { .map(|p| Box::into_raw(Box::new(FFIInstantSendProgress::from(p)))) .unwrap_or(std::ptr::null_mut()); + let mempool = progress + .mempool() + .ok() + .map(|p| Box::into_raw(Box::new(FFIMempoolProgress::from(p)))) + .unwrap_or(std::ptr::null_mut()); + Self { state: progress.state().into(), percentage: progress.percentage(), @@ -331,6 +363,7 @@ impl From for FFISyncProgress { masternodes, chainlocks, instantsend, + mempool, } } } @@ -486,6 +519,17 @@ pub unsafe extern "C" fn dash_spv_ffi_instantsend_progress_destroy( } } +/// Destroy an `FFIMempoolProgress` object. +/// +/// # Safety +/// - `progress` must be a pointer returned from this crate, or null. +#[no_mangle] +pub unsafe extern "C" fn dash_spv_ffi_mempool_progress_destroy(progress: *mut FFIMempoolProgress) { + if !progress.is_null() { + let _ = Box::from_raw(progress); + } +} + /// Destroy an `FFISyncProgress` object and all its nested pointers. 
/// /// # Safety @@ -517,5 +561,8 @@ pub unsafe extern "C" fn dash_spv_ffi_sync_progress_destroy(progress: *mut FFISy if !p.instantsend.is_null() { dash_spv_ffi_instantsend_progress_destroy(p.instantsend); } + if !p.mempool.is_null() { + dash_spv_ffi_mempool_progress_destroy(p.mempool); + } } } diff --git a/dash-spv-ffi/tests/dashd_sync/callbacks.rs b/dash-spv-ffi/tests/dashd_sync/callbacks.rs index 20ca342ff..56a8125b0 100644 --- a/dash-spv-ffi/tests/dashd_sync/callbacks.rs +++ b/dash-spv-ffi/tests/dashd_sync/callbacks.rs @@ -233,6 +233,8 @@ extern "C" fn on_block_processed( height: u32, _hash: *const [u8; 32], new_address_count: u32, + _confirmed_txids: *const [u8; 32], + confirmed_txid_count: u32, user_data: *mut c_void, ) { let Some(tracker) = (unsafe { tracker_from(user_data) }) else { @@ -240,7 +242,12 @@ extern "C" fn on_block_processed( }; tracker.processed_block_heights.lock().unwrap_or_else(|e| e.into_inner()).push(height); tracker.block_processed_count.fetch_add(1, Ordering::SeqCst); - tracing::debug!("on_block_processed: height={}, new_addresses={}", height, new_address_count); + tracing::debug!( + "on_block_processed: height={}, new_addresses={}, confirmed_txs={}", + height, + new_address_count, + confirmed_txid_count + ); } extern "C" fn on_masternode_state_updated(height: u32, user_data: *mut c_void) { diff --git a/dash-spv/ARCHITECTURE.md b/dash-spv/ARCHITECTURE.md index 6fbd17cd2..9ec363165 100644 --- a/dash-spv/ARCHITECTURE.md +++ b/dash-spv/ARCHITECTURE.md @@ -25,7 +25,7 @@ ### Current State: Production-Ready Structure ✅ **Code Organization: EXCELLENT (A+)** -- ✅ Parallel event-driven sync architecture with 7 independent managers +- ✅ Parallel event-driven sync architecture with 8 independent managers - ✅ SyncManager trait with standard event loop pattern - ✅ SyncEvent broadcast channel for inter-manager communication - ✅ client/: 8 modules (2,895 lines) @@ -59,7 +59,7 @@ |----------|-------|-------| | Total Files | 110+ | Well-organized module 
structure | | Total Lines | ~40,000 | All files appropriately sized | -| Sync Managers | 7 | Block headers, filter headers, filters, blocks, masternodes, chainlock, instantsend | +| Sync Managers | 8 | Block headers, filter headers, filters, blocks, masternodes, chainlock, instantsend, mempool | | Largest File | network/manager.rs | 1,322 lines - Acceptable complexity | | Module Count | 10+ | Well-separated concerns | @@ -91,13 +91,13 @@ │ ▼ ┌─────────────────────────────────────────────────────────┐ - │ Parallel Sync Managers (7) │ + │ Parallel Sync Managers (8) │ ├──────────────┬──────────────┬──────────────┬────────────┤ │ BlockHeaders │ FilterHeaders│ Filters │ Blocks │ │ Manager │ Manager │ Manager │ Manager │ ├──────────────┼──────────────┼──────────────┼────────────┤ - │ Masternodes │ ChainLock │ InstantSend │ │ - │ Manager │ Manager │ Manager │ │ + │ Masternodes │ ChainLock │ InstantSend │ Mempool │ + │ Manager │ Manager │ Manager │ Manager │ └──────────────┴──────────────┴──────────────┴────────────┘ │ ▼ @@ -141,8 +141,12 @@ │ FiltersManager ──BlocksNeeded──> BlocksManager │ │ │ │ BlocksManager ──BlockProcessed──> FiltersManager (for gap limit rescan) │ +│ ──BlockProcessed──> MempoolManager (confirmed tx removal)│ │ │ -│ SyncCoordinator ──SyncComplete──> External listeners │ +│ InstantSendManager ──InstantLockReceived──> MempoolManager │ +│ │ +│ SyncCoordinator ──SyncComplete──> MempoolManager (activation trigger) │ +│ ──SyncComplete──> External listeners │ └──────────────────────────────────────────────────────────────────────────┘ │ │ Progress (watch channels) @@ -976,7 +980,7 @@ storage/disk/ #### Overview -The sync module uses a parallel, event-driven architecture where 7 independent managers run concurrently in their own tokio tasks, communicating via a broadcast event channel. +The sync module uses a parallel, event-driven architecture where 8 independent managers run concurrently in their own tokio tasks, communicating via a broadcast event channel. 
#### Architecture Summary @@ -988,7 +992,8 @@ SyncCoordinator ├── BlocksManager - Downloads matched blocks, processes through wallet ├── MasternodesManager - Synchronizes masternode list via QRInfo/MnListDiff ├── ChainLockManager - Receives and validates ChainLocks -└── InstantSendManager - Receives and validates InstantLocks +├── InstantSendManager - Receives and validates InstantLocks +└── MempoolManager - Tracks unconfirmed wallet transactions via BIP37 or full-fetch ``` #### Core Components @@ -1041,10 +1046,10 @@ The trait provides a default `run()` implementation with the standard event loop | `FilterHeadersStored` | FilterHeadersManager | FiltersManager | | `FiltersSyncComplete` | FiltersManager | BlocksManager | | `BlocksNeeded` | FiltersManager | BlocksManager | -| `BlockProcessed` | BlocksManager | FiltersManager (gap limit rescan) | +| `BlockProcessed` | BlocksManager | FiltersManager (gap limit rescan), MempoolManager (confirmed tx removal) | | `ChainLockReceived` | ChainLockManager | External listeners | -| `InstantLockReceived` | InstantSendManager | External listeners | -| `SyncComplete` | Coordinator | External listeners | +| `InstantLockReceived` | InstantSendManager | MempoolManager (IS lock association) | +| `SyncComplete` | Coordinator | MempoolManager (activation trigger), External listeners | ##### `src/sync/progress.rs` - Aggregate Progress @@ -1111,6 +1116,132 @@ sync// - Validates signatures - Emits `InstantLockReceived` events +##### `src/sync/mempool/` - Mempool Transaction Tracking + +Tracks unconfirmed transactions relevant to the wallet in real time after chain sync completes. Unlike other managers that participate in the initial sync pipeline, the mempool manager is purely post-sync: it activates only after `SyncComplete` and runs continuously until shutdown. 
+ +**Module structure:** +```text +sync/mempool/ +├── mod.rs - Module exports, bloom filter false-positive rate constant +├── manager.rs - Core state machine and transaction processing +├── sync_manager.rs - SyncManager trait implementation (event routing, tick logic) +├── filter.rs - BIP37 bloom filter construction from wallet addresses/outpoints +└── progress.rs - Progress tracking (received, relevant, tracked, removed) +``` + +**Multi-peer activation:** + +The manager activates mempool relay on all connected peers simultaneously. When `SyncComplete` arrives, `activate_all_peers()` enables relay on every peer that has completed handshake. Peers connecting after activation are activated immediately if the manager is already in `Synced` state. + +Since the client connects with `relay=false`, peers won't send transaction INVs until explicitly enabled. Two strategies control how relay is enabled: + +- **BloomFilter**: Sends a BIP37 bloom filter containing wallet address hashes (P2PKH/P2SH hash160) and UTXO outpoints via `filterload` (which implicitly enables filtered relay), then `mempool`. The peer filters INV messages server-side, reducing bandwidth. The filter is rebuilt on all activated peers when new addresses are discovered during block processing. +- **FetchAll**: Sends `filterclear` (which enables unfiltered relay), then `mempool`. The manager checks wallet relevance locally. Higher bandwidth but no address leakage to peers.
+ +**Transaction processing pipeline:** + +```text +Peer INV(tx) + │ + ▼ +handle_inv() + ├─ Skip if: in seen_txids (180s dedup window), pending, queued, or in mempool state + ├─ Skip if: at capacity (max_transactions) + └─ Enqueue to announcing peer's queue + │ + ▼ +send_queued() (up to 100 in-flight getdata requests) + │ + ▼ +Peer TX + │ + ▼ +handle_tx() + ├─ Add txid to seen_txids (prevents re-download from other peers) + ├─ Check for pre-arrived InstantSend lock in pending_is_locks + ├─ wallet.process_mempool_transaction(tx, is_locked) + │ ├─ Not relevant → discard + │ └─ Relevant → store in MempoolState + │ ├─ Wallet emits BalanceUpdated event + │ └─ New addresses discovered → flag filter rebuild + └─ Return MempoolTransactionResult { is_relevant, net_amount, is_outgoing, addresses, new_addresses } +``` + +The `seen_txids` map provides a 180-second deduplication window to handle the case where multiple peers respond to the initial `mempool` request with overlapping INVs. + +**Events consumed:** + +| Event | Action | +|-------|--------| +| `SyncComplete` | Activate mempool relay on all connected peers (transitions to `Synced`) | +| `BlockProcessed` | Remove confirmed txids from mempool state; immediately rebuild bloom filter if new addresses | +| `InstantLockReceived` | Mark transaction as IS-locked, or store in pending_is_locks if TX not yet received | +| `PeerConnected` | Activate on new peer immediately if already synced | +| `PeerDisconnected` | Remove peer; redistribute its queued txids to a random activated peer | +| `PeersUpdated(0)` | All peers lost: call `stop_sync()`, transition to `WaitingForConnections` | + +**InstantSend lock handling:** + +IS locks can arrive before or after their corresponding transaction. 
Both orderings are handled: +- Lock after TX: set `is_instant_send` flag on stored transaction, notify wallet via `process_instant_send_lock` +- Lock before TX: store lock in `pending_is_locks` map; when the TX arrives via `handle_tx()`, it is processed with the IS flag already set + +Pending IS locks are pruned after 24 hours alongside expired transactions. + +**Bloom filter lifecycle:** + +Rebuilds happen immediately when the wallet state changes: +- On `handle_tx()` when a wallet-relevant transaction is received (new UTXOs, spent inputs, potentially new addresses from gap limit maintenance) +- On `BlockProcessed` with confirmed txids or new addresses, if the sync state is `Synced` (during initial sync, filter rebuilds are deferred until sync completes) + +The rebuild sequence on each activated peer is: `filterclear` → `filterload` (with updated wallet data) → `mempool` (re-request inventory with the new filter). + +**Periodic maintenance (tick):** + +| Action | Trigger | +|--------|---------| +| Prune expired transactions | Transactions older than 24 hours | +| Requeue timed-out requests | Getdata requests unanswered for 120s | +| Drain queued txids | Send getdata up to 100 in-flight limit | + +**Peer failover:** + +Each peer has its own txid queue (`None` = connected but inactive, `Some(VecDeque)` = activated). 
On disconnect: +- Peer with queued txids: redistribute to a random activated peer +- No activated peers remaining: queued items dropped with warning +- All peers lost (`PeersUpdated` with count 0): manager transitions to `WaitingForConnections`, then re-activates via `start_sync()` when peers return + +**Wallet integration:** + +The `WalletInterface` trait provides four methods for mempool support: + +| Method | Purpose | +|--------|---------| +| `process_mempool_transaction(tx, is_instant_send)` | Check relevance across all accounts, return net amount and new addresses | +| `monitored_addresses()` | All watched addresses for bloom filter construction | +| `watched_outpoints()` | All owned UTXOs for bloom filter spend detection | +| `process_instant_send_lock(txid)` | Mark UTXOs as IS-locked, transition balance to spendable | + +**Balance semantics:** + +`MempoolState` tracks two pending balance categories: +- `pending_balance`: regular unconfirmed transactions +- `pending_instant_balance`: IS-locked transactions (immediately spendable) + +The wallet emits `BalanceUpdated` events only when balance actually changes, with four categories: spendable, unconfirmed, immature, locked. 
+ +**Capacity and limits:** + +| Parameter | Value | Purpose | +|-----------|-------|---------| +| `max_mempool_transactions` | configurable (default 1000) | Cap on tracked transactions | +| `MAX_IN_FLIGHT` | 100 | Max concurrent getdata requests | +| `MEMPOOL_TX_EXPIRY` | 24 hours | Auto-prune for unconfirmed transactions | +| `PENDING_REQUEST_TIMEOUT` | 120 seconds | Requeue unanswered getdata | +| `SEEN_TXID_EXPIRY` | 180 seconds | Dedup window for multi-peer INV overlap | +| `BLOOM_FALSE_POSITIVE_RATE` | 0.0005 (0.05%) | BIP37 filter false-positive rate | + #### Design Strengths - **True parallelism**: Headers, filters, and masternodes sync concurrently @@ -1134,3 +1265,4 @@ sync// | MasternodesManager | sync/masternodes/ | manager.rs, pipeline.rs, sync_manager.rs | Masternode list via QRInfo/MnListDiff | | ChainLockManager | sync/chainlock/ | manager.rs, sync_manager.rs | ChainLock message handling | | InstantSendManager | sync/instantsend/ | manager.rs, sync_manager.rs | InstantLock message handling | +| MempoolManager | sync/mempool/ | manager.rs, sync_manager.rs, filter.rs, progress.rs | Post-sync mempool transaction tracking via BIP37 or full-fetch | diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index dceea1caf..c96480b47 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -23,7 +23,7 @@ use crate::storage::{ }; use crate::sync::{ BlockHeadersManager, BlocksManager, ChainLockManager, FilterHeadersManager, FiltersManager, - InstantSendManager, Managers, MasternodesManager, SyncCoordinator, + InstantSendManager, Managers, MasternodesManager, MempoolManager, SyncCoordinator, }; use crate::types::MempoolState; use dashcore::sml::masternode_list_engine::MasternodeListEngine; @@ -122,10 +122,18 @@ impl DashSpvClient let storage = Arc::new(Mutex::new(storage)); diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index d4369efdc..01d7f4656 100644 ---
a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -992,6 +992,9 @@ impl PeerNetworkManager { } let preferred_service = match &message { + NetworkMessage::FilterLoad(_) + | NetworkMessage::FilterClear + | NetworkMessage::MemPool => Some((ServiceFlags::BLOOM, true)), NetworkMessage::GetCFHeaders(_) | NetworkMessage::GetCFilters(_) => { Some((ServiceFlags::COMPACT_FILTERS, true)) } diff --git a/dash-spv/src/network/mod.rs b/dash-spv/src/network/mod.rs index 35695cdaa..78ced64dc 100644 --- a/dash-spv/src/network/mod.rs +++ b/dash-spv/src/network/mod.rs @@ -24,6 +24,7 @@ use crate::error::NetworkResult; use crate::NetworkError; use dashcore::network::message::NetworkMessage; use dashcore::network::message_blockdata::{GetHeadersMessage, Inventory}; +use dashcore::network::message_bloom::FilterLoad; use dashcore::network::message_filter::{GetCFHeaders, GetCFilters}; use dashcore::network::message_qrinfo::GetQRInfo; use dashcore::network::message_sml::GetMnListDiff; @@ -145,6 +146,21 @@ impl RequestSender { hashes.into_iter().map(Inventory::Block).collect(), )) } + + /// Send a filterload message to a specific peer. + pub fn send_filter_load(&self, filter_load: FilterLoad, peer: SocketAddr) -> NetworkResult<()> { + self.send_message_to_peer(NetworkMessage::FilterLoad(filter_load), peer) + } + + /// Send a filterclear message to a specific peer. + pub fn send_filter_clear(&self, peer: SocketAddr) -> NetworkResult<()> { + self.send_message_to_peer(NetworkMessage::FilterClear, peer) + } + + /// Send a mempool message to request inventory from a specific peer. + pub fn request_mempool(&self, peer: SocketAddr) -> NetworkResult<()> { + self.send_message_to_peer(NetworkMessage::MemPool, peer) + } } /// Network manager trait for abstracting network operations. 
diff --git a/dash-spv/src/sync/blocks/manager.rs b/dash-spv/src/sync/blocks/manager.rs index 585e346de..bf289057b 100644 --- a/dash-spv/src/sync/blocks/manager.rs +++ b/dash-spv/src/sync/blocks/manager.rs @@ -100,6 +100,9 @@ impl BlocksManager = result.relevant_txids().cloned().collect(); + // Collect new addresses for gap limit rescanning let new_addresses: Vec<_> = result.new_addresses.into_iter().collect(); if !new_addresses.is_empty() { @@ -122,6 +125,7 @@ impl BlocksManager, + /// Transaction IDs confirmed in this block that are relevant to the wallet + confirmed_txids: Vec, }, /// Masternode state updated to a new height. diff --git a/dash-spv/src/sync/identifier.rs b/dash-spv/src/sync/identifier.rs index a09e3e8fd..048355073 100644 --- a/dash-spv/src/sync/identifier.rs +++ b/dash-spv/src/sync/identifier.rs @@ -10,6 +10,7 @@ pub enum ManagerIdentifier { Masternode, ChainLock, InstantSend, + Mempool, } impl Display for ManagerIdentifier { @@ -22,6 +23,7 @@ impl Display for ManagerIdentifier { ManagerIdentifier::Masternode => write!(f, "Masternode"), ManagerIdentifier::ChainLock => write!(f, "ChainLock"), ManagerIdentifier::InstantSend => write!(f, "InstantSend"), + ManagerIdentifier::Mempool => write!(f, "Mempool"), } } } @@ -39,5 +41,6 @@ mod tests { assert_eq!(ManagerIdentifier::Masternode.to_string(), "Masternode"); assert_eq!(ManagerIdentifier::ChainLock.to_string(), "ChainLock"); assert_eq!(ManagerIdentifier::InstantSend.to_string(), "InstantSend"); + assert_eq!(ManagerIdentifier::Mempool.to_string(), "Mempool"); } } diff --git a/dash-spv/src/sync/mempool/filter.rs b/dash-spv/src/sync/mempool/filter.rs new file mode 100644 index 000000000..c1b3e0229 --- /dev/null +++ b/dash-spv/src/sync/mempool/filter.rs @@ -0,0 +1,163 @@ +//! Bloom filter builder for wallet addresses and outpoints. +//! +//! Builds BIP37 bloom filters from wallet data for peer-side transaction filtering. 
+ +use dashcore::address::Payload; +use dashcore::bloom::BloomFilter; +use dashcore::consensus::Encodable; +use dashcore::network::message_bloom::{BloomFlags, FilterLoad}; +use dashcore::{Address, OutPoint}; + +use crate::error::{SyncError, SyncResult}; + +/// Extract the raw hash payload bytes from an address for bloom filter insertion. +fn address_payload_bytes(addr: &Address) -> Option> { + match addr.payload() { + Payload::PubkeyHash(hash) => Some(<[u8; 20]>::from(*hash).to_vec()), + Payload::ScriptHash(hash) => Some(<[u8; 20]>::from(*hash).to_vec()), + _ => { + tracing::warn!("skipping unknown address type for bloom filter: {:?}", addr); + None + } + } +} + +/// Build a bloom filter from wallet addresses and outpoints. +/// +/// Addresses are inserted as their raw hash payload bytes (20-byte hash160 +/// for P2PKH/P2SH). This matches what Dash Core's `CheckScript` extracts as +/// data pushes from scriptPubKeys. +/// +/// Outpoints are inserted as consensus-serialized bytes (`txid || vout_le`) +/// to detect transactions spending our UTXOs. 
+pub(super) fn build_wallet_bloom_filter( + addresses: &[Address], + outpoints: &[OutPoint], + false_positive_rate: f64, + tweak: u32, +) -> SyncResult { + let element_count = addresses.len() + outpoints.len(); + if element_count == 0 { + let filter = BloomFilter::new(1, false_positive_rate, tweak, BloomFlags::All) + .map_err(|e| SyncError::Validation(e.to_string()))?; + return Ok(FilterLoad::from_bloom_filter(&filter)); + } + + let mut filter = + BloomFilter::new(element_count as u32, false_positive_rate, tweak, BloomFlags::All) + .map_err(|e| SyncError::Validation(e.to_string()))?; + + for addr in addresses { + if let Some(payload) = address_payload_bytes(addr) { + filter.insert(&payload); + } + } + + for outpoint in outpoints { + let mut buf = Vec::new(); + outpoint.consensus_encode(&mut buf).map_err(|e| SyncError::Validation(e.to_string()))?; + filter.insert(&buf); + } + + Ok(FilterLoad::from_bloom_filter(&filter)) +} + +#[cfg(test)] +mod tests { + use std::slice; + + use super::*; + use crate::sync::mempool::BLOOM_FALSE_POSITIVE_RATE; + use dashcore::hashes::Hash; + use dashcore::{Network, Txid}; + + fn test_addr(seed: usize) -> Address { + Address::dummy(Network::Testnet, seed) + } + + fn test_outpoint(seed: u8, vout: u32) -> OutPoint { + OutPoint { + txid: Txid::from_byte_array([seed; 32]), + vout, + } + } + + fn outpoint_bytes(outpoint: &OutPoint) -> Vec { + let mut buf = Vec::new(); + outpoint.consensus_encode(&mut buf).unwrap(); + buf + } + + fn build_filter(addrs: &[Address], outpoints: &[OutPoint]) -> FilterLoad { + build_wallet_bloom_filter(addrs, outpoints, BLOOM_FALSE_POSITIVE_RATE, 0).unwrap() + } + + #[test] + fn test_address_membership() { + let addr = test_addr(0); + let other = test_addr(1); + let filter = build_filter(slice::from_ref(&addr), &[]).to_bloom_filter().unwrap(); + + assert!(filter.contains(&address_payload_bytes(&addr).unwrap())); + assert!(!filter.contains(&address_payload_bytes(&other).unwrap())); + } + + #[test] + fn 
test_outpoint_membership() { + let outpoint = test_outpoint(1, 0); + let filter = build_filter(&[], &[outpoint]).to_bloom_filter().unwrap(); + + assert!(filter.contains(&outpoint_bytes(&outpoint))); + } + + #[test] + fn test_empty_inputs() { + let filter = build_filter(&[], &[]).to_bloom_filter().unwrap(); + assert!(!filter.contains(&[1, 2, 3])); + } + + fn test_p2sh_addr(seed: u8) -> Address { + // Build OP_HASH160 <20-byte-hash> OP_EQUAL script, then wrap as P2SH + let redeem_script = dashcore::ScriptBuf::from(vec![seed; 20]); + Address::p2sh(&redeem_script, Network::Testnet).unwrap() + } + + #[test] + fn test_p2sh_address_membership() { + let addr = test_p2sh_addr(0x42); + let other = test_p2sh_addr(0x43); + let filter = build_filter(slice::from_ref(&addr), &[]).to_bloom_filter().unwrap(); + + assert!(filter.contains(&address_payload_bytes(&addr).unwrap())); + assert!(!filter.contains(&address_payload_bytes(&other).unwrap())); + } + + #[test] + fn test_combined_addresses_and_outpoints() { + let addr1 = test_addr(0); + let addr2 = test_p2sh_addr(0x10); + let op1 = test_outpoint(1, 0); + let op2 = test_outpoint(2, 1); + + let filter = + build_filter(&[addr1.clone(), addr2.clone()], &[op1, op2]).to_bloom_filter().unwrap(); + + assert!(filter.contains(&address_payload_bytes(&addr1).unwrap())); + assert!(filter.contains(&address_payload_bytes(&addr2).unwrap())); + assert!(filter.contains(&outpoint_bytes(&op1))); + assert!(filter.contains(&outpoint_bytes(&op2))); + + // Random data should not match + assert!(!filter.contains(&[0xff; 20])); + } + + #[test] + fn test_rejects_invalid_fp_rates() { + let addr = test_addr(0); + let addrs = slice::from_ref(&addr); + + for rate in [0.0, -0.5, 1.0, 1.5] { + assert!(build_wallet_bloom_filter(addrs, &[], rate, 0).is_err()); + } + } +} diff --git a/dash-spv/src/sync/mempool/manager.rs b/dash-spv/src/sync/mempool/manager.rs new file mode 100644 index 000000000..acc89f600 --- /dev/null +++ b/dash-spv/src/sync/mempool/manager.rs @@ 
-0,0 +1,1550 @@ +//! Mempool manager for monitoring unconfirmed transactions. +//! +//! Activates after initial sync is complete and uses either BIP37 bloom +//! filters or local address matching to identify wallet-relevant +//! transactions from the mempool. + +use std::collections::{HashMap, VecDeque}; +use std::fmt; +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use dashcore::network::message_blockdata::Inventory; +use dashcore::{Amount, Transaction, Txid}; +use rand::seq::IteratorRandom; +use tokio::sync::RwLock; + +use super::filter::build_wallet_bloom_filter; +use super::BLOOM_FALSE_POSITIVE_RATE; +use crate::client::config::MempoolStrategy; +use crate::error::SyncResult; +use crate::network::RequestSender; +use crate::sync::mempool::MempoolProgress; +use crate::sync::SyncEvent; +use crate::types::{MempoolState, UnconfirmedTransaction}; +use key_wallet::manager::WalletInterface; + +/// Timeout for pruning expired mempool transactions (24 hours). +pub(super) const MEMPOOL_TX_EXPIRY: Duration = Duration::from_secs(24 * 3600); + +/// Timeout for pending getdata requests that never received a response. +const PENDING_REQUEST_TIMEOUT: Duration = Duration::from_secs(120); + +/// Maximum number of in-flight getdata requests. +const MAX_IN_FLIGHT: usize = 100; + +/// Maximum number of pending IS locks awaiting their corresponding transaction. +const MAX_PENDING_IS_LOCKS: usize = 1000; + +/// How long a downloaded txid stays in the dedup map. +/// Covers the window where multiple peers respond to the initial `mempool` request. +const SEEN_TXID_EXPIRY: Duration = Duration::from_secs(180); + +/// Mempool manager that monitors unconfirmed transactions from the P2P network. 
+/// +/// Tracks connected peers via a unified map where: +/// - `None` = peer is connected but not yet activated (before sync completes) +/// - `Some(VecDeque)` = peer is activated (relay enabled), queue holds pending getdata txids +pub(crate) struct MempoolManager { + pub(super) progress: MempoolProgress, + pub(super) wallet: Arc>, + pub(super) mempool_state: Arc>, + strategy: MempoolStrategy, + max_transactions: usize, + /// Txids we have requested via getdata but not yet received, with request time. + pending_requests: HashMap, + /// Connected peers and their activation state. + pub(super) peers: HashMap>>, + /// IS lock txids that arrived before their corresponding transaction, with insertion time. + pending_is_locks: HashMap, + /// Txids already downloaded, with download timestamp. + /// Prevents duplicate downloads when multiple peers announce the same transactions. + /// Entries expire after `SEEN_TXID_EXPIRY`. + seen_txids: HashMap, +} + +impl MempoolManager { + /// Creates a new mempool manager with the given wallet, shared mempool state, + /// bloom filter strategy, and transaction capacity limit. + pub(crate) fn new( + wallet: Arc>, + mempool_state: Arc>, + strategy: MempoolStrategy, + max_transactions: usize, + ) -> Self { + Self { + progress: MempoolProgress::default(), + wallet, + mempool_state, + strategy, + max_transactions, + pending_requests: HashMap::new(), + peers: HashMap::new(), + pending_is_locks: HashMap::new(), + seen_txids: HashMap::new(), + } + } + + /// Activate mempool monitoring on a single peer. 
+ /// + /// Since we connect with `relay=false`, peers won't send transaction INVs + /// until we explicitly enable relay: + /// - BloomFilter strategy: sends `filterload` (which enables filtered relay) + `mempool` + /// - FetchAll strategy: sends `filterclear` (which enables unfiltered relay) + `mempool` + pub(super) async fn activate_peer( + &mut self, + peer: SocketAddr, + requests: &RequestSender, + ) -> SyncResult<()> { + tracing::info!("Activating mempool on peer {} (strategy: {:?})", peer, self.strategy); + + match self.strategy { + MempoolStrategy::BloomFilter => { + self.load_bloom_filter(peer, requests).await?; + } + MempoolStrategy::FetchAll => { + requests.send_filter_clear(peer)?; + } + } + requests.request_mempool(peer)?; + + self.peers.insert(peer, Some(VecDeque::new())); + Ok(()) + } + + /// Activate mempool relay on all connected but not-yet-activated peers. + pub(super) async fn activate_all_peers(&mut self, requests: &RequestSender) -> SyncResult<()> { + let inactive: Vec = + self.peers.iter().filter(|(_, v)| v.is_none()).map(|(k, _)| *k).collect(); + for peer in inactive { + self.activate_peer(peer, requests).await?; + } + Ok(()) + } + + /// Build and send a bloom filter to the mempool peer. 
+ async fn load_bloom_filter( + &mut self, + peer: SocketAddr, + requests: &RequestSender, + ) -> SyncResult<()> { + let wallet = self.wallet.read().await; + let addresses = wallet.monitored_addresses(); + let outpoints = wallet.watched_outpoints(); + drop(wallet); + + if addresses.is_empty() && outpoints.is_empty() { + tracing::debug!("No addresses or outpoints to build bloom filter from"); + return Ok(()); + } + + let filter_load = build_wallet_bloom_filter( + &addresses, + &outpoints, + BLOOM_FALSE_POSITIVE_RATE, + rand::random(), + )?; + + tracing::info!( + "Built bloom filter with {} addresses and {} outpoints (fp_rate={}, size={}B)", + addresses.len(), + outpoints.len(), + BLOOM_FALSE_POSITIVE_RATE, + filter_load.filter.len() + ); + + requests.send_filter_load(filter_load, peer)?; + + Ok(()) + } + + /// Rebuild the bloom filter on all activated peers. + pub(super) async fn rebuild_filter(&mut self, requests: &RequestSender) -> SyncResult<()> { + if self.strategy != MempoolStrategy::BloomFilter { + return Ok(()); + } + + let activated: Vec = + self.peers.iter().filter(|(_, v)| v.is_some()).map(|(k, _)| *k).collect(); + + if activated.is_empty() { + return Ok(()); + } + + for peer in activated { + requests.send_filter_clear(peer)?; + self.load_bloom_filter(peer, requests).await?; + requests.request_mempool(peer)?; + } + + Ok(()) + } + + /// Handle incoming inventory announcements. + /// + /// Filters for new transaction txids and enqueues them. The actual getdata + /// requests are sent by `send_queued()`, respecting the in-flight limit. 
+ pub(super) async fn handle_inv( + &mut self, + inv: &[Inventory], + peer: SocketAddr, + requests: &RequestSender, + ) -> SyncResult> { + let mempool_full = + self.mempool_state.read().await.transactions.len() >= self.max_transactions; + if mempool_full { + return Ok(vec![]); + } + + let total_queued: usize = + self.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum(); + let mut enqueued = 0; + for item in inv { + let Inventory::Transaction(txid) = item else { + continue; + }; + + if self.seen_txids.get(txid).is_some_and(|t| t.elapsed() < SEEN_TXID_EXPIRY) + || self.pending_requests.contains_key(txid) + || self.is_queued(txid) + || self.mempool_state.read().await.transactions.contains_key(txid) + { + continue; + } + if self.pending_requests.len() + total_queued + enqueued >= self.max_transactions { + break; + } + // Only queue on activated peers + if let Some(Some(queue)) = self.peers.get_mut(&peer) { + queue.push_back(*txid); + enqueued += 1; + } + } + + if enqueued > 0 { + tracing::debug!("Enqueued {} mempool txids for download", enqueued); + self.send_queued(requests).await?; + } + + Ok(vec![]) + } + + /// Drain per-peer queues and send getdata for up to `MAX_IN_FLIGHT` items. + /// + /// Deduplicates at send time against `pending_requests` and `mempool_state` + /// in case a transaction was received between enqueue and send. 
+ pub(super) async fn send_queued(&mut self, requests: &RequestSender) -> SyncResult<()> { + let mut available = MAX_IN_FLIGHT.saturating_sub(self.pending_requests.len()); + let has_queued = self.peers.values().any(|v| v.as_ref().is_some_and(|q| !q.is_empty())); + if available == 0 || !has_queued { + return Ok(()); + } + + let now = Instant::now(); + let mut per_peer: HashMap> = HashMap::new(); + + let activated_peers: Vec = self + .peers + .iter() + .filter(|(_, v)| v.as_ref().is_some_and(|q| !q.is_empty())) + .map(|(k, _)| *k) + .collect(); + for peer in activated_peers { + if available == 0 { + break; + } + let Some(Some(queue)) = self.peers.get_mut(&peer) else { + continue; + }; + while available > 0 { + let Some(txid) = queue.pop_front() else { + break; + }; + if self.pending_requests.contains_key(&txid) + || self.mempool_state.read().await.transactions.contains_key(&txid) + { + continue; + } + self.pending_requests.insert(txid, now); + per_peer.entry(peer).or_default().push(Inventory::Transaction(txid)); + available -= 1; + } + } + + let total_queued: usize = + self.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum(); + for (peer, inventory) in per_peer { + if inventory.is_empty() { + continue; + } + tracing::debug!( + "Requesting {} mempool transactions via getdata from {} ({} still queued)", + inventory.len(), + peer, + total_queued, + ); + requests.request_inventory(inventory, peer)?; + } + Ok(()) + } + + /// Handle a received transaction. 
+ pub(super) async fn handle_tx( + &mut self, + tx: Transaction, + requests: &RequestSender, + ) -> SyncResult> { + let txid = tx.txid(); + self.pending_requests.remove(&txid); + self.seen_txids.insert(txid, Instant::now()); + self.progress.add_received(1); + + // Check for a pre-arrived IS lock before wallet processing consumes it + let is_locked = self.pending_is_locks.remove(&txid).is_some(); + + let result = { + let mut wallet = self.wallet.write().await; + wallet.process_mempool_transaction(&tx, is_locked).await + }; + + if !result.is_relevant { + return Ok(vec![]); + } + + self.progress.add_relevant(1); + tracing::info!("Wallet-relevant mempool transaction: {}", txid); + + // Build and store the unconfirmed transaction. + // The wallet already confirmed relevance, so we store unconditionally. + let unconfirmed_tx = UnconfirmedTransaction::new( + tx, + Amount::ZERO, + is_locked, + result.is_outgoing, + result.addresses, + result.net_amount, + ); + { + let mut state = self.mempool_state.write().await; + state.add_transaction(unconfirmed_tx); + self.progress.set_tracked(state.transactions.len() as u32); + } + + // Wallet-relevant transactions change the monitored set (new UTXOs, spent + // inputs, potentially new addresses from gap limit maintenance). + self.rebuild_filter(requests).await?; + + Ok(vec![]) + } + + /// Remove transactions from the mempool that have been confirmed in a block. 
+ pub(super) async fn remove_confirmed(&mut self, txids: &[Txid]) { + self.seen_txids.retain(|_, t| t.elapsed() < SEEN_TXID_EXPIRY); + let mut removed = Vec::new(); + { + let mut state = self.mempool_state.write().await; + for txid in txids { + if state.remove_transaction(txid).is_some() { + removed.push(*txid); + } + } + if !removed.is_empty() { + self.progress.add_removed(removed.len() as u32); + self.progress.set_tracked(state.transactions.len() as u32); + tracing::debug!("Removed {} confirmed transactions from mempool", removed.len()); + } + } + } + + /// Mark a mempool transaction as InstantSend-locked and notify the wallet. + /// + /// If the transaction hasn't arrived yet, remembers the txid so the lock + /// can be applied when the transaction is later received via `handle_tx`. + pub(super) async fn mark_instant_send(&mut self, txid: &Txid) { + let mut state = self.mempool_state.write().await; + let marked = if let Some(tx) = state.transactions.get_mut(txid) { + tx.is_instant_send = true; + tracing::debug!("Marked mempool tx {} as InstantSend-locked", txid); + true + } else if self.pending_is_locks.len() < MAX_PENDING_IS_LOCKS { + self.pending_is_locks.insert(*txid, Instant::now()); + tracing::debug!("IS lock arrived before tx {}, remembering for later", txid); + false + } else { + tracing::warn!( + "Pending IS locks at capacity ({}), dropping IS lock for {}", + MAX_PENDING_IS_LOCKS, + txid + ); + false + }; + drop(state); + if marked { + let mut wallet = self.wallet.write().await; + wallet.process_instant_send_lock(*txid); + } + } + + /// Prune transactions and pending IS locks older than `timeout`. 
+ pub(super) async fn prune_expired(&mut self, timeout: Duration) { + let mut state = self.mempool_state.write().await; + let pruned = state.prune_expired(timeout); + if !pruned.is_empty() { + self.progress.add_removed(pruned.len() as u32); + self.progress.set_tracked(state.transactions.len() as u32); + tracing::debug!("Pruned {} expired mempool transactions", pruned.len()); + for txid in &pruned { + self.pending_is_locks.remove(txid); + } + } + + // Prune pending IS locks whose transaction never arrived + let before = self.pending_is_locks.len(); + self.pending_is_locks.retain(|_, inserted_at| inserted_at.elapsed() < timeout); + let expired = before - self.pending_is_locks.len(); + if expired > 0 { + tracing::debug!("Pruned {} expired pending IS locks", expired); + } + } + + fn is_queued(&self, txid: &Txid) -> bool { + self.peers.values().filter_map(|v| v.as_ref()).any(|q| q.contains(txid)) + } + + /// Register a newly connected peer (not yet activated). + pub(super) fn handle_peer_connected(&mut self, peer: SocketAddr) { + self.peers.entry(peer).or_insert(None); + } + + /// Remove a disconnected peer, redistributing its queued txids to another activated peer. + pub(super) fn handle_peer_disconnected(&mut self, peer: SocketAddr) { + if let Some(Some(orphaned)) = self.peers.remove(&peer) { + if !orphaned.is_empty() { + let target = self + .peers + .iter_mut() + .filter(|(_, v)| v.is_some()) + .map(|(_, v)| v) + .choose(&mut rand::thread_rng()); + if let Some(Some(queue)) = target { + queue.extend(orphaned); + } else { + tracing::warn!( + "Dropped {} orphaned txids from disconnected peer {}: no activated peers available", + orphaned.len(), + peer + ); + } + } + } + } + + /// Clear all peer state, pending requests, and pending IS locks. + pub(super) fn clear_pending(&mut self) { + self.pending_requests.clear(); + self.peers.clear(); + self.pending_is_locks.clear(); + } + + /// Remove pending requests that have timed out without receiving a response. 
+ /// Timed-out txids are re-queued to any connected peer for retry. + pub(super) fn prune_pending_requests(&mut self) { + let mut timed_out = Vec::new(); + self.pending_requests.retain(|txid, requested_at| { + if requested_at.elapsed() >= PENDING_REQUEST_TIMEOUT { + timed_out.push(*txid); + false + } else { + true + } + }); + if timed_out.is_empty() { + return; + } + tracing::debug!("Pruned {} timed-out pending requests, re-queuing", timed_out.len()); + let target = + self.peers.values_mut().filter_map(|v| v.as_mut()).choose(&mut rand::thread_rng()); + if let Some(queue) = target { + queue.extend(timed_out); + } else { + tracing::warn!( + "Dropped {} timed-out txids: no activated peers available for re-queue", + timed_out.len() + ); + } + } +} + +impl fmt::Debug for MempoolManager { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let activated = self.peers.values().filter(|v| v.is_some()).count(); + f.debug_struct("MempoolManager") + .field("progress", &self.progress) + .field("strategy", &self.strategy) + .field("pending_requests", &self.pending_requests.len()) + .field("peers", &self.peers.len()) + .field("activated_peers", &activated) + .field( + "queued", + &self.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + ) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::network::NetworkRequest; + use dashcore::hashes::Hash; + use dashcore::network::message::NetworkMessage; + use dashcore::{Address, BlockHash, Network, ScriptBuf, Transaction}; + use key_wallet::test_utils::MockWallet; + use key_wallet::transaction_checking::TransactionContext; + + use crate::sync::SyncState; + use crate::test_utils::test_socket_address; + use tokio::sync::mpsc; + + fn create_test_manager( + ) -> (MempoolManager, RequestSender, mpsc::UnboundedReceiver) { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, rx) = mpsc::unbounded_channel::(); 
+ let requests = RequestSender::new(tx); + + let mut manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000); + manager.progress.set_state(SyncState::Synced); + + (manager, requests, rx) + } + + fn create_bloom_manager( + ) -> (MempoolManager, RequestSender, mpsc::UnboundedReceiver) { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + + (manager, requests, rx) + } + + #[tokio::test] + async fn test_activation_fetch_all() { + let peer = test_socket_address(1); + let (mut manager, requests, mut rx) = create_test_manager(); + manager.activate_peer(peer, &requests).await.unwrap(); + + // FetchAll activation sends filterclear then mempool to the chosen peer + let msg1 = rx.recv().await.unwrap(); + assert!( + matches!(msg1, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterClear, p) if p == peer) + ); + let msg2 = rx.recv().await.unwrap(); + assert!( + matches!(msg2, NetworkRequest::SendMessageToPeer(NetworkMessage::MemPool, p) if p == peer) + ); + assert!(matches!(manager.peers.get(&peer), Some(Some(_)))); + } + + #[tokio::test] + async fn test_activation_bloom_filter_skips_empty_wallet() { + let (mut manager, requests, mut rx) = create_bloom_manager(); + manager.activate_peer(test_socket_address(1), &requests).await.unwrap(); + + // No addresses in mock wallet, so only MemPool should be sent (no FilterLoad) + let mut found_filter_load = false; + while let Ok(msg) = rx.try_recv() { + if matches!(msg, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) { + found_filter_load = true; + } + } + assert!(!found_filter_load, "should not send FilterLoad for empty wallet"); + } + + #[tokio::test] + async fn test_handle_inv_deduplication() { + let (mut manager, 
requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + let txid = Txid::from_byte_array([1u8; 32]); + let inv = vec![Inventory::Transaction(txid)]; + + // First call should add to pending + let events = manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert!(events.is_empty()); + assert!(manager.pending_requests.contains_key(&txid)); + + // Second call with same txid should be filtered out + let events = manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.pending_requests.len(), 1); + } + + #[tokio::test] + async fn test_handle_inv_capacity_limit() { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, _rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let mut manager = MempoolManager::new( + wallet, + mempool_state.clone(), + MempoolStrategy::FetchAll, + 2, // Very small capacity + ); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + // Fill mempool to capacity + { + let mut state = mempool_state.write().await; + for i in 0..2u32 { + let tx = Transaction { + version: 1, + lock_time: i, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + state.add_transaction(UnconfirmedTransaction::new( + tx, + Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); + } + } + + // New transactions should be filtered out + let new_txid = Txid::from_byte_array([99u8; 32]); + let inv = vec![Inventory::Transaction(new_txid)]; + let events = manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert!(events.is_empty()); + assert!(!manager.pending_requests.contains_key(&new_txid)); + } + + #[tokio::test] + async fn test_handle_inv_pending_requests_limit() { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = 
Arc::new(RwLock::new(MempoolState::default())); + let (tx, _rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let mut manager = MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 2); + manager.progress.set_state(SyncState::Synced); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + // Fill pending requests to capacity + let inv1: Vec = + (0..2).map(|i| Inventory::Transaction(Txid::from_byte_array([i; 32]))).collect(); + manager.handle_inv(&inv1, peer, &requests).await.unwrap(); + assert_eq!(manager.pending_requests.len(), 2); + + // Additional requests should be rejected when pending is at capacity + let extra_txid = Txid::from_byte_array([99; 32]); + let inv2 = vec![Inventory::Transaction(extra_txid)]; + manager.handle_inv(&inv2, peer, &requests).await.unwrap(); + assert!(!manager.pending_requests.contains_key(&extra_txid)); + } + + #[test] + fn test_prune_pending_requests_timeout() { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, _rx) = mpsc::unbounded_channel::(); + let _requests = RequestSender::new(tx); + + let mut manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000); + + let fresh_txid = Txid::from_byte_array([1; 32]); + let stale_txid = Txid::from_byte_array([2; 32]); + + manager.pending_requests.insert(fresh_txid, Instant::now()); + manager + .pending_requests + .insert(stale_txid, Instant::now() - PENDING_REQUEST_TIMEOUT - Duration::from_secs(1)); + + manager.prune_pending_requests(); + + assert!(manager.pending_requests.contains_key(&fresh_txid)); + assert!(!manager.pending_requests.contains_key(&stale_txid)); + } + + #[tokio::test] + async fn test_handle_tx_irrelevant() { + let (mut manager, requests, _rx) = create_test_manager(); + + let tx = Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + 
special_transaction_payload: None, + }; + let txid = tx.txid(); + + let events = manager.handle_tx(tx, &requests).await.unwrap(); + // MockWallet returns is_relevant=false by default + assert!(events.is_empty()); + assert_eq!(manager.progress.received(), 1); + + // Irrelevant tx should not be stored in mempool state + let state = manager.mempool_state.read().await; + assert!(!state.transactions.contains_key(&txid)); + assert_eq!(manager.progress.relevant(), 0); + } + + #[tokio::test] + async fn test_handle_inv_non_transaction_filtered() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + let inv = vec![ + Inventory::Block(BlockHash::all_zeros()), + Inventory::Transaction(Txid::from_byte_array([1u8; 32])), + ]; + + let events = manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert!(events.is_empty()); + // Only the transaction should be tracked, not the block + assert_eq!(manager.pending_requests.len(), 1); + } + + #[tokio::test] + async fn test_prune_expired() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let fresh_tx = Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let fresh_txid = fresh_tx.txid(); + + let expired_tx = Transaction { + version: 1, + lock_time: 99, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let expired_txid = expired_tx.txid(); + let test_timeout = Duration::from_secs(2); + + { + let mut state = manager.mempool_state.write().await; + state.add_transaction(UnconfirmedTransaction::new( + fresh_tx, + Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); + let mut expired_utx = UnconfirmedTransaction::new( + expired_tx, + Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + ); + expired_utx.first_seen = Instant::now() - test_timeout - Duration::from_secs(1); + 
state.add_transaction(expired_utx); + } + + manager.prune_expired(test_timeout).await; + + let state = manager.mempool_state.read().await; + assert_eq!(state.transactions.len(), 1); + assert!(state.transactions.contains_key(&fresh_txid)); + assert!(!state.transactions.contains_key(&expired_txid)); + drop(state); + assert_eq!(manager.progress.removed(), 1); + } + + /// Create a manager with BloomFilter strategy where the wallet reports + /// mempool transactions as relevant. BloomFilter strategy skips local + /// address pre-filtering, relying on the wallet for definitive checks. + fn create_relevant_manager( + ) -> (MempoolManager, RequestSender, Arc>) { + let mut mock = MockWallet::new(); + mock.set_mempool_relevant(true); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, _rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let manager = + MempoolManager::new(wallet.clone(), mempool_state, MempoolStrategy::BloomFilter, 1000); + + (manager, requests, wallet) + } + + #[tokio::test] + async fn test_handle_tx_relevant_stores_transaction() { + let (mut manager, requests, _wallet) = create_relevant_manager(); + + let tx = Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + let events = manager.handle_tx(tx, &requests).await.unwrap(); + assert!(events.is_empty()); + + // Verify transaction was stored in mempool state + let state = manager.mempool_state.read().await; + assert!(state.transactions.contains_key(&txid)); + assert_eq!(manager.progress.received(), 1); + assert_eq!(manager.progress.relevant(), 1); + assert_eq!(manager.progress.tracked(), 1); + } + + #[tokio::test] + async fn test_handle_tx_clears_pending_request() { + let (mut manager, requests, _wallet) = create_relevant_manager(); + + let tx = Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: 
vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + // Simulate that we requested this transaction + manager.pending_requests.insert(txid, Instant::now()); + assert!(manager.pending_requests.contains_key(&txid)); + + manager.handle_tx(tx, &requests).await.unwrap(); + // Pending request should be cleared regardless of relevance + assert!(!manager.pending_requests.contains_key(&txid)); + + // Since the manager uses BloomFilter strategy (relevant mock), tx should be stored + let state = manager.mempool_state.read().await; + assert!(state.transactions.contains_key(&txid)); + } + + fn create_bloom_manager_with_addresses( + addresses: Vec
, + ) -> (MempoolManager, RequestSender, mpsc::UnboundedReceiver) { + let mut mock = MockWallet::new(); + mock.set_addresses(addresses); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + + (manager, requests, rx) + } + + /// Create a test P2PKH address from a byte pattern. + fn test_address(byte: u8) -> Address { + // Build OP_DUP OP_HASH160 <20-byte-hash> OP_EQUALVERIFY OP_CHECKSIG + let mut script_bytes = vec![0x76, 0xa9, 0x14]; // OP_DUP OP_HASH160 PUSH20 + script_bytes.extend_from_slice(&[byte; 20]); + script_bytes.push(0x88); // OP_EQUALVERIFY + script_bytes.push(0xac); // OP_CHECKSIG + let script = ScriptBuf::from(script_bytes); + Address::from_script(&script, Network::Testnet).unwrap() + } + + #[tokio::test] + async fn test_bloom_filter_loaded_with_addresses() { + let addr = test_address(0xab); + + let (mut manager, requests, mut rx) = create_bloom_manager_with_addresses(vec![addr]); + manager.activate_peer(test_socket_address(1), &requests).await.unwrap(); + + let mut found_filter_load = false; + while let Ok(msg) = rx.try_recv() { + if matches!(msg, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) { + found_filter_load = true; + } + } + assert!(found_filter_load, "expected FilterLoad for wallet with addresses"); + } + + #[tokio::test] + async fn test_mark_instant_send_emits_status_change() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let tx = Transaction { + version: 1, + lock_time: 42, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + { + let mut state = manager.mempool_state.write().await; + state.add_transaction(UnconfirmedTransaction::new( + tx, + Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); 
+ } + + manager.mark_instant_send(&txid).await; + + // Verify mempool state also reflects IS flag + let state = manager.mempool_state.read().await; + assert!(state.transactions.get(&txid).unwrap().is_instant_send); + drop(state); + + let wallet = manager.wallet.read().await; + let status_changes = wallet.status_changes(); + let changes = status_changes.lock().await; + assert_eq!(changes.len(), 1); + assert_eq!(changes[0].0, txid); + assert_eq!(changes[0].1, TransactionContext::InstantSend); + } + + #[tokio::test] + async fn test_mark_instant_send_stores_pending_for_unknown() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let unknown_txid = Txid::from_byte_array([0xbb; 32]); + manager.mark_instant_send(&unknown_txid).await; + + // No immediate wallet notification + let wallet = manager.wallet.read().await; + let status_changes = wallet.status_changes(); + let changes = status_changes.lock().await; + assert!(changes.is_empty()); + + // But the txid is remembered for when the transaction arrives + assert!(manager.pending_is_locks.contains_key(&unknown_txid)); + } + + #[tokio::test] + async fn test_in_flight_limit() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + // Send 200 INVs — only MAX_IN_FLIGHT should go to pending, rest queued + let inv: Vec = (0..200u16) + .map(|i| { + let mut bytes = [0u8; 32]; + bytes[0..2].copy_from_slice(&i.to_le_bytes()); + Inventory::Transaction(Txid::from_byte_array(bytes)) + }) + .collect(); + + manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert_eq!(manager.pending_requests.len(), MAX_IN_FLIGHT); + assert_eq!( + manager.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + 100 + ); + } + + #[tokio::test] + async fn test_send_queued_drains_after_response() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + 
manager.peers.insert(peer, Some(VecDeque::new())); + + // Fill with 150 INVs + let inv: Vec = (0..150u16) + .map(|i| { + let mut bytes = [0u8; 32]; + bytes[0..2].copy_from_slice(&i.to_le_bytes()); + Inventory::Transaction(Txid::from_byte_array(bytes)) + }) + .collect(); + + manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert_eq!(manager.pending_requests.len(), MAX_IN_FLIGHT); + assert_eq!( + manager.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + 50 + ); + + // Simulate receiving 10 responses (freeing 10 slots) + let pending_txids: Vec = manager.pending_requests.keys().take(10).copied().collect(); + for txid in &pending_txids { + manager.pending_requests.remove(txid); + } + assert_eq!(manager.pending_requests.len(), 90); + + // send_queued should fill the freed slots + manager.send_queued(&requests).await.unwrap(); + assert_eq!(manager.pending_requests.len(), MAX_IN_FLIGHT); + assert_eq!( + manager.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + 40 + ); + } + + #[tokio::test] + async fn test_send_queued_skips_already_received() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + + // Create a real transaction and get its actual txid + let tx = Transaction { + version: 1, + lock_time: 0xaa, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + // Enqueue the txid on an activated peer + manager.peers.insert(peer, Some(VecDeque::from([txid]))); + + // Simulate the transaction arriving in mempool_state before send + { + let mut state = manager.mempool_state.write().await; + state.add_transaction(UnconfirmedTransaction::new( + tx, + Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); + } + + manager.send_queued(&requests).await.unwrap(); + // Txid should have been skipped, not added to pending + assert!(manager.pending_requests.is_empty()); + assert!(manager.peers.values().filter_map(|v| 
v.as_ref()).all(|q| q.is_empty())); + } + + #[test] + fn test_clear_pending_clears_queue() { + let (mut manager, _requests, _rx) = create_test_manager(); + + manager.pending_requests.insert(Txid::from_byte_array([1; 32]), Instant::now()); + manager + .peers + .insert(test_socket_address(1), Some(VecDeque::from([Txid::from_byte_array([2; 32])]))); + manager.pending_is_locks.insert(Txid::from_byte_array([3; 32]), Instant::now()); + + manager.clear_pending(); + + assert!(manager.pending_requests.is_empty()); + assert!(manager.peers.is_empty()); + assert!(manager.pending_is_locks.is_empty()); + } + + #[tokio::test] + async fn test_send_queued_noop_at_capacity() { + let (mut manager, requests, _rx) = create_test_manager(); + + // Fill pending to MAX_IN_FLIGHT + for i in 0..MAX_IN_FLIGHT as u16 { + let mut bytes = [0u8; 32]; + bytes[0..2].copy_from_slice(&i.to_le_bytes()); + manager.pending_requests.insert(Txid::from_byte_array(bytes), Instant::now()); + } + + // Add something to the queue on an activated peer + manager.peers.insert( + test_socket_address(1), + Some(VecDeque::from([Txid::from_byte_array([0xff; 32])])), + ); + + manager.send_queued(&requests).await.unwrap(); + // Queue should remain unchanged (one peer with one txid) + assert_eq!( + manager.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + 1 + ); + assert_eq!(manager.pending_requests.len(), MAX_IN_FLIGHT); + } + + #[tokio::test] + async fn test_instant_send_before_transaction() { + let (mut manager, requests, _wallet) = create_relevant_manager(); + + let tx = Transaction { + version: 1, + lock_time: 77, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + // IS lock arrives before the transaction + manager.mark_instant_send(&txid).await; + assert!(manager.pending_is_locks.contains_key(&txid)); + + // Transaction arrives + manager.handle_tx(tx, &requests).await.unwrap(); + + // Pending IS lock consumed + 
assert!(manager.pending_is_locks.is_empty()); + + // Transaction stored with IS flag set + let state = manager.mempool_state.read().await; + assert!(state.transactions.get(&txid).unwrap().is_instant_send); + } + + #[tokio::test] + async fn test_instant_send_before_irrelevant_transaction() { + let (mut manager, requests, _rx) = create_test_manager(); + + let tx = Transaction { + version: 1, + lock_time: 88, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + // IS lock arrives before the transaction + manager.mark_instant_send(&txid).await; + assert!(manager.pending_is_locks.contains_key(&txid)); + + // Transaction arrives but wallet says it's not relevant + manager.handle_tx(tx, &requests).await.unwrap(); + + // Pending IS lock cleaned up (no leak) + assert!(manager.pending_is_locks.is_empty()); + + // Irrelevant tx should not be stored in mempool state + let state = manager.mempool_state.read().await; + assert!(!state.transactions.contains_key(&txid)); + } + + #[tokio::test] + async fn test_pending_is_locks_capacity_limit() { + let (mut manager, _requests, _rx) = create_test_manager(); + + // Fill pending IS locks to capacity + for i in 0..MAX_PENDING_IS_LOCKS { + let mut bytes = [0u8; 32]; + bytes[0..8].copy_from_slice(&(i as u64).to_le_bytes()); + manager.pending_is_locks.insert(Txid::from_byte_array(bytes), Instant::now()); + } + assert_eq!(manager.pending_is_locks.len(), MAX_PENDING_IS_LOCKS); + + // Next IS lock should be dropped + let overflow_txid = Txid::from_byte_array([0xff; 32]); + manager.mark_instant_send(&overflow_txid).await; + assert!(!manager.pending_is_locks.contains_key(&overflow_txid)); + assert_eq!(manager.pending_is_locks.len(), MAX_PENDING_IS_LOCKS); + } + + #[tokio::test] + async fn test_prune_expired_removes_is_lock_for_expired_tx() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let tx = Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + 
special_transaction_payload: None, + }; + let txid = tx.txid(); + + let test_timeout = Duration::from_secs(2); + + // Add the tx with a timestamp in the past so it expires + { + let mut state = manager.mempool_state.write().await; + let mut utx = + UnconfirmedTransaction::new(tx, Amount::from_sat(0), false, false, Vec::new(), 0); + utx.first_seen = Instant::now() - test_timeout - Duration::from_secs(1); + state.add_transaction(utx); + } + + // Also store a pending IS lock for this txid and an unrelated one + let unrelated_txid = Txid::from_byte_array([0xdd; 32]); + manager.pending_is_locks.insert(txid, Instant::now()); + manager.pending_is_locks.insert(unrelated_txid, Instant::now()); + + manager.prune_expired(test_timeout).await; + + // The expired tx's IS lock should be removed + assert!( + !manager.pending_is_locks.contains_key(&txid), + "IS lock for expired tx should be removed" + ); + // The unrelated IS lock should be preserved + assert!( + manager.pending_is_locks.contains_key(&unrelated_txid), + "IS lock for non-expired tx should be preserved" + ); + } + + #[tokio::test] + async fn test_prune_expired_removes_stale_pending_is_locks() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let test_timeout = Duration::from_secs(2); + + // Insert a pending IS lock that is older than the test timeout + let stale_txid = Txid::from_byte_array([0xaa; 32]); + manager + .pending_is_locks + .insert(stale_txid, Instant::now() - test_timeout - Duration::from_secs(1)); + + // Insert a fresh pending IS lock + let fresh_txid = Txid::from_byte_array([0xbb; 32]); + manager.pending_is_locks.insert(fresh_txid, Instant::now()); + + manager.prune_expired(test_timeout).await; + + assert!( + !manager.pending_is_locks.contains_key(&stale_txid), + "stale pending IS lock should be pruned" + ); + assert!( + manager.pending_is_locks.contains_key(&fresh_txid), + "fresh pending IS lock should be preserved" + ); + } + + #[tokio::test] + async fn 
test_handle_inv_dedup_against_queue() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + // Fill pending to capacity so items go to queue + for i in 0..MAX_IN_FLIGHT as u16 { + let mut bytes = [0u8; 32]; + bytes[0..2].copy_from_slice(&i.to_le_bytes()); + manager.pending_requests.insert(Txid::from_byte_array(bytes), Instant::now()); + } + + let txid = Txid::from_byte_array([0xff; 32]); + let inv = vec![Inventory::Transaction(txid)]; + + // First call enqueues + manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert_eq!( + manager.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + 1 + ); + + // Second call with same txid should be deduped + manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert_eq!( + manager.peers.values().filter_map(|v| v.as_ref()).map(|q| q.len()).sum::(), + 1 + ); + } + + #[tokio::test] + async fn test_bloom_filter_load_failure_propagates() { + let addr = test_address(0xab); + let mut mock = MockWallet::new(); + mock.set_addresses(vec![addr]); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let mut manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + + // Drop receiver so send_filter_load fails + drop(rx); + + // activate() should propagate the error + let result = manager.activate_peer(test_socket_address(1), &requests).await; + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_handle_tx_relevant_populates_wallet_effect_fields() { + let (mut manager, requests, wallet) = create_relevant_manager(); + + let tx = Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + // Set effect data on the mock 
wallet before handle_tx + { + let w = wallet.read().await; + w.set_effect(txid, 50000, vec!["yWdXnYxGbouNoo8yMvcbZmZ3Gdp6BpySxL".into()]).await; + } + + manager.handle_tx(tx, &requests).await.unwrap(); + + let state = manager.mempool_state.read().await; + let stored = state.transactions.get(&txid).unwrap(); + assert_eq!(stored.net_amount, 50000); + assert!(!stored.is_outgoing); + assert!(!stored.is_instant_send); + assert_eq!(stored.addresses.len(), 1); + assert_eq!(stored.addresses[0].to_string(), "yWdXnYxGbouNoo8yMvcbZmZ3Gdp6BpySxL"); + } + + #[tokio::test] + async fn test_handle_tx_outgoing_transaction() { + let (mut manager, requests, wallet) = create_relevant_manager(); + + let tx = Transaction { + version: 1, + lock_time: 123, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + + { + let w = wallet.read().await; + w.set_effect(txid, -30000, vec![]).await; + } + + manager.handle_tx(tx, &requests).await.unwrap(); + + let state = manager.mempool_state.read().await; + let stored = state.transactions.get(&txid).unwrap(); + assert_eq!(stored.net_amount, -30000); + assert!(stored.is_outgoing); + assert!(!stored.is_instant_send); + assert!(stored.addresses.is_empty()); + } + + #[test] + fn test_peer_connected_creates_entry() { + let (mut manager, _requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + + assert!(!manager.peers.contains_key(&peer)); + manager.handle_peer_connected(peer); + assert!(manager.peers.contains_key(&peer)); + assert!(manager.peers[&peer].is_none()); + } + + #[test] + fn test_peer_disconnected_redistributes_queue() { + let (mut manager, _requests, _rx) = create_test_manager(); + let peer1 = test_socket_address(1); + let peer2 = test_socket_address(2); + + // Both peers activated with queues + let txid1 = Txid::from_byte_array([1; 32]); + let txid2 = Txid::from_byte_array([2; 32]); + manager.peers.insert(peer1, Some(VecDeque::from([txid1, txid2]))); + 
manager.peers.insert(peer2, Some(VecDeque::new())); + + manager.handle_peer_disconnected(peer1); + + assert!(!manager.peers.contains_key(&peer1)); + // Txids should have moved to peer2 + let peer2_queue = manager.peers[&peer2].as_ref().unwrap(); + assert!(peer2_queue.contains(&txid1)); + assert!(peer2_queue.contains(&txid2)); + } + + #[test] + fn test_peer_disconnected_no_peers_drops_queue() { + let (mut manager, _requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + + manager.peers.insert(peer, Some(VecDeque::from([Txid::from_byte_array([1; 32])]))); + + manager.handle_peer_disconnected(peer); + + assert!(manager.peers.is_empty()); + } + + #[test] + fn test_prune_pending_requeues_to_activated_peer() { + let (mut manager, _requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + let txid = Txid::from_byte_array([1; 32]); + manager + .pending_requests + .insert(txid, Instant::now() - PENDING_REQUEST_TIMEOUT - Duration::from_secs(1)); + + manager.prune_pending_requests(); + + assert!(!manager.pending_requests.contains_key(&txid)); + assert!(manager.peers[&peer].as_ref().unwrap().contains(&txid)); + } + + #[test] + fn test_prune_pending_drops_when_no_peers() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let txid = Txid::from_byte_array([1; 32]); + manager + .pending_requests + .insert(txid, Instant::now() - PENDING_REQUEST_TIMEOUT - Duration::from_secs(1)); + + manager.prune_pending_requests(); + + assert!(!manager.pending_requests.contains_key(&txid)); + assert!(manager.peers.is_empty()); + } + + #[tokio::test] + async fn test_remove_confirmed_removes_txids() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let mut txids = Vec::new(); + { + let mut state = manager.mempool_state.write().await; + for i in 0..3u32 { + let tx = Transaction { + version: 1, + lock_time: i, + input: vec![], + output: vec![], + 
special_transaction_payload: None, + }; + let txid = tx.txid(); + txids.push(txid); + state.add_transaction(UnconfirmedTransaction::new( + tx, + Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); + } + assert_eq!(state.transactions.len(), 3); + } + + // Remove 2 of the 3 transactions + manager.remove_confirmed(&txids[..2]).await; + + let state = manager.mempool_state.read().await; + assert_eq!(state.transactions.len(), 1); + assert!(state.transactions.contains_key(&txids[2])); + drop(state); + + assert_eq!(manager.progress.removed(), 2); + assert_eq!(manager.progress.tracked(), 1); + } + + #[tokio::test] + async fn test_remove_confirmed_unknown_txids_noop() { + let (mut manager, _requests, _rx) = create_test_manager(); + + let unknown = vec![Txid::from_byte_array([0xaa; 32]), Txid::from_byte_array([0xbb; 32])]; + + manager.remove_confirmed(&unknown).await; + + let state = manager.mempool_state.read().await; + assert!(state.transactions.is_empty()); + assert_eq!(manager.progress.removed(), 0); + } + + #[tokio::test] + async fn test_rebuild_filter_clears_and_reloads() { + let addr = test_address(0xab); + let (mut manager, requests, mut rx) = create_bloom_manager_with_addresses(vec![addr]); + let peer = test_socket_address(1); + + manager.activate_peer(peer, &requests).await.unwrap(); + + // Drain activation messages + while rx.try_recv().is_ok() {} + + manager.rebuild_filter(&requests).await.unwrap(); + + // Verify message sequence: FilterClear, FilterLoad, MemPool + let msg1 = rx.try_recv().unwrap(); + assert!(matches!(msg1, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterClear, _))); + let msg2 = rx.try_recv().unwrap(); + assert!(matches!( + msg2, + NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _) + )); + let msg3 = rx.try_recv().unwrap(); + assert!(matches!(msg3, NetworkRequest::SendMessageToPeer(NetworkMessage::MemPool, _))); + } + + #[tokio::test] + async fn test_rebuild_filter_no_activated_peers_noop() { + let (mut 
manager, requests, mut rx) = create_bloom_manager(); + // No activation, so no activated peers + assert!(manager.peers.values().all(|v| v.is_none())); + + manager.rebuild_filter(&requests).await.unwrap(); + assert!(rx.try_recv().is_err()); + } + + #[tokio::test] + async fn test_seen_txids_deduplication_window() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.peers.insert(peer, Some(VecDeque::new())); + + let txid = Txid::from_byte_array([1u8; 32]); + let inv = vec![Inventory::Transaction(txid)]; + + // A fresh seen_txids entry should cause handle_inv to skip the txid + manager.seen_txids.insert(txid, Instant::now()); + manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert!(manager.pending_requests.is_empty(), "seen txid should be skipped"); + + // An expired entry should allow the txid to be accepted again + manager.seen_txids.insert(txid, Instant::now() - SEEN_TXID_EXPIRY - Duration::from_secs(1)); + manager.handle_inv(&inv, peer, &requests).await.unwrap(); + assert!( + manager.pending_requests.contains_key(&txid), + "expired seen txid should be accepted" + ); + } + + #[test] + fn test_peer_disconnect_keeps_other_peers_intact() { + let (mut manager, _requests, _rx) = create_test_manager(); + let peer1 = test_socket_address(1); + let peer2 = test_socket_address(2); + + // Both activated + manager.peers.insert(peer1, Some(VecDeque::new())); + manager.peers.insert(peer2, Some(VecDeque::from([Txid::from_byte_array([1; 32])]))); + + manager.handle_peer_disconnected(peer1); + + assert!(!manager.peers.contains_key(&peer1)); + // peer2 should still be present and activated + assert!(manager.peers.contains_key(&peer2)); + assert!(manager.peers[&peer2].is_some()); + } +} diff --git a/dash-spv/src/sync/mempool/mod.rs b/dash-spv/src/sync/mempool/mod.rs new file mode 100644 index 000000000..94f1a84e0 --- /dev/null +++ b/dash-spv/src/sync/mempool/mod.rs @@ -0,0 +1,11 @@ +mod filter; +mod manager; +mod 
progress; +mod sync_manager; + +pub(crate) use manager::MempoolManager; +pub use progress::MempoolProgress; + +/// Bloom filter false positive rate for BIP37 mempool filtering. +// TODO: probably expose via config, e.g. as a privacy level enum (low/medium/high) instead of a raw f64 +const BLOOM_FALSE_POSITIVE_RATE: f64 = 0.0005; diff --git a/dash-spv/src/sync/mempool/progress.rs b/dash-spv/src/sync/mempool/progress.rs new file mode 100644 index 000000000..4a7b34893 --- /dev/null +++ b/dash-spv/src/sync/mempool/progress.rs @@ -0,0 +1,171 @@ +use crate::sync::SyncState; +use std::fmt; +use std::time::Instant; + +/// Progress tracking for mempool transaction monitoring. +#[derive(Debug, Clone, PartialEq)] +pub struct MempoolProgress { + /// Current sync state. + state: SyncState, + /// Total transactions received from the network. + received: u32, + /// Transactions that matched wallet addresses. + relevant: u32, + /// Transactions currently tracked in mempool state (wallet-relevant). + tracked: u32, + /// Transactions removed (confirmed or expired). + removed: u32, + /// Time of last activity. 
+ last_activity: Instant, +} + +impl Default for MempoolProgress { + fn default() -> Self { + Self { + state: Default::default(), + received: 0, + relevant: 0, + tracked: 0, + removed: 0, + last_activity: Instant::now(), + } + } +} + +impl MempoolProgress { + pub fn state(&self) -> SyncState { + self.state + } + + pub fn received(&self) -> u32 { + self.received + } + + pub fn relevant(&self) -> u32 { + self.relevant + } + + pub fn tracked(&self) -> u32 { + self.tracked + } + + pub fn removed(&self) -> u32 { + self.removed + } + + pub fn last_activity(&self) -> Instant { + self.last_activity + } + + pub(super) fn set_state(&mut self, state: SyncState) { + self.state = state; + self.bump_last_activity(); + } + + pub(super) fn add_received(&mut self, count: u32) { + self.received += count; + self.bump_last_activity(); + } + + pub(super) fn add_relevant(&mut self, count: u32) { + self.relevant += count; + self.bump_last_activity(); + } + + pub(super) fn set_tracked(&mut self, count: u32) { + self.tracked = count; + self.bump_last_activity(); + } + + pub(super) fn add_removed(&mut self, count: u32) { + self.removed += count; + self.bump_last_activity(); + } + + fn bump_last_activity(&mut self) { + self.last_activity = Instant::now(); + } +} + +impl fmt::Display for MempoolProgress { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{:?} received: {}, relevant: {}, tracked: {}, removed: {}, last_activity: {}s", + self.state, + self.received, + self.relevant, + self.tracked, + self.removed, + self.last_activity.elapsed().as_secs() + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_values() { + let p = MempoolProgress::default(); + assert_eq!(p.state(), SyncState::WaitForEvents); + assert_eq!(p.received(), 0); + assert_eq!(p.relevant(), 0); + assert_eq!(p.tracked(), 0); + assert_eq!(p.removed(), 0); + } + + #[test] + fn test_mutators_update_correctly() { + let mut p = MempoolProgress::default(); + + 
p.add_received(5); + assert_eq!(p.received(), 5); + p.add_received(3); + assert_eq!(p.received(), 8); + + p.add_relevant(2); + assert_eq!(p.relevant(), 2); + + p.set_tracked(10); + assert_eq!(p.tracked(), 10); + // set_tracked replaces, not accumulates + p.set_tracked(7); + assert_eq!(p.tracked(), 7); + + p.add_removed(3); + assert_eq!(p.removed(), 3); + + p.set_state(SyncState::Synced); + assert_eq!(p.state(), SyncState::Synced); + } + + #[test] + fn test_last_activity_updated_on_mutation() { + let mut p = MempoolProgress::default(); + let before = p.last_activity(); + + // Small sleep to ensure time difference + std::thread::sleep(std::time::Duration::from_millis(1)); + p.add_received(1); + + assert!(p.last_activity() >= before); + } + + #[test] + fn test_display_format() { + let mut p = MempoolProgress::default(); + p.add_received(10); + p.add_relevant(3); + p.set_tracked(2); + p.add_removed(1); + + let display = format!("{}", p); + assert!(display.contains("received: 10")); + assert!(display.contains("relevant: 3")); + assert!(display.contains("tracked: 2")); + assert!(display.contains("removed: 1")); + assert!(display.contains("WaitForEvents")); + } +} diff --git a/dash-spv/src/sync/mempool/sync_manager.rs b/dash-spv/src/sync/mempool/sync_manager.rs new file mode 100644 index 000000000..44c1dfba8 --- /dev/null +++ b/dash-spv/src/sync/mempool/sync_manager.rs @@ -0,0 +1,616 @@ +use super::manager::MEMPOOL_TX_EXPIRY; +use crate::error::SyncResult; +use crate::network::{Message, MessageType, NetworkEvent, RequestSender}; +use crate::sync::{ + ManagerIdentifier, MempoolManager, SyncEvent, SyncManager, SyncManagerProgress, SyncState, +}; +use async_trait::async_trait; +use dashcore::network::message::NetworkMessage; +use key_wallet::manager::WalletInterface; + +#[async_trait] +impl SyncManager for MempoolManager { + fn identifier(&self) -> ManagerIdentifier { + ManagerIdentifier::Mempool + } + + fn state(&self) -> SyncState { + self.progress.state() + } + + fn 
set_state(&mut self, state: SyncState) { + self.progress.set_state(state); + } + + fn wanted_message_types(&self) -> &'static [MessageType] { + &[MessageType::Inv, MessageType::Tx] + } + + async fn start_sync(&mut self, requests: &RequestSender) -> SyncResult> { + // After a full disconnect, re-activate mempool on all connected peers + self.activate_all_peers(requests).await?; + let has_activated = self.peers.values().any(|v| v.is_some()); + if has_activated { + self.set_state(SyncState::Synced); + tracing::info!("Mempool manager re-activated after disconnect recovery"); + } + // If no peers could be activated, stay in WaitingForConnections so the + // next PeersUpdated event will retry activation. + Ok(vec![]) + } + + fn clear_in_flight_state(&mut self) { + self.clear_pending(); + } + + async fn handle_message( + &mut self, + msg: Message, + requests: &RequestSender, + ) -> SyncResult> { + match msg.inner() { + NetworkMessage::Inv(inv) => self.handle_inv(inv, msg.peer_address(), requests).await, + NetworkMessage::Tx(tx) => self.handle_tx(tx.clone(), requests).await, + _ => Ok(vec![]), + } + } + + async fn handle_sync_event( + &mut self, + event: &SyncEvent, + requests: &RequestSender, + ) -> SyncResult> { + match event { + SyncEvent::SyncComplete { + .. + } => { + if self.state() != SyncState::Synced { + self.activate_all_peers(requests).await?; + let has_activated = self.peers.values().any(|v| v.is_some()); + if has_activated { + self.set_state(SyncState::Synced); + tracing::info!("Mempool manager activated on all peers"); + return Ok(vec![]); + } else { + tracing::warn!( + "Sync complete but no peers available for mempool activation" + ); + } + } + Ok(vec![]) + } + SyncEvent::BlockProcessed { + new_addresses, + confirmed_txids, + .. 
+ } => { + // Remove confirmed transactions from mempool + if !confirmed_txids.is_empty() { + self.remove_confirmed(confirmed_txids).await; + } + if self.state() == SyncState::Synced + && (!confirmed_txids.is_empty() || !new_addresses.is_empty()) + { + // Confirmed transactions change the wallet's UTXO set and + // new addresses expand the monitored set. Both make the + // bloom filter stale, so rebuild immediately. + self.rebuild_filter(requests).await?; + } + Ok(vec![]) + } + SyncEvent::InstantLockReceived { + instant_lock, + .. + } => { + self.mark_instant_send(&instant_lock.txid).await; + Ok(vec![]) + } + _ => Ok(vec![]), + } + } + + async fn tick(&mut self, requests: &RequestSender) -> SyncResult> { + if self.state() != SyncState::Synced { + return Ok(vec![]); + } + + // Prune expired transactions periodically + self.prune_expired(MEMPOOL_TX_EXPIRY).await; + + // Prune pending requests that never received a response + self.prune_pending_requests(); + + // Send queued getdata requests now that slots may have freed up + self.send_queued(requests).await?; + + Ok(vec![]) + } + + async fn handle_network_event( + &mut self, + event: &NetworkEvent, + requests: &RequestSender, + ) -> SyncResult> { + match event { + NetworkEvent::PeerConnected { + address, + } => { + self.handle_peer_connected(*address); + // If synced, activate the new peer immediately + if self.state() == SyncState::Synced + && self.peers.get(address).is_some_and(|v| v.is_none()) + { + tracing::info!("Activating mempool on newly connected peer {}", address); + self.activate_peer(*address, requests).await?; + } + } + NetworkEvent::PeerDisconnected { + address, + } => { + self.handle_peer_disconnected(*address); + } + NetworkEvent::PeersUpdated { + connected_count, + best_height, + .. 
+ } => { + if let Some(best_height) = best_height { + self.update_target_height(*best_height); + } + if *connected_count == 0 { + self.stop_sync(); + } else if self.state() == SyncState::WaitingForConnections { + return self.start_sync(requests).await; + } + } + } + Ok(vec![]) + } + + fn progress(&self) -> SyncManagerProgress { + SyncManagerProgress::Mempool(self.progress.clone()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::client::config::MempoolStrategy; + use crate::network::NetworkRequest; + use crate::test_utils::test_socket_address; + use crate::types::MempoolState; + use dashcore::hashes::Hash; + use key_wallet::test_utils::MockWallet; + use std::sync::Arc; + use tokio::sync::{mpsc, RwLock}; + + fn create_test_manager( + ) -> (MempoolManager, RequestSender, mpsc::UnboundedReceiver) { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let manager = MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000); + + (manager, requests, rx) + } + + #[test] + fn test_sync_manager_trait_basics() { + let (mut manager, _, _rx) = create_test_manager(); + + assert_eq!(manager.identifier(), ManagerIdentifier::Mempool); + assert_eq!(manager.state(), SyncState::WaitForEvents); + + let types = manager.wanted_message_types(); + assert!(types.contains(&MessageType::Inv)); + assert!(types.contains(&MessageType::Tx)); + assert_eq!(types.len(), 2); + + manager.set_state(SyncState::Synced); + assert_eq!(manager.state(), SyncState::Synced); + + assert!(matches!(manager.progress(), SyncManagerProgress::Mempool(_))); + } + + #[tokio::test] + async fn test_handle_sync_complete_activates() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = crate::test_utils::test_socket_address(1); + manager.handle_peer_connected(peer); + + let event = SyncEvent::SyncComplete { 
+ header_tip: 1000, + cycle: 0, + }; + + let events = manager.handle_sync_event(&event, &requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.state(), SyncState::Synced); + assert!(matches!(manager.peers.get(&peer), Some(Some(_)))); + } + + #[tokio::test] + async fn test_handle_sync_complete_subsequent_cycles() { + let (mut manager, requests, _rx) = create_test_manager(); + manager.handle_peer_connected(crate::test_utils::test_socket_address(1)); + + // Activate first + let event0 = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&event0, &requests).await.unwrap(); + + // Subsequent cycles should not change state + let event1 = SyncEvent::SyncComplete { + header_tip: 1001, + cycle: 1, + }; + let events = manager.handle_sync_event(&event1, &requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.state(), SyncState::Synced); + } + + #[tokio::test] + async fn test_reactivation_after_disconnect() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + // Initial activation + let event = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + let events = manager.handle_sync_event(&event, &requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.state(), SyncState::Synced); + + // Simulate disconnect by resetting state + manager.set_state(SyncState::WaitForEvents); + + // Re-sync should re-activate + let event = SyncEvent::SyncComplete { + header_tip: 1001, + cycle: 1, + }; + let events = manager.handle_sync_event(&event, &requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.state(), SyncState::Synced); + } + + #[tokio::test] + async fn test_peer_connect_activates_when_synced() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer1 = test_socket_address(1); + manager.handle_peer_connected(peer1); + + // Activate via SyncComplete 
+ let event = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&event, &requests).await.unwrap(); + assert!(matches!(manager.peers.get(&peer1), Some(Some(_)))); + + // New peer connects while synced => should activate immediately + let peer2 = test_socket_address(2); + let connect = NetworkEvent::PeerConnected { + address: peer2, + }; + let events = manager.handle_network_event(&connect, &requests).await.unwrap(); + assert!(events.is_empty()); + assert!(matches!(manager.peers.get(&peer2), Some(Some(_)))); + } + + #[tokio::test] + async fn test_network_event_peer_connect_disconnect() { + let (mut manager, requests, _rx) = create_test_manager(); + + let peer1 = test_socket_address(1); + let peer2 = test_socket_address(2); + + // Connecting peers should return empty events (not synced yet) + let connect1 = NetworkEvent::PeerConnected { + address: peer1, + }; + let events = manager.handle_network_event(&connect1, &requests).await.unwrap(); + assert!(events.is_empty()); + assert!(manager.peers.contains_key(&peer1)); + + let connect2 = NetworkEvent::PeerConnected { + address: peer2, + }; + let events = manager.handle_network_event(&connect2, &requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.peers.len(), 2); + + let disconnect1 = NetworkEvent::PeerDisconnected { + address: peer1, + }; + let events = manager.handle_network_event(&disconnect1, &requests).await.unwrap(); + assert!(events.is_empty()); + + // Still have peer2 available + assert!(manager.peers.contains_key(&peer2)); + assert_eq!(manager.peers.len(), 1); + + // Disconnecting an already-disconnected peer should not error + let events = manager.handle_network_event(&disconnect1, &requests).await.unwrap(); + assert!(events.is_empty()); + } + + #[tokio::test] + async fn test_block_processed_removes_confirmed_txids() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + 
manager.handle_peer_connected(peer); + + // Activate + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + + // Add transactions to mempool state + let mut txids = Vec::new(); + { + let mut state = manager.mempool_state.write().await; + for i in 0..2u32 { + let tx = dashcore::Transaction { + version: 1, + lock_time: i, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + txids.push(txid); + state.add_transaction(crate::types::UnconfirmedTransaction::new( + tx, + dashcore::Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); + } + } + + let event = SyncEvent::BlockProcessed { + block_hash: dashcore::BlockHash::all_zeros(), + height: 1001, + new_addresses: vec![], + confirmed_txids: txids.clone(), + }; + let events = manager.handle_sync_event(&event, &requests).await.unwrap(); + assert!(events.is_empty()); + + let state = manager.mempool_state.read().await; + assert!(state.transactions.is_empty()); + } + + #[tokio::test] + async fn test_instant_lock_received_marks_transaction() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + // Activate + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + + // Add a transaction to mempool + let tx = dashcore::Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid = tx.txid(); + { + let mut state = manager.mempool_state.write().await; + state.add_transaction(crate::types::UnconfirmedTransaction::new( + tx, + dashcore::Amount::from_sat(0), + false, + false, + Vec::new(), + 0, + )); + } + + // Fire InstantLockReceived with a lock whose txid matches + let mut is_lock = dashcore::InstantLock::dummy(0..1); + is_lock.txid = txid; + + let event 
= SyncEvent::InstantLockReceived { + instant_lock: is_lock, + validated: true, + }; + let events = manager.handle_sync_event(&event, &requests).await.unwrap(); + assert!(events.is_empty()); + + let state = manager.mempool_state.read().await; + assert!(state.transactions.get(&txid).unwrap().is_instant_send); + } + + #[tokio::test] + async fn test_peer_disconnect_removes_from_peers() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + // Activate + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + + // Disconnect the only peer + let disconnect = NetworkEvent::PeerDisconnected { + address: peer, + }; + let events = manager.handle_network_event(&disconnect, &requests).await.unwrap(); + assert!(events.is_empty()); + assert!(manager.peers.is_empty()); + } + + #[tokio::test] + async fn test_sync_complete_no_peers_stays_inactive() { + let (mut manager, requests, _rx) = create_test_manager(); + + let event = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + let events = manager.handle_sync_event(&event, &requests).await.unwrap(); + + assert!(events.is_empty()); + assert_eq!(manager.state(), SyncState::WaitForEvents); + assert!(manager.peers.is_empty()); + } + + #[tokio::test] + async fn test_start_sync_no_peers_stays_waiting() { + let (mut manager, requests, _rx) = create_test_manager(); + + // Simulate full disconnect setting state to WaitingForConnections + manager.set_state(SyncState::WaitingForConnections); + + // start_sync with no peers should stay in WaitingForConnections + let events = manager.start_sync(&requests).await.unwrap(); + assert!(events.is_empty()); + assert_eq!(manager.state(), SyncState::WaitingForConnections); + } + + #[tokio::test] + async fn test_disconnect_recovery_reactivates_on_reconnect() { + let (mut manager, requests, _rx) = create_test_manager(); + let 
peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + // Activate via SyncComplete + let event = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&event, &requests).await.unwrap(); + assert_eq!(manager.state(), SyncState::Synced); + + // Disconnect peer + let disconnect = NetworkEvent::PeerDisconnected { + address: peer, + }; + manager.handle_network_event(&disconnect, &requests).await.unwrap(); + + // PeersUpdated with 0 triggers stop_sync + let update = NetworkEvent::PeersUpdated { + connected_count: 0, + addresses: vec![], + best_height: None, + }; + manager.handle_network_event(&update, &requests).await.unwrap(); + assert_eq!(manager.state(), SyncState::WaitingForConnections); + + // PeersUpdated with 1 but no peers tracked yet: stays WaitingForConnections + let update = NetworkEvent::PeersUpdated { + connected_count: 1, + addresses: vec![peer], + best_height: Some(1000), + }; + manager.handle_network_event(&update, &requests).await.unwrap(); + assert_eq!(manager.state(), SyncState::WaitingForConnections); + + // Peer reconnects and PeersUpdated fires again + manager.handle_peer_connected(peer); + let update = NetworkEvent::PeersUpdated { + connected_count: 1, + addresses: vec![peer], + best_height: Some(1000), + }; + manager.handle_network_event(&update, &requests).await.unwrap(); + assert_eq!(manager.state(), SyncState::Synced); + assert!(matches!(manager.peers.get(&peer), Some(Some(_)))); + } + + #[tokio::test] + async fn test_block_processed_confirmed_txids_rebuilds_filter() { + let mut mock = MockWallet::new(); + // Wallet needs at least one address for the bloom filter to be built + let script = dashcore::ScriptBuf::from_bytes(vec![ + 0x76, 0xa9, 0x14, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, + 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0x88, 0xac, + ]); + let addr = dashcore::Address::from_script(&script, dashcore::Network::Testnet).unwrap(); + 
mock.set_addresses(vec![addr]); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, mut rx) = mpsc::unbounded_channel::<NetworkRequest>(); + let requests = RequestSender::new(tx); + + let mut manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + // Activate + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + + // Drain activation messages + while rx.try_recv().is_ok() {} + + // BlockProcessed with confirmed txids should rebuild immediately + let event = SyncEvent::BlockProcessed { + block_hash: dashcore::BlockHash::all_zeros(), + height: 1001, + new_addresses: vec![], + confirmed_txids: vec![dashcore::Txid::all_zeros()], + }; + manager.handle_sync_event(&event, &requests).await.unwrap(); + + // Verify a FilterLoad was sent + let mut found_filter_load = false; + while let Ok(req) = rx.try_recv() { + if matches!(req, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) { + found_filter_load = true; + } + } + assert!(found_filter_load, "expected FilterLoad after confirmed txids"); + } + + #[tokio::test] + async fn test_block_processed_no_changes_no_rebuild_flag() { + let (mut manager, requests, _rx) = create_test_manager(); + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + + // BlockProcessed with no confirmed txids and no new addresses + let event = SyncEvent::BlockProcessed { + block_hash: dashcore::BlockHash::all_zeros(), + height: 1001, + new_addresses: vec![], + confirmed_txids: vec![], + }; + manager.handle_sync_event(&event, &requests).await.unwrap(); + } +} diff --git a/dash-spv/src/sync/mod.rs
b/dash-spv/src/sync/mod.rs index bcffed2aa..8a6925ef7 100644 --- a/dash-spv/src/sync/mod.rs +++ b/dash-spv/src/sync/mod.rs @@ -10,6 +10,7 @@ mod filters; mod identifier; mod instantsend; mod masternodes; +mod mempool; mod progress; mod sync_coordinator; mod sync_manager; @@ -21,6 +22,8 @@ pub use filter_headers::{FilterHeadersManager, FilterHeadersProgress}; pub use filters::{FiltersManager, FiltersProgress}; pub use instantsend::{InstantSendManager, InstantSendProgress}; pub use masternodes::{MasternodesManager, MasternodesProgress}; +pub(crate) use mempool::MempoolManager; +pub use mempool::MempoolProgress; pub use events::SyncEvent; pub use identifier::ManagerIdentifier; diff --git a/dash-spv/src/sync/progress.rs b/dash-spv/src/sync/progress.rs index 85641647a..724dc993a 100644 --- a/dash-spv/src/sync/progress.rs +++ b/dash-spv/src/sync/progress.rs @@ -1,7 +1,7 @@ use crate::error::{SyncError, SyncResult}; use crate::sync::{ BlockHeadersProgress, BlocksProgress, ChainLockProgress, FilterHeadersProgress, - FiltersProgress, InstantSendProgress, MasternodesProgress, + FiltersProgress, InstantSendProgress, MasternodesProgress, MempoolProgress, }; use dashcore::prelude::CoreBlockHeight; use std::fmt; @@ -34,6 +34,8 @@ pub struct SyncProgress { chainlocks: Option, /// InstantSend synchronization progress. instantsend: Option, + /// Mempool monitoring progress. + mempool: Option, } impl SyncProgress { @@ -156,6 +158,12 @@ impl SyncProgress { .ok_or_else(|| SyncError::InvalidState("InstantSendManager not started".into())) } + pub fn mempool(&self) -> SyncResult<&MempoolProgress> { + self.mempool + .as_ref() + .ok_or_else(|| SyncError::InvalidState("MempoolManager not started".into())) + } + pub fn update_headers(&mut self, progress: BlockHeadersProgress) { let updated_headers = Some(progress); if self.headers != updated_headers { @@ -209,6 +217,14 @@ impl SyncProgress { self.instantsend = updated_instantsend; } } + + /// Update mempool progress. 
+ pub fn update_mempool(&mut self, progress: MempoolProgress) { + let updated_mempool = Some(progress); + if self.mempool != updated_mempool { + self.mempool = updated_mempool; + } + } } impl fmt::Display for SyncProgress { @@ -235,6 +251,9 @@ impl fmt::Display for SyncProgress { if let Some(i) = &self.instantsend { writeln!(f, " InstantSend: {}", i)?; } + if let Some(m) = &self.mempool { + writeln!(f, " Mempool: {}", m)?; + } Ok(()) } } diff --git a/dash-spv/src/sync/sync_coordinator.rs b/dash-spv/src/sync/sync_coordinator.rs index cd53058a9..889aea4c0 100644 --- a/dash-spv/src/sync/sync_coordinator.rs +++ b/dash-spv/src/sync/sync_coordinator.rs @@ -20,8 +20,8 @@ use crate::storage::{ use crate::sync::progress::ProgressPercentage; use crate::sync::{ BlockHeadersManager, BlocksManager, ChainLockManager, FilterHeadersManager, FiltersManager, - InstantSendManager, ManagerIdentifier, MasternodesManager, SyncEvent, SyncManager, - SyncManagerProgress, SyncManagerTaskContext, SyncProgress, + InstantSendManager, ManagerIdentifier, MasternodesManager, MempoolManager, SyncEvent, + SyncManager, SyncManagerProgress, SyncManagerTaskContext, SyncProgress, }; use crate::SyncError; use key_wallet::manager::WalletInterface; @@ -78,6 +78,7 @@ where pub masternode: Option>, pub chainlock: Option>, pub instantsend: Option, + pub(crate) mempool: Option>, } impl Default for Managers @@ -98,6 +99,7 @@ where masternode: None, chainlock: None, instantsend: None, + mempool: None, } } } @@ -156,6 +158,7 @@ where try_update_progress(managers.masternode.as_ref(), &mut initial_progress); try_update_progress(managers.chainlock.as_ref(), &mut initial_progress); try_update_progress(managers.instantsend.as_ref(), &mut initial_progress); + try_update_progress(managers.mempool.as_ref(), &mut initial_progress); tracing::info!("Initial sync progress {}", initial_progress.clone()); @@ -213,6 +216,7 @@ where let masternode = self.managers.masternode.take(); let chainlock = 
self.managers.chainlock.take(); let instantsend = self.managers.instantsend.take(); + let mempool = self.managers.mempool.take(); // Spawn each manager using the macro spawn_manager!(self, block_headers, network); @@ -222,6 +226,7 @@ where spawn_manager!(self, masternode, network); spawn_manager!(self, chainlock, network); spawn_manager!(self, instantsend, network); + spawn_manager!(self, mempool, network); // Clone receivers for progress task let receivers = self.progress_receivers.clone(); @@ -402,6 +407,7 @@ fn update_progress_from_manager( SyncManagerProgress::Masternodes(m) => progress.update_masternodes(m), SyncManagerProgress::ChainLock(c) => progress.update_chainlocks(c), SyncManagerProgress::InstantSend(i) => progress.update_instantsend(i), + SyncManagerProgress::Mempool(m) => progress.update_mempool(m), } } diff --git a/dash-spv/src/sync/sync_manager.rs b/dash-spv/src/sync/sync_manager.rs index 72f423e1b..8d03e24eb 100644 --- a/dash-spv/src/sync/sync_manager.rs +++ b/dash-spv/src/sync/sync_manager.rs @@ -2,8 +2,8 @@ use crate::error::SyncResult; use crate::network::{Message, MessageType, NetworkEvent, RequestSender}; use crate::sync::{ BlockHeadersProgress, BlocksProgress, ChainLockProgress, FilterHeadersProgress, - FiltersProgress, InstantSendProgress, ManagerIdentifier, MasternodesProgress, SyncEvent, - SyncState, + FiltersProgress, InstantSendProgress, ManagerIdentifier, MasternodesProgress, MempoolProgress, + SyncEvent, SyncState, }; use async_trait::async_trait; @@ -30,6 +30,7 @@ pub enum SyncManagerProgress { Masternodes(MasternodesProgress), ChainLock(ChainLockProgress), InstantSend(InstantSendProgress), + Mempool(MempoolProgress), } impl SyncManagerProgress { @@ -42,6 +43,7 @@ impl SyncManagerProgress { SyncManagerProgress::Masternodes(progress) => progress.state(), SyncManagerProgress::ChainLock(progress) => progress.state(), SyncManagerProgress::InstantSend(progress) => progress.state(), + SyncManagerProgress::Mempool(progress) => 
progress.state(), } } } diff --git a/dash-spv/src/test_utils/node.rs b/dash-spv/src/test_utils/node.rs index c2b3b4c3f..28a94b9b5 100644 --- a/dash-spv/src/test_utils/node.rs +++ b/dash-spv/src/test_utils/node.rs @@ -2,9 +2,11 @@ //! //! This provides utilities for managing a dashd instance and loading test wallet data. -use dashcore::{Address, Amount, BlockHash, Txid}; +use dashcore::{Address, Amount, BlockHash, Transaction, Txid}; +use dashcore_rpc::json as rpc_json; use dashcore_rpc::{Auth, Client, RpcApi}; use serde::Deserialize; +use std::collections::HashMap; use std::fs; use std::net::SocketAddr; use std::path::{Path, PathBuf}; @@ -142,6 +144,7 @@ impl DashCoreNode { "-timestampindex=0".to_string(), "-blockfilterindex=1".to_string(), "-peerblockfilters=1".to_string(), + "-peerbloomfilters=1".to_string(), "-debug=all".to_string(), format!("-wallet={}", self.config.wallet), ]; @@ -287,7 +290,7 @@ impl DashCoreNode { hashes } - /// Send DASH to an address. + /// Send DASH to an address from the primary wallet. pub fn send_to_address(&self, address: &Address, amount: Amount) -> Txid { let client = self.rpc_client(); let txid = client @@ -297,6 +300,105 @@ impl DashCoreNode { txid } + /// Send DASH to an address from a specific wallet. + pub fn send_to_address_from_wallet( + &self, + wallet_name: &str, + address: &Address, + amount: Amount, + ) -> Txid { + let client = self.rpc_client_for_wallet(wallet_name); + let txid = client + .send_to_address(address, amount, None, None, None, None, None, None, None, None) + .expect("failed to send to address"); + tracing::info!("Sent {} to {} (wallet: {}), txid: {}", amount, address, wallet_name, txid); + txid + } + + /// List unspent outputs for a specific wallet. 
+ pub fn list_unspent_from_wallet( + &self, + wallet_name: &str, + ) -> Vec { + let client = self.rpc_client_for_wallet(wallet_name); + client.list_unspent(None, None, None, None, None).expect("failed to list unspent") + } + + /// Create, sign, and broadcast a raw transaction spending a single UTXO. + /// Sends the input amount minus fee to the destination address. + pub fn send_raw_from_wallet( + &self, + wallet_name: &str, + input_txid: Txid, + input_vout: u32, + input_amount: Amount, + destination: &Address, + fee: Amount, + ) -> Txid { + let client = self.rpc_client_for_wallet(wallet_name); + + let inputs = vec![rpc_json::CreateRawTransactionInput { + txid: input_txid, + vout: input_vout, + sequence: None, + }]; + let send_amount = input_amount.checked_sub(fee).expect("fee exceeds input amount"); + let mut outputs = HashMap::new(); + outputs.insert(destination.to_string(), send_amount); + + let raw_tx: Transaction = client + .create_raw_transaction(&inputs, &outputs, None) + .expect("failed to create raw tx"); + + let signed = client + .sign_raw_transaction_with_wallet(&raw_tx, None, None) + .expect("failed to sign raw tx"); + assert!(signed.complete, "raw transaction signing incomplete"); + + let txid = client + .send_raw_transaction(&signed.transaction().expect("invalid signed tx")) + .expect("failed to send raw tx"); + tracing::info!( + "Sent raw tx from wallet '{}': {} -> {}, txid: {}", + wallet_name, + input_amount, + destination, + txid + ); + txid + } + + /// Connect this dashd node to another dashd node via P2P and wait for the + /// connection to be established. 
+ pub async fn connect_to_node(&self, addr: SocketAddr) { + let client = self.rpc_client(); + client.onetry_node(&addr.to_string()).expect("failed to connect to node"); + + for _ in 0..30 { + let peers = client.get_peer_info().expect("failed to get peer info"); + if peers.iter().any(|p| p.addr.to_string().starts_with(&addr.ip().to_string())) { + tracing::info!("Connected to node {}", addr); + return; + } + sleep(Duration::from_millis(500)).await; + } + panic!("Timed out waiting for connection to {}", addr); + } + + /// Disconnect a specific peer by address. + pub fn disconnect_peer(&self, addr: SocketAddr) { + let client = self.rpc_client(); + client.disconnect_node(&addr.to_string()).expect("failed to disconnect peer"); + tracing::info!("Disconnected peer {}", addr); + } + + /// Enable or disable all P2P network activity on this node. + pub fn set_network_active(&self, active: bool) { + let client = self.rpc_client(); + client.set_network_active(active).expect("failed to set network active"); + tracing::info!("Set network active={} on dashd", active); + } + /// Disconnect all currently connected peers. 
pub fn disconnect_all_peers(&self) { let client = self.rpc_client(); diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 87b89dd1f..960a08a49 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -336,8 +336,9 @@ impl MempoolState { }); // Also prune old recent sends - let cutoff = Instant::now() - timeout; - self.recent_sends.retain(|_, &mut timestamp| timestamp > cutoff); + if let Some(cutoff) = Instant::now().checked_sub(timeout) { + self.recent_sends.retain(|_, &mut timestamp| timestamp > cutoff); + } expired } diff --git a/dash-spv/tests/dashd_sync/helpers.rs b/dash-spv/tests/dashd_sync/helpers.rs index 4bc05f8e9..2a27e51aa 100644 --- a/dash-spv/tests/dashd_sync/helpers.rs +++ b/dash-spv/tests/dashd_sync/helpers.rs @@ -1,7 +1,10 @@ use dash_spv::network::NetworkEvent; -use dash_spv::sync::{ProgressPercentage, SyncEvent, SyncProgress}; +use dash_spv::sync::{ProgressPercentage, SyncEvent, SyncProgress, SyncState}; use dash_spv::test_utils::DashCoreNode; +use dashcore::Txid; +use key_wallet::manager::WalletEvent; use key_wallet::manager::{WalletId, WalletManager}; +use key_wallet::transaction_checking::TransactionContext; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; use std::collections::HashSet; @@ -123,6 +126,66 @@ pub(super) async fn wait_for_network_event( } } +/// Wait for a wallet `TransactionReceived` event with mempool status within the given timeout. +/// Returns `Some(txid)` if received, `None` on timeout. +pub(super) async fn wait_for_mempool_tx( + receiver: &mut broadcast::Receiver, + max_wait: Duration, +) -> Option { + let timeout = tokio::time::sleep(max_wait); + tokio::pin!(timeout); + + loop { + tokio::select! { + _ = &mut timeout => return None, + result = receiver.recv() => { + match result { + Ok(WalletEvent::TransactionReceived { txid, status: TransactionContext::Mempool, .. 
}) => return Some(txid), + Ok(_) => continue, + Err(_) => return None, + } + } + } + } +} + +/// Wait for the mempool manager to reach `Synced` state via the progress watch channel. +/// Returns `true` if the state is reached within the timeout, `false` otherwise. +pub(super) async fn wait_for_mempool_synced( + progress_receiver: &mut watch::Receiver<SyncProgress>, +) -> bool { + let timeout = tokio::time::sleep(Duration::from_secs(30)); + tokio::pin!(timeout); + + loop { + { + let progress = progress_receiver.borrow_and_update(); + if progress.mempool().ok().is_some_and(|m| m.state() == SyncState::Synced) { + return true; + } + } + + tokio::select! { + _ = &mut timeout => return false, + result = progress_receiver.changed() => { + if result.is_err() { + return false; + } + } + } + } +} + +/// Assert that no mempool `TransactionReceived` event arrives within the given duration. +pub(super) async fn assert_no_mempool_tx( + receiver: &mut broadcast::Receiver<WalletEvent>, + wait: Duration, +) { + if let Some(txid) = wait_for_mempool_tx(receiver, wait).await { + panic!("Unexpected mempool TransactionReceived event with txid: {}", txid); + } +} + /// Run a disconnect-and-reconnect loop during sync, then verify final state. /// /// Waits for progress events, disconnects all peers after every 5th event, @@ -205,3 +268,112 @@ pub(super) async fn run_disconnect_loop( client_handle.stop().await; ctx.assert_synced(&client_handle.client.progress().await).await; } + +/// Wait for two clients to sync to the target height concurrently. +pub(super) async fn wait_for_sync_both( + a: &mut ClientHandle, + b: &mut ClientHandle, + target_height: u32, +) { + tokio::join!( + wait_for_sync(&mut a.progress_receiver, target_height), + wait_for_sync(&mut b.progress_receiver, target_height), + ); +} + +/// Wait for a mempool transaction event from two clients concurrently. +/// Asserts both detect the same txid.
+pub(super) async fn wait_for_mempool_tx_both( + a: &mut ClientHandle, + b: &mut ClientHandle, + timeout: Duration, +) -> Option<Txid> { + let (r_a, r_b) = tokio::join!( + wait_for_mempool_tx(&mut a.wallet_event_receiver, timeout), + wait_for_mempool_tx(&mut b.wallet_event_receiver, timeout), + ); + match (r_a, r_b) { + (Some(txid_a), Some(txid_b)) => { + assert_eq!(txid_a, txid_b, "Clients detected different txids"); + Some(txid_a) + } + (None, None) => None, + (a, b) => panic!("Strategy mismatch: client_a={:?}, client_b={:?}", a, b), + } +} + +/// Collect N mempool transaction events from two clients concurrently. +/// Asserts both detect the same set of txids. +pub(super) async fn wait_for_mempool_txs_both( + a: &mut ClientHandle, + b: &mut ClientHandle, + count: usize, + timeout: Duration, +) -> HashSet<Txid> { + async fn collect_n( + receiver: &mut broadcast::Receiver<WalletEvent>, + count: usize, + timeout: Duration, + ) -> HashSet<Txid> { + let mut txids = HashSet::new(); + for _ in 0..count { + let txid = wait_for_mempool_tx(receiver, timeout) + .await + .expect("Expected mempool TransactionReceived event"); + txids.insert(txid); + } + txids + } + + let (txids_a, txids_b) = tokio::join!( + collect_n(&mut a.wallet_event_receiver, count, timeout), + collect_n(&mut b.wallet_event_receiver, count, timeout), + ); + assert_eq!(txids_a, txids_b, "Clients detected different txid sets"); + txids_a +} + +/// Wait for both clients to reach mempool Synced state. +pub(super) async fn wait_for_mempool_synced_both(a: &mut ClientHandle, b: &mut ClientHandle) { + let (r_a, r_b) = tokio::join!( + wait_for_mempool_synced(&mut a.progress_receiver), + wait_for_mempool_synced(&mut b.progress_receiver), + ); + assert!(r_a, "Client A: expected mempool to reach Synced state"); + assert!(r_b, "Client B: expected mempool to reach Synced state"); +} + +/// Assert that neither client receives a mempool transaction event within the given duration.
+pub(super) async fn assert_no_mempool_tx_both( + a: &mut ClientHandle, + b: &mut ClientHandle, + wait: Duration, +) { + tokio::join!( + assert_no_mempool_tx(&mut a.wallet_event_receiver, wait), + assert_no_mempool_tx(&mut b.wallet_event_receiver, wait), + ); +} + +/// Wait for a network event on both clients concurrently. +pub(super) async fn wait_for_network_event_both( + a: &mut ClientHandle, + b: &mut ClientHandle, + predicate: impl Fn(&NetworkEvent) -> bool + Clone, + max_wait: Duration, +) -> bool { + let pred_clone = predicate.clone(); + let (r_a, r_b) = tokio::join!( + wait_for_network_event(&mut a.network_event_receiver, predicate, max_wait), + wait_for_network_event(&mut b.network_event_receiver, pred_clone, max_wait), + ); + r_a && r_b +} + +/// Assert mempool transaction count on both clients. +pub(super) async fn assert_mempool_count_both(a: &ClientHandle, b: &ClientHandle, expected: usize) { + let count_a = a.client.get_mempool_transaction_count().await; + let count_b = b.client.get_mempool_transaction_count().await; + assert_eq!(count_a, expected, "Client A mempool count: expected {}, got {}", expected, count_a); + assert_eq!(count_b, expected, "Client B mempool count: expected {}, got {}", expected, count_b); +} diff --git a/dash-spv/tests/dashd_sync/main.rs b/dash-spv/tests/dashd_sync/main.rs index aefe73ac7..da6f3c6a1 100644 --- a/dash-spv/tests/dashd_sync/main.rs +++ b/dash-spv/tests/dashd_sync/main.rs @@ -6,5 +6,6 @@ mod helpers; mod setup; mod tests_basic; mod tests_disconnect; +mod tests_mempool; mod tests_restart; mod tests_transaction; diff --git a/dash-spv/tests/dashd_sync/setup.rs b/dash-spv/tests/dashd_sync/setup.rs index d8caa7c47..f7c8319ba 100644 --- a/dash-spv/tests/dashd_sync/setup.rs +++ b/dash-spv/tests/dashd_sync/setup.rs @@ -1,3 +1,4 @@ +use dash_spv::client::config::MempoolStrategy; use dash_spv::network::NetworkEvent; use dash_spv::storage::{PeerStorage, PersistentPeerStorage, PersistentStorage}; use 
dash_spv::test_utils::{retain_test_dir, DashdTestContext, TestChain}; @@ -10,7 +11,9 @@ use dash_spv::{ }; use dashcore::network::address::AddrV2Message; use dashcore::network::constants::ServiceFlags; +use dashcore::Txid; use key_wallet::managed_account::managed_account_type::ManagedAccountType; +use key_wallet::manager::WalletEvent; use key_wallet::manager::{WalletId, WalletManager}; use key_wallet::wallet::initialization::WalletAccountCreationOptions; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; @@ -100,6 +103,19 @@ impl TestContext { pub(super) async fn spawn_new_client(&self) -> ClientHandle { create_and_start_client(&self.client_config, Arc::clone(&self.wallet)).await } + + /// Spawns an independent client with the given mempool strategy. + /// + /// Each call creates a fresh wallet (same mnemonic) and a separate storage directory. + /// The caller must hold the returned `TempDir` alive for the duration of the test. + pub(super) async fn spawn_client(&self, strategy: MempoolStrategy) -> (ClientHandle, TempDir) { + let storage = TempDir::new().expect("Failed to create client temp dir"); + let mut config = create_test_config(storage.path().to_path_buf(), self.dashd.addr); + config.mempool_strategy = strategy; + let (wallet, _) = create_test_wallet(&self.dashd.wallet.mnemonic, Network::Regtest); + let handle = create_and_start_client(&config, wallet).await; + (handle, storage) + } /// Retrieves the total count of transactions across all accounts in the wallet. pub(super) async fn transaction_count(&self) -> usize { let wallet_read = self.wallet.read().await; @@ -258,6 +274,8 @@ pub(super) struct ClientHandle { pub(super) sync_event_receiver: broadcast::Receiver, /// A channel for receiving network events. pub(super) network_event_receiver: broadcast::Receiver, + /// A channel for receiving wallet events. + pub(super) wallet_event_receiver: broadcast::Receiver, /// A cancellation token for the client's run loop. 
pub(super) cancel_token: CancellationToken, } @@ -274,6 +292,22 @@ impl ClientHandle { } } +/// Check if a transaction exists in a client's wallet. +pub(super) async fn client_has_transaction( + client: &TestClient, + wallet_id: &WalletId, + txid: &Txid, +) -> bool { + let wallet_read = client.wallet().read().await; + let wallet_info = wallet_read.get_wallet_info(wallet_id).expect("Wallet info not found"); + wallet_info + .accounts() + .all_accounts() + .iter() + .any(|account| account.transactions.contains_key(txid)) + || wallet_info.immature_transactions().iter().any(|tx| &tx.txid() == txid) +} + /// Creates a new SPV client and starts it. pub(super) async fn create_and_start_client( config: &ClientConfig, @@ -291,6 +325,10 @@ pub(super) async fn create_and_start_client( let progress_receiver = client.subscribe_progress().await; let sync_event_receiver = client.subscribe_sync_events().await; let network_event_receiver = client.subscribe_network_events().await; + let wallet_event_receiver = { + let w = client.wallet().read().await; + w.subscribe_events() + }; let cancel_token = CancellationToken::new(); let run_token = cancel_token.clone(); @@ -303,6 +341,7 @@ pub(super) async fn create_and_start_client( progress_receiver, sync_event_receiver, network_event_receiver, + wallet_event_receiver, cancel_token, } } diff --git a/dash-spv/tests/dashd_sync/tests_mempool.rs b/dash-spv/tests/dashd_sync/tests_mempool.rs new file mode 100644 index 000000000..ec71bcc24 --- /dev/null +++ b/dash-spv/tests/dashd_sync/tests_mempool.rs @@ -0,0 +1,514 @@ +use std::collections::HashSet; +use std::time::Duration; + +use dash_spv::client::config::MempoolStrategy; +use dash_spv::network::NetworkEvent; +use dash_spv::test_utils::{DashdTestContext, TestChain}; +use dashcore::Amount; + +use super::helpers::{ + assert_mempool_count_both, assert_no_mempool_tx_both, wait_for_mempool_synced_both, + wait_for_mempool_tx_both, wait_for_mempool_txs_both, wait_for_network_event, + 
wait_for_network_event_both, wait_for_sync_both, +}; +use super::setup::{ + client_has_transaction, create_and_start_client, create_test_wallet, TestContext, +}; + +const MEMPOOL_TIMEOUT: Duration = Duration::from_secs(30); + +/// Verify mempool detects an incoming wallet transaction using both strategies. +#[tokio::test] +async fn test_mempool_detects_incoming_tx() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + let (mut fa, _fa_dir) = ctx.spawn_client(MempoolStrategy::FetchAll).await; + let (mut bf, _bf_dir) = ctx.spawn_client(MempoolStrategy::BloomFilter).await; + wait_for_sync_both(&mut fa, &mut bf, ctx.dashd.initial_height).await; + + let receive_address = ctx.receive_address().await; + let txid = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(100_000_000)); + tracing::info!("Sent tx to SPV wallet, txid: {}", txid); + + let mempool_txid = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Expected mempool TransactionReceived event"); + assert_eq!(mempool_txid, txid, "Mempool event txid should match sent txid"); + + assert_mempool_count_both(&fa, &bf, 1).await; + + fa.stop().await; + bf.stop().await; + tracing::info!("test_mempool_detects_incoming_tx passed"); +} + +/// Verify mempool ignores transactions not relevant to the SPV wallet. +#[tokio::test] +async fn test_mempool_ignores_irrelevant_tx() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + // Fund the "default" wallet with a regular (non-coinbase) output so it's + // immediately spendable. Send from the primary wallet and mine the tx. 
+ let default_addr = ctx.dashd.node.get_new_address_from_wallet("default"); + ctx.dashd.node.send_to_address(&default_addr, Amount::from_sat(100_000_000)); + let miner_addr = ctx.dashd.node.get_new_address_from_wallet("default"); + ctx.dashd.node.generate_blocks(1, &miner_addr); + let funded_height = ctx.dashd.initial_height + 1; + + let (mut fa, _fa_dir) = ctx.spawn_client(MempoolStrategy::FetchAll).await; + let (mut bf, _bf_dir) = ctx.spawn_client(MempoolStrategy::BloomFilter).await; + wait_for_sync_both(&mut fa, &mut bf, funded_height).await; + + // Send from the "default" wallet to itself (no relation to SPV wallet) + let non_wallet_address = ctx.dashd.node.get_new_address_from_wallet("default"); + let txid = ctx.dashd.node.send_to_address_from_wallet( + "default", + &non_wallet_address, + Amount::from_sat(50_000_000), + ); + tracing::info!("Sent irrelevant tx (not to SPV wallet), txid: {}", txid); + + assert_no_mempool_tx_both(&mut fa, &mut bf, Duration::from_secs(3)).await; + assert_mempool_count_both(&fa, &bf, 0).await; + + fa.stop().await; + bf.stop().await; + tracing::info!("test_mempool_ignores_irrelevant_tx passed"); +} + +/// Verify a mempool transaction transitions to confirmed after mining. 
+#[tokio::test] +async fn test_mempool_to_confirmed_lifecycle() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + let (mut fa, _fa_dir) = ctx.spawn_client(MempoolStrategy::FetchAll).await; + let (mut bf, _bf_dir) = ctx.spawn_client(MempoolStrategy::BloomFilter).await; + wait_for_sync_both(&mut fa, &mut bf, ctx.dashd.initial_height).await; + + let receive_address = ctx.receive_address().await; + let txid = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(100_000_000)); + tracing::info!("Sent tx to SPV wallet (lifecycle test), txid: {}", txid); + + let mempool_txid = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Expected mempool TransactionReceived event"); + assert_eq!(mempool_txid, txid); + + assert_mempool_count_both(&fa, &bf, 1).await; + + // Mine the transaction + let miner_address = ctx.dashd.node.get_new_address_from_wallet("default"); + ctx.dashd.node.generate_blocks(1, &miner_address); + let new_height = ctx.dashd.initial_height + 1; + wait_for_sync_both(&mut fa, &mut bf, new_height).await; + + assert_mempool_count_both(&fa, &bf, 0).await; + assert!( + client_has_transaction(&fa.client, &ctx.wallet_id, &txid).await, + "FetchAll: confirmed tx should be in wallet" + ); + assert!( + client_has_transaction(&bf.client, &ctx.wallet_id, &txid).await, + "BloomFilter: confirmed tx should be in wallet" + ); + + fa.stop().await; + bf.stop().await; + tracing::info!("test_mempool_to_confirmed_lifecycle passed"); +} + +/// Verify multiple mempool transactions are all detected. 
+#[tokio::test] +async fn test_mempool_multiple_txs() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + let (mut fa, _fa_dir) = ctx.spawn_client(MempoolStrategy::FetchAll).await; + let (mut bf, _bf_dir) = ctx.spawn_client(MempoolStrategy::BloomFilter).await; + wait_for_sync_both(&mut fa, &mut bf, ctx.dashd.initial_height).await; + + let receive_address = ctx.receive_address().await; + let amounts = + [Amount::from_sat(50_000_000), Amount::from_sat(75_000_000), Amount::from_sat(120_000_000)]; + let mut expected_txids = HashSet::new(); + for amount in &amounts { + let txid = ctx.dashd.node.send_to_address(&receive_address, *amount); + tracing::info!("Sent {} to SPV wallet (multi-tx test), txid: {}", amount, txid); + expected_txids.insert(txid); + } + + let received_txids = wait_for_mempool_txs_both(&mut fa, &mut bf, 3, MEMPOOL_TIMEOUT).await; + assert_eq!(received_txids, expected_txids, "Received mempool txids should match sent txids"); + + assert_mempool_count_both(&fa, &bf, 3).await; + + fa.stop().await; + bf.stop().await; + tracing::info!("test_mempool_multiple_txs passed"); +} + +/// Verify mempool detects both incoming (address match) and outgoing (outpoint match) transactions. +/// +/// 1. Sync to tip +/// 2. Send from "default" wallet TO the SPV wallet receive address (incoming) +/// 3. Wait for mempool event (address match) +/// 4. Mine the tx so it becomes a confirmed UTXO in the SPV wallet +/// 5. Craft a raw tx that spends the wallet UTXO with all outputs going to an external +/// "default" address (no change back to the wallet) and broadcast it +/// 6. Wait for mempool event (outpoint match only, no address match) +/// 7. 
Assert both txids were detected +#[tokio::test] +async fn test_mempool_incoming_and_outgoing_tx() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + let (mut fa, _fa_dir) = ctx.spawn_client(MempoolStrategy::FetchAll).await; + let (mut bf, _bf_dir) = ctx.spawn_client(MempoolStrategy::BloomFilter).await; + wait_for_sync_both(&mut fa, &mut bf, ctx.dashd.initial_height).await; + + // Step 1: Send an incoming transaction to the SPV wallet + let receive_address = ctx.receive_address().await; + let incoming_amount = Amount::from_sat(200_000_000); + let incoming_txid = ctx.dashd.node.send_to_address(&receive_address, incoming_amount); + tracing::info!("Sent incoming tx to SPV wallet, txid: {}", incoming_txid); + + let mempool_txid = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Expected mempool event for incoming tx"); + assert_eq!(mempool_txid, incoming_txid); + + // Step 2: Mine the incoming tx so it becomes a confirmed UTXO + let miner_address = ctx.dashd.node.get_new_address_from_wallet("default"); + ctx.dashd.node.generate_blocks(1, &miner_address); + let mined_height = ctx.dashd.initial_height + 1; + wait_for_sync_both(&mut fa, &mut bf, mined_height).await; + + // Step 3: Craft a raw transaction that spends the wallet UTXO with all outputs + // going to an external address. This ensures the mempool detects it purely via + // the watched outpoint, not via any output address match. 
+ let wallet_name = &ctx.dashd.wallet.wallet_name; + let utxos = ctx.dashd.node.list_unspent_from_wallet(wallet_name); + let utxo = utxos + .iter() + .find(|u| u.txid == incoming_txid) + .expect("Incoming tx UTXO not found in wallet"); + + let external_address = ctx.dashd.node.get_new_address_from_wallet("default"); + let fee = Amount::from_sat(10_000); + let outgoing_txid = ctx.dashd.node.send_raw_from_wallet( + wallet_name, + utxo.txid, + utxo.vout, + utxo.amount, + &external_address, + fee, + ); + tracing::info!("Sent raw outgoing tx (outpoint-only match), txid: {}", outgoing_txid); + + let mempool_txid = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Expected mempool event for outgoing tx (outpoint match)"); + assert_eq!(mempool_txid, outgoing_txid); + + fa.stop().await; + bf.stop().await; + tracing::info!("test_mempool_incoming_and_outgoing_tx passed"); +} + +/// Verify full mempool lifecycle: detection, disconnect recovery, and confirmation. +/// +/// 1. Sync to tip with empty mempool +/// 2. Send 2 transactions, verify both arrive via mempool events +/// 3. Disconnect the SPV client from the peer (via dashd disconnectnode) +/// 4. Send 1 transaction while disconnected (it sits in dashd's mempool) +/// 5. Reconnect and wait for mempool reactivation +/// 6. Verify the tx sent while disconnected is detected (mempool dump on reconnect) +/// 7. Verify all 3 transactions are tracked +/// 8. 
Mine a block, verify all txs transition to confirmed, mempool count drops to 0 +#[tokio::test] +async fn test_mempool_lifecycle() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + let (mut fa, _fa_dir) = ctx.spawn_client(MempoolStrategy::FetchAll).await; + let (mut bf, _bf_dir) = ctx.spawn_client(MempoolStrategy::BloomFilter).await; + wait_for_sync_both(&mut fa, &mut bf, ctx.dashd.initial_height).await; + + // Wait for mempool activation before sending transactions + wait_for_mempool_synced_both(&mut fa, &mut bf).await; + tokio::time::sleep(Duration::from_secs(1)).await; + tracing::info!("Mempool synced on both clients"); + + // Step 1: Send 2 transactions, verify both arrive + let receive_address = ctx.receive_address().await; + let txid1 = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(50_000_000)); + let txid2 = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(60_000_000)); + tracing::info!("Sent tx1={}, tx2={}", txid1, txid2); + + let received = wait_for_mempool_txs_both(&mut fa, &mut bf, 2, MEMPOOL_TIMEOUT).await; + assert!(received.contains(&txid1), "Should have received tx1"); + assert!(received.contains(&txid2), "Should have received tx2"); + assert_mempool_count_both(&fa, &bf, 2).await; + + // Step 2: Disconnect the peer + ctx.dashd.node.disconnect_all_peers(); + let saw_disconnect = wait_for_network_event_both( + &mut fa, + &mut bf, + |e| matches!(e, NetworkEvent::PeerDisconnected { .. 
}), + Duration::from_secs(10), + ) + .await; + assert!(saw_disconnect, "Both clients should observe PeerDisconnected"); + + // Step 3: Send a transaction while disconnected + let txid3 = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(70_000_000)); + tracing::info!("Sent tx3={} while disconnected", txid3); + + // Step 4: Reconnect and wait for mempool reactivation + let saw_reconnect = wait_for_network_event_both( + &mut fa, + &mut bf, + |e| matches!(e, NetworkEvent::PeerConnected { .. }), + Duration::from_secs(30), + ) + .await; + assert!(saw_reconnect, "Both clients should reconnect to peer"); + + wait_for_mempool_synced_both(&mut fa, &mut bf).await; + tracing::info!("Mempool reactivated after reconnect on both clients"); + + // Step 5: Verify tx sent while disconnected is detected via mempool dump + let detected = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Expected mempool event for tx sent while disconnected"); + assert_eq!(detected, txid3, "Should detect tx3 via mempool dump on reconnect"); + + // Step 6: Verify all 3 transactions tracked + assert_mempool_count_both(&fa, &bf, 3).await; + + // Step 7: Mine a block, verify all txs confirmed + let miner_address = ctx.dashd.node.get_new_address_from_wallet("default"); + ctx.dashd.node.generate_blocks(1, &miner_address); + let new_height = ctx.dashd.initial_height + 1; + wait_for_sync_both(&mut fa, &mut bf, new_height).await; + + assert_mempool_count_both(&fa, &bf, 0).await; + for (label, client) in [("FetchAll", &fa.client), ("BloomFilter", &bf.client)] { + assert!( + client_has_transaction(client, &ctx.wallet_id, &txid1).await, + "{}: tx1 should be confirmed", + label + ); + assert!( + client_has_transaction(client, &ctx.wallet_id, &txid2).await, + "{}: tx2 should be confirmed", + label + ); + assert!( + client_has_transaction(client, &ctx.wallet_id, &txid3).await, + "{}: tx3 should be confirmed", + label + ); + } + + fa.stop().await; + bf.stop().await; + 
tracing::info!("test_mempool_lifecycle passed"); +} + +/// Verify mempool handles peer disconnection with multi-peer activation. +/// +/// Uses two dashd nodes connected to each other. Both SPV clients connect to both peers and +/// exercise these scenarios in sequence: +/// 1. Both peers active after sync — send tx, verify detection +/// 2. Disconnect one peer — no event expected, send tx from remaining, verify detection +/// 3. Disconnect both, reconnect — wait for mempool Synced state, verify detection +#[tokio::test] +async fn test_mempool_peer_disconnect_reactivation() { + let Some(ctx) = TestContext::new(TestChain::Minimal).await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + let Some(dashd2) = DashdTestContext::new(TestChain::Minimal).await else { + eprintln!("Skipping test (could not create second dashd node)"); + return; + }; + + // Connect the two dashd nodes so mempool transactions propagate between them + ctx.dashd.node.connect_to_node(dashd2.addr).await; + + // Spawn both SPV clients with both peers configured + let mut fa_config = ctx.client_config.clone(); + fa_config.add_peer(dashd2.addr); + + let fa_storage = tempfile::TempDir::new().expect("Failed to create FetchAll temp dir"); + let bf_storage = tempfile::TempDir::new().expect("Failed to create BloomFilter temp dir"); + + let mut fa_cfg = fa_config.clone(); + fa_cfg.storage_path = fa_storage.path().to_path_buf(); + fa_cfg.mempool_strategy = MempoolStrategy::FetchAll; + + let mut bf_cfg = fa_config.clone(); + bf_cfg.storage_path = bf_storage.path().to_path_buf(); + bf_cfg.mempool_strategy = MempoolStrategy::BloomFilter; + + let (fa_wallet, _) = create_test_wallet(&ctx.dashd.wallet.mnemonic, dash_spv::Network::Regtest); + let (bf_wallet, _) = create_test_wallet(&ctx.dashd.wallet.mnemonic, dash_spv::Network::Regtest); + + let mut fa = create_and_start_client(&fa_cfg, fa_wallet).await; + let mut bf = 
create_and_start_client(&bf_cfg, bf_wallet).await; + + // Sync both clients + wait_for_sync_both(&mut fa, &mut bf, ctx.dashd.initial_height).await; + + // Both peers should be activated after sync + wait_for_mempool_synced_both(&mut fa, &mut bf).await; + tokio::time::sleep(Duration::from_secs(1)).await; + tracing::info!("Mempool synced on all peers for both clients"); + + // --- Scenario 1: baseline mempool detection with both peers --- + let receive_address = ctx.receive_address().await; + let txid1 = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(50_000_000)); + tracing::info!("[scenario 1] sent tx {}", txid1); + + let detected = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Scenario 1: expected mempool tx detection"); + assert_eq!(detected, txid1); + assert_mempool_count_both(&fa, &bf, 1).await; + + // --- Scenario 2: disconnect one peer, verify detection still works --- + // Resubscribe to get fresh receivers, avoiding stale events or lagged errors + // from earlier phases that could cause the wait to miss the disconnect event. 
+ let mut fa_net_rx = fa.network_event_receiver.resubscribe(); + let mut bf_net_rx = bf.network_event_receiver.resubscribe(); + + ctx.dashd.node.disconnect_all_peers(); + + let (fa_disc, bf_disc) = tokio::join!( + wait_for_network_event( + &mut fa_net_rx, + |e| matches!(e, NetworkEvent::PeerDisconnected { address } if *address == ctx.dashd.addr), + Duration::from_secs(10), + ), + wait_for_network_event( + &mut bf_net_rx, + |e| matches!(e, NetworkEvent::PeerDisconnected { address } if *address == ctx.dashd.addr), + Duration::from_secs(10), + ), + ); + assert!(fa_disc, "FetchAll: should observe PeerDisconnected"); + assert!(bf_disc, "BloomFilter: should observe PeerDisconnected"); + tokio::time::sleep(Duration::from_secs(1)).await; + + let txid2 = dashd2.node.send_to_address(&receive_address, Amount::from_sat(60_000_000)); + tracing::info!("[scenario 2] sent tx {} from remaining peer", txid2); + + let detected = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Scenario 2: expected mempool tx detection from remaining peer"); + assert_eq!(detected, txid2); + assert_mempool_count_both(&fa, &bf, 2).await; + + // --- Scenario 3: disconnect both peers, verify recovery --- + ctx.dashd.node.set_network_active(false); + dashd2.node.set_network_active(false); + + // Wait for both disconnect events on both clients + for (label, receiver) in [ + ("FetchAll", &mut fa.network_event_receiver), + ("BloomFilter", &mut bf.network_event_receiver), + ] { + let mut seen_dashd1 = false; + let mut seen_dashd2 = false; + let deadline = tokio::time::sleep(Duration::from_secs(10)); + tokio::pin!(deadline); + while !seen_dashd1 || !seen_dashd2 { + tokio::select! 
{ + _ = &mut deadline => panic!("{}: timed out waiting for both peer disconnects", label), + result = receiver.recv() => { + match result { + Ok(NetworkEvent::PeerDisconnected { address }) if address == ctx.dashd.addr => { + seen_dashd1 = true; + } + Ok(NetworkEvent::PeerDisconnected { address }) if address == dashd2.addr => { + seen_dashd2 = true; + } + _ => {} + } + } + } + } + } + tracing::info!("[scenario 3] both peers disconnected from both clients"); + + // Re-enable networking so SPV can reconnect + ctx.dashd.node.set_network_active(true); + dashd2.node.set_network_active(true); + ctx.dashd.node.connect_to_node(dashd2.addr).await; + + // Wait for reconnection and mempool reactivation on both clients + let saw_reconnect = wait_for_network_event_both( + &mut fa, + &mut bf, + |e| matches!(e, NetworkEvent::PeerConnected { .. }), + Duration::from_secs(30), + ) + .await; + assert!(saw_reconnect, "Both clients should reconnect to a peer"); + + wait_for_mempool_synced_both(&mut fa, &mut bf).await; + tokio::time::sleep(Duration::from_secs(1)).await; + tracing::info!("[scenario 3] mempool recovered on both clients"); + + let txid3 = ctx.dashd.node.send_to_address(&receive_address, Amount::from_sat(70_000_000)); + tracing::info!("[scenario 3] sent tx {}", txid3); + + let detected = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) + .await + .expect("Scenario 3: expected mempool tx detection after recovery"); + assert_eq!(detected, txid3); + assert_mempool_count_both(&fa, &bf, 3).await; + + fa.stop().await; + bf.stop().await; + tracing::info!("test_mempool_peer_disconnect_reactivation passed"); +} diff --git a/key-wallet/src/manager/event_tests.rs b/key-wallet/src/manager/event_tests.rs index 1b555ad90..dc308e997 100644 --- a/key-wallet/src/manager/event_tests.rs +++ b/key-wallet/src/manager/event_tests.rs @@ -175,6 +175,105 @@ async fn test_mempool_after_instantsend_is_suppressed() { .await; } +// 
--------------------------------------------------------------------------- +// BalanceUpdated event tests +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_mempool_tx_emits_balance_updated() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + let tx = create_tx_paying_to(&addr, 0xf1); + + manager.process_mempool_transaction(&tx, false).await; + + let events = drain_events(&mut rx); + let balance_events: Vec<_> = + events.iter().filter(|e| matches!(e, WalletEvent::BalanceUpdated { .. })).collect(); + assert_eq!(balance_events.len(), 1, "expected exactly 1 BalanceUpdated, got {:?}", events); + assert!( + matches!( + balance_events[0], + WalletEvent::BalanceUpdated { + wallet_id: wid, + unconfirmed, + spendable, + .. + } if *wid == wallet_id && *unconfirmed == TX_AMOUNT && *spendable == 0 + ), + "expected BalanceUpdated with unconfirmed={TX_AMOUNT}, spendable=0, got {:?}", + balance_events[0] + ); +} + +#[tokio::test] +async fn test_instantsend_tx_emits_balance_updated_spendable() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + let tx = create_tx_paying_to(&addr, 0xf2); + + manager.process_mempool_transaction(&tx, true).await; + + let events = drain_events(&mut rx); + let balance_events: Vec<_> = + events.iter().filter(|e| matches!(e, WalletEvent::BalanceUpdated { .. })).collect(); + assert_eq!(balance_events.len(), 1, "expected exactly 1 BalanceUpdated, got {:?}", events); + assert!( + matches!( + balance_events[0], + WalletEvent::BalanceUpdated { + wallet_id: wid, + spendable, + unconfirmed, + .. 
+ } if *wid == wallet_id && *spendable == TX_AMOUNT && *unconfirmed == 0 + ), + "expected BalanceUpdated with spendable={TX_AMOUNT}, unconfirmed=0, got {:?}", + balance_events[0] + ); +} + +#[tokio::test] +async fn test_mempool_to_instantsend_transitions_balance() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + let tx = create_tx_paying_to(&addr, 0xf3); + + // Mempool tx: balance should be unconfirmed + manager.process_mempool_transaction(&tx, false).await; + let events = drain_events(&mut rx); + assert!( + events.iter().any(|e| matches!( + e, + WalletEvent::BalanceUpdated { + wallet_id: wid, + unconfirmed, + spendable, + .. + } if *wid == wallet_id && *unconfirmed == TX_AMOUNT && *spendable == 0 + )), + "expected unconfirmed balance after mempool, got {:?}", + events + ); + + // IS lock: balance should move from unconfirmed to spendable + manager.process_instant_send_lock(tx.txid()); + let events = drain_events(&mut rx); + assert!( + events.iter().any(|e| matches!( + e, + WalletEvent::BalanceUpdated { + wallet_id: wid, + spendable, + unconfirmed, + .. 
+ } if *wid == wallet_id && *spendable == TX_AMOUNT && *unconfirmed == 0 + )), + "expected spendable balance after IS lock, got {:?}", + events + ); +} + // --------------------------------------------------------------------------- // Production API tests // --------------------------------------------------------------------------- @@ -194,6 +293,68 @@ async fn test_process_instant_send_lock_for_unknown_txid() { assert_eq!(balance_before, balance_after); } +#[tokio::test] +async fn test_process_instant_send_lock_dedup() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); + let tx = create_tx_paying_to(&addr, 0xe1); + + manager.process_mempool_transaction(&tx, false).await; + let mut rx = manager.subscribe_events(); + + // First IS lock should emit events + manager.process_instant_send_lock(tx.txid()); + let events = drain_events(&mut rx); + assert!( + events.iter().any(|e| matches!( + e, + WalletEvent::TransactionStatusChanged { + wallet_id: wid, + status: TransactionContext::InstantSend, + .. + } if *wid == wallet_id + )), + "expected TransactionStatusChanged(InstantSend) with correct wallet_id, got {:?}", + events + ); + assert!( + events.iter().any( + |e| matches!(e, WalletEvent::BalanceUpdated { wallet_id: wid, .. 
} if *wid == wallet_id) + ), + "expected BalanceUpdated for wallet, got {:?}", + events + ); + + // Second IS lock should be a no-op + manager.process_instant_send_lock(tx.txid()); + assert_no_events(&mut rx); +} + +#[tokio::test] +async fn test_process_instant_send_lock_after_block_confirmation() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); + let tx = create_tx_paying_to(&addr, 0xe2); + + // Process as IS mempool tx, then confirm in block + manager.process_mempool_transaction(&tx, true).await; + let block_ctx = TransactionContext::InBlock { + height: 500, + block_hash: Some(BlockHash::from_byte_array([0xe2; 32])), + timestamp: Some(5000), + }; + manager.check_transaction_in_all_wallets(&tx, block_ctx, true, true).await; + + // IS lock after block confirmation is a no-op (already tracked via mempool IS) + let mut rx = manager.subscribe_events(); + manager.process_instant_send_lock(tx.txid()); + assert_no_events(&mut rx); + + // Confirm height preserved + let history = manager.wallet_transaction_history(&wallet_id).unwrap(); + let records: Vec<_> = history.iter().filter(|r| r.txid == tx.txid()).collect(); + assert_eq!(records.len(), 1); + assert_eq!(records[0].height, Some(500)); +} + #[tokio::test] async fn test_mixed_instantsend_paths_no_duplicate_events() { let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); @@ -326,6 +487,46 @@ async fn test_process_block_emits_events() { ); } +#[tokio::test] +async fn test_irrelevant_mempool_tx_emits_no_events() { + use dashcore::{PublicKey, ScriptBuf}; + + let (mut manager, _wallet_id, _addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + + // Create a tx paying to a random script that doesn't match any wallet address + let random_script = + ScriptBuf::new_p2pkh(&PublicKey::from_slice(&[2; 33]).unwrap().pubkey_hash()); + let tx = Transaction { + version: 2, + lock_time: 0, + input: vec![dashcore::TxIn { + previous_output: dashcore::OutPoint { + txid: 
dashcore::Txid::from_byte_array([0xe4; 32]), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: u32::MAX, + witness: dashcore::Witness::default(), + }], + output: vec![dashcore::TxOut { + value: TX_AMOUNT, + script_pubkey: random_script, + }], + special_transaction_payload: None, + }; + + let result = manager.process_mempool_transaction(&tx, false).await; + + assert!(!result.is_relevant); + assert_eq!(result.net_amount, 0); + assert_no_events(&mut rx); +} + +// --------------------------------------------------------------------------- +// Edge case tests +// --------------------------------------------------------------------------- + #[tokio::test] async fn test_instantsend_to_chainlocked_event_flow() { assert_lifecycle_flow( @@ -342,6 +543,58 @@ async fn test_instantsend_to_chainlocked_event_flow() { .await; } +#[tokio::test] +async fn test_mempool_to_block_to_chainlocked_event_flow() { + let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + let tx = create_tx_paying_to(&addr, 0xc4); + + // Step 1: mempool — emits TransactionReceived + manager.check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true).await; + let event = assert_single_event(&mut rx); + assert!( + matches!( + event, + WalletEvent::TransactionReceived { + status: TransactionContext::Mempool, + .. + } + ), + "expected TransactionReceived(Mempool), got {:?}", + event + ); + + // Step 2: block confirmation — emits TransactionStatusChanged + let block_ctx = TransactionContext::InBlock { + height: 1700, + block_hash: Some(BlockHash::from_byte_array([0xc4; 32])), + timestamp: Some(17000), + }; + manager.check_transaction_in_all_wallets(&tx, block_ctx, true, true).await; + let event = assert_single_event(&mut rx); + assert!( + matches!( + event, + WalletEvent::TransactionStatusChanged { + status: TransactionContext::InBlock { .. }, + .. 
+ } + ), + "expected TransactionStatusChanged(InBlock), got {:?}", + event + ); + + // Step 3: chain lock on already-confirmed tx — no event (wallet doesn't + // track chain lock state separately from block confirmation) + let cl_ctx = TransactionContext::InChainLockedBlock { + height: 1700, + block_hash: Some(BlockHash::from_byte_array([0xc4; 32])), + timestamp: Some(17000), + }; + manager.check_transaction_in_all_wallets(&tx, cl_ctx, true, true).await; + assert_no_events(&mut rx); +} + #[tokio::test] async fn test_chainlocked_block_event_flow() { let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); @@ -383,6 +636,7 @@ async fn test_check_transaction_dry_run_does_not_persist_state() { .await; assert!(!result.affected_wallets.is_empty()); + assert_eq!(result.total_received, TX_AMOUNT); assert_no_events(&mut rx); // Call again — should still report as relevant (state not persisted) @@ -390,6 +644,7 @@ async fn test_check_transaction_dry_run_does_not_persist_state() { .check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, false, false) .await; assert!(!result2.affected_wallets.is_empty()); + assert_eq!(result2.total_received, TX_AMOUNT); assert_no_events(&mut rx); // Now persist — should still report as new since dry runs didn't record it diff --git a/key-wallet/src/manager/mod.rs b/key-wallet/src/manager/mod.rs index 72e17b23a..376ba1f7e 100644 --- a/key-wallet/src/manager/mod.rs +++ b/key-wallet/src/manager/mod.rs @@ -16,7 +16,7 @@ mod wallet_interface; pub use events::WalletEvent; pub use matching::{check_compact_filters_for_addresses, FilterMatchKey}; -pub use wallet_interface::{BlockProcessingResult, WalletInterface}; +pub use wallet_interface::{BlockProcessingResult, MempoolTransactionResult, WalletInterface}; use crate::account::AccountCollection; use crate::transaction_checking::TransactionContext; @@ -74,6 +74,12 @@ pub struct CheckTransactionsResult { pub is_new_transaction: bool, /// New addresses generated during gap limit 
maintenance pub new_addresses: Vec
, + /// Total value received across all wallets + pub total_received: u64, + /// Total value sent across all wallets + pub total_sent: u64, + /// Addresses involved across all wallets + pub involved_addresses: Vec
, } /// High-level wallet manager that manages multiple wallets @@ -540,6 +546,16 @@ impl WalletManager { result.is_new_transaction = true; } + // Aggregate totals and involved addresses across wallets + result.total_received = + result.total_received.saturating_add(check_result.total_received); + result.total_sent = result.total_sent.saturating_add(check_result.total_sent); + for account_match in &check_result.affected_accounts { + for addr_info in account_match.account_type_match.all_involved_addresses() { + result.involved_addresses.push(addr_info.address); + } + } + #[cfg(feature = "std")] if check_result.is_new_transaction { // First time seeing this transaction — emit TransactionReceived @@ -1058,6 +1074,16 @@ impl WalletManager { } } } + + /// Get all outpoints from wallet UTXOs across all managed wallets. + /// Used for bloom filter construction to detect spends of our UTXOs. + pub fn watched_outpoints(&self) -> Vec { + let mut outpoints = Vec::new(); + for info in self.wallet_infos.values() { + outpoints.extend(info.utxos().into_iter().map(|u| u.outpoint)); + } + outpoints + } } /// Wallet manager errors diff --git a/key-wallet/src/manager/process_block.rs b/key-wallet/src/manager/process_block.rs index 62652b774..e67ded76c 100644 --- a/key-wallet/src/manager/process_block.rs +++ b/key-wallet/src/manager/process_block.rs @@ -1,4 +1,6 @@ -use crate::manager::wallet_interface::{BlockProcessingResult, WalletInterface}; +use crate::manager::wallet_interface::{ + BlockProcessingResult, MempoolTransactionResult, WalletInterface, +}; use crate::manager::{WalletEvent, WalletManager}; use crate::transaction_checking::transaction_router::TransactionRouter; use crate::transaction_checking::TransactionContext; @@ -49,17 +51,51 @@ impl WalletInterface for WalletM result } - async fn process_mempool_transaction(&mut self, tx: &Transaction) { - let context = TransactionContext::Mempool; + async fn process_mempool_transaction( + &mut self, + tx: &Transaction, + 
is_instant_send: bool, + ) -> MempoolTransactionResult { + let context = if is_instant_send { + TransactionContext::InstantSend + } else { + TransactionContext::Mempool + }; + let snapshot = self.snapshot_balances(); + let check_result = self.check_transaction_in_all_wallets(tx, context, true, false).await; + + let is_relevant = !check_result.affected_wallets.is_empty(); + let net_amount = if is_relevant { + check_result.total_received as i64 - check_result.total_sent as i64 + } else { + 0 + }; + + // Refresh cached balances only for affected wallets + for wallet_id in &check_result.affected_wallets { + if let Some(info) = self.wallet_infos.get_mut(wallet_id) { + info.update_balance(); + } + } + self.emit_balance_changes(&snapshot); - // Check transaction against all wallets - self.check_transaction_in_all_wallets(tx, context, true, true).await; + MempoolTransactionResult { + is_relevant, + net_amount, + is_outgoing: net_amount < 0, + addresses: check_result.involved_addresses, + new_addresses: check_result.new_addresses, + } } fn monitored_addresses(&self) -> Vec
{ self.monitored_addresses() } + fn watched_outpoints(&self) -> Vec { + self.watched_outpoints() + } + async fn transaction_effect(&self, tx: &Transaction) -> Option<(i64, Vec)> { // Aggregate across all managed wallets. If any wallet considers it relevant, // compute net = total_received - total_sent and collect involved addresses. @@ -194,8 +230,28 @@ impl WalletInterface for WalletM #[cfg(test)] mod tests { use super::*; + use crate::manager::test_helpers::*; use crate::wallet::managed_wallet_info::ManagedWalletInfo; - use dashcore::Network; + use dashcore::block::{Header, Version}; + use dashcore::hashes::Hash; + use dashcore::pow::CompactTarget; + use dashcore::{ + BlockHash, Network, OutPoint, ScriptBuf, TxIn, TxMerkleNode, TxOut, Txid, Witness, + }; + + fn make_block(txdata: Vec) -> Block { + Block { + header: Header { + version: Version::ONE, + prev_blockhash: BlockHash::from_byte_array([0; 32]), + merkle_root: TxMerkleNode::from_byte_array([0; 32]), + time: 1000, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: 0, + }, + txdata, + } + } #[tokio::test] async fn test_synced_height() { @@ -212,4 +268,111 @@ mod tests { manager.update_synced_height(10); assert_eq!(manager.synced_height(), 10); } + + #[tokio::test] + async fn test_process_mempool_transaction_balance_events() { + let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + + // Relevant tx should emit BalanceUpdated + let tx = create_tx_paying_to(&addr, 0xaa); + manager.process_mempool_transaction(&tx, false).await; + + let mut found = false; + while let Ok(event) = rx.try_recv() { + if let WalletEvent::BalanceUpdated { + unconfirmed, + .. 
+ } = event + { + assert!(unconfirmed > 0, "unconfirmed balance should increase"); + found = true; + break; + } + } + assert!(found, "should emit BalanceUpdated for mempool transaction"); + + // Irrelevant tx should not emit any events + let unrelated_tx = Transaction { + version: 2, + lock_time: 0, + input: vec![TxIn { + previous_output: OutPoint { + txid: Txid::from_byte_array([0xbb; 32]), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: u32::MAX, + witness: Witness::default(), + }], + output: vec![TxOut { + value: 100_000, + script_pubkey: ScriptBuf::new_p2pkh(&dashcore::PubkeyHash::from_byte_array( + [0xff; 20], + )), + }], + special_transaction_payload: None, + }; + manager.process_mempool_transaction(&unrelated_tx, false).await; + assert!(rx.try_recv().is_err(), "should not emit events for irrelevant transaction"); + } + + #[tokio::test] + async fn test_process_block_emits_balance_updated() { + let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + let tx = create_tx_paying_to(&addr, 0xcc); + let block = make_block(vec![tx]); + + let mut rx = manager.subscribe_events(); + manager.process_block(&block, 100).await; + + let mut found = false; + while let Ok(event) = rx.try_recv() { + if let WalletEvent::BalanceUpdated { + spendable, + .. 
+ } = event + { + assert!(spendable > 0, "spendable balance should increase after block"); + found = true; + break; + } + } + assert!(found, "should emit BalanceUpdated for block processing"); + } + + #[tokio::test] + async fn test_mempool_transaction_result_contains_wallet_effect_data() { + let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + let tx = create_tx_paying_to(&addr, 0xaa); + + let result = manager.process_mempool_transaction(&tx, false).await; + + assert!(result.is_relevant); + assert_eq!(result.net_amount, TX_AMOUNT as i64); + assert!(!result.is_outgoing); + assert!(!result.addresses.is_empty()); + } + + #[tokio::test] + async fn test_check_transaction_populates_totals() { + let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + + let tx = create_tx_paying_to(&addr, 0xf0); + let result = manager + .check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true) + .await; + + assert!(!result.affected_wallets.is_empty()); + assert_eq!(result.total_received, TX_AMOUNT); + assert_eq!(result.total_sent, 0); + assert!( + !result.involved_addresses.is_empty(), + "involved_addresses should contain the target address" + ); + assert!( + result.involved_addresses.contains(&addr), + "involved_addresses should contain the target address" + ); + } } diff --git a/key-wallet/src/manager/wallet_interface.rs b/key-wallet/src/manager/wallet_interface.rs index 0e8188801..e814d8eda 100644 --- a/key-wallet/src/manager/wallet_interface.rs +++ b/key-wallet/src/manager/wallet_interface.rs @@ -7,7 +7,7 @@ use alloc::string::String; use alloc::vec::Vec; use async_trait::async_trait; use dashcore::prelude::CoreBlockHeight; -use dashcore::{Address, Block, Transaction, Txid}; +use dashcore::{Address, Block, OutPoint, Transaction, Txid}; use tokio::sync::broadcast; /// Result of processing a block through the wallet @@ -21,6 +21,21 @@ pub struct BlockProcessingResult { pub new_addresses: Vec
, } +/// Result of processing a mempool transaction through the wallet +#[derive(Debug, Default, Clone)] +pub struct MempoolTransactionResult { + /// Whether the transaction was relevant to any wallet. + pub is_relevant: bool, + /// Net amount change for the wallet (received - sent) in satoshis. + pub net_amount: i64, + /// Whether this is an outgoing transaction (net_amount < 0). + pub is_outgoing: bool, + /// Addresses involved in this transaction. + pub addresses: Vec
, + /// New addresses generated during gap limit maintenance. + pub new_addresses: Vec
, +} + impl BlockProcessingResult { /// Returns all relevant transaction IDs (new and existing) pub fn relevant_txids(&self) -> impl Iterator { @@ -45,12 +60,22 @@ pub trait WalletInterface: Send + Sync + 'static { height: CoreBlockHeight, ) -> BlockProcessingResult; - /// Called when a transaction is seen in the mempool - async fn process_mempool_transaction(&mut self, tx: &Transaction); + /// Called when a transaction is seen in the mempool. + /// Returns whether the transaction was relevant and any new addresses generated. + /// When `is_instant_send` is true, the transaction already has an IS lock. + async fn process_mempool_transaction( + &mut self, + tx: &Transaction, + is_instant_send: bool, + ) -> MempoolTransactionResult; /// Get all addresses the wallet is monitoring for incoming transactions fn monitored_addresses(&self) -> Vec
; + /// Get all outpoints the wallet is watching (unspent outputs). + /// Used for bloom filter construction to detect spends of our UTXOs. + fn watched_outpoints(&self) -> Vec; + /// Return the wallet's per-transaction net change and involved addresses if known. /// Returns (net_amount, addresses) where net_amount is received - sent in satoshis. /// If the wallet has no record for the transaction, returns None. diff --git a/key-wallet/src/test_utils/wallet.rs b/key-wallet/src/test_utils/wallet.rs index 6293bdb82..6b30ab4ed 100644 --- a/key-wallet/src/test_utils/wallet.rs +++ b/key-wallet/src/test_utils/wallet.rs @@ -13,9 +13,12 @@ impl ManagedWalletInfo { } } +use crate::manager::MempoolTransactionResult; use crate::manager::{BlockProcessingResult, WalletEvent, WalletInterface}; +use dashcore::address::NetworkUnchecked; use dashcore::prelude::CoreBlockHeight; -use dashcore::Block; +use dashcore::{Block, OutPoint}; +use std::str::FromStr; use std::{collections::BTreeMap, sync::Arc}; use tokio::sync::{broadcast, Mutex}; @@ -29,6 +32,14 @@ pub struct MockWallet { effects: TransactionEffectsMap, synced_height: CoreBlockHeight, event_sender: broadcast::Sender, + /// When true, process_mempool_transaction returns is_relevant=true. + mempool_relevant: bool, + /// Addresses returned by monitored_addresses. + addresses: Vec
, + /// Outpoints returned by watched_outpoints. + outpoints: Vec, + /// New addresses returned by process_mempool_transaction. + mempool_new_addresses: Vec
, /// Recorded status change notifications for test assertions. status_changes: Arc>>, } @@ -48,10 +59,34 @@ impl MockWallet { effects: Arc::new(Mutex::new(BTreeMap::new())), synced_height: 0, event_sender, + mempool_relevant: false, + addresses: Vec::new(), + outpoints: Vec::new(), + mempool_new_addresses: Vec::new(), status_changes: Arc::new(Mutex::new(Vec::new())), } } + /// Configure whether mempool transactions are reported as relevant. + pub fn set_mempool_relevant(&mut self, relevant: bool) { + self.mempool_relevant = relevant; + } + + /// Set the addresses returned by monitored_addresses. + pub fn set_addresses(&mut self, addresses: Vec
) { + self.addresses = addresses; + } + + /// Set the outpoints returned by watched_outpoints. + pub fn set_outpoints(&mut self, outpoints: Vec) { + self.outpoints = outpoints; + } + + /// Set new addresses returned by process_mempool_transaction. + pub fn set_mempool_new_addresses(&mut self, addresses: Vec
) { + self.mempool_new_addresses = addresses; + } + pub fn status_changes(&self) -> Arc>> { self.status_changes.clone() } @@ -83,9 +118,38 @@ impl WalletInterface for MockWallet { } } - async fn process_mempool_transaction(&mut self, tx: &Transaction) { + async fn process_mempool_transaction( + &mut self, + tx: &Transaction, + _is_instant_send: bool, + ) -> MempoolTransactionResult { let mut processed = self.processed_transactions.lock().await; processed.push(tx.txid()); + + if !self.mempool_relevant { + return MempoolTransactionResult::default(); + } + + let effects = self.effects.lock().await; + let (net_amount, addresses) = if let Some((net, addr_strs)) = effects.get(&tx.txid()) { + let addrs = addr_strs + .iter() + .filter_map(|s| { + Address::::from_str(s).ok().map(|a| a.assume_checked()) + }) + .collect(); + (*net, addrs) + } else { + (0, Vec::new()) + }; + + MempoolTransactionResult { + is_relevant: true, + net_amount, + is_outgoing: net_amount < 0, + addresses, + new_addresses: self.mempool_new_addresses.clone(), + } } async fn describe(&self) -> String { @@ -98,7 +162,11 @@ impl WalletInterface for MockWallet { } fn monitored_addresses(&self) -> Vec
{ - Vec::new() + self.addresses.clone() + } + + fn watched_outpoints(&self) -> Vec { + self.outpoints.clone() } fn synced_height(&self) -> CoreBlockHeight { @@ -148,12 +216,22 @@ impl WalletInterface for NonMatchingMockWallet { BlockProcessingResult::default() } - async fn process_mempool_transaction(&mut self, _tx: &Transaction) {} + async fn process_mempool_transaction( + &mut self, + _tx: &Transaction, + _is_instant_send: bool, + ) -> MempoolTransactionResult { + MempoolTransactionResult::default() + } fn monitored_addresses(&self) -> Vec
{ Vec::new() } + fn watched_outpoints(&self) -> Vec { + Vec::new() + } + fn synced_height(&self) -> CoreBlockHeight { self.synced_height } From b03252affc1ecbe9218b2f0dc8f557ba333f166c Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 19 Mar 2026 22:37:40 +0700 Subject: [PATCH 2/3] refactor: detect bloom filter staleness via `monitor_revision` polling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of requiring consumers to call an external `notify_wallet_addresses_changed()` API whenever wallet addresses change, the mempool manager now internally detects staleness by polling a revision counter on each tick. `WalletManager` maintains a `monitor_revision` counter that increments whenever the monitored address/outpoint set changes (wallet creation/removal, gap limit maintenance). The mempool manager snapshots this counter after each filter build and compares it on tick — a single u64 read behind a read lock. When the revision diverges, the bloom filter is rebuilt automatically. This eliminates a foot-gun where forgetting to call the external API would silently leave the bloom filter stale. 
--- dash-spv/src/client/lifecycle.rs | 2 + dash-spv/src/sync/mempool/manager.rs | 66 ++-- dash-spv/src/sync/mempool/sync_manager.rs | 286 ++++++++++++++++-- key-wallet-ffi/src/transaction.rs | 74 ++--- key-wallet/src/managed_account/mod.rs | 53 +++- key-wallet/src/manager/mod.rs | 30 +- key-wallet/src/manager/process_block.rs | 107 +++++++ key-wallet/src/manager/wallet_interface.rs | 7 + key-wallet/src/test_utils/wallet.rs | 9 + .../transaction_checking/wallet_checker.rs | 4 + .../wallet_info_interface.rs | 10 + key-wallet/tests/integration_test.rs | 5 + 12 files changed, 550 insertions(+), 103 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index c96480b47..8e055a0cc 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -125,11 +125,13 @@ impl DashSpvClient { /// Prevents duplicate downloads when multiple peers announce the same transactions. /// Entries expire after `SEEN_TXID_EXPIRY`. seen_txids: HashMap, + /// Wallet monitor revision at the time of the last filter build. + /// Compared on each tick to detect when the wallet's monitored set has changed. + pub(super) last_monitor_revision: u64, } impl MempoolManager { @@ -72,6 +75,7 @@ impl MempoolManager { mempool_state: Arc>, strategy: MempoolStrategy, max_transactions: usize, + initial_monitor_revision: u64, ) -> Self { Self { progress: MempoolProgress::default(), @@ -83,6 +87,7 @@ impl MempoolManager { peers: HashMap::new(), pending_is_locks: HashMap::new(), seen_txids: HashMap::new(), + last_monitor_revision: initial_monitor_revision, } } @@ -290,11 +295,7 @@ impl MempoolManager { } /// Handle a received transaction. 
- pub(super) async fn handle_tx( - &mut self, - tx: Transaction, - requests: &RequestSender, - ) -> SyncResult> { + pub(super) async fn handle_tx(&mut self, tx: Transaction) -> SyncResult> { let txid = tx.txid(); self.pending_requests.remove(&txid); self.seen_txids.insert(txid, Instant::now()); @@ -331,10 +332,6 @@ impl MempoolManager { self.progress.set_tracked(state.transactions.len() as u32); } - // Wallet-relevant transactions change the monitored set (new UTXOs, spent - // inputs, potentially new addresses from gap limit maintenance). - self.rebuild_filter(requests).await?; - Ok(vec![]) } @@ -515,7 +512,7 @@ mod tests { let requests = RequestSender::new(tx); let mut manager = - MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000); + MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000, 0); manager.progress.set_state(SyncState::Synced); (manager, requests, rx) @@ -529,7 +526,7 @@ mod tests { let requests = RequestSender::new(tx); let manager = - MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000, 0); (manager, requests, rx) } @@ -599,6 +596,7 @@ mod tests { mempool_state.clone(), MempoolStrategy::FetchAll, 2, // Very small capacity + 0, ); let peer = test_socket_address(1); manager.peers.insert(peer, Some(VecDeque::new())); @@ -640,7 +638,8 @@ mod tests { let (tx, _rx) = mpsc::unbounded_channel::(); let requests = RequestSender::new(tx); - let mut manager = MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 2); + let mut manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 2, 0); manager.progress.set_state(SyncState::Synced); let peer = test_socket_address(1); manager.peers.insert(peer, Some(VecDeque::new())); @@ -666,7 +665,7 @@ mod tests { let _requests = RequestSender::new(tx); let mut manager = - MempoolManager::new(wallet, mempool_state, 
MempoolStrategy::FetchAll, 1000); + MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000, 0); let fresh_txid = Txid::from_byte_array([1; 32]); let stale_txid = Txid::from_byte_array([2; 32]); @@ -684,7 +683,7 @@ mod tests { #[tokio::test] async fn test_handle_tx_irrelevant() { - let (mut manager, requests, _rx) = create_test_manager(); + let (mut manager, _requests, _rx) = create_test_manager(); let tx = Transaction { version: 1, @@ -695,7 +694,7 @@ mod tests { }; let txid = tx.txid(); - let events = manager.handle_tx(tx, &requests).await.unwrap(); + let events = manager.handle_tx(tx).await.unwrap(); // MockWallet returns is_relevant=false by default assert!(events.is_empty()); assert_eq!(manager.progress.received(), 1); @@ -790,15 +789,20 @@ mod tests { let (tx, _rx) = mpsc::unbounded_channel::(); let requests = RequestSender::new(tx); - let manager = - MempoolManager::new(wallet.clone(), mempool_state, MempoolStrategy::BloomFilter, 1000); + let manager = MempoolManager::new( + wallet.clone(), + mempool_state, + MempoolStrategy::BloomFilter, + 1000, + 0, + ); (manager, requests, wallet) } #[tokio::test] async fn test_handle_tx_relevant_stores_transaction() { - let (mut manager, requests, _wallet) = create_relevant_manager(); + let (mut manager, _requests, _wallet) = create_relevant_manager(); let tx = Transaction { version: 1, @@ -809,7 +813,7 @@ mod tests { }; let txid = tx.txid(); - let events = manager.handle_tx(tx, &requests).await.unwrap(); + let events = manager.handle_tx(tx).await.unwrap(); assert!(events.is_empty()); // Verify transaction was stored in mempool state @@ -822,7 +826,7 @@ mod tests { #[tokio::test] async fn test_handle_tx_clears_pending_request() { - let (mut manager, requests, _wallet) = create_relevant_manager(); + let (mut manager, _requests, _wallet) = create_relevant_manager(); let tx = Transaction { version: 1, @@ -837,7 +841,7 @@ mod tests { manager.pending_requests.insert(txid, Instant::now()); 
assert!(manager.pending_requests.contains_key(&txid)); - manager.handle_tx(tx, &requests).await.unwrap(); + manager.handle_tx(tx).await.unwrap(); // Pending request should be cleared regardless of relevance assert!(!manager.pending_requests.contains_key(&txid)); @@ -857,7 +861,7 @@ mod tests { let requests = RequestSender::new(tx); let manager = - MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000, 0); (manager, requests, rx) } @@ -1088,7 +1092,7 @@ mod tests { #[tokio::test] async fn test_instant_send_before_transaction() { - let (mut manager, requests, _wallet) = create_relevant_manager(); + let (mut manager, _requests, _wallet) = create_relevant_manager(); let tx = Transaction { version: 1, @@ -1104,7 +1108,7 @@ mod tests { assert!(manager.pending_is_locks.contains_key(&txid)); // Transaction arrives - manager.handle_tx(tx, &requests).await.unwrap(); + manager.handle_tx(tx).await.unwrap(); // Pending IS lock consumed assert!(manager.pending_is_locks.is_empty()); @@ -1116,7 +1120,7 @@ mod tests { #[tokio::test] async fn test_instant_send_before_irrelevant_transaction() { - let (mut manager, requests, _rx) = create_test_manager(); + let (mut manager, _requests, _rx) = create_test_manager(); let tx = Transaction { version: 1, @@ -1132,7 +1136,7 @@ mod tests { assert!(manager.pending_is_locks.contains_key(&txid)); // Transaction arrives but wallet says it's not relevant - manager.handle_tx(tx, &requests).await.unwrap(); + manager.handle_tx(tx).await.unwrap(); // Pending IS lock cleaned up (no leak) assert!(manager.pending_is_locks.is_empty()); @@ -1274,7 +1278,7 @@ mod tests { let requests = RequestSender::new(tx); let mut manager = - MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000, 0); // Drop receiver so send_filter_load fails drop(rx); @@ -1286,7 
+1290,7 @@ mod tests { #[tokio::test] async fn test_handle_tx_relevant_populates_wallet_effect_fields() { - let (mut manager, requests, wallet) = create_relevant_manager(); + let (mut manager, _requests, wallet) = create_relevant_manager(); let tx = Transaction { version: 1, @@ -1303,7 +1307,7 @@ mod tests { w.set_effect(txid, 50000, vec!["yWdXnYxGbouNoo8yMvcbZmZ3Gdp6BpySxL".into()]).await; } - manager.handle_tx(tx, &requests).await.unwrap(); + manager.handle_tx(tx).await.unwrap(); let state = manager.mempool_state.read().await; let stored = state.transactions.get(&txid).unwrap(); @@ -1316,7 +1320,7 @@ mod tests { #[tokio::test] async fn test_handle_tx_outgoing_transaction() { - let (mut manager, requests, wallet) = create_relevant_manager(); + let (mut manager, _requests, wallet) = create_relevant_manager(); let tx = Transaction { version: 1, @@ -1332,7 +1336,7 @@ mod tests { w.set_effect(txid, -30000, vec![]).await; } - manager.handle_tx(tx, &requests).await.unwrap(); + manager.handle_tx(tx).await.unwrap(); let state = manager.mempool_state.read().await; let stored = state.transactions.get(&txid).unwrap(); diff --git a/dash-spv/src/sync/mempool/sync_manager.rs b/dash-spv/src/sync/mempool/sync_manager.rs index 44c1dfba8..a20f85d8c 100644 --- a/dash-spv/src/sync/mempool/sync_manager.rs +++ b/dash-spv/src/sync/mempool/sync_manager.rs @@ -50,7 +50,7 @@ impl SyncManager for MempoolManager { ) -> SyncResult> { match msg.inner() { NetworkMessage::Inv(inv) => self.handle_inv(inv, msg.peer_address(), requests).await, - NetworkMessage::Tx(tx) => self.handle_tx(tx.clone(), requests).await, + NetworkMessage::Tx(tx) => self.handle_tx(tx.clone()).await, _ => Ok(vec![]), } } @@ -80,22 +80,14 @@ impl SyncManager for MempoolManager { Ok(vec![]) } SyncEvent::BlockProcessed { - new_addresses, confirmed_txids, .. } => { - // Remove confirmed transactions from mempool + // Remove confirmed transactions from mempool. + // Bloom filter rebuild is handled by the tick's revision check. 
if !confirmed_txids.is_empty() { self.remove_confirmed(confirmed_txids).await; } - if self.state() == SyncState::Synced - && (!confirmed_txids.is_empty() || !new_addresses.is_empty()) - { - // Confirmed transactions change the wallet's UTXO set and - // new addresses expand the monitored set. Both make the - // bloom filter stale, so rebuild immediately. - self.rebuild_filter(requests).await?; - } Ok(vec![]) } SyncEvent::InstantLockReceived { @@ -123,6 +115,23 @@ impl SyncManager for MempoolManager { // Send queued getdata requests now that slots may have freed up self.send_queued(requests).await?; + // Rebuild bloom filter if the wallet's monitored set has changed. + // + // We poll the revision counter rather than using push-based wallet events + // for simplicity: the revision lives on `ManagedCoreAccount` and auto-bumps + // on address generation and UTXO mutations, giving us a single source of + // truth without needing event emission after every wallet operation. + // Adding a push-based approach would require a new `select!` branch in the + // shared `SyncManager::run` loop or a `WalletEvent` bridge — complexity + // that isn't justified given the 100ms tick latency is negligible for bloom + // filter rebuilds and the read lock is non-contending. 
+ let current_revision = self.wallet.read().await.monitor_revision(); + if current_revision != self.last_monitor_revision { + tracing::info!("Wallet monitor revision changed, rebuilding bloom filter"); + self.rebuild_filter(requests).await?; + self.last_monitor_revision = current_revision; + } + Ok(vec![]) } @@ -191,7 +200,8 @@ mod tests { let (tx, rx) = mpsc::unbounded_channel::(); let requests = RequestSender::new(tx); - let manager = MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000); + let manager = + MempoolManager::new(wallet, mempool_state, MempoolStrategy::FetchAll, 1000, 0); (manager, requests, rx) } @@ -543,9 +553,8 @@ mod tests { } #[tokio::test] - async fn test_block_processed_confirmed_txids_rebuilds_filter() { + async fn test_block_processed_confirmed_txids_does_not_eagerly_rebuild() { let mut mock = MockWallet::new(); - // Wallet needs at least one address for the bloom filter to be built let script = dashcore::ScriptBuf::from_bytes(vec![ 0x76, 0xa9, 0x14, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0x88, 0xac, @@ -558,7 +567,7 @@ mod tests { let requests = RequestSender::new(tx); let mut manager = - MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000); + MempoolManager::new(wallet, mempool_state, MempoolStrategy::BloomFilter, 1000, 0); let peer = test_socket_address(1); manager.handle_peer_connected(peer); @@ -573,7 +582,8 @@ mod tests { // Drain activation messages while rx.try_recv().is_ok() {} - // BlockProcessed with confirmed txids should rebuild immediately + // BlockProcessed does not eagerly rebuild — the tick handles it via + // the revision check. Verify no FilterLoad is sent from the event handler. 
let event = SyncEvent::BlockProcessed { block_hash: dashcore::BlockHash::all_zeros(), height: 1001, @@ -582,14 +592,10 @@ mod tests { }; manager.handle_sync_event(&event, &requests).await.unwrap(); - // Verify a FilterLoad was sent - let mut found_filter_load = false; - while let Ok(req) = rx.try_recv() { - if matches!(req, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) { - found_filter_load = true; - } - } - assert!(found_filter_load, "expected FilterLoad after confirmed txids"); + let has_filter_load = std::iter::from_fn(|| rx.try_recv().ok()).any(|req| { + matches!(req, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) + }); + assert!(!has_filter_load, "BlockProcessed should not eagerly rebuild filter"); } #[tokio::test] @@ -613,4 +619,236 @@ mod tests { }; manager.handle_sync_event(&event, &requests).await.unwrap(); } + + #[tokio::test] + async fn test_tick_rebuilds_filter_when_monitor_revision_changes() { + let addr = { + let script = dashcore::ScriptBuf::from_bytes(vec![ + 0x76, 0xa9, 0x14, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, + 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0x88, 0xac, + ]); + dashcore::Address::from_script(&script, dashcore::Network::Testnet).unwrap() + }; + + let mut mock = MockWallet::new(); + mock.set_addresses(vec![addr.clone()]); + let initial_revision = mock.monitor_revision(); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, mut rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let mut manager = MempoolManager::new( + wallet.clone(), + mempool_state, + MempoolStrategy::BloomFilter, + 1000, + initial_revision, + ); + + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + // Activate — this snapshots the monitor revision + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, 
&requests).await.unwrap(); + assert_eq!(manager.state(), SyncState::Synced); + + // Drain activation messages + while rx.try_recv().is_ok() {} + + // tick with unchanged revision should not rebuild + manager.tick(&requests).await.unwrap(); + assert!(rx.try_recv().is_err(), "no messages expected when revision unchanged"); + + // Simulate wallet adding new addresses (bumps revision) + { + let mut w = wallet.write().await; + let addr2 = dashcore::Address::from_script( + &dashcore::ScriptBuf::from_bytes(vec![ + 0x76, 0xa9, 0x14, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, + 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0x88, 0xac, + ]), + dashcore::Network::Testnet, + ) + .unwrap(); + w.set_addresses(vec![addr, addr2]); + } + + // tick should detect stale filter and rebuild + manager.tick(&requests).await.unwrap(); + + let mut found_filter_load = false; + while let Ok(msg) = rx.try_recv() { + if matches!(msg, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) { + found_filter_load = true; + } + } + assert!(found_filter_load, "expected FilterLoad after monitor revision change"); + + // Subsequent tick should not rebuild again (revision was snapshotted) + manager.tick(&requests).await.unwrap(); + assert!(rx.try_recv().is_err(), "no messages expected after revision re-snapshot"); + } + + #[tokio::test] + async fn test_tick_skips_rebuild_for_fetch_all_strategy() { + let wallet = Arc::new(RwLock::new(MockWallet::new())); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, mut rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let mut manager = + MempoolManager::new(wallet.clone(), mempool_state, MempoolStrategy::FetchAll, 1000, 0); + + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + while 
rx.try_recv().is_ok() {} + + // Bump revision + { + let mut w = wallet.write().await; + w.set_addresses(vec![dashcore::Address::dummy(dashcore::Network::Testnet, 0)]); + } + + // tick should not send any filter messages for FetchAll + manager.tick(&requests).await.unwrap(); + let mut found_filter = false; + while let Ok(msg) = rx.try_recv() { + if matches!( + msg, + NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _) + | NetworkRequest::SendMessageToPeer(NetworkMessage::FilterClear, _) + ) { + found_filter = true; + } + } + assert!(!found_filter, "FetchAll should not send filter messages on revision change"); + } + + #[tokio::test] + async fn test_tick_rebuilds_filter_when_outpoints_change() { + let addr = { + let script = dashcore::ScriptBuf::from_bytes(vec![ + 0x76, 0xa9, 0x14, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, + 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0x88, 0xac, + ]); + dashcore::Address::from_script(&script, dashcore::Network::Testnet).unwrap() + }; + + let mut mock = MockWallet::new(); + mock.set_addresses(vec![addr]); + let initial_revision = mock.monitor_revision(); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx, mut rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx); + + let mut manager = MempoolManager::new( + wallet.clone(), + mempool_state, + MempoolStrategy::BloomFilter, + 1000, + initial_revision, + ); + + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + while rx.try_recv().is_ok() {} + + // Simulate UTXO set change (new outpoint added) + { + let mut w = wallet.write().await; + w.set_outpoints(vec![dashcore::OutPoint { + txid: dashcore::Txid::from_byte_array([0xee; 32]), + vout: 0, + }]); + } + + // tick should detect the 
revision change and rebuild + manager.tick(&requests).await.unwrap(); + + let found_filter_load = std::iter::from_fn(|| rx.try_recv().ok()).any(|msg| { + matches!(msg, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) + }); + assert!(found_filter_load, "expected FilterLoad after outpoint change"); + } + + #[tokio::test] + async fn test_handle_tx_does_not_eagerly_rebuild_filter() { + let mut mock = MockWallet::new(); + mock.set_mempool_relevant(true); + let script = dashcore::ScriptBuf::from_bytes(vec![ + 0x76, 0xa9, 0x14, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, + 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0xab, 0x88, 0xac, + ]); + let addr = dashcore::Address::from_script(&script, dashcore::Network::Testnet).unwrap(); + mock.set_addresses(vec![addr]); + let initial_revision = mock.monitor_revision(); + let wallet = Arc::new(RwLock::new(mock)); + let mempool_state = Arc::new(RwLock::new(MempoolState::default())); + let (tx_chan, mut rx) = mpsc::unbounded_channel::(); + let requests = RequestSender::new(tx_chan); + + let mut manager = MempoolManager::new( + wallet.clone(), + mempool_state, + MempoolStrategy::BloomFilter, + 1000, + initial_revision, + ); + + let peer = test_socket_address(1); + manager.handle_peer_connected(peer); + + let sync = SyncEvent::SyncComplete { + header_tip: 1000, + cycle: 0, + }; + manager.handle_sync_event(&sync, &requests).await.unwrap(); + while rx.try_recv().is_ok() {} + + // handle_tx with a relevant transaction should NOT eagerly rebuild + let tx = dashcore::Transaction { + version: 1, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + manager.handle_tx(tx).await.unwrap(); + + let has_filter_load = std::iter::from_fn(|| rx.try_recv().ok()).any(|msg| { + matches!(msg, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) + }); + assert!(!has_filter_load, "handle_tx should not eagerly rebuild filter"); + + // But the next tick 
should catch it if the wallet revision changed + // (MockWallet bumps revision when set_mempool_relevant triggers processing) + { + let mut w = wallet.write().await; + w.set_addresses(vec![dashcore::Address::dummy(dashcore::Network::Testnet, 0)]); + } + manager.tick(&requests).await.unwrap(); + + let found_filter_load = std::iter::from_fn(|| rx.try_recv().ok()).any(|msg| { + matches!(msg, NetworkRequest::SendMessageToPeer(NetworkMessage::FilterLoad(_), _)) + }); + assert!(found_filter_load, "tick should rebuild after revision change"); + } } diff --git a/key-wallet-ffi/src/transaction.rs b/key-wallet-ffi/src/transaction.rs index ef48fcc06..9807590db 100644 --- a/key-wallet-ffi/src/transaction.rs +++ b/key-wallet-ffi/src/transaction.rs @@ -10,6 +10,7 @@ use dashcore::{ ScriptBuf, Transaction, TxIn, TxOut, Txid, }; use key_wallet::wallet::managed_wallet_info::fee::FeeRate; +use key_wallet::wallet::managed_wallet_info::transaction_building::AccountTypePreference; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; use secp256k1::{Message, Secp256k1, SecretKey}; @@ -123,40 +124,57 @@ pub unsafe extern "C" fn wallet_build_and_sign_transaction( manager_ref.runtime.block_on(async { let mut manager = manager_ref.manager.write().await; - - let managed_wallet = manager.get_wallet_info_mut(&wallet_ref.inner().wallet_id); - - let Some(managed_wallet) = managed_wallet else { - FFIError::set_error( - error, - FFIErrorCode::InvalidInput, - "Could not obtain ManagedWalletInfo for the provided wallet".to_string(), - ); - return false; - }; - - // Get the managed account - let managed_account = - match managed_wallet.accounts.standard_bip44_accounts.get_mut(&account_index) { - Some(account) => account, + let wallet_id = wallet_ref.inner().wallet_id; + + // Get change address through the manager + let change_address = match manager.get_change_address( + &wallet_id, + account_index, + AccountTypePreference::BIP44, + true, + ) { + Ok(result) => 
match result.address { + Some(addr) => addr, None => { FFIError::set_error( error, FFIErrorCode::WalletError, - format!("Account {} not found", account_index), + "No change address available".to_string(), ); return false; } - }; + }, + Err(e) => { + FFIError::set_error( + error, + FFIErrorCode::WalletError, + format!("Failed to get change address: {}", e), + ); + return false; + } + }; + + // Get the managed account for UTXOs and signing data + let managed_wallet = match manager.get_wallet_info_mut(&wallet_id) { + Some(info) => info, + None => { + FFIError::set_error( + error, + FFIErrorCode::InvalidInput, + "Could not obtain ManagedWalletInfo for the provided wallet".to_string(), + ); + return false; + } + }; - let wallet_account = - match wallet_ref.inner().accounts.standard_bip44_accounts.get(&account_index) { + let managed_account = + match managed_wallet.accounts.standard_bip44_accounts.get_mut(&account_index) { Some(account) => account, None => { FFIError::set_error( error, FFIErrorCode::WalletError, - format!("Wallet account {} not found", account_index), + format!("Account {} not found", account_index), ); return false; } @@ -229,20 +247,6 @@ pub unsafe extern "C" fn wallet_build_and_sign_transaction( }; } - // Get change address (next internal address) - let xpub = wallet_account.extended_public_key(); - let change_address = match managed_account.next_change_address(Some(&xpub), true) { - Ok(addr) => addr, - Err(e) => { - FFIError::set_error( - error, - FFIErrorCode::WalletError, - format!("Failed to get change address: {}", e), - ); - return false; - } - }; - tx_builder = tx_builder .set_change_address(change_address) .set_fee_rate(FeeRate::new(fee_per_kb)); diff --git a/key-wallet/src/managed_account/mod.rs b/key-wallet/src/managed_account/mod.rs index 82ba2b803..668c1c77b 100644 --- a/key-wallet/src/managed_account/mod.rs +++ b/key-wallet/src/managed_account/mod.rs @@ -64,6 +64,10 @@ pub struct ManagedCoreAccount { /// Rebuilt from `transactions` during 
deserialization. #[cfg_attr(feature = "serde", serde(skip_serializing))] spent_outpoints: HashSet, + /// Revision counter incremented when the monitored address set changes + /// (e.g. new addresses generated). Used to detect bloom filter staleness. + #[cfg_attr(feature = "serde", serde(skip_serializing))] + monitor_revision: u64, } impl ManagedCoreAccount { @@ -78,9 +82,20 @@ impl ManagedCoreAccount { transactions: BTreeMap::new(), utxos: BTreeMap::new(), spent_outpoints: HashSet::new(), + monitor_revision: 0, } } + /// Return the current monitor revision. + pub fn monitor_revision(&self) -> u64 { + self.monitor_revision + } + + /// Increment the monitor revision to signal that the monitored address set changed. + pub fn bump_monitor_revision(&mut self) { + self.monitor_revision += 1; + } + /// Check if an outpoint was spent by a previously recorded transaction. fn is_outpoint_spent(&self, outpoint: &OutPoint) -> bool { self.spent_outpoints.contains(outpoint) @@ -315,6 +330,7 @@ impl ManagedCoreAccount { .collect(); let txid = tx.txid(); + let mut utxos_changed = false; // Insert UTXOs for outputs paying to our addresses for (vout, output) in tx.output.iter().enumerate() { @@ -355,6 +371,7 @@ impl ManagedCoreAccount { utxo.is_instantlocked = matches!(context, TransactionContext::InstantSend); self.utxos.insert(outpoint, utxo); + utxos_changed = true; } } } @@ -369,8 +386,13 @@ impl ManagedCoreAccount { txid = %tx.txid(), "Removed spent UTXO" ); + utxos_changed = true; } } + + if utxos_changed { + self.monitor_revision += 1; + } } _ => {} } @@ -506,12 +528,15 @@ impl ManagedCoreAccount { None => address_pool::KeySource::NoKeySource, }; - external_addresses.next_unused(&key_source, add_to_state).map_err(|e| match e { - crate::error::Error::NoKeySource => { - "No unused addresses available and no key source provided" - } - _ => "Failed to generate receive address", - }) + let addr = + external_addresses.next_unused(&key_source, add_to_state).map_err(|e| match e { + 
crate::error::Error::NoKeySource => { + "No unused addresses available and no key source provided" + } + _ => "Failed to generate receive address", + })?; + self.monitor_revision += 1; + Ok(addr) } else { Err("Cannot generate receive address for non-standard account type") } @@ -537,12 +562,15 @@ impl ManagedCoreAccount { None => address_pool::KeySource::NoKeySource, }; - internal_addresses.next_unused(&key_source, add_to_state).map_err(|e| match e { - crate::error::Error::NoKeySource => { - "No unused addresses available and no key source provided" - } - _ => "Failed to generate change address", - }) + let addr = + internal_addresses.next_unused(&key_source, add_to_state).map_err(|e| match e { + crate::error::Error::NoKeySource => { + "No unused addresses available and no key source provided" + } + _ => "Failed to generate change address", + })?; + self.monitor_revision += 1; + Ok(addr) } else { Err("Cannot generate change address for non-standard account type") } @@ -1121,6 +1149,7 @@ impl<'de> Deserialize<'de> for ManagedCoreAccount { transactions: helper.transactions, utxos: helper.utxos, spent_outpoints, + monitor_revision: 0, }) } } diff --git a/key-wallet/src/manager/mod.rs b/key-wallet/src/manager/mod.rs index 376ba1f7e..90fdbf34c 100644 --- a/key-wallet/src/manager/mod.rs +++ b/key-wallet/src/manager/mod.rs @@ -98,6 +98,10 @@ pub struct WalletManager { wallets: BTreeMap, /// Mutable wallet info indexed by wallet ID wallet_infos: BTreeMap, + /// Structural revision counter incremented when wallets or accounts are + /// added/removed. Combined with per-wallet account-level revisions to + /// produce the total monitor revision. 
+ structural_revision: u64, /// Event sender for wallet events #[cfg(feature = "std")] event_sender: broadcast::Sender, @@ -112,6 +116,7 @@ impl WalletManager { filter_committed_height: 0, wallets: BTreeMap::new(), wallet_infos: BTreeMap::new(), + structural_revision: 0, #[cfg(feature = "std")] event_sender: broadcast::Sender::new(DEFAULT_WALLET_EVENT_CAPACITY), } @@ -131,6 +136,17 @@ impl WalletManager { &self.event_sender } + /// Return the total monitor revision (structural + per-wallet account revisions). + pub fn monitor_revision(&self) -> u64 { + self.structural_revision + + self.wallet_infos.values().map(|w| w.monitor_revision()).sum::() + } + + /// Increment the structural revision for wallet/account additions or removals. + fn bump_structural_revision(&mut self) { + self.structural_revision += 1; + } + /// Create a new wallet from mnemonic and add it to the manager /// Returns the computed wallet ID pub fn create_wallet_from_mnemonic( @@ -182,6 +198,7 @@ impl WalletManager { self.wallets.insert(wallet_id, wallet_mut); self.wallet_infos.insert(wallet_id, managed_info); + self.bump_structural_revision(); Ok(wallet_id) } @@ -288,6 +305,7 @@ impl WalletManager { self.wallets.insert(wallet_id, final_wallet); self.wallet_infos.insert(wallet_id, managed_info); + self.bump_structural_revision(); Ok((serialized_bytes, wallet_id)) } @@ -321,6 +339,7 @@ impl WalletManager { self.wallets.insert(wallet_id, wallet); self.wallet_infos.insert(wallet_id, managed_info); + self.bump_structural_revision(); Ok(wallet_id) } @@ -353,6 +372,9 @@ impl WalletManager { self.wallets.remove(wallet_id).ok_or(WalletError::WalletNotFound(*wallet_id))?; let info = self.wallet_infos.remove(wallet_id).ok_or(WalletError::WalletNotFound(*wallet_id))?; + // Absorb the removed wallet's account-level revision so the total + // stays monotonically increasing even though we lost a contributor. 
+ self.structural_revision += info.monitor_revision() + 1; Ok((wallet, info)) } @@ -413,6 +435,7 @@ impl WalletManager { self.wallets.insert(wallet_id, wallet); self.wallet_infos.insert(wallet_id, managed_info); + self.bump_structural_revision(); Ok(wallet_id) } @@ -460,6 +483,7 @@ impl WalletManager { self.wallets.insert(wallet_id, wallet); self.wallet_infos.insert(wallet_id, managed_info); + self.bump_structural_revision(); Ok(wallet_id) } @@ -504,6 +528,7 @@ impl WalletManager { self.wallets.insert(wallet_id, wallet); self.wallet_infos.insert(wallet_id, managed_info); + self.bump_structural_revision(); Ok(wallet_id) } @@ -614,7 +639,10 @@ impl WalletManager { wallet .add_account(account_type, account_xpub) - .map_err(|e| WalletError::AccountCreation(e.to_string())) + .map_err(|e| WalletError::AccountCreation(e.to_string()))?; + + self.bump_structural_revision(); + Ok(()) } /// Get all accounts in a specific wallet diff --git a/key-wallet/src/manager/process_block.rs b/key-wallet/src/manager/process_block.rs index e67ded76c..2c934b195 100644 --- a/key-wallet/src/manager/process_block.rs +++ b/key-wallet/src/manager/process_block.rs @@ -96,6 +96,10 @@ impl WalletInterface for WalletM self.watched_outpoints() } + fn monitor_revision(&self) -> u64 { + self.monitor_revision() + } + async fn transaction_effect(&self, tx: &Transaction) -> Option<(i64, Vec)> { // Aggregate across all managed wallets. If any wallet considers it relevant, // compute net = total_received - total_sent and collect involved addresses. 
@@ -230,8 +234,12 @@ impl WalletInterface for WalletM #[cfg(test)] mod tests { use super::*; + use crate::account::StandardAccountType; use crate::manager::test_helpers::*; + use crate::wallet::initialization::WalletAccountCreationOptions; + use crate::wallet::managed_wallet_info::transaction_building::AccountTypePreference; use crate::wallet::managed_wallet_info::ManagedWalletInfo; + use crate::AccountType; use dashcore::block::{Header, Version}; use dashcore::hashes::Hash; use dashcore::pow::CompactTarget; @@ -375,4 +383,103 @@ mod tests { "involved_addresses should contain the target address" ); } + + #[tokio::test] + async fn test_monitor_revision_bumps_and_stability() { + let mut manager: WalletManager = WalletManager::new(Network::Testnet); + let mut expected_rev = 0u64; + assert_eq!(manager.monitor_revision(), expected_rev); + + // create_wallet_from_mnemonic bumps + let wallet_id = manager + .create_wallet_from_mnemonic( + TEST_MNEMONIC, + "", + 0, + WalletAccountCreationOptions::Default, + ) + .unwrap(); + expected_rev += 1; + assert_eq!(manager.monitor_revision(), expected_rev, "after create_wallet_from_mnemonic"); + + // create_account bumps + manager + .create_account( + &wallet_id, + AccountType::Standard { + index: 1, + standard_account_type: StandardAccountType::BIP44Account, + }, + None, + ) + .unwrap(); + expected_rev += 1; + assert_eq!(manager.monitor_revision(), expected_rev, "after create_account"); + + // get_receive_address bumps (when address is generated) + let result = + manager.get_receive_address(&wallet_id, 0, AccountTypePreference::PreferBIP44, true); + if result.is_ok() && result.unwrap().address.is_some() { + expected_rev += 1; + assert_eq!(manager.monitor_revision(), expected_rev, "after get_receive_address"); + } + + // get_change_address bumps (when address is generated) + let result = + manager.get_change_address(&wallet_id, 0, AccountTypePreference::PreferBIP44, true); + if result.is_ok() && result.unwrap().address.is_some() { + 
expected_rev += 1; + assert_eq!(manager.monitor_revision(), expected_rev, "after get_change_address"); + } + + // update_synced_height does NOT bump + manager.update_synced_height(1000); + assert_eq!(manager.monitor_revision(), expected_rev, "after update_synced_height"); + + // process_mempool_transaction bumps from UTXO changes and possibly + // new addresses generated via gap limit maintenance + let rev_before_mempool = manager.monitor_revision(); + let addr = manager.monitored_addresses()[0].clone(); + let tx = create_tx_paying_to(&addr, 0xd0); + let _result = manager.process_mempool_transaction(&tx, false).await; + assert!( + manager.monitor_revision() > rev_before_mempool, + "mempool tx paying to our address should bump revision (UTXO added)" + ); + let rev_after_mempool = manager.monitor_revision(); + + // process_instant_send_lock does NOT bump (no outpoint set change) + manager.process_instant_send_lock(tx.txid()); + assert_eq!( + manager.monitor_revision(), + rev_after_mempool, + "after process_instant_send_lock" + ); + + // process_block bumps from UTXO changes and possibly new addresses + let rev_before_block = manager.monitor_revision(); + let tx2 = create_tx_paying_to(&addr, 0xd1); + let block = make_block(vec![tx2]); + let _result = manager.process_block(&block, 100).await; + assert!( + manager.monitor_revision() > rev_before_block, + "block with tx paying to our address should bump revision (UTXO added)" + ); + + // remove_wallet absorbs the wallet's account-level revision + 1 + let rev_before_remove = manager.monitor_revision(); + manager.remove_wallet(&wallet_id).unwrap(); + assert!( + manager.monitor_revision() > rev_before_remove, + "remove_wallet should bump revision" + ); + + // create_wallet_with_random_mnemonic bumps structural revision + let rev_before = manager.monitor_revision(); + manager.create_wallet_with_random_mnemonic(WalletAccountCreationOptions::Default).unwrap(); + assert!( + manager.monitor_revision() > rev_before, + 
"create_wallet_with_random_mnemonic should bump revision" + ); + } } diff --git a/key-wallet/src/manager/wallet_interface.rs b/key-wallet/src/manager/wallet_interface.rs index e814d8eda..22ae0f087 100644 --- a/key-wallet/src/manager/wallet_interface.rs +++ b/key-wallet/src/manager/wallet_interface.rs @@ -117,6 +117,13 @@ pub trait WalletInterface: Send + Sync + 'static { } } + /// Return a revision counter that increments whenever the set of monitored + /// addresses or watched outpoints changes. The mempool manager uses this to + /// detect when its bloom filter is stale without requiring an external signal. + fn monitor_revision(&self) -> u64 { + 0 + } + /// Subscribe to wallet events (e.g. transactions received, balance changes). fn subscribe_events(&self) -> broadcast::Receiver; diff --git a/key-wallet/src/test_utils/wallet.rs b/key-wallet/src/test_utils/wallet.rs index 6b30ab4ed..5cf6ded55 100644 --- a/key-wallet/src/test_utils/wallet.rs +++ b/key-wallet/src/test_utils/wallet.rs @@ -42,6 +42,8 @@ pub struct MockWallet { mempool_new_addresses: Vec
, /// Recorded status change notifications for test assertions. status_changes: Arc>>, + /// Monitor revision counter for staleness detection. + monitor_revision: u64, } impl Default for MockWallet { @@ -64,6 +66,7 @@ impl MockWallet { outpoints: Vec::new(), mempool_new_addresses: Vec::new(), status_changes: Arc::new(Mutex::new(Vec::new())), + monitor_revision: 0, } } @@ -75,11 +78,13 @@ impl MockWallet { /// Set the addresses returned by monitored_addresses. pub fn set_addresses(&mut self, addresses: Vec
) { self.addresses = addresses; + self.monitor_revision += 1; } /// Set the outpoints returned by watched_outpoints. pub fn set_outpoints(&mut self, outpoints: Vec) { self.outpoints = outpoints; + self.monitor_revision += 1; } /// Set new addresses returned by process_mempool_transaction. @@ -177,6 +182,10 @@ impl WalletInterface for MockWallet { self.synced_height = height; } + fn monitor_revision(&self) -> u64 { + self.monitor_revision + } + fn subscribe_events(&self) -> broadcast::Receiver { self.event_sender.subscribe() } diff --git a/key-wallet/src/transaction_checking/wallet_checker.rs b/key-wallet/src/transaction_checking/wallet_checker.rs index 9ff7ecfad..04052f0b7 100644 --- a/key-wallet/src/transaction_checking/wallet_checker.rs +++ b/key-wallet/src/transaction_checking/wallet_checker.rs @@ -235,6 +235,7 @@ impl WalletTransactionChecker for ManagedWalletInfo { }; let key_source = KeySource::Public(xpub); + let rev_before = result.new_addresses.len(); for pool in account.account_type.address_pools_mut() { match pool.maintain_gap_limit(&key_source) { Ok(addrs) => result.new_addresses.extend(addrs), @@ -248,6 +249,9 @@ impl WalletTransactionChecker for ManagedWalletInfo { } } } + if result.new_addresses.len() > rev_before { + account.bump_monitor_revision(); + } } if is_new { diff --git a/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs b/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs index 627f3217d..b4cf407d3 100644 --- a/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs +++ b/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs @@ -94,6 +94,12 @@ pub trait WalletInfoInterface: Sized + WalletTransactionChecker + ManagedAccount /// Mark UTXOs for a transaction as InstantSend-locked across all accounts. /// Returns `true` if any UTXO was newly marked. fn mark_instant_send_utxos(&mut self, txid: &Txid) -> bool; + + /// Return the aggregated monitor revision across all accounts. 
+ /// Increments whenever the monitored address set changes. + fn monitor_revision(&self) -> u64 { + 0 + } } /// Default implementation for ManagedWalletInfo @@ -248,4 +254,8 @@ impl WalletInfoInterface for ManagedWalletInfo { } any_changed } + + fn monitor_revision(&self) -> u64 { + self.accounts.all_accounts().iter().map(|a| a.monitor_revision()).sum() + } } diff --git a/key-wallet/tests/integration_test.rs b/key-wallet/tests/integration_test.rs index e3bcff52b..37d7580c8 100644 --- a/key-wallet/tests/integration_test.rs +++ b/key-wallet/tests/integration_test.rs @@ -19,6 +19,7 @@ fn test_wallet_manager_creation() { // WalletManager::new returns Self, not Result assert_eq!(manager.synced_height(), 0); assert_eq!(manager.wallet_count(), 0); // No wallets created yet + assert_eq!(manager.monitor_revision(), 0); } #[test] @@ -26,6 +27,7 @@ fn test_wallet_manager_from_mnemonic() { // Create from a test mnemonic let mnemonic = Mnemonic::generate(12, Language::English).unwrap(); let mut manager = WalletManager::::new(Network::Testnet); + assert_eq!(manager.monitor_revision(), 0); // Create a wallet from mnemonic let wallet_result = manager.create_wallet_from_mnemonic( @@ -36,6 +38,7 @@ fn test_wallet_manager_from_mnemonic() { ); assert!(wallet_result.is_ok(), "Failed to create wallet: {:?}", wallet_result); assert_eq!(manager.wallet_count(), 1); + assert_eq!(manager.monitor_revision(), 1); } #[test] @@ -47,6 +50,7 @@ fn test_account_management() { manager.create_wallet_with_random_mnemonic(WalletAccountCreationOptions::Default); assert!(wallet_result.is_ok(), "Failed to create wallet: {:?}", wallet_result); let wallet_id = wallet_result.unwrap(); + assert_eq!(manager.monitor_revision(), 1); // Add accounts to the wallet // Note: Index 0 already exists from wallet creation, so use index 1 @@ -59,6 +63,7 @@ fn test_account_management() { None, ); assert!(result.is_ok()); + assert_eq!(manager.monitor_revision(), 2); // Get accounts from wallet - Default creates 11 
accounts (including PlatformPayment), plus the one we added let accounts = manager.get_accounts(&wallet_id); From 450f72f5148b9fea2cd79bd1e37fa9d68a9fd18a Mon Sep 17 00:00:00 2001 From: xdustinface Date: Fri, 20 Mar 2026 17:55:12 +0700 Subject: [PATCH 3/3] fix: activate mempool on `FiltersSyncComplete` instead of waiting for full sync The mempool only needs the wallet's address/UTXO set for the bloom filter, which is fully populated after filter+block sync. Waiting for masternode sync delays mempool activation unnecessarily since the transaction checker doesn't depend on masternode data. --- dash-spv/src/sync/mempool/sync_manager.rs | 99 ++++++++++------------- 1 file changed, 42 insertions(+), 57 deletions(-) diff --git a/dash-spv/src/sync/mempool/sync_manager.rs b/dash-spv/src/sync/mempool/sync_manager.rs index a20f85d8c..33cf41bd4 100644 --- a/dash-spv/src/sync/mempool/sync_manager.rs +++ b/dash-spv/src/sync/mempool/sync_manager.rs @@ -61,7 +61,9 @@ impl SyncManager for MempoolManager { requests: &RequestSender, ) -> SyncResult> { match event { - SyncEvent::SyncComplete { + // Activate as soon as filter sync completes — the wallet's address + // and UTXO set is fully populated at this point. + SyncEvent::FiltersSyncComplete { .. 
} => { if self.state() != SyncState::Synced { @@ -69,11 +71,11 @@ impl SyncManager for MempoolManager { let has_activated = self.peers.values().any(|v| v.is_some()); if has_activated { self.set_state(SyncState::Synced); - tracing::info!("Mempool manager activated on all peers"); + tracing::info!("Mempool manager activated after filter sync"); return Ok(vec![]); } else { tracing::warn!( - "Sync complete but no peers available for mempool activation" + "Filter sync complete but no peers available for mempool activation" ); } } @@ -225,14 +227,13 @@ mod tests { } #[tokio::test] - async fn test_handle_sync_complete_activates() { + async fn test_filters_sync_complete_activates() { let (mut manager, requests, _rx) = create_test_manager(); let peer = crate::test_utils::test_socket_address(1); manager.handle_peer_connected(peer); - let event = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let event = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; let events = manager.handle_sync_event(&event, &requests).await.unwrap(); @@ -242,21 +243,19 @@ mod tests { } #[tokio::test] - async fn test_handle_sync_complete_subsequent_cycles() { + async fn test_filters_sync_complete_subsequent_is_noop() { let (mut manager, requests, _rx) = create_test_manager(); manager.handle_peer_connected(crate::test_utils::test_socket_address(1)); // Activate first - let event0 = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let event0 = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&event0, &requests).await.unwrap(); - // Subsequent cycles should not change state - let event1 = SyncEvent::SyncComplete { - header_tip: 1001, - cycle: 1, + // Subsequent filter sync completions should not change state + let event1 = SyncEvent::FiltersSyncComplete { + tip_height: 1001, }; let events = manager.handle_sync_event(&event1, &requests).await.unwrap(); assert!(events.is_empty()); @@ -270,9 +269,8 @@ mod tests { 
manager.handle_peer_connected(peer); // Initial activation - let event = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let event = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; let events = manager.handle_sync_event(&event, &requests).await.unwrap(); assert!(events.is_empty()); @@ -282,9 +280,8 @@ mod tests { manager.set_state(SyncState::WaitForEvents); // Re-sync should re-activate - let event = SyncEvent::SyncComplete { - header_tip: 1001, - cycle: 1, + let event = SyncEvent::FiltersSyncComplete { + tip_height: 1001, }; let events = manager.handle_sync_event(&event, &requests).await.unwrap(); assert!(events.is_empty()); @@ -298,9 +295,8 @@ mod tests { manager.handle_peer_connected(peer1); // Activate via SyncComplete - let event = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let event = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&event, &requests).await.unwrap(); assert!(matches!(manager.peers.get(&peer1), Some(Some(_)))); @@ -359,9 +355,8 @@ mod tests { manager.handle_peer_connected(peer); // Activate - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); @@ -410,9 +405,8 @@ mod tests { manager.handle_peer_connected(peer); // Activate - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); @@ -459,9 +453,8 @@ mod tests { manager.handle_peer_connected(peer); // Activate - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); @@ -478,9 +471,8 @@ mod tests { async fn test_sync_complete_no_peers_stays_inactive() { let (mut manager, requests, _rx) = 
create_test_manager(); - let event = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let event = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; let events = manager.handle_sync_event(&event, &requests).await.unwrap(); @@ -509,9 +501,8 @@ mod tests { manager.handle_peer_connected(peer); // Activate via SyncComplete - let event = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let event = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&event, &requests).await.unwrap(); assert_eq!(manager.state(), SyncState::Synced); @@ -573,9 +564,8 @@ mod tests { manager.handle_peer_connected(peer); // Activate - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); @@ -604,9 +594,8 @@ mod tests { let peer = test_socket_address(1); manager.handle_peer_connected(peer); - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); @@ -650,9 +639,8 @@ mod tests { manager.handle_peer_connected(peer); // Activate — this snapshots the monitor revision - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); assert_eq!(manager.state(), SyncState::Synced); @@ -707,9 +695,8 @@ mod tests { let peer = test_socket_address(1); manager.handle_peer_connected(peer); - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); while rx.try_recv().is_ok() {} @@ -764,9 +751,8 @@ mod tests { let peer = test_socket_address(1); manager.handle_peer_connected(peer); - let sync = 
SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); while rx.try_recv().is_ok() {} @@ -816,9 +802,8 @@ mod tests { let peer = test_socket_address(1); manager.handle_peer_connected(peer); - let sync = SyncEvent::SyncComplete { - header_tip: 1000, - cycle: 0, + let sync = SyncEvent::FiltersSyncComplete { + tip_height: 1000, }; manager.handle_sync_event(&sync, &requests).await.unwrap(); while rx.try_recv().is_ok() {}