From 2a756cd489f3b19597cbcf7ad337da3cb5e63129 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 15:18:12 +0100 Subject: [PATCH 01/24] chanmon_consistency: fix SplicePending event handling Look up the splice tx by txid instead of assuming it is the first broadcasted tx. Also skip confirmation if the tx is already confirmed, which can happen when SplicePending fires more than once. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 476362324ad..dc61d74bb8a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1931,16 +1931,20 @@ pub fn do_test( .unwrap(); }, events::Event::SplicePending { new_funding_txo, .. } => { - let broadcaster = match $node { - 0 => &broadcast_a, - 1 => &broadcast_b, - _ => &broadcast_c, - }; - let mut txs = broadcaster.txn_broadcasted.borrow_mut(); - assert!(txs.len() >= 1); - let splice_tx = txs.remove(0); - assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); - chain_state.confirm_tx(splice_tx); + if !chain_state.confirmed_txids.contains(&new_funding_txo.txid) { + let broadcaster = match $node { + 0 => &broadcast_a, + 1 => &broadcast_b, + _ => &broadcast_c, + }; + let mut txs = broadcaster.txn_broadcasted.borrow_mut(); + let pos = txs + .iter() + .position(|tx| new_funding_txo.txid == tx.compute_txid()) + .expect("SplicePending but splice tx not found in broadcaster"); + let splice_tx = txs.remove(pos); + chain_state.confirm_tx(splice_tx); + } }, events::Event::SpliceFailed { .. 
} => {}, From 2efe565e8948d7066ee47e77689775b583bbb925 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:00:19 +0100 Subject: [PATCH 02/24] chanmon_consistency: handle force-close-related events and messages Prepare the chanmon_consistency fuzzer to tolerate force-close scenarios by handling the event and message types that force-closes produce. For HandleError, widen the accepted ErrorAction variants beyond DisconnectPeerWithWarning (timeout) to also accept DisconnectPeer, which is what funded channel force-closes generate. This change applies to all four locations: push_excess_b_events\!, process_msg_events\!, and both arms of drain_msg_events_on_disconnect\!. For events, add no-op handling for Event::ChannelClosed and Event::BumpTransaction in process_events\!. Also handle BroadcastChannelUpdate in push_excess_b_events\!, since force-closing a public channel generates this message type. No behavioral change: without force-close action bytes, none of these new paths will fire. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 58 +++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index dc61d74bb8a..c5129752472 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -852,17 +852,6 @@ fn send_mpp_hop_payment( } } -#[inline] -fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { - // Since sending/receiving messages may be delayed, `timer_tick_occurred` may cause a node to - // disconnect their counterparty if they're expecting a timely response. 
- assert!(matches!( - action, - msgs::ErrorAction::DisconnectPeerWithWarning { msg } - if msg.data.contains("Disconnecting due to timeout awaiting response") - )); -} - #[inline] pub fn do_test( data: &[u8], underlying_out: Out, anchors: bool, @@ -1528,10 +1517,17 @@ pub fn do_test( *node_id == a_id }, MessageSendEvent::HandleError { ref action, ref node_id } => { - assert_action_timeout_awaiting_response(action); + match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains("Disconnecting due to timeout awaiting response") => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), + } if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, + MessageSendEvent::BroadcastChannelUpdate { .. } => continue, _ => panic!("Unhandled message event {:?}", event), }; if push_a { ba_events.push(event); } else { bc_events.push(event); } @@ -1741,8 +1737,20 @@ pub fn do_test( } } }, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); + MessageSendEvent::HandleError { ref action, ref node_id } => { + match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains("Disconnecting due to timeout awaiting response") => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { ref msg } => { + for dest in nodes.iter() { + if dest.get_our_node_id() == *node_id { + dest.handle_error(nodes[$node].get_our_node_id(), msg); + } + } + }, + _ => panic!("Unexpected HandleError action {:?}", action), + } }, MessageSendEvent::SendChannelReady { .. } => { // Can be generated as a reestablish response @@ -1798,8 +1806,14 @@ pub fn do_test( MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. 
} => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); + MessageSendEvent::HandleError { ref action, .. } => match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains( + "Disconnecting due to timeout awaiting response", + ) => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), }, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { @@ -1826,8 +1840,14 @@ pub fn do_test( MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); + MessageSendEvent::HandleError { ref action, .. } => match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains( + "Disconnecting due to timeout awaiting response", + ) => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), }, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { @@ -1947,6 +1967,8 @@ pub fn do_test( } }, events::Event::SpliceFailed { .. } => {}, + events::Event::ChannelClosed { .. } => {}, + events::Event::BumpTransaction(..) => {}, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { From a42bb7a5304bdf60d49bcca34973977b9bfab08e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:01:13 +0100 Subject: [PATCH 03/24] chanmon_consistency: add SignHolderCommitment to supported signer ops Force-closing a channel requires signing the holder commitment transaction. 
Add SignHolderCommitment to SUPPORTED_SIGNER_OPS and add action bytes 0xcc-0xce to enable this op per node, following the existing pattern for other signer operations. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index c5129752472..f17b078bdea 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -500,12 +500,11 @@ impl SignerProvider for KeyProvider { } } -// Since this fuzzer is only concerned with live-channel operations, we don't need to worry about -// any signer operations that come after a force close. -const SUPPORTED_SIGNER_OPS: [SignerOp; 3] = [ +const SUPPORTED_SIGNER_OPS: [SignerOp; 4] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, SignerOp::ReleaseCommitmentSecret, + SignerOp::SignHolderCommitment, ]; impl KeyProvider { @@ -2547,6 +2546,18 @@ pub fn do_test( keys_manager_c.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); nodes[2].signer_unblocked(None); }, + 0xcc => { + keys_manager_a.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + nodes[0].signer_unblocked(None); + }, + 0xcd => { + keys_manager_b.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + nodes[1].signer_unblocked(None); + }, + 0xce => { + keys_manager_c.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + nodes[2].signer_unblocked(None); + }, 0xf0 => { for id in &chan_ab_ids { From cd3c9031c76cbc548e8c8249b616be5dc22a448c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:02:48 +0100 Subject: [PATCH 04/24] chanmon_consistency: sync chain monitors alongside channel managers Extend the sync_with_chain_state closure to also notify the TestChainMonitor of confirmed transactions and new blocks. 
This is necessary for force-close coverage where the chain monitor needs to see commitment transaction confirmations to process funding spends and trigger HTLC resolution. The monitor is synced before the channel manager at each block height so that monitor events are available when the manager processes the same block. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 57 +++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f17b078bdea..604f9827a4a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1290,6 +1290,7 @@ pub fn do_test( let sync_with_chain_state = |chain_state: &ChainState, node: &ChannelManager<_, _, _, _, _, _, _, _, _>, + monitor: &TestChainMonitor, node_height: &mut u32, num_blocks: Option| { let target_height = if let Some(num_blocks) = num_blocks { @@ -1303,16 +1304,18 @@ pub fn do_test( let (header, txn) = chain_state.block_at(*node_height); let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); if !txdata.is_empty() { + monitor.chain_monitor.transactions_confirmed(header, &txdata, *node_height); node.transactions_confirmed(header, &txdata, *node_height); } + monitor.chain_monitor.best_block_updated(header, *node_height); node.best_block_updated(header, *node_height); } }; // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&mut chain_state, &nodes[0], &monitor_a, &mut node_height_a, None); + sync_with_chain_state(&mut chain_state, &nodes[1], &monitor_b, &mut node_height_b, None); + sync_with_chain_state(&mut chain_state, &nodes[2], &monitor_c, &mut node_height_c, None); lock_fundings!(nodes); @@ -2419,13 +2422,49 @@ pub fn do_test( }, // Sync node by 1 block to cover confirmation of a transaction. - 0xa8 => sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, Some(1)), - 0xa9 => sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, Some(1)), - 0xaa => sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, Some(1)), + 0xa8 => sync_with_chain_state( + &mut chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + Some(1), + ), + 0xa9 => sync_with_chain_state( + &mut chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + Some(1), + ), + 0xaa => sync_with_chain_state( + &mut chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + Some(1), + ), // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 
- 0xab => sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None), - 0xac => sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None), - 0xad => sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None), + 0xab => sync_with_chain_state( + &mut chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + None, + ), + 0xac => sync_with_chain_state( + &mut chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + None, + ), + 0xad => sync_with_chain_state( + &mut chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + None, + ), 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on From fc8020eb85761cf4ceee4a49b069d9a01575bbee Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:04:55 +0100 Subject: [PATCH 05/24] chanmon_consistency: relax exit invariants to allow force-closed channels Replace the strict channel count assertions in test_return\! with upper bound checks, and drain broadcasters instead of asserting they are empty. Force-closing channels reduces the channel count and produces broadcast commitment transactions, so the old strict checks would fail. Add a closed_channels set to track which channels have been force-closed. The 0xff settlement check uses this to skip closed channels when verifying that each open channel can still send payments. The settlement loop now also drains broadcast transactions, confirms them on-chain, and syncs all nodes to the chain tip. This allows force-close related monitor events to be fully processed during settlement. With closed_channels initially empty, the relaxed checks are equivalent to the previous strict checks for all existing test scenarios. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 44 +++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 604f9827a4a..6642918c407 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1350,18 +1350,18 @@ pub fn do_test( let resolved_payments: RefCell<[HashMap>; 3]> = RefCell::new([new_hash_map(), new_hash_map(), new_hash_map()]); let claimed_payment_hashes: RefCell> = RefCell::new(HashSet::new()); + let closed_channels: RefCell> = RefCell::new(HashSet::new()); macro_rules! test_return { () => {{ - assert_eq!(nodes[0].list_channels().len(), 3); - assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); + assert!(nodes[0].list_channels().len() <= 3); + assert!(nodes[1].list_channels().len() <= 6); + assert!(nodes[2].list_channels().len() <= 3); - // All broadcasters should be empty (all broadcast transactions should be handled - // explicitly). - assert!(broadcast_a.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_b.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_c.txn_broadcasted.borrow().is_empty()); + // Drain broadcasters since force-closes produce commitment transactions. + broadcast_a.txn_broadcasted.borrow_mut().clear(); + broadcast_b.txn_broadcasted.borrow_mut().clear(); + broadcast_c.txn_broadcasted.borrow_mut().clear(); return; }}; @@ -2723,6 +2723,28 @@ pub fn do_test( complete_all_monitor_updates(&monitor_b, id); complete_all_monitor_updates(&monitor_c, id); } + // Drain any broadcast transactions (from force-closes) and + // confirm them so the monitors can process the spends. + let mut had_txs = false; + for tx in broadcast_a.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + had_txs = true; + } + for tx in broadcast_b.txn_broadcasted.borrow_mut().drain(..) 
{ + chain_state.confirm_tx(tx); + had_txs = true; + } + for tx in broadcast_c.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + had_txs = true; + } + if had_txs { + sync_with_chain_state(&chain_state, &nodes[0], &monitor_a, &mut node_height_a, None); + sync_with_chain_state(&chain_state, &nodes[1], &monitor_b, &mut node_height_b, None); + sync_with_chain_state(&chain_state, &nodes[2], &monitor_c, &mut node_height_c, None); + last_pass_no_updates = false; + continue; + } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; @@ -2800,12 +2822,18 @@ pub fn do_test( // Finally, make sure that at least one end of each channel can make a substantial payment for &chan_id in &chan_ab_ids { + if closed_channels.borrow().contains(&chan_id) { + continue; + } assert!( send(0, 1, chan_id, 10_000_000, &mut p_ctr) || send(1, 0, chan_id, 10_000_000, &mut p_ctr) ); } for &chan_id in &chan_bc_ids { + if closed_channels.borrow().contains(&chan_id) { + continue; + } assert!( send(1, 2, chan_id, 10_000_000, &mut p_ctr) || send(2, 1, chan_id, 10_000_000, &mut p_ctr) From 49cb7fbdf8f11f50a0fecfefb7cb72d90771be18 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:06:29 +0100 Subject: [PATCH 06/24] chanmon_consistency: add force-close and broadcast confirmation actions Add fuzz action bytes to exercise channel force-close scenarios: - 0xd0: Force-close first A-B channel from A's side - 0xd1: Force-close first B-C channel from B's side - 0xd2: Force-close first A-B channel from B's side - 0xd3: Force-close first B-C channel from C's side - 0xd8-0xda: Drain broadcaster and confirm all broadcast transactions for nodes A, B, and C respectively Each force-close action calls force_close_broadcasting_latest_txn and tracks the channel in closed_channels on success. 
The call may fail if the channel is already closed, which is handled gracefully. The broadcast confirmation actions pick up commitment transactions (or any other broadcast transactions) and add them to the chain state so that subsequent chain syncs make them visible to both the channel manager and chain monitor. The Event::ChannelClosed handler now records the channel_id in closed_channels. Event::DiscardFunding and Event::SpendableOutputs are also handled as no-ops since they may be produced after on-chain confirmation of force-close transactions. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 77 +++++++++++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 3 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 6642918c407..341f3652fa1 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -15,8 +15,8 @@ //! actions such as sending payments, handling events, or changing monitor update return values on //! a per-node basis. This should allow it to find any cases where the ordering of actions results //! in us getting out of sync with ourselves, and, assuming at least one of our recieve- or -//! send-side handling is correct, other peers. We consider it a failure if any action results in a -//! channel being force-closed. +//! send-side handling is correct, other peers. The fuzzer also exercises user-initiated +//! force-closes with on-chain commitment transaction confirmation. use bitcoin::amount::Amount; use bitcoin::constants::genesis_block; @@ -1969,7 +1969,11 @@ pub fn do_test( } }, events::Event::SpliceFailed { .. } => {}, - events::Event::ChannelClosed { .. } => {}, + events::Event::ChannelClosed { channel_id, .. } => { + closed_channels.borrow_mut().insert(channel_id); + }, + events::Event::DiscardFunding { .. } => {}, + events::Event::SpendableOutputs { .. } => {}, events::Event::BumpTransaction(..) 
=> {}, _ => { @@ -2598,6 +2602,73 @@ pub fn do_test( nodes[2].signer_unblocked(None); }, + // Force-close a channel and track it as closed. + 0xd0 => { + if nodes[0] + .force_close_broadcasting_latest_txn( + &chan_a_id, + &nodes[1].get_our_node_id(), + "]]]]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_a_id); + } + }, + 0xd1 => { + if nodes[1] + .force_close_broadcasting_latest_txn( + &chan_b_id, + &nodes[2].get_our_node_id(), + "]]]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_b_id); + } + }, + 0xd2 => { + if nodes[1] + .force_close_broadcasting_latest_txn( + &chan_a_id, + &nodes[0].get_our_node_id(), + "]]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_a_id); + } + }, + 0xd3 => { + if nodes[2] + .force_close_broadcasting_latest_txn( + &chan_b_id, + &nodes[1].get_our_node_id(), + "]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_b_id); + } + }, + + // Drain broadcasters and confirm all broadcast transactions. + 0xd8 => { + for tx in broadcast_a.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + } + }, + 0xd9 => { + for tx in broadcast_b.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + } + }, + 0xda => { + for tx in broadcast_c.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + } + }, + 0xf0 => { for id in &chan_ab_ids { complete_monitor_update(&monitor_a, id, &complete_first); From 9bc142d61c7256ac27237b8c951a7bf0ab70b150 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:19:17 +0100 Subject: [PATCH 07/24] chanmon_consistency: add large block height advancement actions Add ChainState::advance_height() to append empty blocks to the chain without any transactions. This enables the fuzzer to advance chain height past HTLC cltv_expiry timelocks, which is necessary for the OnchainTxHandler to release timelocked HTLC-timeout claim packages. 
Action bytes: - 0xdc: Advance chain by 50 empty blocks - 0xdd: Advance chain by 100 empty blocks - 0xde: Advance chain by 200 empty blocks These only extend the chain state. Nodes must still be synced to the new tip via existing 0xa8-0xad actions to observe the new height. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 341f3652fa1..165e7f5fcf6 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -217,6 +217,14 @@ impl ChainState { true } + fn advance_height(&mut self, num_blocks: u32) { + for _ in 0..num_blocks { + let prev_hash = self.blocks.last().unwrap().0.block_hash(); + let header = create_dummy_header(prev_hash, 42); + self.blocks.push((header, Vec::new())); + } + } + fn block_at(&self, height: u32) -> &(Header, Vec) { &self.blocks[height as usize] } @@ -2669,6 +2677,12 @@ pub fn do_test( } }, + // Advance chain height by many empty blocks so that HTLC timelocks can + // expire and the OnchainTxHandler releases timelocked claim packages. + 0xdc => chain_state.advance_height(50), + 0xdd => chain_state.advance_height(100), + 0xde => chain_state.advance_height(200), + 0xf0 => { for id in &chan_ab_ids { complete_monitor_update(&monitor_a, id, &complete_first); From 732299960c4bf4883c98158c97c920ab1d49335a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:20:19 +0100 Subject: [PATCH 08/24] chanmon_consistency: add SignHolderHtlcTransaction signer op After a force-close, the channel monitor signs HTLC-timeout and HTLC-success transactions for non-anchor channels. Without SignHolderHtlcTransaction in SUPPORTED_SIGNER_OPS, the signer would block these signatures when signer ops are disabled. 
Add a single enable action byte (0xcf) that re-enables this op on all three nodes at once, since per-node granularity is less important for post-close operations than for live channel signing. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 165e7f5fcf6..1cae56b04fb 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -508,11 +508,12 @@ impl SignerProvider for KeyProvider { } } -const SUPPORTED_SIGNER_OPS: [SignerOp; 4] = [ +const SUPPORTED_SIGNER_OPS: [SignerOp; 5] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, SignerOp::ReleaseCommitmentSecret, SignerOp::SignHolderCommitment, + SignerOp::SignHolderHtlcTransaction, ]; impl KeyProvider { @@ -2609,6 +2610,14 @@ pub fn do_test( keys_manager_c.enable_op_for_all_signers(SignerOp::SignHolderCommitment); nodes[2].signer_unblocked(None); }, + 0xcf => { + keys_manager_a.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + keys_manager_b.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + keys_manager_c.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + nodes[0].signer_unblocked(None); + nodes[1].signer_unblocked(None); + nodes[2].signer_unblocked(None); + }, // Force-close a channel and track it as closed. 0xd0 => { From c01ba687b68a07185129bfea022c91ef6d640dba Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:22:12 +0100 Subject: [PATCH 09/24] chanmon_consistency: resolve timelocked HTLCs during settlement When channels have been force-closed, the 0xff settlement needs to advance chain height past HTLC cltv_expiry timelocks so that the OnchainTxHandler releases timelocked HTLC-timeout claim packages for broadcast. Without this, HTLC-timeout transactions would never be generated and in-flight HTLCs would remain unresolved. 
The settlement advances in two phases of 250 blocks each: 1. Past cltv_expiry: triggers HTLC-timeout tx broadcasts, which the existing drain-and-confirm loop picks up and confirms on-chain. 2. Past the CSV delay (BREAKDOWN_TIMEOUT=144): allows SpendableOutputs events to fire for both the to_local output and resolved HTLC outputs. Each phase syncs all nodes to the new chain tip and runs process_all_events\!() to drain all resulting messages, events, broadcasts, and monitor updates. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 55 +++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1cae56b04fb..f5311939b4e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2890,6 +2890,61 @@ pub fn do_test( } process_all_events!(); + // If any channels were force-closed, advance chain height past HTLC + // timelocks so HTLC-timeout transactions can be broadcast, confirmed, + // and fully resolved. 
We advance in two phases: + // 1) Past cltv_expiry so HTLC-timeout txs are released + // 2) Past the CSV delay so SpendableOutputs events fire + if !closed_channels.borrow().is_empty() { + chain_state.advance_height(250); + sync_with_chain_state( + &chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + None, + ); + process_all_events!(); + + chain_state.advance_height(250); + sync_with_chain_state( + &chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + None, + ); + process_all_events!(); + } + // Verify no payments are stuck - all should have resolved for (idx, pending) in pending_payments.borrow().iter().enumerate() { assert!( From 7eba3598e536e943c381b67a3eff62bceaa3ae7a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 09:23:44 +0100 Subject: [PATCH 10/24] chanmon_consistency: verify claimable balances after settlement After the 0xff settlement completes with force-closed channels, check get_claimable_balances() on all chain monitors. Assert that no ClaimableOnChannelClose balances remain, since those indicate the monitor still considers a channel open when it should be closed. This catches state machine bugs where the force-close state transition is not properly reflected in balance tracking. Other balance types (ClaimableAwaitingConfirmations, MaybeTimeoutClaimableHTLC, etc.) are logged but not asserted empty, since anchor channel HTLC resolution is not yet fully handled (the BumpTransaction events are currently dropped). 
The check passes open channels via the ignored_channels parameter so that only closed channel balances are examined. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 39 ++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f5311939b4e..0cb72d95191 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -41,7 +41,7 @@ use lightning::chain; use lightning::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; -use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; +use lightning::chain::channelmonitor::{Balance, ChannelMonitor, MonitorEvent}; use lightning::chain::transaction::OutPoint; use lightning::chain::{ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch, @@ -2989,6 +2989,43 @@ pub fn do_test( ); } + // After settlement, verify that closed channels have no + // ClaimableOnChannelClose balances (which would indicate the + // monitor still thinks the channel is open). + if !closed_channels.borrow().is_empty() { + let open_channels = nodes[0] + .list_channels() + .iter() + .chain(nodes[1].list_channels().iter()) + .chain(nodes[2].list_channels().iter()) + .map(|c| c.clone()) + .collect::>(); + let open_refs: Vec<&_> = open_channels.iter().collect(); + for (label, monitor) in + [("A", &monitor_a), ("B", &monitor_b), ("C", &monitor_c)] + { + let balances = monitor.chain_monitor.get_claimable_balances(&open_refs); + for balance in &balances { + if matches!(balance, Balance::ClaimableOnChannelClose { .. 
}) { + panic!( + "Monitor {} has ClaimableOnChannelClose balance after settlement: {:?}", + label, balance + ); + } + } + if !balances.is_empty() { + out.locked_write( + format!( + "Monitor {} has {} remaining balances after settlement.\n", + label, + balances.len() + ) + .as_bytes(), + ); + } + } + } + last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire); From 44b72b6b6aa92dd24079d613dc79ee22e3e62a00 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 19 Feb 2026 11:02:39 +0100 Subject: [PATCH 11/24] chanmon_consistency: add tests verifying force-close fuzzer actions Add tests that exercise the new force-close fuzzer byte actions and verify they produce the expected behavior by asserting on specific log messages. Tests cover: basic force-close lifecycle, negative case without chain advancement, bidirectional force-close, middle node initiating, HTLC timeout delay, HTLC resolution after height advance, and a three-node scenario where A force-closes during fulfill propagation and learns the preimage on-chain from B's HTLC claim. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 412 ++++++++++++++++++++++++++++++++ 1 file changed, 412 insertions(+) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 0cb72d95191..c94156a0358 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -3077,3 +3077,415 @@ pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) { do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, false); do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, true); } + +#[cfg(test)] +mod tests { + use super::*; + use crate::utils::test_logger::StringBuffer; + + fn run_and_get_log(data: &[u8]) -> String { + let logger = StringBuffer::new(); + do_test(data, logger.clone(), false); + logger.into_string() + } + + #[test] + fn test_force_close_executes() { + let data: Vec = vec![ + 0x00, // mon style: all Completed + 0xd0, // A force-closes A-B channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: sync nodes, resolve all pending state + ]; + let log = run_and_get_log(&data); + + // Verify initiator force-closed + assert!(log.contains("Force-closing channel"), "Node should initiate force-close"); + // Verify counterparty received the error and also closed + assert!( + log.contains("counterparty force-closed"), + "Counterparty should close after receiving error message" + ); + // Verify commitment tx was confirmed on-chain + assert!( + log.contains("Channel closed by funding output spend"), + "Commitment transaction should be confirmed on-chain" + ); + // Verify spendable output matured + assert!( + log.contains("marked for spending has got enough confirmations"), + "Spendable outputs should mature after chain advancement" + ); + } + + #[test] + fn test_force_close_without_broadcast_confirm_no_resolution() { + // Force-close but skip 
draining broadcasts, advancing height, and + // settlement. Without chain progression, no on-chain resolution occurs. + let data: Vec = vec![ + 0x00, // mon style: all Completed + 0xd0, // A force-closes A-B channel + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + !log.contains("marked for spending has got enough confirmations"), + "Without chain advancement, outputs should not mature" + ); + } + + #[test] + fn test_force_close_both_directions() { + let data: Vec = vec![ + 0x00, // mon style: all Completed + 0xd0, // A force-closes A-B channel + 0xd1, // B force-closes B-C channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: sync nodes, resolve all pending state + ]; + let log = run_and_get_log(&data); + + // User-initiated force-closes log with the error message + let user_fc_count = log.matches("Force-closing channel, The error message").count(); + assert_eq!(user_fc_count, 2, "Should have 2 user-initiated force-closes"); + // Both counterparties should also detect the close + let counterparty_count = log.matches("counterparty force-closed").count(); + assert!( + counterparty_count >= 2, + "Both counterparties should close, got {}", + counterparty_count + ); + } + + #[test] + fn test_force_close_middle_node_initiates() { + let data: Vec = vec![ + 0x00, // mon style: all Completed + 0xd2, // B force-closes A-B channel (error msg "]]]]]]") + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: sync nodes, resolve all pending state + ]; + let log = run_and_get_log(&data); + + // B initiated with its specific error message + assert!( + log.contains("]]]]]]\""), + "Middle node B should force-close with its specific error message" + ); + // A should detect counterparty close + assert!( + log.contains("counterparty force-closed 
with message: ]]]]]]"), + "Node A should detect counterparty force-close from B" + ); + } + + #[test] + fn test_inflight_htlc_force_close_needs_height() { + // Send payment A->B (0x30), process messages to get HTLC committed (0x10, 0x18), + // process events (0x16, 0x1e), then force-close without advancing height. + // The HTLC timeout package should be delayed waiting for its timelock. + let data: Vec = vec![ + 0x00, // mon style + 0x30, // A sends payment to B on chan_a + 0x10, // process all msgs on node 0 (A) + 0x18, // process all msgs on node 1 (B) + 0x10, // process all msgs on node 0 (A) again for revoke_and_ack + 0x16, // process events on node 0 + 0x1e, // process events on node 1 + 0xd0, // A force-closes with B + 0xd8, 0xd9, 0xda, // drain all broadcasts and confirm + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + // HTLC timeout should be delayed because chain height hasn't advanced past CLTV + assert!( + log.contains("Delaying claim of package until its timelock"), + "HTLC claim should be delayed waiting for timelock" + ); + } + + #[test] + fn test_inflight_htlc_resolved_after_height_advance() { + // Same as above but advance height past the HTLC timeout. The HTLC timeout + // tx should get broadcast after the height passes the timelock. 
+ let data: Vec = vec![ + 0x00, // mon style + 0x30, // A sends payment to B on chan_a + 0x10, // process all msgs on node 0 (A) + 0x18, // process all msgs on node 1 (B) + 0x10, // process all msgs on node 0 (A) again for revoke_and_ack + 0x16, // process events on node 0 + 0x1e, // process events on node 1 + 0xd0, // A force-closes with B + 0xd8, // drain and confirm A's broadcasts (commitment tx) + 0xde, 0xde, // advance 200+200 blocks past CLTV + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + // After advancing past the timelock, the HTLC timeout tx should broadcast + assert!( + log.contains("Broadcasting onchain"), + "HTLC timeout transaction should be broadcast after height advance" + ); + } + + #[test] + fn test_three_node_force_close_during_fulfill_propagation() { + // A->B->C payment. C claims, fulfill propagates back to B, then A-B + // is force-closed before B forwards the fulfill to A. B has the + // preimage and claims the HTLC on-chain. A learns the preimage from + // B's on-chain HTLC-success transaction. Settlement (0xff) handles + // syncing all nodes to the chain, enabling signer ops, and + // resolving all pending state. + let data: Vec = vec![ + 0x00, // mon style: all Completed + 0x3c, // send_hop A->B->C, 1_000_000 msat + // Commit HTLC on A-B channel: + 0x11, // Process A: deliver A's update_add+CS to B + 0x19, // Process B: deliver B's RAA+CS to A + 0x11, // Process A: deliver A's RAA to B. A-B HTLC committed. + // Forward HTLC from B to C: + 0x1f, // Process events on B: forward HTLC to C + 0x19, // Process B: deliver B's update_add+CS to C + 0x21, // Process C: deliver C's RAA+CS to B + 0x19, // Process B: deliver B's RAA to C. B-C HTLC committed. 
+ // C claims the payment (two rounds: first decodes HTLC, second claims): + 0x27, // Process events on C: decode HTLC, generate PaymentClaimable + 0x27, // Process events on C: handle PaymentClaimable, call claim_funds + // Deliver C's fulfill to B: + 0x21, // Process C: deliver C's update_fulfill+CS to B. B learns preimage. + // DO NOT process B's messages: B's fulfill hasn't reached A yet. + // A force-closes while HTLC is still committed on A-B: + 0xd0, // A force-closes A-B + // Settle everything: syncs chain, enables signing, resolves on-chain. + 0xff, + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "A should force-close"); + // C should claim and send fulfill to B + assert!( + log.contains("Delivering update_fulfill_htlc from node 2 to node 1"), + "C should deliver fulfill to B" + ); + // B should broadcast HTLC-success claim using the preimage on-chain + assert!( + log.contains("Broadcasting onchain HTLC claim tx (1 preimage"), + "B should broadcast HTLC preimage claim on-chain" + ); + // A should learn the preimage from B's on-chain claim + assert!( + log.contains("resolves outbound HTLC") && log.contains("with preimage"), + "A should learn preimage from B's on-chain HTLC claim" + ); + // Payment should ultimately succeed + assert!( + log.contains("Handling event PaymentSent"), + "Payment should succeed after on-chain resolution" + ); + } + + // Async monitor variants: same scenarios but with all monitors returning + // InProgress (0x07) instead of Completed (0x00). This exercises the + // async monitor update path where updates are queued and completed later. 
+ + #[test] + fn test_force_close_executes_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0xd0, // A force-closes A-B channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: completes monitor updates, syncs nodes, resolves state + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Node should initiate force-close"); + assert!( + log.contains("counterparty force-closed"), + "Counterparty should close after receiving error message" + ); + assert!( + log.contains("Channel closed by funding output spend"), + "Commitment transaction should be confirmed on-chain" + ); + assert!( + log.contains("marked for spending has got enough confirmations"), + "Spendable outputs should mature after chain advancement" + ); + } + + #[test] + fn test_force_close_without_broadcast_confirm_no_resolution_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0xd0, // A force-closes A-B channel + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + !log.contains("marked for spending has got enough confirmations"), + "Without chain advancement, outputs should not mature" + ); + } + + #[test] + fn test_force_close_both_directions_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0xd0, // A force-closes A-B channel + 0xd1, // B force-closes B-C channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: completes monitor updates, syncs nodes, resolves state + ]; + let log = run_and_get_log(&data); + + let user_fc_count = log.matches("Force-closing channel, The error message").count(); + assert_eq!(user_fc_count, 2, "Should have 2 user-initiated force-closes"); + let counterparty_count = log.matches("counterparty 
force-closed").count(); + assert!( + counterparty_count >= 2, + "Both counterparties should close, got {}", + counterparty_count + ); + } + + #[test] + fn test_force_close_middle_node_initiates_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0xd2, // B force-closes A-B channel (error msg "]]]]]]") + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: completes monitor updates, syncs nodes, resolves state + ]; + let log = run_and_get_log(&data); + + assert!( + log.contains("]]]]]]\""), + "Middle node B should force-close with its specific error message" + ); + assert!( + log.contains("counterparty force-closed with message: ]]]]]]"), + "Node A should detect counterparty force-close from B" + ); + } + + #[test] + fn test_inflight_htlc_force_close_needs_height_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0x30, // A sends payment to B on chan_a + 0x08, // complete all A's monitor updates (A-B) + 0x10, // Process A: deliver A's update_add+CS to B + 0x09, // complete all B's monitor updates (A-B) + 0x18, // Process B: deliver B's RAA+CS to A + 0x08, // complete all A's monitor updates (A-B) + 0x10, // Process A: deliver A's RAA to B + 0x09, // complete all B's monitor updates (A-B) + 0x16, // process events on node 0 (A) + 0x1e, // process events on node 1 (B) + 0xd0, // A force-closes with B + 0xd8, 0xd9, 0xda, // drain all broadcasts and confirm + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + log.contains("Delaying claim of package until its timelock"), + "HTLC claim should be delayed waiting for timelock" + ); + } + + #[test] + fn test_inflight_htlc_resolved_after_height_advance_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0x30, // A sends payment to B on chan_a + 0x08, // complete all A's monitor updates 
(A-B) + 0x10, // Process A: deliver A's update_add+CS to B + 0x09, // complete all B's monitor updates (A-B) + 0x18, // Process B: deliver B's RAA+CS to A + 0x08, // complete all A's monitor updates (A-B) + 0x10, // Process A: deliver A's RAA to B + 0x09, // complete all B's monitor updates (A-B) + 0x16, // process events on node 0 (A) + 0x1e, // process events on node 1 (B) + 0xd0, // A force-closes with B + 0xd8, // drain and confirm A's broadcasts (commitment tx) + 0xde, 0xde, // advance 200+200 blocks past CLTV + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + log.contains("Broadcasting onchain"), + "HTLC timeout transaction should be broadcast after height advance" + ); + } + + #[test] + fn test_three_node_force_close_during_fulfill_propagation_async() { + let data: Vec = vec![ + 0x07, // mon style: all InProgress (async) + 0x3c, // send_hop A->B->C, 1_000_000 msat + // Commit HTLC on A-B channel (complete-all after each step): + 0x08, // complete all A's monitor updates (A-B) + 0x11, // Process A: deliver A's update_add+CS to B + 0x09, // complete all B's monitor updates (A-B) + 0x19, // Process B: deliver B's RAA+CS to A + 0x08, // complete all A's monitor updates (A-B) + 0x11, // Process A: deliver A's RAA to B. A-B HTLC committed. + 0x09, // complete all B's monitor updates (A-B) + // Forward HTLC from B to C: + 0x1f, // Process events on B: forward HTLC to C + 0x0a, // complete all B's monitor updates (B-C) + 0x19, // Process B: deliver B's update_add+CS to C + 0x0b, // complete all C's monitor updates (B-C) + 0x21, // Process C: deliver C's RAA+CS to B + 0x0a, // complete all B's monitor updates (B-C) + 0x19, // Process B: deliver B's RAA to C. B-C HTLC committed. 
+ 0x0b, // complete all C's monitor updates (B-C) + // C claims the payment (two rounds: first decodes HTLC, second claims): + 0x27, // Process events on C: decode HTLC, generate PaymentClaimable + 0x27, // Process events on C: handle PaymentClaimable, call claim_funds + 0x0b, // complete all C's monitor updates (B-C) + // Deliver C's fulfill to B: + 0x21, // Process C: deliver C's update_fulfill+CS to B. B learns preimage. + 0x0a, // complete all B's monitor updates (B-C) + 0x09, // complete all B's monitor updates (A-B) + // DO NOT process B's messages: B's fulfill hasn't reached A yet. + // A force-closes while HTLC is still committed on A-B: + 0xd0, // A force-closes A-B + // Settle everything: completes monitor updates, syncs chain, resolves. + 0xff, + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "A should force-close"); + assert!( + log.contains("Delivering update_fulfill_htlc from node 2 to node 1"), + "C should deliver fulfill to B" + ); + assert!( + log.contains("Broadcasting onchain HTLC claim tx (1 preimage"), + "B should broadcast HTLC preimage claim on-chain" + ); + assert!( + log.contains("resolves outbound HTLC") && log.contains("with preimage"), + "A should learn preimage from B's on-chain HTLC claim" + ); + assert!( + log.contains("Handling event PaymentSent"), + "Payment should succeed after on-chain resolution" + ); + } +} From a9c2734bb39d24c219c3e9d22287ede87e194aee Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 23 Feb 2026 12:37:03 +0100 Subject: [PATCH 12/24] chanmon_consistency: use correct persist mode during node reload After bc0b2f861 moved the ChannelMonitorUpdateStatus mode check from ChannelManager to ChainMonitor, the fuzzer's reload_node function triggered a false mode violation. It called watch_channel with the persister hardcoded to Completed, then switched to the node's actual mon_style (potentially InProgress). 
ChainMonitor's check_monitor_update_type saw both modes and panicked. Set the persister to the correct mon_style before calling watch_channel during reload, so ChainMonitor sees a consistent persist mode from the start. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index c94156a0358..a3e69663572 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1035,13 +1035,14 @@ pub fn do_test( let manager = <(BlockHash, ChanMan)>::read(&mut &ser[..], read_args).expect("Failed to read manager"); let res = (manager.1, chain_monitor.clone()); + let expected_status = *mon_style[node_id as usize].borrow(); + *chain_monitor.persister.update_ret.lock().unwrap() = expected_status.clone(); for (channel_id, mon) in monitors.drain() { assert_eq!( chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) + Ok(expected_status.clone()) ); } - *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); res }; From a24f086b704b623184889c3633fd447acb0c986c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 23 Feb 2026 14:57:56 +0100 Subject: [PATCH 13/24] chanmon_consistency: mirror pending monitor update in reload_node In reload_node, watch_channel is called on the real ChainMonitor directly (bypassing TestChainMonitor) because the wrapper's latest_monitors is pre-populated during reload. When persistence returns InProgress, the real ChainMonitor tracks the initial update_id as pending, but TestChainMonitor was unaware of it. This meant complete_all_monitor_updates could never drain and complete the update, so MonitorEvent::Completed was never generated and the reloaded node's channels got permanently stuck. 
Fix this by mirroring the pending update into TestChainMonitor.latest_monitors after watch_channel returns InProgress. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index a3e69663572..3b3b410ad0d 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1038,10 +1038,21 @@ pub fn do_test( let expected_status = *mon_style[node_id as usize].borrow(); *chain_monitor.persister.update_ret.lock().unwrap() = expected_status.clone(); for (channel_id, mon) in monitors.drain() { + let monitor_id = mon.get_latest_update_id(); assert_eq!( chain_monitor.chain_monitor.watch_channel(channel_id, mon), Ok(expected_status.clone()) ); + // When persistence returns InProgress, the real ChainMonitor tracks + // the initial update_id as pending. We must mirror this in the + // TestChainMonitor's latest_monitors so that + // complete_all_monitor_updates can drain and complete it later. + if expected_status == chain::ChannelMonitorUpdateStatus::InProgress { + let mut map = chain_monitor.latest_monitors.lock().unwrap(); + if let Some(state) = map.get_mut(&channel_id) { + state.pending_monitors.push((monitor_id, state.persisted_monitor.clone())); + } + } } res }; From 1bf995a96a09ee41dfb9f60b6f9c473ca7ef233a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 23 Feb 2026 15:45:48 +0100 Subject: [PATCH 14/24] chanmon_consistency: skip channels without SCID in MPP sends send_mpp_payment and send_mpp_hop_payment used list_channels() and unwrapped short_channel_id, which panics for channels that have been force-closed or are not yet fully open. Filter out channels without an SCID and send over the remaining ones, recomputing path count and amounts accordingly. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 54 ++++++++++++++++----------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 3b3b410ad0d..2d287167678 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -713,24 +713,25 @@ fn send_mpp_payment( source: &ChanMan, dest: &ChanMan, dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let num_paths = dest_chan_ids.len(); + let mut paths = Vec::new(); + + let dest_chans = dest.list_channels(); + let dest_scids: Vec<_> = dest_chan_ids + .iter() + .filter_map(|chan_id| { + dest_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + }) + .collect(); + let num_paths = dest_scids.len(); if num_paths == 0 { return false; } - let amt_per_path = amt / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - - let dest_chans = dest.list_channels(); - let dest_scids = dest_chan_ids.iter().map(|chan_id| { - dest_chans - .iter() - .find(|chan| chan.channel_id == *chan_id) - .and_then(|chan| chan.short_channel_id) - .unwrap() - }); - for (i, dest_scid) in dest_scids.enumerate() { + for (i, dest_scid) in dest_scids.into_iter().enumerate() { let path_amt = if i == num_paths - 1 { amt - amt_per_path * (num_paths as u64 - 1) } else { @@ -772,41 +773,38 @@ fn send_mpp_hop_payment( dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - // Create paths by pairing middle_scids with dest_scids - let num_paths = middle_chan_ids.len().max(dest_chan_ids.len()); - if num_paths == 0 { - return false; - } - - let first_hop_fee = 50_000; - let amt_per_path = amt / num_paths as u64; - let fee_per_path = first_hop_fee / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - 
let middle_chans = middle.list_channels(); let middle_scids: Vec<_> = middle_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { middle_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() }) .collect(); let dest_chans = dest.list_channels(); let dest_scids: Vec<_> = dest_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { dest_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() }) .collect(); + let num_paths = middle_scids.len().max(dest_scids.len()); + if middle_scids.is_empty() || dest_scids.is_empty() { + return false; + } + + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + for i in 0..num_paths { let middle_scid = middle_scids[i % middle_scids.len()]; let dest_scid = dest_scids[i % dest_scids.len()]; From baa25405452590d375a4c057b0145ec5289f2dfc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 23 Feb 2026 16:49:45 +0100 Subject: [PATCH 15/24] chanmon_consistency: handle BumpTransaction events for anchor channels For anchor channels, the ChannelMonitor does not broadcast commitment or HTLC-timeout transactions directly. Instead, it emits BumpTransaction events via ChainMonitor::process_pending_events(), which are separate from the ChannelManager events that process_events\! handles. Add chain monitor event processing to the process_all_events\! macro so that ChannelClose events broadcast the commitment tx and HTLCResolution events construct HTLC-timeout transactions via BumpTransactionEventHandler. For ChannelClose, the commitment tx is broadcast directly rather than going through the full anchor-bumping flow, since fuzz crypto produces non-standard signature sizes that trigger weight estimation assertions in the bump handler. 
Also change the force-close settlement loop from two fixed height advances to four iterations of advance+process_all_events, giving enough rounds for commitment tx confirmation, HTLC-timeout broadcast and confirmation, second-stage tx resolution, and final cleanup. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 133 ++++++++++++++++++++------------ 1 file changed, 82 insertions(+), 51 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 2d287167678..e4bc6da800e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -46,7 +46,7 @@ use lightning::chain::transaction::OutPoint; use lightning::chain::{ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch, }; -use lightning::events; +use lightning::events::{self, EventsProvider}; use lightning::ln::channel::{ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, }; @@ -84,6 +84,8 @@ use lightning::util::test_channel_signer::{EnforcementState, SignerOp, TestChann use lightning::util::test_utils::TestWalletSource; use lightning::util::wallet_utils::{WalletSourceSync, WalletSync}; +use lightning::events::bump_transaction::sync::BumpTransactionEventHandlerSync; + use lightning_invoice::RawBolt11Invoice; use crate::utils::test_logger::{self, Output}; @@ -2849,6 +2851,57 @@ pub fn do_test( last_pass_no_updates = false; continue; } + // Process chain monitor events (BumpTransaction, SpendableOutputs) + // which are separate from ChannelManager events. 
+ { + let monitors = [&monitor_a, &monitor_b, &monitor_c]; + let broadcasters: [&Arc; 3] = [&broadcast_a, &broadcast_b, &broadcast_c]; + let keys_managers = [&keys_manager_a, &keys_manager_b, &keys_manager_c]; + for (idx, monitor) in monitors.iter().enumerate() { + let wallet = WalletSync::new( + &wallets[idx], + Arc::clone(&loggers[idx]), + ); + let handler = BumpTransactionEventHandlerSync::new( + broadcasters[idx].as_ref(), + &wallet, + keys_managers[idx].as_ref(), + Arc::clone(&loggers[idx]), + ); + let broadcaster = broadcasters[idx]; + monitor.chain_monitor.process_pending_events( + &|event: events::Event| { + if let events::Event::BumpTransaction(ref bump) = event { + match bump { + events::bump_transaction::BumpTransactionEvent::ChannelClose { + commitment_tx, + channel_id, + counterparty_node_id, + .. + } => { + // Broadcast the commitment tx directly. + // Skip the full anchor-bumping flow + // since fuzz crypto causes weight + // assertion failures in the bump + // handler. + broadcaster.broadcast_transactions(&[( + commitment_tx, + lightning::chain::chaininterface::TransactionType::UnilateralClose { + counterparty_node_id: *counterparty_node_id, + channel_id: *channel_id, + }, + )]); + }, + events::bump_transaction::BumpTransactionEvent::HTLCResolution { .. } => { + handler.handle_event(bump); + }, + } + } + Ok(()) + }, + ); + } + } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; @@ -2902,57 +2955,35 @@ pub fn do_test( // If any channels were force-closed, advance chain height past HTLC // timelocks so HTLC-timeout transactions can be broadcast, confirmed, - // and fully resolved. We advance in two phases: - // 1) Past cltv_expiry so HTLC-timeout txs are released - // 2) Past the CSV delay so SpendableOutputs events fire + // and fully resolved. 
We iterate multiple times to cover: (1) confirm + // commitment txs, (2) confirm HTLC-timeout txs after bump handling, + // (3) confirm second-stage txs past CSV, (4) final cleanup. if !closed_channels.borrow().is_empty() { - chain_state.advance_height(250); - sync_with_chain_state( - &chain_state, - &nodes[0], - &monitor_a, - &mut node_height_a, - None, - ); - sync_with_chain_state( - &chain_state, - &nodes[1], - &monitor_b, - &mut node_height_b, - None, - ); - sync_with_chain_state( - &chain_state, - &nodes[2], - &monitor_c, - &mut node_height_c, - None, - ); - process_all_events!(); - - chain_state.advance_height(250); - sync_with_chain_state( - &chain_state, - &nodes[0], - &monitor_a, - &mut node_height_a, - None, - ); - sync_with_chain_state( - &chain_state, - &nodes[1], - &monitor_b, - &mut node_height_b, - None, - ); - sync_with_chain_state( - &chain_state, - &nodes[2], - &monitor_c, - &mut node_height_c, - None, - ); - process_all_events!(); + for _ in 0..4 { + chain_state.advance_height(250); + sync_with_chain_state( + &chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + None, + ); + process_all_events!(); + } } // Verify no payments are stuck - all should have resolved From 053978af8d76f5f201d034af34272fd04c6b77aa Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 23 Feb 2026 20:40:45 +0100 Subject: [PATCH 16/24] chanmon_consistency: fix bogus channel_reestablish infinite loop Skip delivery of bogus channel_reestablish messages (those with both commitment numbers at 0) in the process_msg_events\! macro. These are generated by the lnd workaround in handle_channel_reestablish's Vacant branch. 
When both nodes have forgotten a channel, delivering these between LDK nodes creates an infinite ping-pong that hits the 100 iteration limit in process_all_events\!. All fuzzer nodes are LDK and will already force-close via the error message path, so skipping these is safe. Also batch the drain+confirm+sync loop so that fee-bump re-broadcasts are confirmed before proceeding, using confirm_tx's return value to skip already-confirmed transactions. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 48 ++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index e4bc6da800e..00d790a9798 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1665,6 +1665,17 @@ pub fn do_test( } }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + if msg.next_local_commitment_number == 0 + && msg.next_remote_commitment_number == 0 + { + // Skip bogus reestablish (lnd workaround). All fuzzer + // nodes are LDK and will already force-close via the + // error message path. Delivering these between LDK + // nodes creates an infinite ping-pong since both sides + // respond with another bogus reestablish for the + // unknown channel. 
+ continue; + } for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { out.locked_write(format!("Delivering channel_reestablish from node {} to node {}.\n", $node, idx).as_bytes()); @@ -2818,7 +2829,7 @@ pub fn do_test( let mut last_pass_no_updates = false; for i in 0..std::usize::MAX { if i == 100 { - panic!("It may take may iterations to settle the state, but it should not take forever"); + panic!("It may take many iterations to settle the state, but it should not take forever"); } // Next, make sure no monitor updates are pending for id in &chan_ab_ids { @@ -2831,23 +2842,30 @@ pub fn do_test( } // Drain any broadcast transactions (from force-closes) and // confirm them so the monitors can process the spends. - let mut had_txs = false; - for tx in broadcast_a.txn_broadcasted.borrow_mut().drain(..) { - chain_state.confirm_tx(tx); - had_txs = true; - } - for tx in broadcast_b.txn_broadcasted.borrow_mut().drain(..) { - chain_state.confirm_tx(tx); - had_txs = true; - } - for tx in broadcast_c.txn_broadcasted.borrow_mut().drain(..) { - chain_state.confirm_tx(tx); - had_txs = true; - } - if had_txs { + // We loop here because syncing can trigger monitors to + // re-broadcast (fee bumps), which need to be confirmed + // before proceeding. + let mut had_new_txs = false; + loop { + let mut found = false; + for tx in broadcast_a.txn_broadcasted.borrow_mut().drain(..) { + found |= chain_state.confirm_tx(tx); + } + for tx in broadcast_b.txn_broadcasted.borrow_mut().drain(..) { + found |= chain_state.confirm_tx(tx); + } + for tx in broadcast_c.txn_broadcasted.borrow_mut().drain(..) 
{ + found |= chain_state.confirm_tx(tx); + } + if !found { + break; + } + had_new_txs = true; sync_with_chain_state(&chain_state, &nodes[0], &monitor_a, &mut node_height_a, None); sync_with_chain_state(&chain_state, &nodes[1], &monitor_b, &mut node_height_b, None); sync_with_chain_state(&chain_state, &nodes[2], &monitor_c, &mut node_height_c, None); + } + if had_new_txs { last_pass_no_updates = false; continue; } From 2f13a42622f3e9c906711f63e1dd23e6fce08a5d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 23 Feb 2026 20:50:33 +0100 Subject: [PATCH 17/24] chanmon_consistency: handle missing channel in splice_out The splice_out closure can be invoked after the channel has already been force-closed by a prior opcode. Return early instead of panicking on unwrap when the channel is no longer listed. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 00d790a9798..f362710fe7a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1452,12 +1452,15 @@ pub fn do_test( // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node // has double the balance required to send a payment upon a `0xff` byte. We do this to // ensure there's always liquidity available for a payment to succeed then. 
- let outbound_capacity_msat = node + let outbound_capacity_msat = match node .list_channels() .iter() .find(|chan| chan.channel_id == *channel_id) .map(|chan| chan.outbound_capacity_msat) - .unwrap(); + { + Some(v) => v, + None => return, + }; if outbound_capacity_msat < 20_000_000 { return; } From 593ca88f2660711812b4f310202d223eb9e9b31c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 24 Feb 2026 08:48:08 +0100 Subject: [PATCH 18/24] chanmon_consistency: track UTXOs to reject invalid transactions The fuzzer's ChainState would confirm transactions that spend the same input as an already-confirmed transaction, or spend outputs that were never created (due to fuzz txid hash collisions). Both are impossible on a real blockchain. This caused spurious assertion failures when e.g. a splice tx and an old holder commitment tx both got confirmed despite spending the same funding outpoint. Add UTXO tracking to ChainState: confirmed transaction outputs are added to a UTXO set, spent inputs are removed, and new transactions are rejected unless all their inputs reference existing UTXOs. This naturally prevents both double-spends and phantom output spends. Also give each node 50 wallet UTXOs (up from 1) so that anchor-channel HTLC claims don't exhaust the wallet during settlement. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 69 +++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 16 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f362710fe7a..56da9d71282 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -26,6 +26,7 @@ use bitcoin::opcodes; use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; use bitcoin::transaction::{Transaction, TxOut}; +use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::FeeRate; use bitcoin::block::Header; @@ -187,13 +188,22 @@ impl BroadcasterInterface for TestBroadcaster { struct ChainState { blocks: Vec<(Header, Vec<Transaction>)>, confirmed_txids: HashSet<Txid>, + /// Tracks unspent outputs created by confirmed transactions. Only + /// transactions that spend existing UTXOs can be confirmed, which + /// prevents fuzz hash collisions from creating phantom spends of + /// outputs that were never actually created. + utxos: HashSet<BitcoinOutPoint>, } impl ChainState { fn new() -> Self { let genesis_hash = genesis_block(Network::Bitcoin).block_hash(); let genesis_header = create_dummy_header(genesis_hash, 42); - Self { blocks: vec![(genesis_header, Vec::new())], confirmed_txids: HashSet::new() } + Self { + blocks: vec![(genesis_header, Vec::new())], + confirmed_txids: HashSet::new(), + utxos: HashSet::new(), + } } fn tip_height(&self) -> u32 { @@ -205,7 +215,28 @@ impl ChainState { if self.confirmed_txids.contains(&txid) { return false; } + // Validate that all inputs spend existing, unspent outputs. This + // rejects both double-spends and spends of outputs that were never + // created (e.g. due to fuzz txid hash collisions where a different + // transaction was confirmed under the same txid). 
+ let is_coinbase = tx.is_coinbase(); + if !is_coinbase { + for input in &tx.input { + if !self.utxos.contains(&input.previous_output) { + return false; + } + } + } self.confirmed_txids.insert(txid); + if !is_coinbase { + for input in &tx.input { + self.utxos.remove(&input.previous_output); + } + } + // Add this transaction's outputs as new UTXOs. + for idx in 0..tx.output.len() { + self.utxos.insert(BitcoinOutPoint { txid, vout: idx as u32 }); + } let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); @@ -1253,21 +1284,27 @@ pub fn do_test( let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); let wallets = vec![wallet_a, wallet_b, wallet_c]; - let coinbase_tx = bitcoin::Transaction { - version: bitcoin::transaction::Version::TWO, - lock_time: bitcoin::absolute::LockTime::ZERO, - input: vec![bitcoin::TxIn { ..Default::default() }], - output: wallets - .iter() - .map(|w| TxOut { - value: Amount::from_sat(100_000), - script_pubkey: w.get_change_script().unwrap(), - }) - .collect(), - }; - wallets.iter().enumerate().for_each(|(i, w)| { - w.add_utxo(coinbase_tx.clone(), i as u32); - }); + // Create wallet UTXOs for each node. Each anchor-channel HTLC claim + // needs a wallet input for fees, so we create enough UTXOs to cover + // multiple concurrent claims. 
+ let num_wallet_utxos = 50; + for (wallet_idx, w) in wallets.iter().enumerate() { + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version(wallet_idx as i32 + 100), + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: (0..num_wallet_utxos) + .map(|_| TxOut { + value: Amount::from_sat(100_000), + script_pubkey: w.get_change_script().unwrap(), + }) + .collect(), + }; + for vout in 0..num_wallet_utxos { + w.add_utxo(coinbase_tx.clone(), vout); + } + chain_state.confirm_tx(coinbase_tx); + } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); let mut last_htlc_clear_fee_a = 253; From accbb972e715b2d1b882ee48cfd7e92c545e863b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 24 Feb 2026 09:21:09 +0100 Subject: [PATCH 19/24] chanmon_consistency: handle claimed payments that lose timeout race When an HTLC timeout on-chain beats the receiver's claim, the sender gets ProbeFailed/PaymentFailed instead of PaymentSent. The receiver still saw PaymentClaimable and called claim_funds, so the payment hash was added to claimed_payment_hashes. The end-of-run assertion then fails because it expects every claimed payment to have a corresponding PaymentSent at the sender. Fix this by removing the payment hash from claimed_payment_hashes when we see HTLCHandlingFailed with a Receive failure type, which indicates the claim did not succeed. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 56da9d71282..cfac321524a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -26,8 +26,8 @@ use bitcoin::opcodes; use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; use bitcoin::transaction::{Transaction, TxOut}; -use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::FeeRate; +use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::block::Header; use bitcoin::hash_types::{BlockHash, Txid}; @@ -2007,6 +2007,16 @@ pub fn do_test( events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentForwarded { .. } if $node == 1 => {}, events::Event::ChannelReady { .. } => {}, + events::Event::HTLCHandlingFailed { + failure_type: events::HTLCHandlingFailureType::Receive { payment_hash }, + .. + } => { + // The receiver failed to handle this HTLC (e.g., HTLC + // timeout won the race against the claim). Remove it from + // claimed hashes so we don't assert that the sender must + // have received PaymentSent. + claimed_payment_hashes.borrow_mut().remove(&payment_hash); + }, events::Event::HTLCHandlingFailed { .. 
} => {}, events::Event::FundingTransactionReadyForSigning { From 0fd6b913591bd8c553867d77e68873b5052fe5cc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 24 Feb 2026 09:25:48 +0100 Subject: [PATCH 20/24] Skip weight lower-bound assertions when fuzzing Co-Authored-By: Claude Opus 4.6 --- lightning/src/events/bump_transaction/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 6a5e9948653..098b6f53ee6 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -485,6 +485,9 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. + #[cfg(not(fuzzing))] assert!(expected_signed_tx_weight * 99 / 100 <= signed_tx_weight); let expected_package_fee = Amount::from_sat(fee_for_weight( @@ -733,6 +736,9 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. + #[cfg(not(fuzzing))] assert!(expected_signed_tx_weight * 98 / 100 <= signed_tx_weight); let expected_signed_tx_fee = From 834f2e5f81dd37cb7113c79006f264d10456cb5a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 24 Feb 2026 10:30:48 +0100 Subject: [PATCH 21/24] chanmon_consistency: accept ChannelUnavailable on splice after force-close The splice_channel error handler only expected APIMisuseError with a splice-related message. When a channel has been force-closed, the splice call returns ChannelUnavailable instead, which caused the assertion to panic. Allow ChannelUnavailable as a valid error variant. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index cfac321524a..ddadbc45500 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1455,7 +1455,8 @@ pub fn do_test( }, Err(e) => { assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")) + || matches!(e, APIError::ChannelUnavailable { .. }), "{:?}", e ); From 9de08300a52bc2e268242f82567a4f5e288b9b0b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 24 Feb 2026 10:51:39 +0100 Subject: [PATCH 22/24] chanmon_consistency: resync stale monitors to chain tip during settlement After a node reload with an older monitor, the fuzzer's node_height tracking variable stays at the pre-reload height. During settlement, sync_with_chain_state starts from node_height, so it never re-delivers the blocks that the stale monitor missed. If a commitment tx was confirmed in one of those missed blocks, the monitor never learns about the funding spend, causing get_claimable_balances() to incorrectly report a ClaimableOnChannelClose balance. Fix this by resetting each node_height to the minimum of its current value and all its monitors' best_block heights at the start of settlement, ensuring sync_with_chain_state re-delivers missed blocks. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index ddadbc45500..36421367639 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2875,6 +2875,23 @@ pub fn do_test( nodes[1].signer_unblocked(None); nodes[2].signer_unblocked(None); + // After a node reload with an older monitor, node_height may be + // ahead of the monitor's best_block. Reset to the minimum so + // sync_with_chain_state re-delivers missed blocks during settlement. + for (node, monitor, node_height) in [ + (&nodes[0], &monitor_a, &mut node_height_a), + (&nodes[1], &monitor_b, &mut node_height_b), + (&nodes[2], &monitor_c, &mut node_height_c), + ] { + let mut min = std::cmp::min(*node_height, node.current_best_block().height); + for chan_id in monitor.chain_monitor.list_monitors() { + if let Ok(mon) = monitor.chain_monitor.get_monitor(chan_id) { + min = std::cmp::min(min, mon.current_best_block().height); + } + } + *node_height = min; + } + macro_rules! process_all_events { () => { { let mut last_pass_no_updates = false; From 720bce68f6ed50176719136149223e03df07577c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 24 Feb 2026 12:35:51 +0100 Subject: [PATCH 23/24] chanmon_consistency: sync stale monitors without lowering node height The settlement resync (3e576df9a) reset node_height to the minimum of the node and monitor heights so sync_with_chain_state would re-deliver missed blocks to stale monitors. However, this also fed those low-height blocks to the ChannelManager, which interpreted them as the funding transaction being un-confirmed (get_funding_tx_confirmations returns 0 when height < funding_tx_confirmation_height). 
This triggered spurious force-closes, leading to multiple force-close cycles with different commitment transactions and stale htlcs_resolved_on_chain entries that caused an assertion failure in get_htlc_balance. Fix by syncing stale monitors directly through their ChainMonitor methods for the missed block range, without touching node_height or the ChannelManager. Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 36 ++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 36421367639..5652575ade1 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2875,21 +2875,37 @@ pub fn do_test( nodes[1].signer_unblocked(None); nodes[2].signer_unblocked(None); - // After a node reload with an older monitor, node_height may be - // ahead of the monitor's best_block. Reset to the minimum so - // sync_with_chain_state re-delivers missed blocks during settlement. - for (node, monitor, node_height) in [ - (&nodes[0], &monitor_a, &mut node_height_a), - (&nodes[1], &monitor_b, &mut node_height_b), - (&nodes[2], &monitor_c, &mut node_height_c), + // After a node reload with an older monitor, the monitor may + // be behind node_height. Sync only the monitors (not the + // ChannelManager) for the missed blocks to avoid the + // ChannelManager interpreting lower heights as a reorg. 
+ for (monitor, node_height) in [ + (&monitor_a, &node_height_a), + (&monitor_b, &node_height_b), + (&monitor_c, &node_height_c), ] { - let mut min = std::cmp::min(*node_height, node.current_best_block().height); + let mut min_monitor_height = *node_height; for chan_id in monitor.chain_monitor.list_monitors() { if let Ok(mon) = monitor.chain_monitor.get_monitor(chan_id) { - min = std::cmp::min(min, mon.current_best_block().height); + min_monitor_height = std::cmp::min( + min_monitor_height, + mon.current_best_block().height, + ); + } + } + let mut h = min_monitor_height; + while h < *node_height { + h += 1; + let (header, txn) = chain_state.block_at(h); + let txdata: Vec<_> = + txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + monitor + .chain_monitor + .transactions_confirmed(header, &txdata, h); } + monitor.chain_monitor.best_block_updated(header, h); } - *node_height = min; } macro_rules! process_all_events { From 633c959ac7fe83b51ac6b823cdc6b661a7e952f9 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 26 Feb 2026 13:56:57 +0100 Subject: [PATCH 24/24] chanmon_consistency: tolerate drained splice tx in SplicePending handler During settlement, the drain loop removes all transactions from broadcasters unconditionally, but confirm_tx may reject a splice transaction if its inputs are already spent. When a SplicePending event then fires, the tx is neither in the broadcaster nor in confirmed_txids, causing a panic. Replace the .expect() with an if-let to gracefully skip splice transactions that were already drained. 
Co-Authored-By: Claude Opus 4.6 --- fuzz/src/chanmon_consistency.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 5652575ade1..5a3e9029f0c 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2043,12 +2043,16 @@ pub fn do_test( _ => &broadcast_c, }; let mut txs = broadcaster.txn_broadcasted.borrow_mut(); - let pos = txs + if let Some(pos) = txs .iter() .position(|tx| new_funding_txo.txid == tx.compute_txid()) - .expect("SplicePending but splice tx not found in broadcaster"); - let splice_tx = txs.remove(pos); - chain_state.confirm_tx(splice_tx); + { + let splice_tx = txs.remove(pos); + chain_state.confirm_tx(splice_tx); + } + // If not found, the settlement drain loop already + // removed it from the broadcaster but confirm_tx + // rejected it (e.g. inputs already spent). } }, events::Event::SpliceFailed { .. } => {},