diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 476362324ad..5a3e9029f0c 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -15,8 +15,8 @@ //! actions such as sending payments, handling events, or changing monitor update return values on //! a per-node basis. This should allow it to find any cases where the ordering of actions results //! in us getting out of sync with ourselves, and, assuming at least one of our recieve- or -//! send-side handling is correct, other peers. We consider it a failure if any action results in a -//! channel being force-closed. +//! send-side handling is correct, other peers. The fuzzer also exercises user-initiated +//! force-closes with on-chain commitment transaction confirmation. use bitcoin::amount::Amount; use bitcoin::constants::genesis_block; @@ -27,6 +27,7 @@ use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; use bitcoin::transaction::{Transaction, TxOut}; use bitcoin::FeeRate; +use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::block::Header; use bitcoin::hash_types::{BlockHash, Txid}; @@ -41,12 +42,12 @@ use lightning::chain; use lightning::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; -use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; +use lightning::chain::channelmonitor::{Balance, ChannelMonitor, MonitorEvent}; use lightning::chain::transaction::OutPoint; use lightning::chain::{ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch, }; -use lightning::events; +use lightning::events::{self, EventsProvider}; use lightning::ln::channel::{ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, }; @@ -84,6 +85,8 @@ use lightning::util::test_channel_signer::{EnforcementState, SignerOp, TestChann use lightning::util::test_utils::TestWalletSource; use lightning::util::wallet_utils::{WalletSourceSync, 
WalletSync}; +use lightning::events::bump_transaction::sync::BumpTransactionEventHandlerSync; + use lightning_invoice::RawBolt11Invoice; use crate::utils::test_logger::{self, Output}; @@ -185,13 +188,22 @@ impl BroadcasterInterface for TestBroadcaster { struct ChainState { blocks: Vec<(Header, Vec)>, confirmed_txids: HashSet, + /// Tracks unspent outputs created by confirmed transactions. Only + /// transactions that spend existing UTXOs can be confirmed, which + /// prevents fuzz hash collisions from creating phantom spends of + /// outputs that were never actually created. + utxos: HashSet, } impl ChainState { fn new() -> Self { let genesis_hash = genesis_block(Network::Bitcoin).block_hash(); let genesis_header = create_dummy_header(genesis_hash, 42); - Self { blocks: vec![(genesis_header, Vec::new())], confirmed_txids: HashSet::new() } + Self { + blocks: vec![(genesis_header, Vec::new())], + confirmed_txids: HashSet::new(), + utxos: HashSet::new(), + } } fn tip_height(&self) -> u32 { @@ -203,7 +215,28 @@ impl ChainState { if self.confirmed_txids.contains(&txid) { return false; } + // Validate that all inputs spend existing, unspent outputs. This + // rejects both double-spends and spends of outputs that were never + // created (e.g. due to fuzz txid hash collisions where a different + // transaction was confirmed under the same txid). + let is_coinbase = tx.is_coinbase(); + if !is_coinbase { + for input in &tx.input { + if !self.utxos.contains(&input.previous_output) { + return false; + } + } + } self.confirmed_txids.insert(txid); + if !is_coinbase { + for input in &tx.input { + self.utxos.remove(&input.previous_output); + } + } + // Add this transaction's outputs as new UTXOs. 
+ for idx in 0..tx.output.len() { + self.utxos.insert(BitcoinOutPoint { txid, vout: idx as u32 }); + } let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); @@ -217,6 +250,14 @@ impl ChainState { true } + fn advance_height(&mut self, num_blocks: u32) { + for _ in 0..num_blocks { + let prev_hash = self.blocks.last().unwrap().0.block_hash(); + let header = create_dummy_header(prev_hash, 42); + self.blocks.push((header, Vec::new())); + } + } + fn block_at(&self, height: u32) -> &(Header, Vec) { &self.blocks[height as usize] } @@ -500,12 +541,12 @@ impl SignerProvider for KeyProvider { } } -// Since this fuzzer is only concerned with live-channel operations, we don't need to worry about -// any signer operations that come after a force close. -const SUPPORTED_SIGNER_OPS: [SignerOp; 3] = [ +const SUPPORTED_SIGNER_OPS: [SignerOp; 5] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, SignerOp::ReleaseCommitmentSecret, + SignerOp::SignHolderCommitment, + SignerOp::SignHolderHtlcTransaction, ]; impl KeyProvider { @@ -705,24 +746,25 @@ fn send_mpp_payment( source: &ChanMan, dest: &ChanMan, dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let num_paths = dest_chan_ids.len(); + let mut paths = Vec::new(); + + let dest_chans = dest.list_channels(); + let dest_scids: Vec<_> = dest_chan_ids + .iter() + .filter_map(|chan_id| { + dest_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + }) + .collect(); + let num_paths = dest_scids.len(); if num_paths == 0 { return false; } - let amt_per_path = amt / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - let dest_chans = dest.list_channels(); - let dest_scids = dest_chan_ids.iter().map(|chan_id| { - dest_chans - .iter() - .find(|chan| chan.channel_id == *chan_id) - .and_then(|chan| chan.short_channel_id) - 
.unwrap() - }); - - for (i, dest_scid) in dest_scids.enumerate() { + for (i, dest_scid) in dest_scids.into_iter().enumerate() { let path_amt = if i == num_paths - 1 { amt - amt_per_path * (num_paths as u64 - 1) } else { @@ -764,41 +806,38 @@ fn send_mpp_hop_payment( dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - // Create paths by pairing middle_scids with dest_scids - let num_paths = middle_chan_ids.len().max(dest_chan_ids.len()); - if num_paths == 0 { - return false; - } - - let first_hop_fee = 50_000; - let amt_per_path = amt / num_paths as u64; - let fee_per_path = first_hop_fee / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - let middle_chans = middle.list_channels(); let middle_scids: Vec<_> = middle_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { middle_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() }) .collect(); let dest_chans = dest.list_channels(); let dest_scids: Vec<_> = dest_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { dest_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() }) .collect(); + let num_paths = middle_scids.len().max(dest_scids.len()); + if middle_scids.is_empty() || dest_scids.is_empty() { + return false; + } + + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + for i in 0..num_paths { let middle_scid = middle_scids[i % middle_scids.len()]; let dest_scid = dest_scids[i % dest_scids.len()]; @@ -852,17 +891,6 @@ fn send_mpp_hop_payment( } } -#[inline] -fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { - // Since sending/receiving messages may be delayed, `timer_tick_occurred` may cause a node to - // disconnect their counterparty if 
they're expecting a timely response. - assert!(matches!( - action, - msgs::ErrorAction::DisconnectPeerWithWarning { msg } - if msg.data.contains("Disconnecting due to timeout awaiting response") - )); -} - #[inline] pub fn do_test( data: &[u8], underlying_out: Out, anchors: bool, @@ -1038,13 +1066,25 @@ pub fn do_test( let manager = <(BlockHash, ChanMan)>::read(&mut &ser[..], read_args).expect("Failed to read manager"); let res = (manager.1, chain_monitor.clone()); + let expected_status = *mon_style[node_id as usize].borrow(); + *chain_monitor.persister.update_ret.lock().unwrap() = expected_status.clone(); for (channel_id, mon) in monitors.drain() { + let monitor_id = mon.get_latest_update_id(); assert_eq!( chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) + Ok(expected_status.clone()) ); + // When persistence returns InProgress, the real ChainMonitor tracks + // the initial update_id as pending. We must mirror this in the + // TestChainMonitor's latest_monitors so that + // complete_all_monitor_updates can drain and complete it later. 
+ if expected_status == chain::ChannelMonitorUpdateStatus::InProgress { + let mut map = chain_monitor.latest_monitors.lock().unwrap(); + if let Some(state) = map.get_mut(&channel_id) { + state.pending_monitors.push((monitor_id, state.persisted_monitor.clone())); + } + } } - *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); res }; @@ -1244,21 +1284,27 @@ pub fn do_test( let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); let wallets = vec![wallet_a, wallet_b, wallet_c]; - let coinbase_tx = bitcoin::Transaction { - version: bitcoin::transaction::Version::TWO, - lock_time: bitcoin::absolute::LockTime::ZERO, - input: vec![bitcoin::TxIn { ..Default::default() }], - output: wallets - .iter() - .map(|w| TxOut { - value: Amount::from_sat(100_000), - script_pubkey: w.get_change_script().unwrap(), - }) - .collect(), - }; - wallets.iter().enumerate().for_each(|(i, w)| { - w.add_utxo(coinbase_tx.clone(), i as u32); - }); + // Create wallet UTXOs for each node. Each anchor-channel HTLC claim + // needs a wallet input for fees, so we create enough UTXOs to cover + // multiple concurrent claims. 
+ let num_wallet_utxos = 50; + for (wallet_idx, w) in wallets.iter().enumerate() { + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version(wallet_idx as i32 + 100), + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: (0..num_wallet_utxos) + .map(|_| TxOut { + value: Amount::from_sat(100_000), + script_pubkey: w.get_change_script().unwrap(), + }) + .collect(), + }; + for vout in 0..num_wallet_utxos { + w.add_utxo(coinbase_tx.clone(), vout); + } + chain_state.confirm_tx(coinbase_tx); + } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); let mut last_htlc_clear_fee_a = 253; @@ -1302,6 +1348,7 @@ pub fn do_test( let sync_with_chain_state = |chain_state: &ChainState, node: &ChannelManager<_, _, _, _, _, _, _, _, _>, + monitor: &TestChainMonitor, node_height: &mut u32, num_blocks: Option| { let target_height = if let Some(num_blocks) = num_blocks { @@ -1315,16 +1362,18 @@ pub fn do_test( let (header, txn) = chain_state.block_at(*node_height); let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); if !txdata.is_empty() { + monitor.chain_monitor.transactions_confirmed(header, &txdata, *node_height); node.transactions_confirmed(header, &txdata, *node_height); } + monitor.chain_monitor.best_block_updated(header, *node_height); node.best_block_updated(header, *node_height); } }; // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&mut chain_state, &nodes[0], &monitor_a, &mut node_height_a, None); + sync_with_chain_state(&mut chain_state, &nodes[1], &monitor_b, &mut node_height_b, None); + sync_with_chain_state(&mut chain_state, &nodes[2], &monitor_c, &mut node_height_c, None); lock_fundings!(nodes); @@ -1359,18 +1408,18 @@ pub fn do_test( let resolved_payments: RefCell<[HashMap>; 3]> = RefCell::new([new_hash_map(), new_hash_map(), new_hash_map()]); let claimed_payment_hashes: RefCell> = RefCell::new(HashSet::new()); + let closed_channels: RefCell> = RefCell::new(HashSet::new()); macro_rules! test_return { () => {{ - assert_eq!(nodes[0].list_channels().len(), 3); - assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); + assert!(nodes[0].list_channels().len() <= 3); + assert!(nodes[1].list_channels().len() <= 6); + assert!(nodes[2].list_channels().len() <= 3); - // All broadcasters should be empty (all broadcast transactions should be handled - // explicitly). - assert!(broadcast_a.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_b.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_c.txn_broadcasted.borrow().is_empty()); + // Drain broadcasters since force-closes produce commitment transactions. + broadcast_a.txn_broadcasted.borrow_mut().clear(); + broadcast_b.txn_broadcasted.borrow_mut().clear(); + broadcast_c.txn_broadcasted.borrow_mut().clear(); return; }}; @@ -1406,7 +1455,8 @@ pub fn do_test( }, Err(e) => { assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")) + || matches!(e, APIError::ChannelUnavailable { .. 
}), "{:?}", e ); @@ -1440,12 +1490,15 @@ pub fn do_test( // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node // has double the balance required to send a payment upon a `0xff` byte. We do this to // ensure there's always liquidity available for a payment to succeed then. - let outbound_capacity_msat = node + let outbound_capacity_msat = match node .list_channels() .iter() .find(|chan| chan.channel_id == *channel_id) .map(|chan| chan.outbound_capacity_msat) - .unwrap(); + { + Some(v) => v, + None => return, + }; if outbound_capacity_msat < 20_000_000 { return; } @@ -1528,10 +1581,17 @@ pub fn do_test( *node_id == a_id }, MessageSendEvent::HandleError { ref action, ref node_id } => { - assert_action_timeout_awaiting_response(action); + match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains("Disconnecting due to timeout awaiting response") => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), + } if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, + MessageSendEvent::BroadcastChannelUpdate { .. } => continue, _ => panic!("Unhandled message event {:?}", event), }; if push_a { ba_events.push(event); } else { bc_events.push(event); } @@ -1646,6 +1706,17 @@ pub fn do_test( } }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + if msg.next_local_commitment_number == 0 + && msg.next_remote_commitment_number == 0 + { + // Skip bogus reestablish (lnd workaround). All fuzzer + // nodes are LDK and will already force-close via the + // error message path. Delivering these between LDK + // nodes creates an infinite ping-pong since both sides + // respond with another bogus reestablish for the + // unknown channel. 
+ continue; + } for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { out.locked_write(format!("Delivering channel_reestablish from node {} to node {}.\n", $node, idx).as_bytes()); @@ -1741,8 +1812,20 @@ pub fn do_test( } } }, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); + MessageSendEvent::HandleError { ref action, ref node_id } => { + match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains("Disconnecting due to timeout awaiting response") => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { ref msg } => { + for dest in nodes.iter() { + if dest.get_our_node_id() == *node_id { + dest.handle_error(nodes[$node].get_our_node_id(), msg); + } + } + }, + _ => panic!("Unexpected HandleError action {:?}", action), + } }, MessageSendEvent::SendChannelReady { .. } => { // Can be generated as a reestablish response @@ -1798,8 +1881,14 @@ pub fn do_test( MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); + MessageSendEvent::HandleError { ref action, .. } => match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains( + "Disconnecting due to timeout awaiting response", + ) => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), }, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { @@ -1826,8 +1915,14 @@ pub fn do_test( MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. 
} => { - assert_action_timeout_awaiting_response(action); + MessageSendEvent::HandleError { ref action, .. } => match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains( + "Disconnecting due to timeout awaiting response", + ) => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), }, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { @@ -1913,6 +2008,16 @@ pub fn do_test( events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentForwarded { .. } if $node == 1 => {}, events::Event::ChannelReady { .. } => {}, + events::Event::HTLCHandlingFailed { + failure_type: events::HTLCHandlingFailureType::Receive { payment_hash }, + .. + } => { + // The receiver failed to handle this HTLC (e.g., HTLC + // timeout won the race against the claim). Remove it from + // claimed hashes so we don't assert that the sender must + // have received PaymentSent. + claimed_payment_hashes.borrow_mut().remove(&payment_hash); + }, events::Event::HTLCHandlingFailed { .. } => {}, events::Event::FundingTransactionReadyForSigning { @@ -1931,18 +2036,32 @@ pub fn do_test( .unwrap(); }, events::Event::SplicePending { new_funding_txo, .. 
} => { - let broadcaster = match $node { - 0 => &broadcast_a, - 1 => &broadcast_b, - _ => &broadcast_c, - }; - let mut txs = broadcaster.txn_broadcasted.borrow_mut(); - assert!(txs.len() >= 1); - let splice_tx = txs.remove(0); - assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); - chain_state.confirm_tx(splice_tx); + if !chain_state.confirmed_txids.contains(&new_funding_txo.txid) { + let broadcaster = match $node { + 0 => &broadcast_a, + 1 => &broadcast_b, + _ => &broadcast_c, + }; + let mut txs = broadcaster.txn_broadcasted.borrow_mut(); + if let Some(pos) = txs + .iter() + .position(|tx| new_funding_txo.txid == tx.compute_txid()) + { + let splice_tx = txs.remove(pos); + chain_state.confirm_tx(splice_tx); + } + // If not found, the settlement drain loop already + // removed it from the broadcaster but confirm_tx + // rejected it (e.g. inputs already spent). + } }, events::Event::SpliceFailed { .. } => {}, + events::Event::ChannelClosed { channel_id, .. } => { + closed_channels.borrow_mut().insert(channel_id); + }, + events::Event::DiscardFunding { .. } => {}, + events::Event::SpendableOutputs { .. } => {}, + events::Event::BumpTransaction(..) => {}, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { @@ -2394,13 +2513,49 @@ pub fn do_test( }, // Sync node by 1 block to cover confirmation of a transaction. 
- 0xa8 => sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, Some(1)), - 0xa9 => sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, Some(1)), - 0xaa => sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, Some(1)), + 0xa8 => sync_with_chain_state( + &mut chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + Some(1), + ), + 0xa9 => sync_with_chain_state( + &mut chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + Some(1), + ), + 0xaa => sync_with_chain_state( + &mut chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + Some(1), + ), // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. - 0xab => sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None), - 0xac => sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None), - 0xad => sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None), + 0xab => sync_with_chain_state( + &mut chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + None, + ), + 0xac => sync_with_chain_state( + &mut chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + None, + ), + 0xad => sync_with_chain_state( + &mut chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + None, + ), 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on @@ -2521,6 +2676,99 @@ pub fn do_test( keys_manager_c.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); nodes[2].signer_unblocked(None); }, + 0xcc => { + keys_manager_a.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + nodes[0].signer_unblocked(None); + }, + 0xcd => { + keys_manager_b.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + nodes[1].signer_unblocked(None); + }, + 0xce => { + keys_manager_c.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + nodes[2].signer_unblocked(None); + }, + 0xcf => { + 
keys_manager_a.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + keys_manager_b.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + keys_manager_c.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + nodes[0].signer_unblocked(None); + nodes[1].signer_unblocked(None); + nodes[2].signer_unblocked(None); + }, + + // Force-close a channel and track it as closed. + 0xd0 => { + if nodes[0] + .force_close_broadcasting_latest_txn( + &chan_a_id, + &nodes[1].get_our_node_id(), + "]]]]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_a_id); + } + }, + 0xd1 => { + if nodes[1] + .force_close_broadcasting_latest_txn( + &chan_b_id, + &nodes[2].get_our_node_id(), + "]]]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_b_id); + } + }, + 0xd2 => { + if nodes[1] + .force_close_broadcasting_latest_txn( + &chan_a_id, + &nodes[0].get_our_node_id(), + "]]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_a_id); + } + }, + 0xd3 => { + if nodes[2] + .force_close_broadcasting_latest_txn( + &chan_b_id, + &nodes[1].get_our_node_id(), + "]]]]]".to_string(), + ) + .is_ok() + { + closed_channels.borrow_mut().insert(chan_b_id); + } + }, + + // Drain broadcasters and confirm all broadcast transactions. + 0xd8 => { + for tx in broadcast_a.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + } + }, + 0xd9 => { + for tx in broadcast_b.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + } + }, + 0xda => { + for tx in broadcast_c.txn_broadcasted.borrow_mut().drain(..) { + chain_state.confirm_tx(tx); + } + }, + + // Advance chain height by many empty blocks so that HTLC timelocks can + // expire and the OnchainTxHandler releases timelocked claim packages. 
+ 0xdc => chain_state.advance_height(50), + 0xdd => chain_state.advance_height(100), + 0xde => chain_state.advance_height(200), 0xf0 => { for id in &chan_ab_ids { @@ -2631,12 +2879,45 @@ pub fn do_test( nodes[1].signer_unblocked(None); nodes[2].signer_unblocked(None); + // After a node reload with an older monitor, the monitor may + // be behind node_height. Sync only the monitors (not the + // ChannelManager) for the missed blocks to avoid the + // ChannelManager interpreting lower heights as a reorg. + for (monitor, node_height) in [ + (&monitor_a, &node_height_a), + (&monitor_b, &node_height_b), + (&monitor_c, &node_height_c), + ] { + let mut min_monitor_height = *node_height; + for chan_id in monitor.chain_monitor.list_monitors() { + if let Ok(mon) = monitor.chain_monitor.get_monitor(chan_id) { + min_monitor_height = std::cmp::min( + min_monitor_height, + mon.current_best_block().height, + ); + } + } + let mut h = min_monitor_height; + while h < *node_height { + h += 1; + let (header, txn) = chain_state.block_at(h); + let txdata: Vec<_> = + txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + monitor + .chain_monitor + .transactions_confirmed(header, &txdata, h); + } + monitor.chain_monitor.best_block_updated(header, h); + } + } + macro_rules! process_all_events { () => { { let mut last_pass_no_updates = false; for i in 0..std::usize::MAX { if i == 100 { - panic!("It may take may iterations to settle the state, but it should not take forever"); + panic!("It may take many iterations to settle the state, but it should not take forever"); } // Next, make sure no monitor updates are pending for id in &chan_ab_ids { @@ -2647,6 +2928,86 @@ pub fn do_test( complete_all_monitor_updates(&monitor_b, id); complete_all_monitor_updates(&monitor_c, id); } + // Drain any broadcast transactions (from force-closes) and + // confirm them so the monitors can process the spends. 
+ // We loop here because syncing can trigger monitors to + // re-broadcast (fee bumps), which need to be confirmed + // before proceeding. + let mut had_new_txs = false; + loop { + let mut found = false; + for tx in broadcast_a.txn_broadcasted.borrow_mut().drain(..) { + found |= chain_state.confirm_tx(tx); + } + for tx in broadcast_b.txn_broadcasted.borrow_mut().drain(..) { + found |= chain_state.confirm_tx(tx); + } + for tx in broadcast_c.txn_broadcasted.borrow_mut().drain(..) { + found |= chain_state.confirm_tx(tx); + } + if !found { + break; + } + had_new_txs = true; + sync_with_chain_state(&chain_state, &nodes[0], &monitor_a, &mut node_height_a, None); + sync_with_chain_state(&chain_state, &nodes[1], &monitor_b, &mut node_height_b, None); + sync_with_chain_state(&chain_state, &nodes[2], &monitor_c, &mut node_height_c, None); + } + if had_new_txs { + last_pass_no_updates = false; + continue; + } + // Process chain monitor events (BumpTransaction, SpendableOutputs) + // which are separate from ChannelManager events. + { + let monitors = [&monitor_a, &monitor_b, &monitor_c]; + let broadcasters: [&Arc; 3] = [&broadcast_a, &broadcast_b, &broadcast_c]; + let keys_managers = [&keys_manager_a, &keys_manager_b, &keys_manager_c]; + for (idx, monitor) in monitors.iter().enumerate() { + let wallet = WalletSync::new( + &wallets[idx], + Arc::clone(&loggers[idx]), + ); + let handler = BumpTransactionEventHandlerSync::new( + broadcasters[idx].as_ref(), + &wallet, + keys_managers[idx].as_ref(), + Arc::clone(&loggers[idx]), + ); + let broadcaster = broadcasters[idx]; + monitor.chain_monitor.process_pending_events( + &|event: events::Event| { + if let events::Event::BumpTransaction(ref bump) = event { + match bump { + events::bump_transaction::BumpTransactionEvent::ChannelClose { + commitment_tx, + channel_id, + counterparty_node_id, + .. + } => { + // Broadcast the commitment tx directly. 
+ // Skip the full anchor-bumping flow + // since fuzz crypto causes weight + // assertion failures in the bump + // handler. + broadcaster.broadcast_transactions(&[( + commitment_tx, + lightning::chain::chaininterface::TransactionType::UnilateralClose { + counterparty_node_id: *counterparty_node_id, + channel_id: *channel_id, + }, + )]); + }, + events::bump_transaction::BumpTransactionEvent::HTLCResolution { .. } => { + handler.handle_event(bump); + }, + } + } + Ok(()) + }, + ); + } + } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; @@ -2698,6 +3059,39 @@ pub fn do_test( } process_all_events!(); + // If any channels were force-closed, advance chain height past HTLC + // timelocks so HTLC-timeout transactions can be broadcast, confirmed, + // and fully resolved. We iterate multiple times to cover: (1) confirm + // commitment txs, (2) confirm HTLC-timeout txs after bump handling, + // (3) confirm second-stage txs past CSV, (4) final cleanup. 
+ if !closed_channels.borrow().is_empty() { + for _ in 0..4 { + chain_state.advance_height(250); + sync_with_chain_state( + &chain_state, + &nodes[0], + &monitor_a, + &mut node_height_a, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[1], + &monitor_b, + &mut node_height_b, + None, + ); + sync_with_chain_state( + &chain_state, + &nodes[2], + &monitor_c, + &mut node_height_c, + None, + ); + process_all_events!(); + } + } + // Verify no payments are stuck - all should have resolved for (idx, pending) in pending_payments.borrow().iter().enumerate() { assert!( @@ -2724,18 +3118,61 @@ pub fn do_test( // Finally, make sure that at least one end of each channel can make a substantial payment for &chan_id in &chan_ab_ids { + if closed_channels.borrow().contains(&chan_id) { + continue; + } assert!( send(0, 1, chan_id, 10_000_000, &mut p_ctr) || send(1, 0, chan_id, 10_000_000, &mut p_ctr) ); } for &chan_id in &chan_bc_ids { + if closed_channels.borrow().contains(&chan_id) { + continue; + } assert!( send(1, 2, chan_id, 10_000_000, &mut p_ctr) || send(2, 1, chan_id, 10_000_000, &mut p_ctr) ); } + // After settlement, verify that closed channels have no + // ClaimableOnChannelClose balances (which would indicate the + // monitor still thinks the channel is open). + if !closed_channels.borrow().is_empty() { + let open_channels = nodes[0] + .list_channels() + .iter() + .chain(nodes[1].list_channels().iter()) + .chain(nodes[2].list_channels().iter()) + .map(|c| c.clone()) + .collect::>(); + let open_refs: Vec<&_> = open_channels.iter().collect(); + for (label, monitor) in + [("A", &monitor_a), ("B", &monitor_b), ("C", &monitor_c)] + { + let balances = monitor.chain_monitor.get_claimable_balances(&open_refs); + for balance in &balances { + if matches!(balance, Balance::ClaimableOnChannelClose { .. 
}) { + panic!( + "Monitor {} has ClaimableOnChannelClose balance after settlement: {:?}", + label, balance + ); + } + } + if !balances.is_empty() { + out.locked_write( + format!( + "Monitor {} has {} remaining balances after settlement.\n", + label, + balances.len() + ) + .as_bytes(), + ); + } + } + } + last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire); @@ -2787,3 +3224,415 @@ pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) { do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, false); do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}, true); } + +#[cfg(test)] +mod tests { + use super::*; + use crate::utils::test_logger::StringBuffer; + + fn run_and_get_log(data: &[u8]) -> String { + let logger = StringBuffer::new(); + do_test(data, logger.clone(), false); + logger.into_string() + } + + #[test] + fn test_force_close_executes() { + let data: Vec = vec![ + 0x00, // mon style: all Completed + 0xd0, // A force-closes A-B channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: sync nodes, resolve all pending state + ]; + let log = run_and_get_log(&data); + + // Verify initiator force-closed + assert!(log.contains("Force-closing channel"), "Node should initiate force-close"); + // Verify counterparty received the error and also closed + assert!( + log.contains("counterparty force-closed"), + "Counterparty should close after receiving error message" + ); + // Verify commitment tx was confirmed on-chain + assert!( + log.contains("Channel closed by funding output spend"), + "Commitment transaction should be confirmed on-chain" + ); + // Verify spendable output matured + assert!( + log.contains("marked for spending has got 
enough confirmations"), + "Spendable outputs should mature after chain advancement" + ); + } + + #[test] + fn test_force_close_without_broadcast_confirm_no_resolution() { + // Force-close but skip draining broadcasts, advancing height, and + // settlement. Without chain progression, no on-chain resolution occurs. + let data: Vec<u8> = vec![ + 0x00, // mon style: all Completed + 0xd0, // A force-closes A-B channel + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + !log.contains("marked for spending has got enough confirmations"), + "Without chain advancement, outputs should not mature" + ); + } + + #[test] + fn test_force_close_both_directions() { + let data: Vec<u8> = vec![ + 0x00, // mon style: all Completed + 0xd0, // A force-closes A-B channel + 0xd1, // B force-closes B-C channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: sync nodes, resolve all pending state + ]; + let log = run_and_get_log(&data); + + // User-initiated force-closes log with the error message + let user_fc_count = log.matches("Force-closing channel, The error message").count(); + assert_eq!(user_fc_count, 2, "Should have 2 user-initiated force-closes"); + // Both counterparties should also detect the close + let counterparty_count = log.matches("counterparty force-closed").count(); + assert!( + counterparty_count >= 2, + "Both counterparties should close, got {}", + counterparty_count + ); + } + + #[test] + fn test_force_close_middle_node_initiates() { + let data: Vec<u8> = vec![ + 0x00, // mon style: all Completed + 0xd2, // B force-closes A-B channel (error msg "]]]]]]") + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: sync nodes, resolve all pending state + ]; + let log = run_and_get_log(&data); + + // B initiated with its specific error message + assert!(
+ log.contains("]]]]]]\""), + "Middle node B should force-close with its specific error message" + ); + // A should detect counterparty close + assert!( + log.contains("counterparty force-closed with message: ]]]]]]"), + "Node A should detect counterparty force-close from B" + ); + } + + #[test] + fn test_inflight_htlc_force_close_needs_height() { + // Send payment A->B (0x30), process messages to get HTLC committed (0x10, 0x18), + // process events (0x16, 0x1e), then force-close without advancing height. + // The HTLC timeout package should be delayed waiting for its timelock. + let data: Vec<u8> = vec![ + 0x00, // mon style + 0x30, // A sends payment to B on chan_a + 0x10, // process all msgs on node 0 (A) + 0x18, // process all msgs on node 1 (B) + 0x10, // process all msgs on node 0 (A) again for revoke_and_ack + 0x16, // process events on node 0 + 0x1e, // process events on node 1 + 0xd0, // A force-closes with B + 0xd8, 0xd9, 0xda, // drain all broadcasts and confirm + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + // HTLC timeout should be delayed because chain height hasn't advanced past CLTV + assert!( + log.contains("Delaying claim of package until its timelock"), + "HTLC claim should be delayed waiting for timelock" + ); + } + + #[test] + fn test_inflight_htlc_resolved_after_height_advance() { + // Same as above but advance height past the HTLC timeout. The HTLC timeout + // tx should get broadcast after the height passes the timelock.
+ let data: Vec<u8> = vec![ + 0x00, // mon style + 0x30, // A sends payment to B on chan_a + 0x10, // process all msgs on node 0 (A) + 0x18, // process all msgs on node 1 (B) + 0x10, // process all msgs on node 0 (A) again for revoke_and_ack + 0x16, // process events on node 0 + 0x1e, // process events on node 1 + 0xd0, // A force-closes with B + 0xd8, // drain and confirm A's broadcasts (commitment tx) + 0xde, 0xde, // advance 200+200 blocks past CLTV + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + // After advancing past the timelock, the HTLC timeout tx should broadcast + assert!( + log.contains("Broadcasting onchain"), + "HTLC timeout transaction should be broadcast after height advance" + ); + } + + #[test] + fn test_three_node_force_close_during_fulfill_propagation() { + // A->B->C payment. C claims, fulfill propagates back to B, then A-B + // is force-closed before B forwards the fulfill to A. B has the + // preimage and claims the HTLC on-chain. A learns the preimage from + // B's on-chain HTLC-success transaction. Settlement (0xff) handles + // syncing all nodes to the chain, enabling signer ops, and + // resolving all pending state. + let data: Vec<u8> = vec![ + 0x00, // mon style: all Completed + 0x3c, // send_hop A->B->C, 1_000_000 msat + // Commit HTLC on A-B channel: + 0x11, // Process A: deliver A's update_add+CS to B + 0x19, // Process B: deliver B's RAA+CS to A + 0x11, // Process A: deliver A's RAA to B. A-B HTLC committed. + // Forward HTLC from B to C: + 0x1f, // Process events on B: forward HTLC to C + 0x19, // Process B: deliver B's update_add+CS to C + 0x21, // Process C: deliver C's RAA+CS to B + 0x19, // Process B: deliver B's RAA to C. B-C HTLC committed.
+ // C claims the payment (two rounds: first decodes HTLC, second claims): + 0x27, // Process events on C: decode HTLC, generate PaymentClaimable + 0x27, // Process events on C: handle PaymentClaimable, call claim_funds + // Deliver C's fulfill to B: + 0x21, // Process C: deliver C's update_fulfill+CS to B. B learns preimage. + // DO NOT process B's messages: B's fulfill hasn't reached A yet. + // A force-closes while HTLC is still committed on A-B: + 0xd0, // A force-closes A-B + // Settle everything: syncs chain, enables signing, resolves on-chain. + 0xff, + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "A should force-close"); + // C should claim and send fulfill to B + assert!( + log.contains("Delivering update_fulfill_htlc from node 2 to node 1"), + "C should deliver fulfill to B" + ); + // B should broadcast HTLC-success claim using the preimage on-chain + assert!( + log.contains("Broadcasting onchain HTLC claim tx (1 preimage"), + "B should broadcast HTLC preimage claim on-chain" + ); + // A should learn the preimage from B's on-chain claim + assert!( + log.contains("resolves outbound HTLC") && log.contains("with preimage"), + "A should learn preimage from B's on-chain HTLC claim" + ); + // Payment should ultimately succeed + assert!( + log.contains("Handling event PaymentSent"), + "Payment should succeed after on-chain resolution" + ); + } + + // Async monitor variants: same scenarios but with all monitors returning + // InProgress (0x07) instead of Completed (0x00). This exercises the + // async monitor update path where updates are queued and completed later. 
+ + #[test] + fn test_force_close_executes_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0xd0, // A force-closes A-B channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: completes monitor updates, syncs nodes, resolves state + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Node should initiate force-close"); + assert!( + log.contains("counterparty force-closed"), + "Counterparty should close after receiving error message" + ); + assert!( + log.contains("Channel closed by funding output spend"), + "Commitment transaction should be confirmed on-chain" + ); + assert!( + log.contains("marked for spending has got enough confirmations"), + "Spendable outputs should mature after chain advancement" + ); + } + + #[test] + fn test_force_close_without_broadcast_confirm_no_resolution_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0xd0, // A force-closes A-B channel + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + !log.contains("marked for spending has got enough confirmations"), + "Without chain advancement, outputs should not mature" + ); + } + + #[test] + fn test_force_close_both_directions_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0xd0, // A force-closes A-B channel + 0xd1, // B force-closes B-C channel + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: completes monitor updates, syncs nodes, resolves state + ]; + let log = run_and_get_log(&data); + + let user_fc_count = log.matches("Force-closing channel, The error message").count(); + assert_eq!(user_fc_count, 2, "Should have 2 user-initiated force-closes"); + let counterparty_count = log.matches("counterparty
force-closed").count(); + assert!( + counterparty_count >= 2, + "Both counterparties should close, got {}", + counterparty_count + ); + } + + #[test] + fn test_force_close_middle_node_initiates_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0xd2, // B force-closes A-B channel (error msg "]]]]]]") + 0xd8, 0xd9, 0xda, // drain and confirm broadcasts for A, B, C + 0xdc, 0xdc, 0xdc, // advance 50+50+50 blocks + 0xff, // settle: completes monitor updates, syncs nodes, resolves state + ]; + let log = run_and_get_log(&data); + + assert!( + log.contains("]]]]]]\""), + "Middle node B should force-close with its specific error message" + ); + assert!( + log.contains("counterparty force-closed with message: ]]]]]]"), + "Node A should detect counterparty force-close from B" + ); + } + + #[test] + fn test_inflight_htlc_force_close_needs_height_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0x30, // A sends payment to B on chan_a + 0x08, // complete all A's monitor updates (A-B) + 0x10, // Process A: deliver A's update_add+CS to B + 0x09, // complete all B's monitor updates (A-B) + 0x18, // Process B: deliver B's RAA+CS to A + 0x08, // complete all A's monitor updates (A-B) + 0x10, // Process A: deliver A's RAA to B + 0x09, // complete all B's monitor updates (A-B) + 0x16, // process events on node 0 (A) + 0x1e, // process events on node 1 (B) + 0xd0, // A force-closes with B + 0xd8, 0xd9, 0xda, // drain all broadcasts and confirm + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + log.contains("Delaying claim of package until its timelock"), + "HTLC claim should be delayed waiting for timelock" + ); + } + + #[test] + fn test_inflight_htlc_resolved_after_height_advance_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0x30, // A sends payment to B on chan_a + 0x08, // complete all A's monitor updates
(A-B) + 0x10, // Process A: deliver A's update_add+CS to B + 0x09, // complete all B's monitor updates (A-B) + 0x18, // Process B: deliver B's RAA+CS to A + 0x08, // complete all A's monitor updates (A-B) + 0x10, // Process A: deliver A's RAA to B + 0x09, // complete all B's monitor updates (A-B) + 0x16, // process events on node 0 (A) + 0x1e, // process events on node 1 (B) + 0xd0, // A force-closes with B + 0xd8, // drain and confirm A's broadcasts (commitment tx) + 0xde, 0xde, // advance 200+200 blocks past CLTV + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "Force-close should fire"); + assert!( + log.contains("Broadcasting onchain"), + "HTLC timeout transaction should be broadcast after height advance" + ); + } + + #[test] + fn test_three_node_force_close_during_fulfill_propagation_async() { + let data: Vec<u8> = vec![ + 0x07, // mon style: all InProgress (async) + 0x3c, // send_hop A->B->C, 1_000_000 msat + // Commit HTLC on A-B channel (complete-all after each step): + 0x08, // complete all A's monitor updates (A-B) + 0x11, // Process A: deliver A's update_add+CS to B + 0x09, // complete all B's monitor updates (A-B) + 0x19, // Process B: deliver B's RAA+CS to A + 0x08, // complete all A's monitor updates (A-B) + 0x11, // Process A: deliver A's RAA to B. A-B HTLC committed. + 0x09, // complete all B's monitor updates (A-B) + // Forward HTLC from B to C: + 0x1f, // Process events on B: forward HTLC to C + 0x0a, // complete all B's monitor updates (B-C) + 0x19, // Process B: deliver B's update_add+CS to C + 0x0b, // complete all C's monitor updates (B-C) + 0x21, // Process C: deliver C's RAA+CS to B + 0x0a, // complete all B's monitor updates (B-C) + 0x19, // Process B: deliver B's RAA to C. B-C HTLC committed.
+ 0x0b, // complete all C's monitor updates (B-C) + // C claims the payment (two rounds: first decodes HTLC, second claims): + 0x27, // Process events on C: decode HTLC, generate PaymentClaimable + 0x27, // Process events on C: handle PaymentClaimable, call claim_funds + 0x0b, // complete all C's monitor updates (B-C) + // Deliver C's fulfill to B: + 0x21, // Process C: deliver C's update_fulfill+CS to B. B learns preimage. + 0x0a, // complete all B's monitor updates (B-C) + 0x09, // complete all B's monitor updates (A-B) + // DO NOT process B's messages: B's fulfill hasn't reached A yet. + // A force-closes while HTLC is still committed on A-B: + 0xd0, // A force-closes A-B + // Settle everything: completes monitor updates, syncs chain, resolves. + 0xff, + ]; + let log = run_and_get_log(&data); + + assert!(log.contains("Force-closing channel"), "A should force-close"); + assert!( + log.contains("Delivering update_fulfill_htlc from node 2 to node 1"), + "C should deliver fulfill to B" + ); + assert!( + log.contains("Broadcasting onchain HTLC claim tx (1 preimage"), + "B should broadcast HTLC preimage claim on-chain" + ); + assert!( + log.contains("resolves outbound HTLC") && log.contains("with preimage"), + "A should learn preimage from B's on-chain HTLC claim" + ); + assert!( + log.contains("Handling event PaymentSent"), + "Payment should succeed after on-chain resolution" + ); + } +} diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 6a5e9948653..098b6f53ee6 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -485,6 +485,9 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. 
+ #[cfg(not(fuzzing))] assert!(expected_signed_tx_weight * 99 / 100 <= signed_tx_weight); let expected_package_fee = Amount::from_sat(fee_for_weight( @@ -733,6 +736,9 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. + #[cfg(not(fuzzing))] assert!(expected_signed_tx_weight * 98 / 100 <= signed_tx_weight); let expected_signed_tx_fee =