From 6368dfbbb8faa597ffe747c92ae5009cc932c470 Mon Sep 17 00:00:00 2001
From: Alec Chen
Date: Sun, 19 Jan 2025 23:57:13 +0000
Subject: [PATCH 1/5] Add cltv expiry to PendingHTLCRouting::Forward

In a coming commit we'll expire HTLCs backwards even if we haven't yet
claimed them on-chain based on their inbound edge being close to causing
a channel force-closure. Here we track the incoming edge's CLTV expiry
in the pending-routing state so that we can include it in the
`HTLCSource` in the next commit.

Co-authored-by: Matt Corallo
---
 lightning/src/ln/channelmanager.rs | 15 +++++++++++++--
 lightning/src/ln/onion_payment.rs  |  1 +
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b6f1c032f42..248b02c59b1 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -169,6 +169,8 @@ pub enum PendingHTLCRouting {
 		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
 		/// Set if this HTLC is being forwarded within a blinded path.
 		blinded: Option<BlindedForward>,
+		/// The absolute CLTV of the inbound HTLC
+		incoming_cltv_expiry: Option<u32>,
 	},
 	/// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
 	///
@@ -269,6 +271,14 @@ impl PendingHTLCRouting {
 			_ => None,
 		}
 	}
+
+	fn incoming_cltv_expiry(&self) -> Option<u32> {
+		match self {
+			Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
+			Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
+			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
+		}
+	}
 }
 
 /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
@@ -5522,9 +5532,9 @@ where
 			})?;
 
 		let routing = match payment.forward_info.routing {
-			PendingHTLCRouting::Forward { onion_packet, blinded, .. } => {
+			PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
 				PendingHTLCRouting::Forward {
-					onion_packet, blinded, short_channel_id: next_hop_scid
+					onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid,
 				}
 			},
 			_ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
@@ -12433,6 +12443,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 		(0, onion_packet, required),
 		(1, blinded, option),
 		(2, short_channel_id, required),
+		(3, incoming_cltv_expiry, option),
 	},
 	(1, Receive) => {
 		(0, payment_data, required),
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index 193cdd1582a..f9d4f371227 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -110,6 +110,7 @@ pub(super) fn create_fwd_pending_htlc_info(
 		routing: PendingHTLCRouting::Forward {
 			onion_packet: outgoing_packet,
 			short_channel_id,
+			incoming_cltv_expiry: Some(msg.cltv_expiry),
 			blinded: intro_node_blinding_point.or(msg.blinding_point)
 				.map(|bp| BlindedForward {
 					inbound_blinding_point: bp,

From a78ea1ffe68cfa3f4b438bd7a1d52f2fe63d90b0 Mon Sep 17 00:00:00 2001
From: Alec Chen
Date: Mon, 20 Jan 2025 00:09:00 +0000
Subject: [PATCH 2/5] Add cltv expiry to HTLCPreviousHopData

In a coming commit we'll expire HTLCs backwards even if we haven't yet
claimed them on-chain based on their inbound edge being close to causing
a channel force-closure. Here we track and expose the incoming edge's
CLTV expiry in the `HTLCSource`, giving `ChannelMonitor` access to it.
Co-authored-by: Matt Corallo
---
 lightning/src/ln/channelmanager.rs | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 248b02c59b1..251111065ac 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -399,6 +399,9 @@ pub(crate) struct HTLCPreviousHopData {
 	// channel with a preimage provided by the forward channel.
 	outpoint: OutPoint,
 	counterparty_node_id: Option<PublicKey>,
+	/// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward
+	/// channel remains unconfirmed for too long.
+	cltv_expiry: Option<u32>,
 }
 
 #[derive(PartialEq, Eq)]
@@ -701,6 +704,15 @@ impl HTLCSource {
 			true
 		}
 	}
+
+	/// Returns the CLTV expiry of the inbound HTLC (i.e. the source referred to by this object),
+	/// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later.
+	pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
+		match self {
+			Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
+			_ => None,
+		}
+	}
 }
 
 /// This enum is used to specify which error data to send to peers when failing back an HTLC
@@ -5573,7 +5585,7 @@ where
 				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 			})?;
 
-		if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
+		if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing {
 			let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 				short_channel_id: payment.prev_short_channel_id,
 				user_channel_id: Some(payment.prev_user_channel_id),
@@ -5584,6 +5596,7 @@ where
 				incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
 				phantom_shared_secret: None,
 				blinded_failure: payment.forward_info.routing.blinded_failure(),
+				cltv_expiry: incoming_cltv_expiry,
 			});
 
 			let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
@@ -5758,6 +5771,7 @@ where
 						outgoing_cltv_value, ..
 					}
 				}) => {
+					let cltv_expiry = routing.incoming_cltv_expiry();
 					macro_rules! failure_handler {
 						($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
 							let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
@@ -5773,6 +5787,7 @@ where
 								incoming_packet_shared_secret: incoming_shared_secret,
 								phantom_shared_secret: $phantom_ss,
 								blinded_failure: routing.blinded_failure(),
+								cltv_expiry,
 							});
 
 							let reason = if $next_hop_unknown {
@@ -5882,7 +5897,7 @@ where
 						prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
 							incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
 							routing: PendingHTLCRouting::Forward {
-								ref onion_packet, blinded, ..
+								ref onion_packet, blinded, incoming_cltv_expiry, ..
 							}, skimmed_fee_msat, ..
 						},
 					}) => {
@@ -5897,6 +5912,7 @@ where
 							// Phantom payments are only PendingHTLCRouting::Receive.
 							phantom_shared_secret: None,
 							blinded_failure: blinded.map(|b| b.failure),
+							cltv_expiry: incoming_cltv_expiry,
 						});
 						let next_blinding_point = blinded.and_then(|b| {
 							b.next_blinding_override.or_else(|| {
@@ -6086,6 +6102,7 @@ where
 									incoming_packet_shared_secret: incoming_shared_secret,
 									phantom_shared_secret,
 									blinded_failure,
+									cltv_expiry: Some(cltv_expiry),
 								},
 								// We differentiate the received value from the sender intended value
 								// if possible so that we don't prematurely mark MPP payments complete
@@ -6119,6 +6136,7 @@ where
 								incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
 								phantom_shared_secret,
 								blinded_failure,
+								cltv_expiry: Some(cltv_expiry),
 							}), payment_hash,
 							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
 							HTLCDestination::FailedPayment { payment_hash: $payment_hash },
@@ -8979,6 +8997,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 							incoming_packet_shared_secret: forward_info.incoming_shared_secret,
 							phantom_shared_secret: None,
 							blinded_failure: forward_info.routing.blinded_failure(),
+							cltv_expiry: forward_info.routing.incoming_cltv_expiry(),
 						});
 
 						failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
@@ -11221,6 +11240,7 @@ where
 				outpoint: htlc.prev_funding_outpoint,
 				channel_id: htlc.prev_channel_id,
 				blinded_failure: htlc.forward_info.routing.blinded_failure(),
+				cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(),
 			});
 
 			let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
@@ -12559,6 +12579,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 	(2, outpoint, required),
 	(3, blinded_failure, option),
 	(4, htlc_id, required),
+	(5, cltv_expiry, option),
 	(6, incoming_packet_shared_secret, required),
 	(7, user_channel_id, option),
 	// Note that by the time we get past the required read for type 2 above, outpoint will be

From 4c8d59f2c5366c95c8db3dd974c269be9084d84a Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 21 Jan 2025 22:00:51 +0000
Subject: [PATCH 3/5] Drop `Channel::historical_inbound_htlc_fulfills`

This field was used to test that any HTLC failures didn't come in after
an HTLC was fulfilled (indicating, somewhat dubiously, that there may be
a bug causing us to fail when we shouldn't have).

In the next commit, we'll be failing HTLCs based on on-chain HTLC
expiry, but may ultimately receive the preimage thereafter. This would
make the `historical_inbound_htlc_fulfills` checks potentially-brittle,
so we just remove them as they have dubious value.
---
 lightning/src/ln/channel.rs        | 86 ++++--------------------------
 lightning/src/ln/channelmanager.rs |  1 -
 2 files changed, 9 insertions(+), 78 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index b092a437fe9..3e610ec0e1c 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1693,15 +1693,6 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	/// [`msgs::RevokeAndACK`] message from the counterparty.
 	sent_message_awaiting_response: Option<usize>,
 
-	#[cfg(any(test, fuzzing))]
-	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
-	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
-	// disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
-	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
-	// is fine, but as a sanity check in our failure to generate the second claim, we check here
-	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
-	historical_inbound_htlc_fulfills: HashSet<u64>,
-
 	/// This channel's type, as negotiated during channel open
 	channel_type: ChannelTypeFeatures,
 
@@ -2403,9 +2394,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			funding_tx_broadcast_safe_event_emitted: false,
 			channel_ready_event_emitted: false,
 
-			#[cfg(any(test, fuzzing))]
-			historical_inbound_htlc_fulfills: new_hash_set(),
-
 			channel_type,
 			channel_keys_id,
 
@@ -2636,9 +2624,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			funding_tx_broadcast_safe_event_emitted: false,
 			channel_ready_event_emitted: false,
 
-			#[cfg(any(test, fuzzing))]
-			historical_inbound_htlc_fulfills: new_hash_set(),
-
 			channel_type,
 			channel_keys_id,
 
@@ -4665,10 +4650,6 @@ impl<SP: Deref> FundedChannel<SP> where
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
-			// this is simply a duplicate claim, not previously failed and we lost funds.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 			return UpdateFulfillFetch::DuplicateClaim {};
 		}
 
@@ -4698,8 +4679,6 @@ impl<SP: Deref> FundedChannel<SP> where
 					if htlc_id_arg == htlc_id {
 						// Make sure we don't leave latest_monitor_update_id incremented here:
 						self.context.latest_monitor_update_id -= 1;
-						#[cfg(any(test, fuzzing))]
-						debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 						return UpdateFulfillFetch::DuplicateClaim {};
 					}
 				},
@@ -4721,12 +4700,8 @@ impl<SP: Deref> FundedChannel<SP> where
 			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 			});
-			#[cfg(any(test, fuzzing))]
-			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 		}
-		#[cfg(any(test, fuzzing))]
-		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 
 		{
 			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
@@ -4791,14 +4766,8 @@ impl<SP: Deref> FundedChannel<SP> where
 		}
 	}
 
-	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
-	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
-	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
-	/// before we fail backwards.
-	///
-	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
-	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
-	/// [`ChannelError::Ignore`].
+	/// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g.
+	/// if it was already resolved). Otherwise returns `Ok`.
 	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
 	-> Result<(), ChannelError> where L::Target: Logger {
 		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
@@ -4816,14 +4785,8 @@ impl<SP: Deref> FundedChannel<SP> where
 			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
 	}
 
-	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
-	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
-	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
-	/// before we fail backwards.
-	///
-	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
-	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
-	/// [`ChannelError::Ignore`].
+	/// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g.
+	/// if it was already resolved). Otherwise returns `Ok`.
 	fn fail_htlc<E: FailHTLCContents + Clone, L: Deref>(
 		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
 		logger: &L
@@ -4841,12 +4804,8 @@ impl<SP: Deref> FundedChannel<SP> where
 			if htlc.htlc_id == htlc_id_arg {
 				match htlc.state {
 					InboundHTLCState::Committed => {},
-					InboundHTLCState::LocalRemoved(ref reason) => {
-						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
-						} else {
-							debug_assert!(false, "Tried to fail an HTLC that was already failed");
-						}
-						return Ok(None);
+					InboundHTLCState::LocalRemoved(_) => {
+						return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id)));
 					},
 					_ => {
 						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
@@ -4857,11 +4816,7 @@ impl<SP: Deref> FundedChannel<SP> where
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
-			// is simply a duplicate fail, not previously failed and we failed-back too early.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-			return Ok(None);
+			return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc_id_arg)));
 		}
 
 		if !self.context.channel_state.can_generate_new_commitment() {
@@ -4875,17 +4830,14 @@ impl<SP: Deref> FundedChannel<SP> where
 			match pending_update {
 				&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 					if htlc_id_arg == htlc_id {
-						#[cfg(any(test, fuzzing))]
-						debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-						return Ok(None);
+						return Err(ChannelError::Ignore(format!("HTLC {} was already claimed!", htlc_id)));
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
 					&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
 				{
 					if htlc_id_arg == htlc_id {
-						debug_assert!(false, "Tried to fail an HTLC that was already failed");
-						return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+						return Err(ChannelError::Ignore(format!("HTLC {} was already pending failure", htlc_id)));
 					}
 				},
 				_ => {}
@@ -9718,13 +9670,6 @@ impl<SP: Deref> Writeable for FundedChannel<SP> where SP::Target: SignerProvider
 		self.context.channel_update_status.write(writer)?;
 
-		#[cfg(any(test, fuzzing))]
-		(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
-		#[cfg(any(test, fuzzing))]
-		for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
-			htlc.write(writer)?;
-		}
-
 		// If the channel type is something other than only-static-remote-key, then we need to have
 		// older clients fail to deserialize this channel at all. If the type is
 		// only-static-remote-key, we simply consider it "default" and don't write the channel type
@@ -10058,16 +10003,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let channel_update_status = Readable::read(reader)?;
 
-		#[cfg(any(test, fuzzing))]
-		let mut historical_inbound_htlc_fulfills = new_hash_set();
-		#[cfg(any(test, fuzzing))]
-		{
-			let htlc_fulfills_len: u64 = Readable::read(reader)?;
-			for _ in 0..htlc_fulfills_len {
-				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
-			}
-		}
-
 		let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
 			Some((feerate, if channel_parameters.is_outbound_from_holder {
 				FeeUpdateState::Outbound
@@ -10408,9 +10343,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
 			channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
 
-			#[cfg(any(test, fuzzing))]
-			historical_inbound_htlc_fulfills,
-
 			channel_type: channel_type.unwrap(),
 			channel_keys_id,
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 251111065ac..13e64a75491 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6043,7 +6043,6 @@ where
 						// fail-backs are best-effort, we probably already have one
 						// pending, and if not that's OK, if not, the channel is on
 						// the chain and sending the HTLC-Timeout is their problem.
-						continue;
 					}
 				}
 			}

From ff344b4c2cc6de1486a09efc5c4537e618e462b9 Mon Sep 17 00:00:00 2001
From: Alec Chen
Date: Tue, 21 Jan 2025 22:03:01 +0000
Subject: [PATCH 4/5] Fail HTLC backwards before upstream claims on-chain

Fail inbound HTLCs if they expire within a certain number of blocks from
the current height. If we haven't seen the preimage for an HTLC by the
time the previous hop's timeout expires, we've lost that HTLC, so we
might as well fail it back instead of having our counterparty
force-close the channel.

Co-authored-by: Matt Corallo
---
 lightning/src/chain/channelmonitor.rs |  74 ++++++++++++
 lightning/src/ln/functional_tests.rs  | 158 ++++++++++++++++++++++++--
 2 files changed, 224 insertions(+), 8 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 3f6bdc3f256..c7edbd51d30 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1023,6 +1023,12 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
 
 	/// The first block height at which we had no remaining claimable balances.
 	balances_empty_height: Option<u32>,
+
+	/// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to
+	/// a downstream channel force-close remaining unconfirmed by the time the upstream timeout
+	/// expires. This is used to tell us we already generated an event to fail this HTLC back
+	/// during a previous block scan.
+	failed_back_htlc_ids: HashSet<SentHTLCId>,
 }
 
 /// Transaction outputs to watch for on-chain spends.
@@ -1445,6 +1451,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 			counterparty_node_id: Some(counterparty_node_id),
 			initial_counterparty_commitment_info: None,
 			balances_empty_height: None,
+
+			failed_back_htlc_ids: new_hash_set(),
 		})
 	}
 
@@ -4221,6 +4229,71 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			}
 		}
 
+		if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed {
+			// Fail back HTLCs on backwards channels if they expire within
+			// `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e.
we're at a + // point where no further off-chain updates will be accepted). If we haven't seen the + // preimage for an HTLC by the time the previous hop's timeout expires, we've lost that + // HTLC, so we might as well fail it back instead of having our counterparty force-close + // the inbound channel. + let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter() + .map(|&(ref a, _, ref b)| (a, b.as_ref())); + + let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let htlcs = current_holder_htlcs + .chain(current_counterparty_htlcs) + .chain(prev_counterparty_htlcs); + + let height = self.best_block.height; + for (htlc, source_opt) in htlcs { + // Only check forwarded HTLCs' previous hops + let source = match source_opt { + Some(source) => source, + None => continue, + }; + let inbound_htlc_expiry = match source.inbound_htlc_expiry() { + Some(cltv_expiry) => cltv_expiry, + None => continue, + }; + let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS); + if inbound_htlc_expiry > max_expiry_height { + continue; + } + let duplicate_event = self.pending_monitor_events.iter().any( + |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { + upd.source == *source + } else { false }); + if duplicate_event { + continue; + } + if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) { + continue; + } + if !duplicate_event { + log_error!(logger, "Failing back HTLC {} upstream to preserve the \ + channel as the forward HTLC hasn't resolved and our backward HTLC \ + expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry); + self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { + source: source.clone(), + payment_preimage: None, + payment_hash: htlc.payment_hash, + htlc_value_satoshis: Some(htlc.amount_msat / 1000), + })); + } + } + } + let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); @@ -5066,6 +5139,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP counterparty_node_id, initial_counterparty_commitment_info, balances_empty_height, + failed_back_htlc_ids: new_hash_set(), }))) } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index f1bc11d421c..b922f525d8c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2270,6 +2270,138 @@ fn channel_reserve_in_flight_removes() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } +enum PostFailBackAction { + TimeoutOnChain, + ClaimOnChain, + FailOffChain, + ClaimOffChain, +} + +#[test] +fn 
test_fail_back_before_backwards_timeout() { + do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); +} + +fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { + // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream + // just before the upstream timeout expires + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + for node in nodes.iter() { + *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; + } + + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Start every node on the same block height to make reasoning about timeouts easier + connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + + // Force close the B<->C channel by timing out the HTLC + let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; + connect_blocks(&nodes[1], timeout_blocks); + let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + + // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid + // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV + + // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which + // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`). + let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2; + connect_blocks(&nodes[1], upstream_timeout_blocks); + + // Connect blocks for nodes[0] to make sure they don't go on-chain + connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); + + // Check that nodes[1] fails the HTLC upstream + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + check_added_monitors!(nodes[1], 1); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; + + nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, + PaymentFailedConditions::new().blamed_chan_closed(true)); + + // Make sure we handle possible duplicate fails or extra messages after failing back + match post_fail_back_action { + PostFailBackAction::TimeoutOnChain => { + // Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again + mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + // Expect handling another fail back event, but the HTLC is already gone + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOnChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + + connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); + check_closed_broadcast!(nodes[2], true); + check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000); + check_added_monitors!(nodes[2], 1); + + mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::FailOffChain => { + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], + vec![HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fail = commitment_update.update_fail_htlcs[0].clone(); + + nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOffChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); + + nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + }; +} + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -2374,7 +2506,7 @@ fn channel_monitor_network_test() { let node2_commitment_txid; { let node_txn = test_txn_broadcast(&nodes[2], &chan_3, 
None, HTLCType::NONE); - connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); + connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT); node2_commitment_txid = node_txn[0].compute_txid(); @@ -3312,8 +3444,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false - , [nodes[2].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, + &[nodes[2].node.get_our_node_id()], 100000); let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal| if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal { Some(*claimable_height) @@ -9774,6 +9906,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); @@ -9789,7 +9923,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let conf_height = nodes[1].best_block_info().1; if !test_height_before_timelock { - connect_blocks(&nodes[1], 24 * 6); + connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); } nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height); @@ -9808,10 +9942,6 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t &spending_txn[0] }; check_spends!(htlc_tx, node_txn[0]); - // We should also generate a SpendableOutputs event with the to_self output (as its - // timelock is up). - let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); - assert_eq!(descriptor_spend_txn.len(), 1); // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we // should immediately fail-backwards the HTLC to the previous hop, without waiting for an @@ -9830,6 +9960,18 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true); + + // We should also generate a SpendableOutputs event with the to_self output (once the + // timelock is up). 
+		connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1);
+		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+		assert_eq!(descriptor_spend_txn.len(), 1);
+
+		// When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to
+		// avoid the A<->B channel closing (even though it already has). This will generate a
+		// spurious HTLCHandlingFailed event.
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+			vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]);
 	}
 }
 
From a577f32a22c407f478dc569c01106fcf1e4b3092 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 21 Jan 2025 21:53:01 +0000
Subject: [PATCH 5/5] Fail all `ChannelMonitorUpdate`s after `holder_tx_signed`

If we've signed the latest holder tx (i.e. we've force-closed and
broadcasted our state), there's not much reason to accept
counterparty-transaction-updating `ChannelMonitorUpdate`s; instead, we
should make sure the `ChannelManager` fails the channel as soon as
possible.

This standardizes the failure cases to match those added in the
previous commit, which makes things a bit more readable.
---
 lightning/src/chain/channelmonitor.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index c7edbd51d30..fe91c711830 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -3282,7 +3282,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			}
 		}
 
-		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
+		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update {
 			log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
 			Err(())
 		} else { ret }
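
As a rough summary of the behavior the series adds, the fail-back decision introduced in patch 4 (using the inbound CLTV expiry plumbed through in patches 1 and 2) can be sketched in isolation as below. This is an illustrative sketch only: `ForwardedHtlc` and `should_fail_back` are invented names rather than LDK API, and the constant simply mirrors LDK's `LATENCY_GRACE_PERIOD_BLOCKS`.

    // Illustrative sketch only -- not LDK code. An upstream (inbound) HTLC is failed back
    // once its CLTV expiry comes within LATENCY_GRACE_PERIOD_BLOCKS of the chain tip while
    // the downstream (forward) HTLC remains unresolved. (The real ChannelMonitor code
    // additionally requires that the downstream channel has already been closed.)
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // mirrors LDK's constant

    struct ForwardedHtlc {
        // CLTV expiry of the inbound edge, if known (only populated for HTLCs first
        // forwarded on LDK 0.1.1 or later, per patch 2).
        inbound_cltv_expiry: Option<u32>,
        // Whether the downstream claim or timeout has already resolved the HTLC.
        downstream_resolved: bool,
    }

    // Returns true if the HTLC should be failed back on the upstream channel now,
    // rather than letting the upstream peer force-close when the HTLC times out.
    fn should_fail_back(htlc: &ForwardedHtlc, best_block_height: u32) -> bool {
        match htlc.inbound_cltv_expiry {
            Some(expiry) if !htlc.downstream_resolved =>
                expiry <= best_block_height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS),
            _ => false,
        }
    }

    fn main() {
        let htlc = ForwardedHtlc { inbound_cltv_expiry: Some(820_005), downstream_resolved: false };
        assert!(should_fail_back(&htlc, 820_000)); // within the grace window: fail back
        assert!(!should_fail_back(&htlc, 819_990)); // still well before expiry: keep waiting
    }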