From f2280a0db831e87a4dc97d309badaba959ce133c Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Fri, 14 Mar 2025 13:44:38 -0400 Subject: [PATCH 01/12] ln/fix: remove undefined PERM | 1 code from reason method This failure code isn't used anywhere in the codebase and is not defined in BOLT 04. --- lightning/src/ln/onion_utils.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 9d528d9dafd..1ef8026f552 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1431,8 +1431,7 @@ impl HTLCFailReason { const NODE: u16 = 0x2000; const UPDATE: u16 = 0x1000; - if failure_code == 1 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 2 | NODE { debug_assert!(data.is_empty()) } + if failure_code == 2 | NODE { debug_assert!(data.is_empty()) } else if failure_code == 2 | PERM | NODE { debug_assert!(data.is_empty()) } else if failure_code == 3 | PERM | NODE { debug_assert!(data.is_empty()) } else if failure_code == 4 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } From 38171bde457cc19d0fa6597bf18e8b2814a16548 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Fri, 14 Mar 2025 16:01:09 -0400 Subject: [PATCH 02/12] ln/fix: invalid_onion_version code for DecodeError::UnknownVersion Realm is no longer specified in BOLT04, use the specified version error instead. 
--- lightning/src/ln/onion_utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 1ef8026f552..15258ae75b0 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -2037,8 +2037,8 @@ fn decode_next_hop, N: NextPacketBytes>( match R::read(&mut chacha_stream, read_args) { Err(err) => { let error_code = match err { - // Unknown realm byte - msgs::DecodeError::UnknownVersion => 0x4000 | 1, + // Unknown version + msgs::DecodeError::UnknownVersion => 0x8000 | 0x4000 | 1, // invalid_onion_payload msgs::DecodeError::UnknownRequiredFeature | msgs::DecodeError::InvalidValue From 4ae87c55db56c861e0af23dd86074989e59ad478 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 1 Apr 2025 16:09:49 -0400 Subject: [PATCH 03/12] ln: add length assertion for invalid onion failure code Although the specification allows an all-zero sha256_of_onion for invalid_onion_blinding errors, it still requires that the value is set. --- lightning/src/ln/onion_utils.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 15258ae75b0..de3d0c69fe5 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1458,6 +1458,7 @@ impl HTLCFailReason { else if failure_code == 21 { debug_assert!(data.is_empty()) } else if failure_code == 22 | PERM { debug_assert!(data.len() <= 11) } else if failure_code == 23 { debug_assert!(data.is_empty()) } + else if failure_code == INVALID_ONION_BLINDING { debug_assert_eq!(data.len(), 32) } else if failure_code & BADONION != 0 { // We set some bogus BADONION failure codes in test, so ignore unknown ones. 
} From 43627a604c948af513950d6c32acb5697fd93306 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 31 Mar 2025 09:33:31 -0400 Subject: [PATCH 04/12] ln/refactor: introduce enum for bolt 04 failure codes --- lightning/src/ln/async_payments_tests.rs | 10 +- lightning/src/ln/blinded_payment_tests.rs | 47 ++- lightning/src/ln/channel.rs | 35 +- lightning/src/ln/channelmanager.rs | 216 ++++++----- lightning/src/ln/functional_test_utils.rs | 14 +- lightning/src/ln/functional_tests.rs | 7 +- lightning/src/ln/mod.rs | 2 +- lightning/src/ln/onion_payment.rs | 108 +++--- lightning/src/ln/onion_route_tests.rs | 132 ++++--- lightning/src/ln/onion_utils.rs | 440 +++++++++++++++++----- lightning/src/ln/payment_tests.rs | 8 +- lightning/src/ln/priv_short_conf_tests.rs | 7 +- lightning/src/ln/shutdown_tests.rs | 4 +- 13 files changed, 656 insertions(+), 374 deletions(-) diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index b888b9ceb5c..1d9c6fb84c7 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -20,7 +20,7 @@ use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, OnionMessageHandler, }; use crate::ln::offers_tests; -use crate::ln::onion_utils::INVALID_ONION_BLINDING; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::outbound_payment::PendingOutboundPayment; use crate::ln::outbound_payment::Retry; use crate::offers::invoice_request::InvoiceRequest; @@ -179,7 +179,10 @@ fn invalid_keysend_payment_secret() { assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!( + update_malformed.failure_code, + LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() + ); nodes[1] .node 
.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); @@ -196,7 +199,8 @@ fn invalid_keysend_payment_secret() { &nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]), + PaymentFailedConditions::new() + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32]), ); } diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 17494b06098..b9b961e60ec 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -26,8 +26,7 @@ use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, UnsignedGossipMessage, MessageSendEvent}; use crate::ln::onion_payment; -use crate::ln::onion_utils; -use crate::ln::onion_utils::INVALID_ONION_BLINDING; +use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{Retry, IDEMPOTENCY_TIMEOUT_TICKS}; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::offers::nonce::Nonce; @@ -118,7 +117,7 @@ pub fn fail_blinded_htlc_backwards( match i { 0 => { let mut payment_failed_conditions = PaymentFailedConditions::new() - .expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]); + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32]); if retry_expected { payment_failed_conditions = payment_failed_conditions.retry_expected(); } @@ -137,7 +136,7 @@ pub fn fail_blinded_htlc_backwards( assert_eq!(blinded_node_updates.update_fail_malformed_htlcs.len(), 1); let update_malformed = &blinded_node_updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); 
nodes[i-1].node.handle_update_fail_malformed_htlc(nodes[i].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[i-1], &nodes[i], &blinded_node_updates.commitment_signed, true, false); } @@ -437,11 +436,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { match check { ForwardCheckFail::ForwardPayloadEncodedAsReceive => { expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(0x4000 | 22, &[0; 0])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionPayload, &[0; 0])); } _ => { expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } }; return @@ -469,12 +468,12 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 32]); // Ensure the intro node will properly blind the error if its downstream node failed to do so. 
update_malformed.sha256_of_onion = [1; 32]; - update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1; + update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); @@ -482,7 +481,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -534,7 +533,7 @@ fn failed_backwards_to_intro_node() { let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0]; // Check that the final node encodes its failure correctly. 
- assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 32]); // Modify such the final hop does not correctly blind their error so we can ensure the intro node @@ -547,7 +546,7 @@ fn failed_backwards_to_intro_node() { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } enum ProcessPendingHTLCsCheck { @@ -655,12 +654,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 32]); // Ensure the intro node will properly blind the error if its downstream node failed to do so. 
update_malformed.sha256_of_onion = [1; 32]; - update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1; + update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); @@ -668,7 +667,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -1042,7 +1041,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false); @@ -1064,7 +1063,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - 
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -1131,7 +1130,7 @@ fn blinded_path_retries() { assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); $intro_node.node.handle_update_fail_malformed_htlc(nodes[3].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&$intro_node, &nodes[3], &updates.commitment_signed, true, false); @@ -1251,7 +1250,7 @@ fn min_htlc() { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -1446,7 +1445,7 @@ fn fails_receive_tlvs_authentication() { commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false); expect_payment_failed_conditions( &nodes[0], payment_hash, true, - PaymentFailedConditions::new().expected_htlc_error_data(0x4000 | 22, &[]), + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionPayload, &[]), ); } @@ -1728,7 +1727,8 @@ fn route_blinding_spec_test_vector() { match onion_payment::decode_incoming_update_add_htlc_onion( &eve_update_add, &eve_node_signer, &logger, &secp_ctx ) { - Err(HTLCFailureMsg::Malformed(msg)) => assert_eq!(msg.failure_code, 
INVALID_ONION_BLINDING), + Err((HTLCFailureMsg::Malformed(msg), _)) => assert_eq!(msg.failure_code, + LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()), _ => panic!("Unexpected error") } } @@ -2160,7 +2160,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { } { let payment_failed_conditions = PaymentFailedConditions::new() - .expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 0]); + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 0]); expect_payment_failed_conditions(&nodes[0], payment_hash, true, payment_failed_conditions); } } @@ -2453,10 +2453,9 @@ fn test_trampoline_forward_rejection() { do_commitment_signed_dance(&nodes[0], &nodes[1], &unblinded_node_updates.commitment_signed, false, false); } { - // Expect a PERM|10 (unknown_next_peer) error while we are unable to route forwarding - // Trampoline payments. + // Expect UnknownNextPeer error while we are unable to route forwarding Trampoline payments. let payment_failed_conditions = PaymentFailedConditions::new() - .expected_htlc_error_data(0x4000 | 10, &[0; 0]); + .expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[0; 0]); expect_payment_failed_conditions(&nodes[0], payment_hash, false, payment_failed_conditions); } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index d6fbd162388..51e9bf4ed9f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,7 +50,7 @@ use crate::ln::chan_utils::{ #[cfg(splicing)] use crate::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; use crate::ln::chan_utils; -use crate::ln::onion_utils::{HTLCFailReason}; +use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator, fee_for_weight}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS}; @@ -7564,21 
+7564,17 @@ impl FundedChannel where fn internal_htlc_satisfies_config( &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig, - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64) .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64)); if fee.is_none() || htlc.amount_msat < fee.unwrap() || (htlc.amount_msat - fee.unwrap()) < amt_to_forward { - return Err(( - "Prior hop has deviated from specified fees parameters or origin node has obsolete ones", - 0x1000 | 12, // fee_insufficient - )); + return Err(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", + LocalHTLCFailureReason::FeeInsufficient)); } if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 { - return Err(( - "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", - 0x1000 | 13, // incorrect_cltv_expiry - )); + return Err(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", + LocalHTLCFailureReason::IncorrectCLTVExpiry)); } Ok(()) } @@ -7588,7 +7584,7 @@ impl FundedChannel where /// unsuccessful, falls back to the previous one if one exists. pub fn htlc_satisfies_config( &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config()) .or_else(|err| { if let Some(prev_config) = self.context.prev_config() { @@ -7603,13 +7599,13 @@ impl FundedChannel where /// this function determines whether to fail the HTLC, or forward / claim it. 
pub fn can_accept_incoming_htlc( &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, logger: L - ) -> Result<(), (&'static str, u16)> + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> where F::Target: FeeEstimator, L::Target: Logger { if self.context.channel_state.is_local_shutdown_sent() { - return Err(("Shutdown was already sent", 0x4000|8)) + return Err(("Shutdown was already sent", LocalHTLCFailureReason::DroppedPending)) } let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); @@ -7620,7 +7616,8 @@ impl FundedChannel where // Note that the total dust exposure includes both the dust HTLCs and the excess mining fees of the counterparty commitment transaction log_info!(logger, "Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - return Err(("Exceeded our total dust exposure limit on counterparty commitment tx", 0x1000|7)) + return Err(("Exceeded our total dust exposure limit on counterparty commitment tx", + LocalHTLCFailureReason::DustLimitCounterparty)) } let htlc_success_dust_limit = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { 0 @@ -7634,7 +7631,8 @@ impl FundedChannel where if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7)) + return Err(("Exceeded our dust exposure limit on holder commitment tx", + LocalHTLCFailureReason::DustLimitHolder)) } } @@ -7672,7 +7670,7 @@ impl FundedChannel where } if pending_remote_value_msat.saturating_sub(self.funding.holder_selected_channel_reserve_satoshis * 
1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); - return Err(("Fee spike buffer violation", 0x1000|7)); + return Err(("Fee spike buffer violation", LocalHTLCFailureReason::FeeSpikeBuffer)); } } @@ -11135,7 +11133,7 @@ mod tests { use bitcoin::network::Network; #[cfg(splicing)] use bitcoin::Weight; - use crate::ln::onion_utils::INVALID_ONION_BLINDING; + use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint}; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; @@ -11775,7 +11773,8 @@ mod tests { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] } }; let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32], + htlc_id, failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], }; let mut holding_cell_htlc_updates = Vec::with_capacity(12); for i in 0..12 { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 88d96f1396f..317b5fed5c4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -60,7 +60,7 @@ use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentPar use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, HopConnector, InboundHTLCErr, NextPacketDetails}; use crate::ln::msgs; use crate::ln::onion_utils; -use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING}; +use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; use crate::ln::msgs::{BaseMessageHandler, 
ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError, MessageSendEvent}; #[cfg(test)] use crate::ln::outbound_payment; @@ -399,10 +399,11 @@ pub(super) enum HTLCForwardInfo { #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BlindedFailure { /// This HTLC is being failed backwards by the introduction node, and thus should be failed with - /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`. + /// [`msgs::UpdateFailHTLC`] and error code [`LocalHTLCFailureReason::InvalidOnionBlinding`]. FromIntroductionNode, /// This HTLC is being failed backwards by a blinded node within the path, and thus should be - /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`. + /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code + /// [`LocalHTLCFailureReason::InvalidOnionBlinding`]. FromBlindedNode, } @@ -771,13 +772,13 @@ pub enum FailureCode { InvalidOnionPayload(Option<(u64, u16)>), } -impl Into for FailureCode { - fn into(self) -> u16 { +impl Into for FailureCode { + fn into(self) -> LocalHTLCFailureReason { match self { - FailureCode::TemporaryNodeFailure => 0x2000 | 2, - FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3, - FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15, - FailureCode::InvalidOnionPayload(_) => 0x4000 | 22, + FailureCode::TemporaryNodeFailure => LocalHTLCFailureReason::TemporaryNodeFailure, + FailureCode::RequiredNodeFeatureMissing => LocalHTLCFailureReason::RequiredNodeFeature, + FailureCode::IncorrectOrUnknownPaymentDetails => LocalHTLCFailureReason::IncorrectPaymentDetails, + FailureCode::InvalidOnionPayload(_) => LocalHTLCFailureReason::InvalidOnionPayload, } } } @@ -3915,7 +3916,8 @@ where } for htlc_source in failed_htlcs.drain(..) 
{ - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let failure_reason = LocalHTLCFailureReason::DroppedPending; + let reason = HTLCFailReason::from_failure_code(failure_reason); let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -4038,7 +4040,8 @@ where shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len()); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let failure_reason = LocalHTLCFailureReason::DroppedPending; + let reason = HTLCFailReason::from_failure_code(failure_reason); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -4334,22 +4337,25 @@ where fn can_forward_htlc_to_outgoing_channel( &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if // we don't allow forwards outbound over them. 
- return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10)); + return Err(("Refusing to forward to a private channel based on our config.", + LocalHTLCFailureReason::PrivateChannelForward)); } if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector { if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means // "refuse to forward unless the SCID alias was used", so we pretend // we don't have the channel here. - return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10)); + return Err(("Refusing to forward over real channel SCID as our counterparty requested.", + LocalHTLCFailureReason::RealSCIDForward)); } } else { - return Err(("Cannot forward by Node ID without SCID.", 0x4000 | 10)); + return Err(("Cannot forward by Node ID without SCID.", + LocalHTLCFailureReason::InvalidTrampolineForward)); } // Note that we could technically not return an error yet here and just hope @@ -4359,19 +4365,18 @@ where // on a small/per-node/per-channel scale. 
if !chan.context.is_live() { if !chan.context.is_enabled() { - // channel_disabled - return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20)); + return Err(("Forwarding channel has been disconnected for some time.", + LocalHTLCFailureReason::ChannelDisabled)); } else { - // temporary_channel_failure - return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7)); + return Err(("Forwarding channel is not in a ready state.", + LocalHTLCFailureReason::ChannelNotReady)); } } - if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11)); - } - if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) { - return Err((err, code)); + if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { + return Err(("HTLC amount was below the htlc_minimum_msat", + LocalHTLCFailureReason::AmountBelowMinimum)); } + chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; Ok(()) } @@ -4400,11 +4405,12 @@ where fn can_forward_htlc( &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { let outgoing_scid = match next_packet_details.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, HopConnector::Trampoline(_) => { - return Err(("Cannot forward by Node ID without SCID.", 0x4000 | 10)); + return Err(("Cannot forward by Node ID without SCID.", + LocalHTLCFailureReason::InvalidTrampolineForward)); } }; match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { @@ -4419,36 +4425,34 @@ where fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, 
outgoing_scid, &self.chain_hash) {} else { - return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10)); + return Err(("Don't have available channel for forwarding as requested.", + LocalHTLCFailureReason::UnknownNextPeer)); } } } let cur_height = self.best_block.read().unwrap().height + 1; - if let Err((err_msg, err_code)) = check_incoming_htlc_cltv( - cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry - ) { - return Err((err_msg, err_code)); - } + check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?; Ok(()) } fn htlc_failure_from_update_add_err( &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str, - err_code: u16, is_intro_node_blinded_forward: bool, + reason: LocalHTLCFailureReason, is_intro_node_blinded_forward: bool, shared_secret: &[u8; 32] ) -> HTLCFailureMsg { // at capacity, we write fields `htlc_msat` and `len` let mut res = VecWriter(Vec::with_capacity(8 + 2)); - if err_code & 0x1000 == 0x1000 { - if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 { + if reason.is_temporary() { + if reason == LocalHTLCFailureReason::AmountBelowMinimum || + reason == LocalHTLCFailureReason::FeeInsufficient { msg.amount_msat.write(&mut res).expect("Writes cannot fail"); } - else if err_code == 0x1000 | 13 { + else if reason == LocalHTLCFailureReason::IncorrectCLTVExpiry { msg.cltv_expiry.write(&mut res).expect("Writes cannot fail"); } - else if err_code == 0x1000 | 20 { + else if reason == LocalHTLCFailureReason::ChannelDisabled { // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 0u16.write(&mut res).expect("Writes cannot fail"); } @@ -4466,16 +4470,16 @@ where channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), }); } - let (err_code, err_data) = if is_intro_node_blinded_forward 
{ - (INVALID_ONION_BLINDING, &[0; 32][..]) + let (reason, err_data) = if is_intro_node_blinded_forward { + (LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32][..]) } else { - (err_code, &res.0[..]) + (reason, &res.0[..]) }; - let failure = HTLCFailReason::reason(err_code, err_data.to_vec()) + let failure = HTLCFailReason::reason(reason, err_data.to_vec()) .get_encrypted_failure_packet(shared_secret, &None); HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -4490,7 +4494,7 @@ where next_packet_pubkey_opt: Option>, ) -> PendingHTLCStatus { macro_rules! return_err { - ($msg: expr, $err_code: expr, $data: expr) => { + ($msg: expr, $reason: expr, $data: expr) => { { let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); @@ -4500,11 +4504,11 @@ where channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), } )) } - let failure = HTLCFailReason::reason($err_code, $data.to_vec()) + let failure = HTLCFailReason::reason($reason, $data.to_vec()) .get_encrypted_failure_packet(&shared_secret, &None); return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -4530,19 +4534,19 @@ where // delay) once they've sent us a commitment_signed! PendingHTLCStatus::Forward(info) }, - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason , &err_data) } }, onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. 
} => { match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) } }, onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => { match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) } } } @@ -5739,9 +5743,9 @@ where cltv_expiry: incoming_cltv_expiry, }); - let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); + let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer); let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id }; - self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination); + self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &reason, destination); } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted Ok(()) @@ -5794,7 +5798,7 @@ where &update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx ) { Ok(decoded_onion) => decoded_onion, - Err(htlc_fail) => { + Err((htlc_fail, _)) => { htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion)); continue; }, @@ -5817,9 +5821,9 @@ where ) }) { Some(Ok(_)) => {}, - Some(Err((err, code))) => { + Some(Err((err, reason))) => { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, err, code, + &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, 
); let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -5832,11 +5836,11 @@ where // Now process the HTLC on the outgoing channel if it's a forward. if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - if let Err((err, code)) = self.can_forward_htlc( + if let Err((err, reason)) = self.can_forward_htlc( &update_add_htlc, next_packet_details ) { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, err, code, + &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -5919,7 +5923,7 @@ where }) => { let cltv_expiry = routing.incoming_cltv_expiry(); macro_rules! failure_handler { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { + ($msg: expr, $reason: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); @@ -5943,23 +5947,23 @@ where }; failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::reason($err_code, $err_data), + HTLCFailReason::reason($reason, $err_data), reason )); continue; } } macro_rules! fail_forward { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + ($msg: expr, $reason: expr, $err_data: expr, $phantom_ss: expr) => { { - failure_handler!($msg, $err_code, $err_data, $phantom_ss, true); + failure_handler!($msg, $reason, $err_data, $phantom_ss, true); } } } macro_rules! 
failed_payment { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + ($msg: expr, $reason: expr, $err_data: expr, $phantom_ss: expr) => { { - failure_handler!($msg, $err_code, $err_data, $phantom_ss, false); + failure_handler!($msg, $reason, $err_data, $phantom_ss, false); } } } @@ -5971,17 +5975,17 @@ where onion_packet.hmac, payment_hash, None, &*self.node_signer ) { Ok(res) => res, - Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { + Err(onion_utils::OnionDecodeErr::Malformed { err_msg, reason }) => { let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array(); // In this scenario, the phantom would have sent us an // `update_fail_malformed_htlc`, meaning here we encrypt the error as // if it came from us (the second-to-last hop) but contains the sha256 // of the onion. - failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None); + failed_payment!(err_msg, reason, sha256_of_onion.to_vec(), None); }, - Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code, shared_secret, .. }) => { + Err(onion_utils::OnionDecodeErr::Relay { err_msg, reason, shared_secret, .. 
}) => { let phantom_shared_secret = shared_secret.secret_bytes(); - failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); + failed_payment!(err_msg, reason, Vec::new(), Some(phantom_shared_secret)); }, }; let phantom_shared_secret = next_hop.shared_secret().secret_bytes(); @@ -5995,13 +5999,15 @@ where prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)] )), - Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(InboundHTLCErr { reason, err_data, msg }) => failed_payment!(msg, reason, err_data, Some(phantom_shared_secret)) } } else { - fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); + fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), + LocalHTLCFailureReason::UnknownNextPeer, Vec::new(), None); } } else { - fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); + fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), + LocalHTLCFailureReason::UnknownNextPeer, Vec::new(), None); } }, HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. 
} => { @@ -6123,10 +6129,10 @@ where .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { - let failure_code = 0x1000|7; - let data = self.get_htlc_inbound_temp_fail_data(failure_code); + let reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::reason(failure_code, data), + HTLCFailReason::reason(reason, data), HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); } else { @@ -6280,7 +6286,7 @@ where blinded_failure, cltv_expiry: Some(cltv_expiry), }), payment_hash, - HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), + HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, )); continue 'next_forwardable_htlc; @@ -6837,7 +6843,8 @@ where for htlc_source in timed_out_mpp_htlcs.drain(..) 
{ let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); - let reason = HTLCFailReason::from_failure_code(23); + let failure_reason = LocalHTLCFailureReason::MPPTimeout; + let reason = HTLCFailReason::from_failure_code(failure_reason); let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } @@ -6934,14 +6941,14 @@ where /// /// This is for failures on the channel on which the HTLC was *received*, not failures /// forwarding - fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec { - debug_assert_eq!(err_code & 0x1000, 0x1000); - debug_assert_ne!(err_code, 0x1000|11); - debug_assert_ne!(err_code, 0x1000|12); - debug_assert_ne!(err_code, 0x1000|13); + fn get_htlc_inbound_temp_fail_data(&self, reason: LocalHTLCFailureReason) -> Vec { + debug_assert!(reason.is_temporary()); + debug_assert!(reason != LocalHTLCFailureReason::AmountBelowMinimum); + debug_assert!(reason != LocalHTLCFailureReason::FeeInsufficient); + debug_assert!(reason != LocalHTLCFailureReason::IncorrectCLTVExpiry); // at capacity, we write fields `disabled_flags` and `len` let mut enc = VecWriter(Vec::with_capacity(4)); - if err_code == 0x1000 | 20 { + if reason == LocalHTLCFailureReason::ChannelDisabled { // No flags for `disabled_flags` are currently defined so they're always two zero bytes. 
// See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008 0u16.write(&mut enc).expect("Writes cannot fail"); @@ -6958,7 +6965,7 @@ where &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId, counterparty_node_id: &PublicKey ) { - let (failure_code, onion_failure_data) = { + let (failure_reason, onion_failure_data) = { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -6966,22 +6973,22 @@ where match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_entry) => { if let Some(_chan) = chan_entry.get().as_funded() { - let failure_code = 0x1000|7; - let data = self.get_htlc_inbound_temp_fail_data(failure_code); - (failure_code, data) + let reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let data = self.get_htlc_inbound_temp_fail_data(reason); + (reason, data) } else { // We shouldn't be trying to fail holding cell HTLCs on an unfunded channel. debug_assert!(false); - (0x4000|10, Vec::new()) + (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + hash_map::Entry::Vacant(_) => (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } - } else { (0x4000|10, Vec::new()) } + } else { (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } }; for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) 
{ - let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone()); + let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } @@ -7030,7 +7037,7 @@ where ); let failure = match blinded_failure { Some(BlindedFailure::FromIntroductionNode) => { - let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]); + let blinded_onion_error = HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]); let err_packet = blinded_onion_error.get_encrypted_failure_packet( incoming_packet_shared_secret, phantom_shared_secret ); @@ -7039,7 +7046,7 @@ where Some(BlindedFailure::FromBlindedNode) => { HTLCForwardInfo::FailMalformedHTLC { htlc_id: *htlc_id, - failure_code: INVALID_ONION_BLINDING, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), sha256_of_onion: [0; 32] } }, @@ -7229,7 +7236,7 @@ where let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); + let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data); let receiver = HTLCDestination::FailedPayment { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -8768,7 +8775,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for htlc_source in dropped_htlcs.drain(..) 
{ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::DroppedPending); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } if let Some(shutdown_res) = finish_shutdown { @@ -8969,7 +8976,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ try_channel_entry!(self, peer_state, Err(chan_err), chan_entry); } if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - try_channel_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_entry); + try_channel_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code.into(), msg.sha256_of_onion.to_vec())), chan_entry); } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_entry); @@ -9108,7 +9115,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, - HTLCFailReason::from_failure_code(0x4000 | 10), + HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer), HTLCDestination::InvalidForward { requested_forward_scid: scid }, )); } @@ -9592,8 +9599,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); + let failure_reason = LocalHTLCFailureReason::ChannelClosed; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let reason = HTLCFailReason::from_failure_code(failure_reason); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } }, @@ -11683,9 +11691,9 @@ where let res = f(funded_channel); if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..) { - let failure_code = 0x1000|14; /* expiry_too_soon */ - let data = self.get_htlc_inbound_temp_fail_data(failure_code); - timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), + let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; + let data = self.get_htlc_inbound_temp_fail_data(reason); + timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data), HTLCDestination::NextHopChannel { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); } let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); @@ -11808,8 +11816,9 @@ where let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&height.to_be_bytes()); + let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), - HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), + HTLCFailReason::reason(reason, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); false } else { true } @@ -11838,7 +11847,7 @@ where _ => unreachable!(), }; 
timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, - HTLCFailReason::from_failure_code(0x2000 | 2), + HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), HTLCDestination::InvalidForward { requested_forward_scid })); let logger = WithContext::from( &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) @@ -14882,8 +14891,9 @@ where for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let failure_reason = LocalHTLCFailureReason::DroppedPending; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -14915,7 +14925,7 @@ mod tests { use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, ChannelConfigOverrides, HTLCForwardInfo, InterceptId, PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, AcceptChannel, ErrorAction, MessageSendEvent}; - use crate::ln::onion_utils; + use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::Retry; use crate::prelude::*; use crate::routing::router::{PaymentParameters, RouteParameters, find_route}; @@ -15904,12 +15914,12 @@ mod tests { // Check that if the amount we received + the penultimate hop extra fee is less than the sender // intended amount, we fail the payment. let current_height: u32 = node[0].node.best_block.read().unwrap().height; - if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) = + if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. 
}) = create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat), current_height) { - assert_eq!(err_code, 19); + assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount); } else { panic!(); } // If amt_received + extra_fee is equal to the sender intended amount, we're fine. diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 611fcd2b1ad..fa4c10d9248 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -24,6 +24,7 @@ use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEven use crate::ln::outbound_payment::Retry; use crate::ln::peer_handler::IgnoringMessageHandler; use crate::onion_message::messenger::OnionMessenger; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate}; use crate::routing::router::{self, PaymentParameters, Route, RouteParameters}; use crate::sign::{EntropySource, RandomBytes}; @@ -2488,7 +2489,7 @@ pub fn expect_probe_successful_events(node: &Node, mut probe_results: Vec<(Payme } pub struct PaymentFailedConditions<'a> { - pub(crate) expected_htlc_error_data: Option<(u16, &'a [u8])>, + pub(crate) expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, pub(crate) expected_blamed_scid: Option, pub(crate) expected_blamed_chan_closed: Option, pub(crate) expected_mpp_parts_remain: bool, @@ -2517,8 +2518,8 @@ impl<'a> PaymentFailedConditions<'a> { self.expected_blamed_chan_closed = Some(closed); self } - pub fn expected_htlc_error_data(mut self, code: u16, data: &'a [u8]) -> Self { - self.expected_htlc_error_data = Some((code, data)); + pub fn expected_htlc_error_data(mut self, reason: LocalHTLCFailureReason, data: &'a [u8]) -> Self { + self.expected_htlc_error_data = Some((reason, data)); self } pub fn retry_expected(mut self) -> Self { 
@@ -2539,11 +2540,11 @@ macro_rules! expect_payment_failed_with_update { #[cfg(any(test, feature = "_externalize_tests"))] macro_rules! expect_payment_failed { - ($node: expr, $expected_payment_hash: expr, $payment_failed_permanently: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => { + ($node: expr, $expected_payment_hash: expr, $payment_failed_permanently: expr $(, $expected_error_reason: expr, $expected_error_data: expr)*) => { #[allow(unused_mut)] let mut conditions = $crate::ln::functional_test_utils::PaymentFailedConditions::new(); $( - conditions = conditions.expected_htlc_error_data($expected_error_code, &$expected_error_data); + conditions = conditions.expected_htlc_error_data($expected_error_reason, &$expected_error_data); )* $crate::ln::functional_test_utils::expect_payment_failed_conditions(&$node, $expected_payment_hash, $payment_failed_permanently, conditions); }; @@ -2564,8 +2565,9 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( { assert!(error_code.is_some(), "expected error_code.is_some() = true"); assert!(error_data.is_some(), "expected error_data.is_some() = true"); + let reason: LocalHTLCFailureReason = error_code.unwrap().into(); if let Some((code, data)) = conditions.expected_htlc_error_data { - assert_eq!(error_code.unwrap(), code, "unexpected error code"); + assert_eq!(reason, code, "unexpected error code"); assert_eq!(&error_data.as_ref().unwrap()[..], data, "unexpected error data"); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 4cfd63ab2d6..f44fa30e1f8 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,6 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE}; use 
crate::chain::transaction::OutPoint; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; @@ -4814,7 +4815,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { // 100_000 msat as u64, followed by the height at which we failed back above let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]); + expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -7779,7 +7780,7 @@ pub fn test_check_htlc_underpaying() { // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32 let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]); + expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -8972,7 +8973,7 @@ pub fn test_bad_secret_hash() { } } - let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details + let expected_error_code = LocalHTLCFailureReason::IncorrectPaymentDetails; // Error data is the HTLC value (100,000) and current block height let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8]; diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 16d3d769f48..24beeec5912 100644 --- 
a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -46,7 +46,7 @@ pub mod wire; #[allow(dead_code)] // TODO(dual_funding): Remove once contribution to V2 channels is enabled. pub(crate) mod interactivetxs; -pub use onion_utils::create_payment_onion; +pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 27d9bd4d6e7..e829f119a11 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -16,7 +16,7 @@ use crate::ln::channelmanager::{BlindedFailure, BlindedForward, CLTV_FAR_FAR_AWA use crate::types::features::BlindedHopFeatures; use crate::ln::msgs; use crate::ln::onion_utils; -use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING, ONION_DATA_LEN}; +use crate::ln::onion_utils::{HTLCFailReason, ONION_DATA_LEN, LocalHTLCFailureReason}; use crate::sign::{NodeSigner, Recipient}; use crate::util::logger::Logger; @@ -29,7 +29,7 @@ use core::ops::Deref; #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct InboundHTLCErr { /// BOLT 4 error code. - pub err_code: u16, + pub reason: LocalHTLCFailureReason, /// Data attached to this error. pub err_data: Vec, /// Error message text. @@ -102,7 +102,7 @@ pub(super) fn create_fwd_pending_htlc_info( // unreachable right now since we checked it in `decode_update_add_htlc_onion`. InboundHTLCErr { msg: "Underflow calculating outbound amount or cltv value for blinded forward", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], } })?; @@ -112,13 +112,13 @@ pub(super) fn create_fwd_pending_htlc_info( onion_utils::Hop::Receive { .. 
} | onion_utils::Hop::BlindedReceive { .. } => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), }), onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), }), onion_utils::Hop::TrampolineForward { next_trampoline_hop_data, next_trampoline_hop_hmac, new_trampoline_packet_bytes, trampoline_shared_secret, .. } => { @@ -144,7 +144,7 @@ pub(super) fn create_fwd_pending_htlc_info( // unreachable right now since we checked it in `decode_update_add_htlc_onion`. InboundHTLCErr { msg: "Underflow calculating outbound amount or cltv value for blinded forward", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], } })?; @@ -191,7 +191,7 @@ pub(super) fn create_fwd_pending_htlc_info( Some(Ok(pubkey)) => pubkey, _ => return Err(InboundHTLCErr { msg: "Missing next Trampoline hop pubkey from intermediate Trampoline forwarding data", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolinePayload, err_data: Vec::new(), }), }; @@ -255,7 +255,7 @@ pub(super) fn create_recv_pending_htlc_info( ) .map_err(|()| { InboundHTLCErr { - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], msg: "Amount or cltv_expiry violated blinded payment constraints", } @@ -285,7 +285,7 @@ pub(super) fn create_recv_pending_htlc_info( ) .map_err(|()| { InboundHTLCErr { - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], msg: "Amount or cltv_expiry violated blinded payment constraints within Trampoline onion", } @@ 
-297,21 +297,21 @@ pub(super) fn create_recv_pending_htlc_info( }, onion_utils::Hop::Forward { .. } => { return Err(InboundHTLCErr { - err_code: 0x4000|22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), msg: "Got non final data with an HMAC of 0", }) }, onion_utils::Hop::BlindedForward { .. } => { return Err(InboundHTLCErr { - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], msg: "Got blinded non final data with an HMAC of 0", }) }, onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => { return Err(InboundHTLCErr { - err_code: 0x4000|22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), msg: "Got Trampoline non final data with an HMAC of 0", }) @@ -321,7 +321,7 @@ pub(super) fn create_recv_pending_htlc_info( if onion_cltv_expiry > cltv_expiry { return Err(InboundHTLCErr { msg: "Upstream node set CLTV to less than the CLTV set by the sender", - err_code: 18, + reason: LocalHTLCFailureReason::FinalIncorrectCLTVExpiry, err_data: cltv_expiry.to_be_bytes().to_vec() }) } @@ -337,7 +337,7 @@ pub(super) fn create_recv_pending_htlc_info( err_data.extend_from_slice(&amt_msat.to_be_bytes()); err_data.extend_from_slice(¤t_height.to_be_bytes()); return Err(InboundHTLCErr { - err_code: 0x4000 | 15, err_data, + reason: LocalHTLCFailureReason::PaymentClaimBuffer, err_data, msg: "The final CLTV expiry is too soon to handle", }); } @@ -346,7 +346,7 @@ pub(super) fn create_recv_pending_htlc_info( amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0))) { return Err(InboundHTLCErr { - err_code: 19, + reason: LocalHTLCFailureReason::FinalIncorrectHTLCAmount, err_data: amt_msat.to_be_bytes().to_vec(), msg: "Upstream node sent less than we were supposed to receive in payment", }); @@ -361,7 +361,7 @@ pub(super) fn create_recv_pending_htlc_info( let hashed_preimage = 
PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); if hashed_preimage != payment_hash { return Err(InboundHTLCErr { - err_code: 0x4000|22, + reason: LocalHTLCFailureReason::InvalidKeysendPreimage, err_data: Vec::new(), msg: "Payment preimage didn't match payment hash", }); @@ -389,7 +389,7 @@ pub(super) fn create_recv_pending_htlc_info( } } else { return Err(InboundHTLCErr { - err_code: 0x4000|0x2000|3, + reason: LocalHTLCFailureReason::PaymentSecretRequired, err_data: Vec::new(), msg: "We require payment_secrets", }); @@ -424,13 +424,13 @@ where { let (hop, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx - ).map_err(|e| { - let (err_code, err_data) = match e { - HTLCFailureMsg::Malformed(m) => (m.failure_code, Vec::new()), - HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason), + ).map_err(|(msg, failure_reason)| { + let (reason, err_data) = match msg { + HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), + HTLCFailureMsg::Relay(r) => (LocalHTLCFailureReason::InvalidOnionPayload, r.reason), }; let msg = "Failed to decode update add htlc onion"; - InboundHTLCErr { msg, err_code, err_data } + InboundHTLCErr { msg, reason, err_data } })?; Ok(match hop { onion_utils::Hop::Forward { shared_secret, .. 
} | @@ -442,17 +442,17 @@ where // Forward should always include the next hop details None => return Err(InboundHTLCErr { msg: "Failed to decode update add htlc onion", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), }), }; - if let Err((err_msg, code)) = check_incoming_htlc_cltv( + if let Err((err_msg, reason)) = check_incoming_htlc_cltv( cur_height, outgoing_cltv_value, msg.cltv_expiry, ) { return Err(InboundHTLCErr { msg: err_msg, - err_code: code, + reason, err_data: Vec::new(), }); } @@ -488,28 +488,28 @@ pub(super) struct NextPacketDetails { pub(super) fn decode_incoming_update_add_htlc_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, -) -> Result<(onion_utils::Hop, Option), HTLCFailureMsg> +) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> where NS::Target: NodeSigner, L::Target: Logger, { - let encode_malformed_error = |message: &str, err_code: u16| { + let encode_malformed_error = |message: &str, failure_reason: LocalHTLCFailureReason| { log_info!(logger, "Failed to accept/forward incoming HTLC: {}", message); - let (sha256_of_onion, failure_code) = if msg.blinding_point.is_some() || err_code == INVALID_ONION_BLINDING { - ([0; 32], INVALID_ONION_BLINDING) + let (sha256_of_onion, failure_reason) = if msg.blinding_point.is_some() || failure_reason == LocalHTLCFailureReason::InvalidOnionBlinding { + ([0; 32], LocalHTLCFailureReason::InvalidOnionBlinding) } else { - (Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(), err_code) + (Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(), failure_reason) }; - return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + return Err((HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion, - failure_code, - })); + failure_code: failure_reason.failure_code(), + }), failure_reason)); }; if let 
Err(_) = msg.onion_routing_packet.public_key { - return encode_malformed_error("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6); + return encode_malformed_error("invalid ephemeral pubkey", LocalHTLCFailureReason::InvalidOnionKey); } if msg.onion_routing_packet.version != 0 { @@ -519,22 +519,22 @@ where //receiving node would have to brute force to figure out which version was put in the //packet by the node that send us the message, in the case of hashing the hop_data, the //node knows the HMAC matched, so they already know what is there... - return encode_malformed_error("Unknown onion packet version", 0x8000 | 0x4000 | 4); + return encode_malformed_error("Unknown onion packet version", LocalHTLCFailureReason::InvalidOnionVersion) } - let encode_relay_error = |message: &str, err_code: u16, shared_secret: [u8; 32], trampoline_shared_secret: Option<[u8; 32]>, data: &[u8]| { + let encode_relay_error = |message: &str, reason: LocalHTLCFailureReason, shared_secret: [u8; 32], trampoline_shared_secret: Option<[u8; 32]>, data: &[u8]| { if msg.blinding_point.is_some() { - return encode_malformed_error(message, INVALID_ONION_BLINDING) + return encode_malformed_error(message, LocalHTLCFailureReason::InvalidOnionBlinding) } log_info!(logger, "Failed to accept/forward incoming HTLC: {}", message); - let failure = HTLCFailReason::reason(err_code, data.to_vec()) + let failure = HTLCFailReason::reason(reason, data.to_vec()) .get_encrypted_failure_packet(&shared_secret, &trampoline_shared_secret); - return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + return Err((HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, reason: failure.data, - })); + }), reason)); }; let next_hop = match onion_utils::decode_next_payment_hop( @@ -542,11 +542,11 @@ where msg.payment_hash, msg.blinding_point, node_signer ) { Ok(res) => res, - Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { - return encode_malformed_error(err_msg, 
err_code); + Err(onion_utils::OnionDecodeErr::Malformed { err_msg, reason }) => { + return encode_malformed_error(err_msg, reason); }, - Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code, shared_secret, trampoline_shared_secret }) => { - return encode_relay_error(err_msg, err_code, shared_secret.secret_bytes(), trampoline_shared_secret.map(|tss| tss.secret_bytes()), &[0; 0]); + Err(onion_utils::OnionDecodeErr::Relay { err_msg, reason, shared_secret, trampoline_shared_secret }) => { + return encode_relay_error(err_msg, reason, shared_secret.secret_bytes(), trampoline_shared_secret.map(|tss| tss.secret_bytes()), &[0; 0]); }, }; @@ -566,7 +566,7 @@ where Ok((amt, cltv)) => (amt, cltv), Err(()) => { return encode_relay_error("Underflow calculating outbound amount or cltv value for blinded forward", - INVALID_ONION_BLINDING, shared_secret.secret_bytes(), None, &[0; 32]); + LocalHTLCFailureReason::InvalidOnionBlinding, shared_secret.secret_bytes(), None, &[0; 32]); } }; let next_packet_pubkey = onion_utils::next_hop_pubkey(&secp_ctx, @@ -594,21 +594,19 @@ where pub(super) fn check_incoming_htlc_cltv( cur_height: u32, outgoing_cltv_value: u32, cltv_expiry: u32 -) -> Result<(), (&'static str, u16)> { +) -> Result<(), (&'static str, LocalHTLCFailureReason)> { if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { - return Err(( - "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", - 0x1000 | 13, // incorrect_cltv_expiry - )); + return Err(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", + LocalHTLCFailureReason::IncorrectCLTVExpiry)); } // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, // but we want to be robust wrt to counterparty packet sanitization (see // HTLC_FAIL_BACK_BUFFER rationale). 
- if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon - return Err(("CLTV expiry is too close", 0x1000 | 14)); + if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { + return Err(("CLTV expiry is too close", LocalHTLCFailureReason::CLTVExpiryTooSoon)); } - if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far - return Err(("CLTV expiry is too far in the future", 21)); + if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { + return Err(("CLTV expiry is too far in the future", LocalHTLCFailureReason::CLTVExpiryTooFar)); } // If the HTLC expires ~now, don't bother trying to forward it to our // counterparty. They should fail it anyway, but we don't want to bother with @@ -619,7 +617,7 @@ pub(super) fn check_incoming_htlc_cltv( // but there is no need to do that, and since we're a bit conservative with our // risk threshold it just results in failing to forward payments. if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 { - return Err(("Outgoing CLTV value is too soon", 0x1000 | 14)); + return Err(("Outgoing CLTV value is too soon", LocalHTLCFailureReason::OutgoingCLTVTooSoon)); } Ok(()) diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 4e277adbe30..eac65d095ca 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -17,7 +17,7 @@ use crate::events::{Event, HTLCDestination, PathFailure, PaymentFailureReason}; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields}; -use crate::ln::onion_utils; +use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::routing::gossip::{NetworkUpdate, RoutingFees}; use 
crate::routing::router::{get_route, PaymentParameters, Route, RouteParameters, RouteHint, RouteHintHop, Path, TrampolineHop, BlindedTail, RouteHop}; use crate::types::features::{InitFeatures, Bolt11InvoiceFeatures}; @@ -51,7 +51,7 @@ use crate::ln::onion_utils::{construct_trampoline_onion_keys, construct_trampoli use super::msgs::OnionErrorPacket; -fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option) +fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: FnMut(), { @@ -68,7 +68,7 @@ fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, fn run_onion_failure_test_with_fail_intercept( _name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, mut callback_msg: F1, mut callback_fail: F2, - mut callback_node: F3, expected_retryable: bool, expected_error_code: Option, + mut callback_node: F3, expected_retryable: bool, expected_error_reason: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option, ) @@ -188,7 +188,10 @@ fn run_onion_failure_test_with_fail_intercept( assert_eq!(events.len(), 2); if let &Event::PaymentPathFailed { ref payment_failed_permanently, ref short_channel_id, ref error_code, failure: PathFailure::OnPath { ref network_update }, .. 
} = &events[0] { assert_eq!(*payment_failed_permanently, !expected_retryable); - assert_eq!(*error_code, expected_error_code); + assert_eq!(error_code.is_none(), expected_error_reason.is_none()); + if let Some(expected_reason) = expected_error_reason { + assert_eq!(expected_reason, error_code.unwrap().into()) + } if expected_channel_update.is_some() { match network_update { Some(update) => match update { @@ -277,11 +280,6 @@ impl Writeable for BogusOnionHopData { } } -const BADONION: u16 = 0x8000; -const PERM: u16 = 0x4000; -const NODE: u16 = 0x2000; -const UPDATE: u16 = 0x1000; - #[test] fn test_fee_failures() { // Tests that the fee required when forwarding remains consistent over time. This was @@ -314,7 +312,7 @@ fn test_fee_failures() { let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), + }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate @@ -380,7 +378,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus. 
new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, true, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -399,7 +397,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, false, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -411,20 +409,20 @@ fn test_onion_failure() { // and tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), NODE|2, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryNodeFailure, &[0;0]); msg.reason = failure.data; - }, ||{}, true, Some(NODE|2), 
Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // and tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), NODE|2, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryNodeFailure, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); + }, true, Some(LocalHTLCFailureReason::TemporaryNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure @@ -433,19 +431,19 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); + let failure = 
onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentNodeFailure, &[0;0]); msg.reason = failure.data; - }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::PermanentNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentNodeFailure, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); + }, false, Some(LocalHTLCFailureReason::PermanentNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure @@ -454,34 +452,34 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], 
&session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredNodeFeature, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, true, Some(LocalHTLCFailureReason::RequiredNodeFeature), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredNodeFeature, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); + }, false, Some(LocalHTLCFailureReason::RequiredNodeFeature), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // Our immediate peer sent UpdateFailMalformedHTLC 
because it couldn't understand the onion in // the UpdateAddHTLC that we sent. let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(BADONION|PERM|4), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionVersion), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(BADONION|PERM|5), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionHMAC), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(BADONION|PERM|6), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionKey), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -495,9 +493,9 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryChannelFailure, &err_data); msg.reason = failure.data; - }, ||{}, true, Some(UPDATE|7), + }, ||{}, true, 
Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); @@ -508,9 +506,9 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryChannelFailure, &err_data_without_type); msg.reason = failure.data; - }, ||{}, true, Some(UPDATE|7), + }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); @@ -520,10 +518,10 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|8, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentChannelFailure, &[0;0]); msg.reason = failure.data; // short_channel_id from the processing node - }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::PermanentChannelFailure), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; 
run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -531,15 +529,15 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|9, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredChannelFeature, &[0;0]); msg.reason = failure.data; // short_channel_id from the processing node - }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::RequiredChannelFeature), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let mut bogus_route = route.clone(); bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; - run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), + run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::UnknownNextPeer), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; @@ -549,7 +547,7 @@ fn test_onion_failure() { let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].hops.len(); bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; - 
run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), + run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::AmountBelowMinimum), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); @@ -568,13 +566,13 @@ fn test_onion_failure() { let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("incorrect_cltv_expiry", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value msg.cltv_expiry -= 1; - }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, || {}, true, Some(LocalHTLCFailureReason::IncorrectCLTVExpiry), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("expiry_too_soon", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -582,13 +580,13 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); 
connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, ||{}, true, Some(UPDATE|14), + }, ||{}, true, Some(LocalHTLCFailureReason::CLTVExpiryTooSoon), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|15), None, None, None); + }, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -596,7 +594,7 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(0x4000 | 15), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); + }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -609,7 +607,7 @@ fn test_onion_failure() { } } } - }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { 
nodes[1].node.process_pending_update_add_htlcs(); @@ -623,14 +621,14 @@ fn test_onion_failure() { } } } - }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); - }, true, Some(UPDATE|7), + }, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { @@ -641,7 +639,7 @@ fn test_onion_failure() { } nodes[1].node.get_and_clear_pending_msg_events(); nodes[2].node.get_and_clear_pending_msg_events(); - }, true, Some(UPDATE|20), + }, true, Some(LocalHTLCFailureReason::ChannelDisabled), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); @@ -658,17 +656,17 @@ fn test_onion_failure() { let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); msg.cltv_expiry = htlc_cltv; msg.onion_routing_packet = onion_packet; - }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, 
Some(LocalHTLCFailureReason::CLTVExpiryTooFar), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test_with_fail_intercept("mpp_timeout", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // Tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), 23, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::MPPTimeout, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(23), None, None, None); + }, true, Some(LocalHTLCFailureReason::MPPTimeout), None, None, None); run_onion_failure_test_with_fail_intercept("bogus err packet with valid hmac", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -723,7 +721,7 @@ fn test_onion_failure() { onion_utils::test_crypt_failure_packet( &onion_keys[0].shared_secret.as_ref(), &mut onion_error); msg.reason = onion_error.data; - }, || {}, true, Some(0x1000|7), + }, || {}, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: false, @@ -751,7 +749,7 @@ fn test_onion_failure() { onion_utils::test_crypt_failure_packet( &onion_keys[1].shared_secret.as_ref(), &mut onion_error); msg.reason = onion_error.data; - }, || nodes[2].node.fail_htlc_backwards(&payment_hash), true, Some(0x1000|7), + }, || nodes[2].node.fail_htlc_backwards(&payment_hash), true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id: 
channels[1].0.contents.short_channel_id, is_permanent: false, @@ -902,12 +900,12 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { // We'll be attempting to route payments using the default ChannelUpdate for channels. This will // lead to onion failures at the first hop once we update the ChannelConfig for the // second hop. - let expect_onion_failure = |name: &str, error_code: u16| { + let expect_onion_failure = |name: &str, error_reason: LocalHTLCFailureReason| { let short_channel_id = channel_to_update.1; let network_update = NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }; run_onion_failure_test( name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, - Some(error_code), Some(network_update), Some(short_channel_id), + Some(error_reason), Some(network_update), Some(short_channel_id), Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; @@ -937,7 +935,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { // Connect a block, which should expire the previous config, leading to a failure when // forwarding the HTLC. expire_prev_config(); - expect_onion_failure("fee_insufficient", UPDATE|12); + expect_onion_failure("fee_insufficient", LocalHTLCFailureReason::FeeInsufficient); // Redundant updates should not trigger a new ChannelUpdate. 
assert!(update_and_get_channel_update(&config, false, None, false).is_none()); @@ -951,14 +949,14 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { config.forwarding_fee_base_msat = default_config.forwarding_fee_base_msat; config.cltv_expiry_delta = u16::max_value(); assert!(update_and_get_channel_update(&config, true, Some(&msg), true).is_some()); - expect_onion_failure("incorrect_cltv_expiry", UPDATE|13); + expect_onion_failure("incorrect_cltv_expiry", LocalHTLCFailureReason::IncorrectCLTVExpiry); // Reset the proportional fee and increase the CLTV expiry delta which should trigger a new // ChannelUpdate. config.cltv_expiry_delta = default_config.cltv_expiry_delta; config.forwarding_fee_proportional_millionths = u32::max_value(); assert!(update_and_get_channel_update(&config, true, Some(&msg), true).is_some()); - expect_onion_failure("fee_insufficient", UPDATE|12); + expect_onion_failure("fee_insufficient", LocalHTLCFailureReason::FeeInsufficient); // To test persistence of the updated config, we'll re-initialize the ChannelManager. 
let config_after_restart = { @@ -1401,9 +1399,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { }; let failure_code = failure_code.into(); - let permanent_flag = 0x4000; - let permanent_fail = (failure_code & permanent_flag) != 0; - expect_payment_failed!(nodes[0], payment_hash, permanent_fail, failure_code, failure_data); + expect_payment_failed!(nodes[0], payment_hash, failure_code.is_permanent(), failure_code, failure_data); } @@ -1514,7 +1510,7 @@ fn test_phantom_onion_hmac_failure() { let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) .blamed_chan_closed(true) - .expected_htlc_error_data(0x8000 | 0x4000 | 5, &sha256_of_onion); + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionHMAC, &sha256_of_onion); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1592,7 +1588,7 @@ fn test_phantom_invalid_onion_payload() { let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) .blamed_chan_closed(true) - .expected_htlc_error_data(0x4000 | 22, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionPayload, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } @@ -1650,7 +1646,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { let error_data = expected_cltv.to_be_bytes().to_vec(); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(18, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1699,7 +1695,7 @@ fn test_phantom_failure_too_low_cltv() { ); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x4000 | 15, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectPaymentDetails, 
&error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } @@ -1750,7 +1746,7 @@ fn test_phantom_failure_modified_cltv() { err_data.extend_from_slice(&0u16.to_be_bytes()); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x1000 | 13, &err_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectCLTVExpiry, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1797,7 +1793,7 @@ fn test_phantom_failure_expires_too_soon() { let err_data = 0u16.to_be_bytes(); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x1000 | 14, &err_data); + .expected_htlc_error_data(LocalHTLCFailureReason::CLTVExpiryTooSoon, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1844,7 +1840,7 @@ fn test_phantom_failure_too_low_recv_amt() { error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height.to_be_bytes()); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x4000 | 15, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectPaymentDetails, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } @@ -1902,7 +1898,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { let err_data = 0u16.to_be_bytes(); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x1000 | 7, &err_data); + .expected_htlc_error_data(LocalHTLCFailureReason::TemporaryChannelFailure, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1952,6 +1948,6 @@ fn test_phantom_failure_reject_payment() { error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height.to_be_bytes()); 
let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x4000 | 15, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectPaymentDetails, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index de3d0c69fe5..88d6d186e62 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -684,8 +684,6 @@ pub(crate) fn set_max_path_length( /// the hops can be of variable length. pub(crate) const ONION_DATA_LEN: usize = 20 * 65; -pub(super) const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24; - #[inline] fn shift_slice_right(arr: &mut [u8], amt: usize) { for i in (amt..arr.len()).rev() { @@ -887,17 +885,18 @@ fn process_chacha(key: &[u8; 32], packet: &mut [u8]) { } fn build_unencrypted_failure_packet( - shared_secret: &[u8], failure_type: u16, failure_data: &[u8], + shared_secret: &[u8], failure_reason: LocalHTLCFailureReason, failure_data: &[u8], ) -> OnionErrorPacket { assert_eq!(shared_secret.len(), 32); assert!(failure_data.len() <= 256 - 2); let um = gen_um_from_shared_secret(&shared_secret); + let failure_code = failure_reason.failure_code(); let failuremsg = { let mut res = Vec::with_capacity(2 + failure_data.len()); - res.push(((failure_type >> 8) & 0xff) as u8); - res.push(((failure_type >> 0) & 0xff) as u8); + res.push(((failure_code >> 8) & 0xff) as u8); + res.push(((failure_code >> 0) & 0xff) as u8); res.extend_from_slice(&failure_data[..]); res }; @@ -916,10 +915,10 @@ fn build_unencrypted_failure_packet( } pub(super) fn build_failure_packet( - shared_secret: &[u8], failure_type: u16, failure_data: &[u8], + shared_secret: &[u8], failure_reason: LocalHTLCFailureReason, failure_data: &[u8], ) -> OnionErrorPacket { let mut onion_error_packet = - build_unencrypted_failure_packet(shared_secret, failure_type, failure_data); + 
build_unencrypted_failure_packet(shared_secret, failure_reason, failure_data); crypt_failure_packet(shared_secret, &mut onion_error_packet); @@ -1027,11 +1026,6 @@ where let mut _error_packet_ret = None; let mut is_from_final_non_blinded_node = false; - const BADONION: u16 = 0x8000; - const PERM: u16 = 0x4000; - const NODE: u16 = 0x2000; - const UPDATE: u16 = 0x1000; - enum ErrorHop<'a> { RouteHop(&'a RouteHop), TrampolineHop(&'a TrampolineHop), @@ -1371,6 +1365,249 @@ where } } +const BADONION: u16 = 0x8000; +const PERM: u16 = 0x4000; +const NODE: u16 = 0x2000; +const UPDATE: u16 = 0x1000; + +/// The reason that a HTLC was failed by the local node. These errors either represent direct, +/// human-readable mappings of BOLT04 error codes or provide additional information that would +/// otherwise be erased by the BOLT04 error code. +/// +/// For example: +/// [`Self::FeeInsufficient`] is a direct representation of its underlying BOLT04 error code. +/// [`Self::PrivateChannelForward`] provides additional information that is not provided by its +/// BOLT04 error code. +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum LocalHTLCFailureReason { + /// There has been a temporary processing failure on the node which may resolve on retry. + TemporaryNodeFailure, + /// There has been a permanent processing failure on the node which will not resolve on retry. + PermanentNodeFailure, + /// The HTLC does not implement a feature that is required by our node. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + RequiredNodeFeature, + /// The onion version specified by the HTLC packet is unknown to our node. + InvalidOnionVersion, + /// The integrity of the HTLC packet cannot be verified because it has an invalid HMAC. + InvalidOnionHMAC, + /// The onion packet has an invalid ephemeral key, so the HTLC cannot be processed. + InvalidOnionKey, + /// A temporary forwarding error has occurred which may resolve on retry.
+ TemporaryChannelFailure, + /// A permanent forwarding error has occurred which will not resolve on retry. + PermanentChannelFailure, + /// The HTLC does not implement a feature that is required by our channel for processing. + RequiredChannelFeature, + /// The HTLC's target outgoing channel is not known to our node. + UnknownNextPeer, + /// The HTLC amount is below our advertised htlc_minimum_msat. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + AmountBelowMinimum, + /// The HTLC does not pay sufficient fees. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + FeeInsufficient, + /// The HTLC does not meet the cltv_expiry_delta advertised by our node, set by + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + /// + /// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta + IncorrectCLTVExpiry, + /// The HTLC expires too close to the current block height to be safely processed. + CLTVExpiryTooSoon, + /// A payment was made to our node that either had incorrect payment information, or was + /// unknown to us. + IncorrectPaymentDetails, + /// The HTLC's expiry is less than the expiry height specified by the sender. + /// + /// The forwarding node has either tampered with this value, or the sending node has an + /// old best block height. + FinalIncorrectCLTVExpiry, + /// The HTLC's amount is less than the amount specified by the sender. + /// + /// The forwarding node has tampered with this value, or has a bug in its implementation. + FinalIncorrectHTLCAmount, + /// The channel has been marked as disabled because the channel peer is offline. + ChannelDisabled, + /// The HTLC expires too far in the future, so it is rejected to avoid the worst-case outcome + /// of funds being held for extended periods of time. + /// + /// Limit set by [`crate::ln::channelmanager::CLTV_FAR_FAR_AWAY`].
+ CLTVExpiryTooFar, + /// The HTLC payload contained in the onion packet could not be understood by our node. + InvalidOnionPayload, + /// The total amount for a multi-part payment did not arrive in time, so the HTLCs partially + /// paying the amount were canceled. + MPPTimeout, + /// Our node was selected as part of a blinded path, but the packet we received was not + /// properly constructed, or had incorrect values for the blinded path. + /// + /// This may happen if the forwarding node tampered with the HTLC or the sender or recipient + /// implementations have a bug. + InvalidOnionBlinding, + /// A HTLC forward was failed back rather than forwarded on the proposed outgoing channel + /// because its expiry is too close to the current block height to leave time to safely claim + /// it on chain if the channel force closes. + ForwardExpiryBuffer, + /// The HTLC was failed because it has invalid trampoline forwarding information. + InvalidTrampolineForward, + /// A HTLC receive was failed back rather than claimed because its expiry is too close to + /// the current block height to leave time to safely claim it on chain if the channel force + /// closes. + PaymentClaimBuffer, + /// The HTLC was failed because accepting it would push our commitment's total amount of dust + /// HTLCs over the limit that we allow to be burned to miner fees if the channel closed while + /// they are unresolved. + DustLimitHolder, + /// The HTLC was failed because accepting it would push our counterparty's total amount of + /// dust (small) HTLCs over the limit that we allow to be burned to miner fees if the channel + /// closes while they are unresolved. + DustLimitCounterparty, + /// The HTLC was failed because it would drop the remote party's channel balance such that it + /// cannot cover the fees it is required to pay at various fee rates. This buffer is maintained + /// so that channels can always maintain reasonable fee rates.
+ FeeSpikeBuffer, + /// The HTLC that requested to be forwarded over a private channel was rejected to prevent + /// revealing the existence of the channel. + PrivateChannelForward, + /// The HTLC was failed because it made a request to forward over the real channel ID of a + /// channel that implements `option_scid_alias` which is a privacy feature to prevent the + /// real channel ID from being known. + RealSCIDForward, + /// The HTLC was rejected because our channel has not yet reached sufficient depth to be used. + ChannelNotReady, + /// A keysend payment with a preimage that did not match the HTLC hash was rejected. + InvalidKeysendPreimage, + /// The HTLC was failed because it had an invalid trampoline payload. + InvalidTrampolinePayload, + /// A payment was rejected because it did not include the correct payment secret from an + /// invoice. + PaymentSecretRequired, + /// The HTLC was failed because its expiry is too close to the current block height, and we + /// expect that it will immediately be failed back by our downstream peer. + OutgoingCLTVTooSoon, + /// The HTLC was pending on a channel which is now in the process of being closed. + /// It was not fully committed to, so can just be immediately failed back. + DroppedPending, + /// The HTLC was failed back because its channel is closed and it has timed out on chain. + ChannelClosed, + /// UnknownFailureCode represents BOLT04 failure codes that we are not familiar with. We will + /// encounter this if: + /// - A peer sends us a new failure code that LDK has not yet been upgraded to understand. + /// - We read a deprecated failure code from disk that LDK no longer uses. + /// + /// See <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages> + /// for latest defined error codes. + UnknownFailureCode { + /// The BOLT 04 failure code.
+ code: u16, + }, +} + +impl LocalHTLCFailureReason { + pub(super) fn failure_code(&self) -> u16 { + match self { + Self::TemporaryNodeFailure | Self::ForwardExpiryBuffer => NODE | 2, + Self::PermanentNodeFailure => PERM | NODE | 2, + Self::RequiredNodeFeature | Self::PaymentSecretRequired => PERM | NODE | 3, + Self::InvalidOnionVersion => BADONION | PERM | 4, + Self::InvalidOnionHMAC => BADONION | PERM | 5, + Self::InvalidOnionKey => BADONION | PERM | 6, + Self::TemporaryChannelFailure + | Self::DustLimitHolder + | Self::DustLimitCounterparty + | Self::FeeSpikeBuffer + | Self::ChannelNotReady => UPDATE | 7, + Self::PermanentChannelFailure | Self::ChannelClosed | Self::DroppedPending => PERM | 8, + Self::RequiredChannelFeature => PERM | 9, + Self::UnknownNextPeer + | Self::PrivateChannelForward + | Self::RealSCIDForward + | Self::InvalidTrampolineForward => PERM | 10, + Self::AmountBelowMinimum => UPDATE | 11, + Self::FeeInsufficient => UPDATE | 12, + Self::IncorrectCLTVExpiry => UPDATE | 13, + Self::CLTVExpiryTooSoon | Self::OutgoingCLTVTooSoon => UPDATE | 14, + Self::IncorrectPaymentDetails | Self::PaymentClaimBuffer => PERM | 15, + Self::FinalIncorrectCLTVExpiry => 18, + Self::FinalIncorrectHTLCAmount => 19, + Self::ChannelDisabled => UPDATE | 20, + Self::CLTVExpiryTooFar => 21, + Self::InvalidOnionPayload + | Self::InvalidTrampolinePayload + | Self::InvalidKeysendPreimage => PERM | 22, + Self::MPPTimeout => 23, + Self::InvalidOnionBlinding => BADONION | PERM | 24, + Self::UnknownFailureCode { code } => *code, + } + } + + pub(super) fn is_temporary(&self) -> bool { + self.failure_code() & UPDATE == UPDATE + } + + #[cfg(test)] + pub(super) fn is_permanent(&self) -> bool { + self.failure_code() & PERM == PERM + } +} + +impl Into<LocalHTLCFailureReason> for u16 { + fn into(self) -> LocalHTLCFailureReason { + if self == (NODE | 2) { + LocalHTLCFailureReason::TemporaryNodeFailure + } else if self == (PERM | NODE | 2) { + LocalHTLCFailureReason::PermanentNodeFailure + } else if self == (PERM
| NODE | 3) { + LocalHTLCFailureReason::RequiredNodeFeature + } else if self == (BADONION | PERM | 4) { + LocalHTLCFailureReason::InvalidOnionVersion + } else if self == (BADONION | PERM | 5) { + LocalHTLCFailureReason::InvalidOnionHMAC + } else if self == (BADONION | PERM | 6) { + LocalHTLCFailureReason::InvalidOnionKey + } else if self == (UPDATE | 7) { + LocalHTLCFailureReason::TemporaryChannelFailure + } else if self == (PERM | 8) { + LocalHTLCFailureReason::PermanentChannelFailure + } else if self == (PERM | 9) { + LocalHTLCFailureReason::RequiredChannelFeature + } else if self == (PERM | 10) { + LocalHTLCFailureReason::UnknownNextPeer + } else if self == (UPDATE | 11) { + LocalHTLCFailureReason::AmountBelowMinimum + } else if self == (UPDATE | 12) { + LocalHTLCFailureReason::FeeInsufficient + } else if self == (UPDATE | 13) { + LocalHTLCFailureReason::IncorrectCLTVExpiry + } else if self == (UPDATE | 14) { + LocalHTLCFailureReason::CLTVExpiryTooSoon + } else if self == (PERM | 15) { + LocalHTLCFailureReason::IncorrectPaymentDetails + } else if self == 18 { + LocalHTLCFailureReason::FinalIncorrectCLTVExpiry + } else if self == 19 { + LocalHTLCFailureReason::FinalIncorrectHTLCAmount + } else if self == (UPDATE | 20) { + LocalHTLCFailureReason::ChannelDisabled + } else if self == 21 { + LocalHTLCFailureReason::CLTVExpiryTooFar + } else if self == (PERM | 22) { + LocalHTLCFailureReason::InvalidOnionPayload + } else if self == 23 { + LocalHTLCFailureReason::MPPTimeout + } else if self == (BADONION | PERM | 24) { + LocalHTLCFailureReason::InvalidOnionBlinding + } else { + LocalHTLCFailureReason::UnknownFailureCode { code: self } + } + } +} + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(PartialEq))] pub(super) struct HTLCFailReason(HTLCFailReasonRepr); @@ -1424,51 +1661,78 @@ impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, ); impl HTLCFailReason { - #[rustfmt::skip] - pub(super) fn reason(failure_code: u16, 
data: Vec) -> Self { - const BADONION: u16 = 0x8000; - const PERM: u16 = 0x4000; - const NODE: u16 = 0x2000; - const UPDATE: u16 = 0x1000; - - if failure_code == 2 | NODE { debug_assert!(data.is_empty()) } - else if failure_code == 2 | PERM | NODE { debug_assert!(data.is_empty()) } - else if failure_code == 3 | PERM | NODE { debug_assert!(data.is_empty()) } - else if failure_code == 4 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } - else if failure_code == 5 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } - else if failure_code == 6 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } - else if failure_code == 7 | UPDATE { - debug_assert_eq!(data.len() - 2, u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize) } - else if failure_code == 8 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 9 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 10 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 11 | UPDATE { - debug_assert_eq!(data.len() - 2 - 8, u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize) } - else if failure_code == 12 | UPDATE { - debug_assert_eq!(data.len() - 2 - 8, u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize) } - else if failure_code == 13 | UPDATE { - debug_assert_eq!(data.len() - 2 - 4, u16::from_be_bytes(data[4..6].try_into().unwrap()) as usize) } - else if failure_code == 14 | UPDATE { - debug_assert_eq!(data.len() - 2, u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize) } - else if failure_code == 15 | PERM { debug_assert_eq!(data.len(), 12) } - else if failure_code == 18 { debug_assert_eq!(data.len(), 4) } - else if failure_code == 19 { debug_assert_eq!(data.len(), 8) } - else if failure_code == 20 | UPDATE { - debug_assert_eq!(data.len() - 2 - 2, u16::from_be_bytes(data[2..4].try_into().unwrap()) as usize) } - else if failure_code == 21 { debug_assert!(data.is_empty()) } - else if failure_code == 22 | PERM { debug_assert!(data.len() <= 
11) } - else if failure_code == 23 { debug_assert!(data.is_empty()) } - else if failure_code == INVALID_ONION_BLINDING { debug_assert_eq!(data.len(), 32) } - else if failure_code & BADONION != 0 { - // We set some bogus BADONION failure codes in test, so ignore unknown ones. + pub(super) fn reason(failure_reason: LocalHTLCFailureReason, data: Vec) -> Self { + match failure_reason { + LocalHTLCFailureReason::TemporaryNodeFailure + | LocalHTLCFailureReason::ForwardExpiryBuffer => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::PermanentNodeFailure => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::RequiredNodeFeature + | LocalHTLCFailureReason::PaymentSecretRequired => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::InvalidOnionVersion => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::InvalidOnionHMAC => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::InvalidOnionKey => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::TemporaryChannelFailure + | LocalHTLCFailureReason::DustLimitHolder + | LocalHTLCFailureReason::DustLimitCounterparty + | LocalHTLCFailureReason::FeeSpikeBuffer + | LocalHTLCFailureReason::ChannelNotReady => { + debug_assert_eq!( + data.len() - 2, + u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize + ) + }, + LocalHTLCFailureReason::PermanentChannelFailure + | LocalHTLCFailureReason::ChannelClosed + | LocalHTLCFailureReason::DroppedPending => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::RequiredChannelFeature => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::UnknownNextPeer + | LocalHTLCFailureReason::PrivateChannelForward + | LocalHTLCFailureReason::RealSCIDForward + | LocalHTLCFailureReason::InvalidTrampolineForward => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::AmountBelowMinimum => debug_assert_eq!( + data.len() - 2 - 8, + u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::FeeInsufficient => 
debug_assert_eq!( + data.len() - 2 - 8, + u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::IncorrectCLTVExpiry => debug_assert_eq!( + data.len() - 2 - 4, + u16::from_be_bytes(data[4..6].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::CLTVExpiryTooSoon + | LocalHTLCFailureReason::OutgoingCLTVTooSoon => debug_assert_eq!( + data.len() - 2, + u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::IncorrectPaymentDetails + | LocalHTLCFailureReason::PaymentClaimBuffer => debug_assert_eq!(data.len(), 12), + LocalHTLCFailureReason::FinalIncorrectCLTVExpiry => debug_assert_eq!(data.len(), 4), + LocalHTLCFailureReason::FinalIncorrectHTLCAmount => debug_assert_eq!(data.len(), 8), + LocalHTLCFailureReason::ChannelDisabled => debug_assert_eq!( + data.len() - 2 - 2, + u16::from_be_bytes(data[2..4].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::CLTVExpiryTooFar => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::InvalidOnionPayload + | LocalHTLCFailureReason::InvalidTrampolinePayload + | LocalHTLCFailureReason::InvalidKeysendPreimage => debug_assert!(data.len() <= 11), + LocalHTLCFailureReason::MPPTimeout => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::InvalidOnionBlinding => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::UnknownFailureCode { code } => { + // We set some bogus BADONION failure codes in tests, so allow unknown BADONION. 
+ if code & BADONION == 0 { + debug_assert!(false, "Unknown failure code: {}", code) + } + }, } - else { debug_assert!(false, "Unknown failure code: {}", failure_code) } - Self(HTLCFailReasonRepr::Reason { failure_code, data }) + Self(HTLCFailReasonRepr::Reason { failure_code: failure_reason.failure_code(), data }) } - pub(super) fn from_failure_code(failure_code: u16) -> Self { - Self::reason(failure_code, Vec::new()) + pub(super) fn from_failure_code(failure_reason: LocalHTLCFailureReason) -> Self { + Self::reason(failure_reason, Vec::new()) } pub(super) fn from_msg(msg: &msgs::UpdateFailHTLC) -> Self { @@ -1487,15 +1751,23 @@ impl HTLCFailReason { ) -> msgs::OnionErrorPacket { match self.0 { HTLCFailReasonRepr::Reason { ref failure_code, ref data } => { + let failure_code = *failure_code; if let Some(secondary_shared_secret) = secondary_shared_secret { - let mut packet = - build_failure_packet(secondary_shared_secret, *failure_code, &data[..]); + let mut packet = build_failure_packet( + secondary_shared_secret, + failure_code.into(), + &data[..], + ); crypt_failure_packet(incoming_packet_shared_secret, &mut packet); packet } else { - build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]) + build_failure_packet( + incoming_packet_shared_secret, + failure_code.into(), + &data[..], + ) } }, HTLCFailReasonRepr::LightningError { ref err } => { @@ -1680,14 +1952,14 @@ impl Hop { #[derive(Debug)] pub(crate) enum OnionDecodeErr { /// The HMAC of the onion packet did not match the hop data. - Malformed { err_msg: &'static str, err_code: u16 }, + Malformed { err_msg: &'static str, reason: LocalHTLCFailureReason }, /// We failed to decode the onion payload. /// /// If the payload we failed to decode belonged to a Trampoline onion, following the successful /// decoding of the outer onion, the trampoline_shared_secret field should be set. 
Relay { err_msg: &'static str, - err_code: u16, + reason: LocalHTLCFailureReason, shared_secret: SharedSecret, trampoline_shared_secret: Option, }, @@ -1738,12 +2010,12 @@ where return Err(OnionDecodeErr::Malformed { err_msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, }); } Err(OnionDecodeErr::Relay { err_msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, shared_secret, trampoline_shared_secret: None, }) @@ -1838,7 +2110,7 @@ where if hop_data.intro_node_blinding_point.is_some() { return Err(OnionDecodeErr::Relay { err_msg: "Non-final intro node Trampoline onion data provided to us as last hop", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, shared_secret, trampoline_shared_secret: Some(SharedSecret::from_bytes( trampoline_shared_secret, @@ -1847,14 +2119,14 @@ where } Err(OnionDecodeErr::Malformed { err_msg: "Non-final Trampoline onion data provided to us as last hop", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, }) }, Ok((msgs::InboundTrampolinePayload::BlindedReceive(hop_data), Some(_))) => { if hop_data.intro_node_blinding_point.is_some() { return Err(OnionDecodeErr::Relay { err_msg: "Final Trampoline intro node onion data provided to us as intermediate hop", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolinePayload, shared_secret, trampoline_shared_secret: Some(SharedSecret::from_bytes( trampoline_shared_secret, @@ -1864,13 +2136,13 @@ where Err(OnionDecodeErr::Malformed { err_msg: "Final Trampoline onion data provided to us as intermediate hop", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, }) }, Ok((msgs::InboundTrampolinePayload::Forward(_), None)) => { Err(OnionDecodeErr::Relay { 
err_msg: "Non-final Trampoline onion data provided to us as last hop", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolinePayload, shared_secret, trampoline_shared_secret: Some(SharedSecret::from_bytes( trampoline_shared_secret, @@ -1881,7 +2153,7 @@ where Err(OnionDecodeErr::Relay { err_msg: "Final Trampoline onion data provided to us as intermediate hop", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolinePayload, shared_secret, trampoline_shared_secret: Some(SharedSecret::from_bytes( trampoline_shared_secret, @@ -1895,12 +2167,12 @@ where if blinding_point.is_some() { return Err(OnionDecodeErr::Malformed { err_msg: "Intermediate Node OnionHopData provided for us as a final node", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, }); } Err(OnionDecodeErr::Relay { err_msg: "Intermediate Node OnionHopData provided for us as a final node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, shared_secret, trampoline_shared_secret: None, }) @@ -2029,7 +2301,7 @@ fn decode_next_hop, N: NextPacketBytes>( if !fixed_time_eq(&Hmac::from_engine(hmac).to_byte_array(), &hmac_bytes) { return Err(OnionDecodeErr::Malformed { err_msg: "HMAC Check failed", - err_code: 0x8000 | 0x4000 | 5, + reason: LocalHTLCFailureReason::InvalidOnionHMAC, }); } @@ -2037,19 +2309,19 @@ fn decode_next_hop, N: NextPacketBytes>( let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&hop_data[..]) }; match R::read(&mut chacha_stream, read_args) { Err(err) => { - let error_code = match err { + let reason = match err { // Unknown version - msgs::DecodeError::UnknownVersion => 0x8000 | 0x4000 | 1, + msgs::DecodeError::UnknownVersion => LocalHTLCFailureReason::InvalidOnionVersion, // invalid_onion_payload msgs::DecodeError::UnknownRequiredFeature | msgs::DecodeError::InvalidValue - | msgs::DecodeError::ShortRead => 0x4000 | 22, + | msgs::DecodeError::ShortRead => 
LocalHTLCFailureReason::InvalidOnionPayload, // Should never happen - _ => 0x2000 | 2, + _ => LocalHTLCFailureReason::TemporaryNodeFailure, }; return Err(OnionDecodeErr::Relay { err_msg: "Unable to decode our hop data", - err_code: error_code, + reason, shared_secret: SharedSecret::from_bytes(shared_secret), trampoline_shared_secret: None, }); @@ -2059,7 +2331,7 @@ fn decode_next_hop, N: NextPacketBytes>( if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) { return Err(OnionDecodeErr::Relay { err_msg: "Unable to decode our hop data", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, shared_secret: SharedSecret::from_bytes(shared_secret), trampoline_shared_secret: None, }); @@ -2443,7 +2715,7 @@ mod tests { let onion_keys = build_test_onion_keys(); let mut onion_error = super::build_unencrypted_failure_packet( onion_keys[4].shared_secret.as_ref(), - 0x2002, + LocalHTLCFailureReason::TemporaryNodeFailure, &[0; 0], ); let hex = "4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; @@ -2607,7 +2879,7 @@ mod tests { { // Ensure error decryption works without the Trampoline hops having been hit. 
- let error_code = 0x2002; + let error_code = LocalHTLCFailureReason::TemporaryNodeFailure; let mut first_hop_error_packet = build_unencrypted_failure_packet( outer_onion_keys[0].shared_secret.as_ref(), error_code, @@ -2621,12 +2893,12 @@ mod tests { let decrypted_failure = process_onion_failure(&secp_ctx, &logger, &htlc_source, first_hop_error_packet); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); }; { // Ensure error decryption works from the first Trampoline hop, but at the outer onion. - let error_code = 0x2003; + let error_code = 0x2003.into(); let mut trampoline_outer_hop_error_packet = build_unencrypted_failure_packet( outer_onion_keys[1].shared_secret.as_ref(), error_code, @@ -2649,12 +2921,12 @@ mod tests { &htlc_source, trampoline_outer_hop_error_packet, ); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); }; { // Ensure error decryption works from the Trampoline inner onion. - let error_code = 0x2004; + let error_code = 0x2004.into(); let mut trampoline_inner_hop_error_packet = build_unencrypted_failure_packet( trampoline_onion_keys[0].shared_secret.as_ref(), error_code, @@ -2682,12 +2954,12 @@ mod tests { &htlc_source, trampoline_inner_hop_error_packet, ); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); } { // Ensure error decryption works from a later hop in the Trampoline inner onion. 
- let error_code = 0x2005; + let error_code = 0x2005.into(); let mut trampoline_second_hop_error_packet = build_unencrypted_failure_packet( trampoline_onion_keys[1].shared_secret.as_ref(), error_code, @@ -2720,7 +2992,7 @@ mod tests { &htlc_source, trampoline_second_hop_error_packet, ); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); } } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 5adc2d66b11..27693bb5cac 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -23,7 +23,7 @@ use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; use crate::ln::chan_utils; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; -use crate::ln::onion_utils; +use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; @@ -344,7 +344,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..])); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..])); } else { // Pass half of the payment along the second path. 
let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); @@ -1952,7 +1952,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let fail_conditions = PaymentFailedConditions::new() .blamed_scid(intercept_scid) .blamed_chan_closed(true) - .expected_htlc_error_data(0x4000 | 10, &[]); + .expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[]); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } else if test == InterceptTest::Forward { // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet. @@ -2025,7 +2025,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); - expect_payment_failed!(nodes[0], payment_hash, false, 0x2000 | 2, []); + expect_payment_failed!(nodes[0], payment_hash, false, LocalHTLCFailureReason::TemporaryNodeFailure, []); // Check for unknown intercept id error. 
let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index c482d97ea8b..dfa0e8817ed 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -14,6 +14,7 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{ClosureReason, Event, HTLCDestination}; use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields}; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::routing::gossip::RoutingFees; use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop}; use crate::types::features::ChannelTypeFeatures; @@ -456,7 +457,7 @@ fn test_inbound_scid_privacy() { expect_payment_failed_conditions(&nodes[0], payment_hash_2, false, PaymentFailedConditions::new().blamed_scid(last_hop[0].short_channel_id.unwrap()) - .blamed_chan_closed(true).expected_htlc_error_data(0x4000|10, &[0; 0])); + .blamed_chan_closed(true).expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[0; 0])); } #[test] @@ -513,7 +514,7 @@ fn test_scid_alias_returned() { let err_data = 0u16.to_be_bytes(); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap()) - .blamed_chan_closed(false).expected_htlc_error_data(0x1000|7, &err_data)); + .blamed_chan_closed(false).expected_htlc_error_data(LocalHTLCFailureReason::TemporaryChannelFailure, &err_data)); route.paths[0].hops[1].fee_msat = 10_000; // Reset to the correct payment amount route.paths[0].hops[0].fee_msat = 0; // But set fee paid to the middle hop to 0 @@ -542,7 +543,7 @@ fn test_scid_alias_returned() { err_data.extend_from_slice(&0u16.to_be_bytes()); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap()) - 
.blamed_chan_closed(false).expected_htlc_error_data(0x1000|12, &err_data)); + .blamed_chan_closed(false).expected_htlc_error_data(LocalHTLCFailureReason::FeeInsufficient, &err_data)); } #[test] diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index e49bc4e83be..b14e2bf06a8 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -20,7 +20,7 @@ use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; -use crate::ln::onion_utils::INVALID_ONION_BLINDING; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::script::ShutdownScript; use crate::util::test_utils; use crate::util::test_utils::OnGetShutdownScriptpubkey; @@ -484,7 +484,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { if blinded_recipient { expect_payment_failed_conditions(&nodes[0], our_payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } else { expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true); } From 40faf7bf7e963afe8730f7657e644b928ae95549 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Mar 2025 13:46:45 -0400 Subject: [PATCH 05/12] ln: persist failure_reason with HTLCFailureReason --- lightning/src/ln/onion_utils.rs | 88 +++++++++++++++++++++++++-------- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 88d6d186e62..e98f499c56a 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -22,7 +22,9 @@ use crate::types::features::{ChannelFeatures, NodeFeatures}; use 
crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::errors::{self, APIError}; use crate::util::logger::Logger; -use crate::util::ser::{LengthCalculatingWriter, Readable, ReadableArgs, Writeable, Writer}; +use crate::util::ser::{ + LengthCalculatingWriter, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer, +}; use bitcoin::hashes::cmp::fixed_time_eq; use bitcoin::hashes::hmac::{Hmac, HmacEngine}; @@ -1608,6 +1610,49 @@ impl Into for u16 { } } +impl_writeable_tlv_based_enum!(LocalHTLCFailureReason, + (0, TemporaryNodeFailure) => {}, + (2, PermanentNodeFailure) => {}, + (4, RequiredNodeFeature) => {}, + (6, InvalidOnionVersion) => {}, + (8, InvalidOnionHMAC) => {}, + (10, InvalidOnionKey) => {}, + (12, TemporaryChannelFailure) => {}, + (14, PermanentChannelFailure) => {}, + (16, RequiredChannelFeature) => {}, + (18, UnknownNextPeer) => {}, + (20, AmountBelowMinimum) => {}, + (22, FeeInsufficient) => {}, + (24, IncorrectCLTVExpiry) => {}, + (26, CLTVExpiryTooSoon) => {}, + (28, IncorrectPaymentDetails) => {}, + (30, FinalIncorrectCLTVExpiry) => {}, + (32, FinalIncorrectHTLCAmount) => {}, + (34, ChannelDisabled) => {}, + (36, CLTVExpiryTooFar) => {}, + (38, InvalidOnionPayload) => {}, + (40, MPPTimeout) => {}, + (42, InvalidOnionBlinding) => {}, + (44, InvalidTrampolineForward) => {}, + (46, PaymentClaimBuffer) => {}, + (48, DustLimitHolder) => {}, + (50, DustLimitCounterparty) => {}, + (52, FeeSpikeBuffer) => {}, + (54, DroppedPending) => {}, + (56, PrivateChannelForward) => {}, + (58, RealSCIDForward) => {}, + (60, ChannelNotReady) => {}, + (62, InvalidKeysendPreimage) => {}, + (64, InvalidTrampolinePayload) => {}, + (66, PaymentSecretRequired) => {}, + (68, ForwardExpiryBuffer) => {}, + (70, OutgoingCLTVTooSoon) => {}, + (72, ChannelClosed) => {}, + (74, UnknownFailureCode) => { + (0, code, required), + } +); + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(PartialEq))] pub(super) struct 
HTLCFailReason(HTLCFailReasonRepr); @@ -1616,14 +1661,14 @@ pub(super) struct HTLCFailReason(HTLCFailReasonRepr); #[cfg_attr(test, derive(PartialEq))] enum HTLCFailReasonRepr { LightningError { err: msgs::OnionErrorPacket }, - Reason { failure_code: u16, data: Vec }, + Reason { data: Vec, reason: LocalHTLCFailureReason }, } impl core::fmt::Debug for HTLCFailReason { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { match self.0 { - HTLCFailReasonRepr::Reason { ref failure_code, .. } => { - write!(f, "HTLC error code {}", failure_code) + HTLCFailReasonRepr::Reason { ref reason, .. } => { + write!(f, "HTLC error code {}", reason.failure_code()) }, HTLCFailReasonRepr::LightningError { .. } => { write!(f, "pre-built LightningError") @@ -1655,8 +1700,19 @@ impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, (_unused, err, (static_value, msgs::OnionErrorPacket { data: data.ok_or(DecodeError::InvalidValue)? })), }, (1, Reason) => { - (0, failure_code, required), + (0, _failure_code, (legacy, u16, + |r: &HTLCFailReasonRepr| Some(r.clone()) )), (2, data, required_vec), + // failure_code was required, and is replaced by reason so any time we do not have a + // reason available failure_code will be Some so we can require reason. 
+ (4, reason, (default_value, + if let Some(code) = _failure_code { + let failure_reason: LocalHTLCFailureReason = code.into(); + RequiredWrapper::from(failure_reason) + } else { + reason + } + )), }, ); @@ -1728,7 +1784,7 @@ impl HTLCFailReason { }, } - Self(HTLCFailReasonRepr::Reason { failure_code: failure_reason.failure_code(), data }) + Self(HTLCFailReasonRepr::Reason { data, reason: failure_reason }) } pub(super) fn from_failure_code(failure_reason: LocalHTLCFailureReason) -> Self { @@ -1750,24 +1806,16 @@ impl HTLCFailReason { &self, incoming_packet_shared_secret: &[u8; 32], secondary_shared_secret: &Option<[u8; 32]>, ) -> msgs::OnionErrorPacket { match self.0 { - HTLCFailReasonRepr::Reason { ref failure_code, ref data } => { - let failure_code = *failure_code; + HTLCFailReasonRepr::Reason { ref data, ref reason } => { if let Some(secondary_shared_secret) = secondary_shared_secret { - let mut packet = build_failure_packet( - secondary_shared_secret, - failure_code.into(), - &data[..], - ); + let mut packet = + build_failure_packet(secondary_shared_secret, *reason, &data[..]); crypt_failure_packet(incoming_packet_shared_secret, &mut packet); packet } else { - build_failure_packet( - incoming_packet_shared_secret, - failure_code.into(), - &data[..], - ) + build_failure_packet(incoming_packet_shared_secret, *reason, &data[..]) } }, HTLCFailReasonRepr::LightningError { ref err } => { @@ -1791,7 +1839,7 @@ impl HTLCFailReason { process_onion_failure(secp_ctx, logger, &htlc_source, err.clone()) }, #[allow(unused)] - HTLCFailReasonRepr::Reason { ref failure_code, ref data, .. 
} => { + HTLCFailReasonRepr::Reason { ref data, ref reason } => { // we get a fail_malformed_htlc from the first hop // TODO: We'd like to generate a NetworkUpdate for temporary // failures here, but that would be insufficient as find_route @@ -1804,7 +1852,7 @@ impl HTLCFailReason { short_channel_id: Some(path.hops[0].short_channel_id), failed_within_blinded_path: false, #[cfg(any(test, feature = "_test_utils"))] - onion_error_code: Some(*failure_code), + onion_error_code: Some(reason.failure_code()), #[cfg(any(test, feature = "_test_utils"))] onion_error_data: Some(data.clone()), } From 20f43eef9fe2dc5de9f1200c71a190face9a5645 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 31 Mar 2025 09:34:52 -0400 Subject: [PATCH 06/12] ln/refactor: split up construct_pending_htlc_status to get error To be able to obtain the underlying error reason for the pending HTLC, break up the helper method into two parts. This also removes some unnecessary wrapping/unwrapping of messages in PendingHTLCStatus types. --- lightning/src/ln/channelmanager.rs | 99 +++++++++++++----------------- 1 file changed, 42 insertions(+), 57 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 317b5fed5c4..b6abe99845e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4488,67 +4488,55 @@ where }) } - fn construct_pending_htlc_status<'a>( - &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32], - decoded_hop: onion_utils::Hop, allow_underpay: bool, - next_packet_pubkey_opt: Option>, - ) -> PendingHTLCStatus { - macro_rules! 
return_err { - ($msg: expr, $reason: expr, $data: expr) => { - { - let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); - log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); - if msg.blinding_point.is_some() { - return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( - msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - } - )) + fn construct_pending_htlc_fail_msg<'a>(&self, msg: &msgs::UpdateAddHTLC, + counterparty_node_id: &PublicKey, shared_secret: [u8; 32], inbound_err: InboundHTLCErr) -> HTLCFailureMsg { + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); + log_info!(logger, "Failed to accept/forward incoming HTLC: {}", inbound_err.msg); + + if msg.blinding_point.is_some() { + return HTLCFailureMsg::Malformed( + msgs::UpdateFailMalformedHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + sha256_of_onion: [0; 32], + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), } - let failure = HTLCFailReason::reason($reason, $data.to_vec()) - .get_encrypted_failure_packet(&shared_secret, &None); - return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason: failure.data, - })); - } - } + ) } + + let failure = HTLCFailReason::reason(inbound_err.reason, inbound_err.err_data.to_vec()) + .get_encrypted_failure_packet(&shared_secret, &None); + return HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + reason: failure.data, + }); + } + + fn get_pending_htlc_info<'a>( + &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], + decoded_hop: onion_utils::Hop, allow_underpay: bool, + next_packet_pubkey_opt: Option>, + ) -> 
Result { match decoded_hop { onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. } | onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => { // OUR PAYMENT! + // Note that we could obviously respond immediately with an update_fulfill_htlc + // message, however that would leak that we are the recipient of this payment, so + // instead we stay symmetric with the forwarding case, only responding (after a + // delay) once they've sent us a commitment_signed! let current_height: u32 = self.best_block.read().unwrap().height; - match create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, + create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat, current_height) - { - Ok(info) => { - // Note that we could obviously respond immediately with an update_fulfill_htlc - // message, however that would leak that we are the recipient of this payment, so - // instead we stay symmetric with the forwarding case, only responding (after a - // delay) once they've sent us a commitment_signed! - PendingHTLCStatus::Forward(info) - }, - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason , &err_data) - } }, onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => { - match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { - Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) - } + create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. 
} => { - match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { - Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) - } - } + create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) + }, } } @@ -5849,16 +5837,14 @@ where } } - match self.construct_pending_htlc_status( - &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop, - incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), + match self.get_pending_htlc_info( + &update_add_htlc, shared_secret, next_hop, incoming_accept_underpaying_htlcs, + next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { - PendingHTLCStatus::Forward(htlc_forward) => { - htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id)); - }, - PendingHTLCStatus::Fail(htlc_fail) => { + Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), + Err(inbound_err) => { let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, htlc_destination)); + htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err), htlc_destination)); }, } } @@ -11810,7 +11796,6 @@ where payment.htlcs.retain(|htlc| { // If height is approaching the number of blocks we think it takes us to get // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, // just give up on it and fail the HTLC. 
if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); From 8dadc78e46fd00fd1be9298cda59eb8061974efd Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 31 Mar 2025 10:09:54 -0400 Subject: [PATCH 07/12] ln: surface LocalHTLCFailureReason in queue_add_htlc --- lightning/src/ln/channel.rs | 54 +++++++++++++----------------- lightning/src/ln/channelmanager.rs | 9 ++--- lightning/src/ln/onion_utils.rs | 35 ++++++++++++++++--- 3 files changed, 56 insertions(+), 42 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 51e9bf4ed9f..40cf597adbc 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5868,22 +5868,14 @@ impl FundedChannel where ); update_add_count += 1; }, - Err(e) => { - match e { - ChannelError::Ignore(ref msg) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id()); - // If we fail to send here, then this HTLC should - // be failed backwards. Failing to send here - // indicates that this HTLC may keep being put back - // into the holding cell without ever being - // successfully forwarded/failed/fulfilled, causing - // our counterparty to eventually close on us. - htlcs_to_fail.push((source.clone(), *payment_hash)); - }, - _ => { - panic!("Got a non-IgnoreError action trying to send holding cell HTLC"); - }, - } + Err((_, msg)) => { + log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id()); + // If we fail to send here, then this HTLC should be failed + // backwards. Failing to send here indicates that this HTLC may + // keep being put back into the holding cell without ever being + // successfully forwarded/failed/fulfilled, causing our + // counterparty to eventually close on us. 
+ htlcs_to_fail.push((source.clone(), *payment_hash)); } } None @@ -8540,13 +8532,11 @@ impl FundedChannel where /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. - /// - /// `Err`s will only be [`ChannelError::Ignore`]. pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, blinding_point: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result<(), ChannelError> + ) -> Result<(), (LocalHTLCFailureReason, String)> where F::Target: FeeEstimator, L::Target: Logger { self @@ -8554,8 +8544,7 @@ impl FundedChannel where skimmed_fee_msat, blinding_point, fee_estimator, logger) .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) .map_err(|err| { - if let ChannelError::Ignore(_) = err { /* fine */ } - else { debug_assert!(false, "Queueing cannot trigger channel failure"); } + debug_assert!(err.0.is_temporary(), "Queuing HTLC should return temporary error"); err }) } @@ -8575,38 +8564,40 @@ impl FundedChannel where /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods /// on this [`FundedChannel`] if `force_holding_cell` is false. /// - /// `Err`s will only be [`ChannelError::Ignore`]. + /// `Err`'s will always be temporary channel failures. 
fn send_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, skimmed_fee_msat: Option, blinding_point: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result, ChannelError> + ) -> Result, (LocalHTLCFailureReason, String)> where F::Target: FeeEstimator, L::Target: Logger { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) || self.context.channel_state.is_local_shutdown_sent() || self.context.channel_state.is_remote_shutdown_sent() { - return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); + return Err((LocalHTLCFailureReason::ChannelNotReady, + "Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); } let channel_total_msat = self.funding.get_value_satoshis() * 1000; if amount_msat > channel_total_msat { - return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat))); + return Err((LocalHTLCFailureReason::AmountExceedsCapacity, + format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat))); } if amount_msat == 0 { - return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned())); + return Err((LocalHTLCFailureReason::ZeroAmount, "Cannot send 0-msat HTLC".to_owned())); } let available_balances = self.context.get_available_balances(&self.funding, fee_estimator); if amount_msat < available_balances.next_outbound_htlc_minimum_msat { - return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat", + return Err((LocalHTLCFailureReason::LiquidityMinimum, format!("Cannot send less than our next-HTLC minimum - {} msat", available_balances.next_outbound_htlc_minimum_msat))); } if 
amount_msat > available_balances.next_outbound_htlc_limit_msat { - return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat", + return Err((LocalHTLCFailureReason::LiquidityMaximum, format!("Cannot send more than our next-HTLC maximum - {} msat", available_balances.next_outbound_htlc_limit_msat))); } @@ -8617,7 +8608,8 @@ impl FundedChannel where // disconnected during the time the previous hop was doing the commitment dance we may // end up getting here after the forwarding delay. In any case, returning an // IgnoreError will get ChannelManager to do the right thing and fail backwards now. - return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); + return Err((LocalHTLCFailureReason::PeerOffline, + "Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } let need_holding_cell = !self.context.channel_state.can_generate_new_commitment(); @@ -8837,8 +8829,8 @@ impl FundedChannel where { let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger); - if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } } - match send_res? { + // All [`LocalHTLCFailureReason`] errors are temporary, so they are [`ChannelError::Ignore`]. + match send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))? 
{ Some(_) => { let monitor_update = self.build_commitment_no_status_check(logger); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b6abe99845e..21b1c44c152 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6100,22 +6100,17 @@ where }; log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); - if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat, + if let Err((reason, msg)) = optimal_channel.queue_add_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator, &&logger) { - if let ChannelError::Ignore(msg) = e { - log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg); - } else { - panic!("Stated return value requirements in send_htlc() were not met"); - } + log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg); if let Some(chan) = peer_state.channel_by_id .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { - let reason = LocalHTLCFailureReason::TemporaryChannelFailure; let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(reason, data), diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index e98f499c56a..196895188ae 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1497,7 +1497,19 @@ pub enum LocalHTLCFailureReason { DroppedPending, /// The HTLC was failed back because its channel is 
closed and it has timed out on chain. ChannelClosed, - /// UnknownFailureCode represents BOLT04 failure codes that we are not familiar with. We will + /// The HTLC was failed because its amount is greater than the capacity of the channel. + AmountExceedsCapacity, + /// The HTLC was failed because zero amount HTLCs are not allowed. + ZeroAmount, + /// The HTLC was failed because its amount is less than the smallest HTLC that the channel + /// can currently accept. + LiquidityMinimum, + /// The HTLC was failed because its amount is more than the largest HTLC that the channel + /// can currently accept. + LiquidityMaximum, + /// The HTLC was failed because our remote peer is offline. + PeerOffline, + /// UnknownFailureCode represents BOLT04 failure codes that we are not familiar with. We will /// encounter this if: /// - A peer sends us a new failure code that LDK has not yet been upgraded to understand. /// - We read a deprecated failure code from disk that LDK no longer uses. @@ -1523,7 +1535,12 @@ impl LocalHTLCFailureReason { | Self::DustLimitHolder | Self::DustLimitCounterparty | Self::FeeSpikeBuffer - | Self::ChannelNotReady => UPDATE | 7, + | Self::ChannelNotReady + | Self::AmountExceedsCapacity + | Self::ZeroAmount + | Self::LiquidityMinimum + | Self::LiquidityMaximum + | Self::PeerOffline => UPDATE | 7, Self::PermanentChannelFailure | Self::ChannelClosed | Self::DroppedPending => PERM | 8, Self::RequiredChannelFeature => PERM | 9, Self::UnknownNextPeer @@ -1650,7 +1667,12 @@ impl_writeable_tlv_based_enum!(LocalHTLCFailureReason, (72, ChannelClosed) => {}, (74, UnknownFailureCode) => { (0, code, required), - } + }, + (76, AmountExceedsCapacity) => {}, + (78, ZeroAmount) => {}, + (80, LiquidityMinimum) => {}, + (82, LiquidityMaximum) => {}, + (84, PeerOffline) => {}, ); #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug @@ -1731,7 +1753,12 @@ impl HTLCFailReason { | LocalHTLCFailureReason::DustLimitHolder | 
LocalHTLCFailureReason::DustLimitCounterparty | LocalHTLCFailureReason::FeeSpikeBuffer - | LocalHTLCFailureReason::ChannelNotReady => { + | LocalHTLCFailureReason::ChannelNotReady + | LocalHTLCFailureReason::AmountExceedsCapacity + | LocalHTLCFailureReason::ZeroAmount + | LocalHTLCFailureReason::LiquidityMinimum + | LocalHTLCFailureReason::LiquidityMaximum + | LocalHTLCFailureReason::PeerOffline => { debug_assert_eq!( data.len() - 2, u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize From bbfe36075d70ff07b55038dff8ec997d2f904c79 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 2 Apr 2025 08:53:34 -0400 Subject: [PATCH 08/12] ln+events+liquidity/refactor: HTLCDestination renamed HTLCHandlingType HTLCDestination currently contains a combination of information about the type of HTLC we handled to fail (a payment, a forward etc) and error information for a select set of cases (unknown next peer, for example). In preparation for a refactor that will split the failure reason out into its own enum, this commit renames HTLCDestination to HTLCHandlingType. 
--- lightning-liquidity/src/lsps2/service.rs | 8 +- lightning/src/events/mod.rs | 20 ++-- lightning/src/ln/async_payments_tests.rs | 12 +- lightning/src/ln/blinded_payment_tests.rs | 44 +++---- lightning/src/ln/chanmon_update_fail_tests.rs | 16 +-- lightning/src/ln/channelmanager.rs | 86 +++++++------- lightning/src/ln/functional_test_utils.rs | 22 ++-- lightning/src/ln/functional_tests.rs | 112 +++++++++--------- lightning/src/ln/monitor_tests.rs | 14 +-- lightning/src/ln/offers_tests.rs | 4 +- lightning/src/ln/onion_route_tests.rs | 58 ++++----- lightning/src/ln/payment_tests.rs | 58 ++++----- lightning/src/ln/priv_short_conf_tests.rs | 10 +- lightning/src/ln/quiescence_tests.rs | 8 +- lightning/src/ln/reload_tests.rs | 10 +- lightning/src/ln/reorg_tests.rs | 4 +- lightning/src/ln/shutdown_tests.rs | 6 +- 17 files changed, 245 insertions(+), 247 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 9929ac3eed3..68732fa2238 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -25,7 +25,7 @@ use crate::prelude::hash_map::Entry; use crate::prelude::{new_hash_map, HashMap, String, ToString, Vec}; use crate::sync::{Arc, Mutex, MutexGuard, RwLock}; -use lightning::events::HTLCDestination; +use lightning::events::HTLCHandlingType; use lightning::ln::channelmanager::{AChannelManager, InterceptId}; use lightning::ln::msgs::{ErrorAction, LightningError}; use lightning::ln::types::ChannelId; @@ -879,10 +879,8 @@ where /// or if the payment queue is empty /// /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed - pub fn htlc_handling_failed( - &self, failed_next_destination: HTLCDestination, - ) -> Result<(), APIError> { - if let HTLCDestination::NextHopChannel { channel_id, .. 
} = failed_next_destination { + pub fn htlc_handling_failed(&self, handling_type: HTLCHandlingType) -> Result<(), APIError> { + if let HTLCHandlingType::NextHopChannel { channel_id, .. } = handling_type { let peer_by_channel_id = self.peer_by_channel_id.read().unwrap(); if let Some(counterparty_node_id) = peer_by_channel_id.get(&channel_id) { let outer_state_lock = self.per_peer_state.read().unwrap(); diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index a0f26bfbac0..9dab9545fba 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -465,9 +465,9 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason, }, ); -/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`]. +/// The type of HTLC that is being handled in [`Event::HTLCHandlingFailed`]. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum HTLCDestination { +pub enum HTLCHandlingType { /// We tried forwarding to a channel but failed to do so. An example of such an instance is when /// there is insufficient capacity in our outbound channel. NextHopChannel { @@ -507,7 +507,7 @@ pub enum HTLCDestination { }, } -impl_writeable_tlv_based_enum_upgradable!(HTLCDestination, +impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingType, (0, NextHopChannel) => { (0, node_id, required), (2, channel_id, required), @@ -1447,8 +1447,8 @@ pub enum Event { HTLCHandlingFailed { /// The channel over which the HTLC was received. prev_channel_id: ChannelId, - /// Destination of the HTLC that failed to be processed. - failed_next_destination: HTLCDestination, + /// The type of HTLC that was handled. + handling_type: HTLCHandlingType, }, /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event /// requires confirmed external funds to be readily available to spend. 
@@ -1752,11 +1752,11 @@ impl Writeable for Event { (8, path.blinded_tail, option), }) }, - &Event::HTLCHandlingFailed { ref prev_channel_id, ref failed_next_destination } => { + &Event::HTLCHandlingFailed { ref prev_channel_id, ref handling_type } => { 25u8.write(writer)?; write_tlv_fields!(writer, { (0, prev_channel_id, required), - (2, failed_next_destination, required), + (2, handling_type, required), }) }, &Event::BumpTransaction(ref event)=> { @@ -2201,14 +2201,14 @@ impl MaybeReadable for Event { 25u8 => { let mut f = || { let mut prev_channel_id = ChannelId::new_zero(); - let mut failed_next_destination_opt = UpgradableRequired(None); + let mut handling_type_opt = UpgradableRequired(None); read_tlv_fields!(reader, { (0, prev_channel_id, required), - (2, failed_next_destination_opt, upgradable_required), + (2, handling_type_opt, upgradable_required), }); Ok(Some(Event::HTLCHandlingFailed { prev_channel_id, - failed_next_destination: _init_tlv_based_struct_field!(failed_next_destination_opt, upgradable_required), + handling_type: _init_tlv_based_struct_field!(handling_type_opt, upgradable_required), })) }; f() diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 1d9c6fb84c7..87fab52050d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -11,7 +11,7 @@ use crate::blinded_path::message::{MessageContext, OffersContext}; use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; -use crate::events::{Event, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, HTLCHandlingType, PaymentFailureReason}; use crate::ln::blinded_payment_tests::{fail_blinded_htlc_backwards, get_blinded_route_parameters}; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use 
crate::ln::functional_test_utils::*; @@ -172,7 +172,7 @@ fn invalid_keysend_payment_secret() { PassAlongPathArgs::new(&nodes[0], &expected_route[0], amt_msat, payment_hash, ev.clone()) .with_payment_secret(invalid_payment_secret) .with_payment_preimage(keysend_preimage) - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); do_pass_along_path(args); let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -698,7 +698,7 @@ fn amount_doesnt_match_invreq() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); do_pass_along_path(args); // Modify the invoice request stored in our outbounds to be the correct one, to make sure the @@ -914,7 +914,7 @@ fn invalid_async_receive_with_retry( nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::FailedPayment { payment_hash }], + &[HTLCHandlingType::FailedPayment { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); @@ -934,7 +934,7 @@ fn invalid_async_receive_with_retry( let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); @@ -1100,7 +1100,7 @@ fn expired_static_invoice_payment_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) 
.with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], false); nodes[2].logger.assert_log_contains( diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index b9b961e60ec..46e80caebf4 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -15,7 +15,7 @@ use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use crate::blinded_path; use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12RefundContext, ForwardTlvs, PaymentConstraints, PaymentContext, PaymentForwardNode, PaymentRelay, UnauthenticatedReceiveTlvs, PAYMENT_PADDING_ROUND_OFF}; use crate::blinded_path::utils::is_padded; -use crate::events::{Event, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, HTLCHandlingType, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channelmanager; @@ -425,10 +425,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); let failed_destination = match check { - ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion, - ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, + ForwardCheckFail::InboundOnionCheck => HTLCHandlingType::InvalidOnion, + ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: 
chan_1_2.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, }; expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -457,9 +457,9 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { expect_pending_htlcs_forwardable!(nodes[2]); let failed_destination = match check { - ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, + ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, }; expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -527,7 +527,7 @@ fn failed_backwards_to_intro_node() { do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::InvalidOnion]); check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -606,7 +606,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); expect_pending_htlcs_forwardable!($curr_node); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCDestination::NextHopChannel { node_id: 
Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); + vec![HTLCHandlingType::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); }, ProcessPendingHTLCsCheck::FwdChannelClosed => { // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards, @@ -626,7 +626,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]); + vec![HTLCHandlingType::UnknownNextHop { requested_forward_scid: $failed_scid }]); $curr_node.node.process_pending_htlc_forwards(); }, } @@ -725,7 +725,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { if intercept_node_fails { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::UnknownNextHop { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); @@ -830,7 +830,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }] + nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -958,7 +958,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ); 
nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }] + nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); @@ -988,7 +988,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::InvalidOnion]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ReceiveRequirements => { @@ -998,7 +998,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ChannelCheck => { @@ -1014,7 +1014,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { 
payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ProcessPendingHTLCsCheck => { @@ -1024,7 +1024,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2], - vec![HTLCDestination::FailedPayment { payment_hash }]); + vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { @@ -1032,7 +1032,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors(&nodes[2], 1); } } @@ -1121,7 +1121,7 @@ fn blinded_path_retries() { ($intro_node: expr) => { nodes[3].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }] + nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -1243,7 +1243,7 @@ fn min_htlc() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { 
node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] ); check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1436,7 +1436,7 @@ fn fails_receive_tlvs_authentication() { expect_pending_htlcs_forwardable!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::InvalidOnion]); let mut update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_fail.update_fail_htlcs.len() == 1); @@ -2141,7 +2141,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { let args = PassAlongPathArgs::new(&nodes[0], route, amt_msat, payment_hash, first_message_event) .with_payment_preimage(payment_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::InvalidOnion); + .expect_failure(HTLCHandlingType::InvalidOnion); do_pass_along_path(args); { @@ -2435,7 +2435,7 @@ fn test_trampoline_forward_rejection() { let args = PassAlongPathArgs::new(&nodes[0], route, amt_msat, payment_hash, first_message_event) .with_payment_preimage(payment_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); do_pass_along_path(args); { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e99cf017b66..eed842cdc96 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -17,7 +17,7 @@ use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use 
crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; -use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination}; +use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCHandlingType}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::AnnouncementSigsState; use crate::ln::msgs; @@ -824,7 +824,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_1 }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -905,7 +905,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1730,7 +1730,7 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_1 }]); check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1752,7 +1752,7 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -2159,7 +2159,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ 
-2511,7 +2511,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); } else { @@ -2549,7 +2549,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_fails.0 = 1; reconnect_nodes(reconnect_args); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_claims.0 = 1; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 21b1c44c152..65cea4a72d3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -44,7 +44,7 @@ use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock}; use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, MAX_BLOCKS_FOR_CONF, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent}; use crate::chain::transaction::{OutPoint, TransactionData}; 
-use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent}; +use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCHandlingType, PaymentFailureReason, ReplayEvent}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::inbound_payment; @@ -3298,7 +3298,7 @@ macro_rules! handle_monitor_update_completion { } $self.finalize_claims(updates.finalized_claimed_htlcs); for failure in updates.failed_htlcs.drain(..) { - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } } } @@ -3918,7 +3918,7 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::DroppedPending; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -4042,7 +4042,7 @@ where let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::DroppedPending; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { @@ -5732,7 +5732,7 @@ where }); let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer); - let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id }; + let destination = HTLCHandlingType::UnknownNextHop { requested_forward_scid: short_channel_id }; self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &reason, destination); } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted @@ -5743,20 +5743,20 @@ where let mut decode_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); - let get_failed_htlc_destination = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { + let get_failed_htlc_type = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { if let Some(outgoing_scid) = outgoing_scid_opt { match 
self.short_to_chan_info.read().unwrap().get(&outgoing_scid) { Some((outgoing_counterparty_node_id, outgoing_channel_id)) => - HTLCDestination::NextHopChannel { + HTLCHandlingType::NextHopChannel { node_id: Some(*outgoing_counterparty_node_id), channel_id: *outgoing_channel_id, }, - None => HTLCDestination::UnknownNextHop { + None => HTLCHandlingType::UnknownNextHop { requested_forward_scid: outgoing_scid, }, } } else { - HTLCDestination::FailedPayment { payment_hash } + HTLCHandlingType::FailedPayment { payment_hash } } }; @@ -5787,7 +5787,7 @@ where ) { Ok(decoded_onion) => decoded_onion, Err((htlc_fail, _)) => { - htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion)); + htlc_fails.push((htlc_fail, HTLCHandlingType::InvalidOnion)); continue; }, }; @@ -5814,8 +5814,8 @@ where &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, htlc_destination)); + let handling_type = get_failed_htlc_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, handling_type)); continue; }, // The incoming channel no longer exists, HTLCs should be resolved onchain instead. 
@@ -5831,8 +5831,8 @@ where &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, htlc_destination)); + let handling_type = get_failed_htlc_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, handling_type)); continue; } } @@ -5843,8 +5843,8 @@ where ) { Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), Err(inbound_err) => { - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err), htlc_destination)); + let handling_type = get_failed_htlc_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err), handling_type)); }, } } @@ -5856,7 +5856,7 @@ where incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect() ); self.forward_htlcs_without_forward_event(&mut [pending_forwards]); - for (htlc_fail, htlc_destination) in htlc_fails.drain(..) { + for (htlc_fail, handling_type) in htlc_fails.drain(..) 
{ let failure = match htlc_fail { HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC { htlc_id: fail_htlc.htlc_id, @@ -5871,7 +5871,7 @@ where self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure); self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed { prev_channel_id: incoming_channel_id, - failed_next_destination: htlc_destination, + handling_type, }, None)); } } @@ -5927,9 +5927,9 @@ where }); let reason = if $next_hop_unknown { - HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id } + HTLCHandlingType::UnknownNextHop { requested_forward_scid: short_chan_id } } else { - HTLCDestination::FailedPayment{ payment_hash } + HTLCHandlingType::FailedPayment{ payment_hash } }; failed_forwards.push((htlc_source, payment_hash, @@ -6114,7 +6114,7 @@ where let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCHandlingType::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); } else { forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); @@ -6268,7 +6268,7 @@ where cltv_expiry: Some(cltv_expiry), }), payment_hash, HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data), - HTLCDestination::FailedPayment { payment_hash: $payment_hash }, + HTLCHandlingType::FailedPayment { payment_hash: $payment_hash }, )); continue 'next_forwardable_htlc; } @@ -6826,7 +6826,7 @@ where let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); let failure_reason = LocalHTLCFailureReason::MPPTimeout; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; + let receiver 
= HTLCHandlingType::FailedPayment { payment_hash: htlc_source.1 }; self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } @@ -6891,7 +6891,7 @@ where for htlc in payment.htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash }; + let receiver = HTLCHandlingType::FailedPayment { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } @@ -6970,19 +6970,19 @@ where for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } - fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { + fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCHandlingType) { let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination); if push_forward_event { self.push_pending_forwards_ev(); } } /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. 
- fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool { + fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, handling_type: HTLCHandlingType) -> bool { // Ensure that no peer state channel storage lock is held when calling this function. // This ensures that future code doesn't introduce a lock-order requirement for // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling @@ -7054,7 +7054,7 @@ where let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::HTLCHandlingFailed { prev_channel_id: *channel_id, - failed_next_destination: destination, + handling_type, }, None)); }, } @@ -7119,7 +7119,7 @@ where for htlc in htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let receiver = HTLCDestination::FailedPayment { payment_hash }; + let receiver = HTLCHandlingType::FailedPayment { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } return; @@ -7218,7 +7218,7 @@ where htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); let source = HTLCSource::PreviousHopData(htlc.prev_hop); let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data); - let receiver = HTLCDestination::FailedPayment { payment_hash }; + let receiver = HTLCHandlingType::FailedPayment { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); @@ -8755,7 +8755,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } for htlc_source in dropped_htlcs.drain(..) { - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::DroppedPending); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -9097,7 +9097,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer), - HTLCDestination::InvalidForward { requested_forward_scid: scid }, + HTLCHandlingType::InvalidForward { requested_forward_scid: scid }, )); } } @@ -9581,7 +9581,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); let failure_reason = LocalHTLCFailureReason::ChannelClosed; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; let reason = HTLCFailReason::from_failure_code(failure_reason); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } @@ -11675,7 +11675,7 @@ where let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; let data = self.get_htlc_inbound_temp_fail_data(reason); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCDestination::NextHopChannel { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); + HTLCHandlingType::NextHopChannel { node_id: 
Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); } let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); if let Some(channel_ready) = channel_ready_opt { @@ -11799,7 +11799,7 @@ where let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::reason(reason, htlc_msat_height_data), - HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); + HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() })); false } else { true } }); @@ -11828,7 +11828,7 @@ where }; timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), - HTLCDestination::InvalidForward { requested_forward_scid })); + HTLCHandlingType::InvalidForward { requested_forward_scid })); let logger = WithContext::from( &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) ); @@ -14872,7 +14872,7 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::DroppedPending; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -14899,7 +14899,7 @@ mod tests { use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use bitcoin::secp256k1::ecdh::SharedSecret; use core::sync::atomic::Ordering; - use crate::events::{Event, HTLCDestination, ClosureReason}; + use crate::events::{Event, HTLCHandlingType, ClosureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, ChannelConfigOverrides, HTLCForwardInfo, InterceptId, PaymentId, RecipientOnionFields}; @@ -15037,7 +15037,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15218,7 +15218,7 @@ mod tests { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward expect_pending_htlcs_forwardable!(nodes[1]); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15263,7 +15263,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15310,7 +15310,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15367,7 +15367,7 @@ mod tests { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[1]); - 
expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash: mismatch_payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index fa4c10d9248..de0d94a1f89 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -13,7 +13,7 @@ use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist}; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; -use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, PathFailure, PaymentPurpose, PaymentFailureReason}; +use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCHandlingType, PathFailure, PaymentPurpose, PaymentFailureReason}; use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; @@ -1947,8 +1947,8 @@ macro_rules! expect_htlc_handling_failed_destinations { for event in $events { match event { $crate::events::Event::PendingHTLCsForwardable { .. } => { }, - $crate::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => { - assert!($expected_failures.contains(&failed_next_destination)); + $crate::events::Event::HTLCHandlingFailed { ref handling_type, .. } => { + assert!($expected_failures.contains(&handling_type)); num_expected_failures -= 1; }, _ => panic!("Unexpected destination"), @@ -1959,9 +1959,9 @@ macro_rules! 
expect_htlc_handling_failed_destinations { } /// Checks that an [`Event::PendingHTLCsForwardable`] is available in the given events and, if -/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCDestination`] is included in the +/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCHandlingType`] is included in the /// `expected_failures` set. -pub fn expect_pending_htlcs_forwardable_conditions(events: Vec, expected_failures: &[HTLCDestination]) { +pub fn expect_pending_htlcs_forwardable_conditions(events: Vec, expected_failures: &[HTLCHandlingType]) { let count = expected_failures.len() + 1; assert_eq!(events.len(), count); assert!(events.iter().find(|event| matches!(event, Event::PendingHTLCsForwardable { .. })).is_some()); @@ -2130,7 +2130,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' if fail_backwards { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a, - vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); + vec![crate::events::HTLCHandlingType::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); check_added_monitors!(node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); @@ -2659,7 +2659,7 @@ pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> { pub is_probe: bool, pub custom_tlvs: Vec<(u64, Vec)>, pub payment_metadata: Option>, - pub expected_failure: Option, + pub expected_failure: Option, } impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { @@ -2702,7 +2702,7 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { self.payment_metadata = Some(payment_metadata); self } - pub fn expect_failure(mut self, failure: HTLCDestination) -> Self { + pub fn expect_failure(mut self, failure: HTLCHandlingType) -> Self { self.payment_claimable_expected = false; self.expected_failure = Some(failure); self @@ 
-2841,7 +2841,7 @@ pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expect fail_payment_along_path(nodes_to_fail_payment.as_slice()); expect_htlc_handling_failed_destinations!( path.last().unwrap().node.get_and_clear_pending_events(), - &[HTLCDestination::FailedPayment { payment_hash: *payment_hash }] + &[HTLCHandlingType::FailedPayment { payment_hash: *payment_hash }] ); } } @@ -3152,7 +3152,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash); - let expected_destinations: Vec = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); + let expected_destinations: Vec = repeat(HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations); pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash, PaymentFailureReason::RecipientRejected); @@ -3194,7 +3194,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe node.node.handle_update_fail_htlc(prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node); if !update_next_node { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCHandlingType::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); } } let 
events = node.node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index f44fa30e1f8..eeee5577dd0 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; -use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingType, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; @@ -1277,7 +1277,7 @@ pub fn holding_cell_htlc_counting() { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. 
expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1604,7 +1604,7 @@ pub fn test_fee_spike_violation_fails_htlc() { }; nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2389,7 +2389,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac // Check that nodes[1] fails the HTLC upstream expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); @@ -2411,7 +2411,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[1], ANTI_REORG_DELAY); // Expect handling another fail back event, but the HTLC is already gone expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); @@ -2437,7 +2437,7 @@ fn 
do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac PostFailBackAction::FailOffChain => { nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], - vec![HTLCDestination::FailedPayment { payment_hash }]); + vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[2], 1); let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_fail = commitment_update.update_fail_htlcs[0].clone(); @@ -3474,7 +3474,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors!(nodes[2], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -3529,7 +3529,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3595,7 +3595,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3659,7 +3659,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: first_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3672,7 +3672,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: second_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3689,7 +3689,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[2], 1); 
nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: third_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -4801,7 +4801,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -4865,7 +4865,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -5534,7 +5534,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Mine the HTLC timeout transaction on node B. 
mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -5700,10 +5700,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[4], 0); let failed_destinations = vec![ - HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_3 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_5 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_6 }, + HTLCHandlingType::FailedPayment { payment_hash: payment_hash_1 }, + HTLCHandlingType::FailedPayment { payment_hash: payment_hash_3 }, + HTLCHandlingType::FailedPayment { payment_hash: payment_hash_5 }, + HTLCHandlingType::FailedPayment { payment_hash: payment_hash_6 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); check_added_monitors!(nodes[4], 1); @@ -5721,8 +5721,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[5], 0); let failed_destinations_2 = vec![ - HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_4 }, + HTLCHandlingType::FailedPayment { payment_hash: payment_hash_2 }, + HTLCHandlingType::FailedPayment { payment_hash: payment_hash_4 }, ]; 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); check_added_monitors!(nodes[5], 1); @@ -5736,12 +5736,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events let failed_destinations_3 = vec![ - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); check_added_monitors!(nodes[3], 1); @@ -5794,13 +5794,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { 
expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - let expected_destinations: Vec = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); + let expected_destinations: Vec = repeat(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); } else { - let expected_destinations: Vec = if announce_latest { - repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() + let expected_destinations: Vec = if announce_latest { + repeat(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() } else { - repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() + repeat(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); @@ -6171,7 +6171,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7266,7 +7266,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::InvalidOnion]); check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -7333,7 +7333,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::InvalidOnion]); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); @@ -7356,7 +7356,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7407,7 +7407,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::InvalidOnion]); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); @@ -7425,7 +7425,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { } expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7489,7 +7489,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], 
vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_2 }]); check_added_monitors!(nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7753,7 +7753,7 @@ pub fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. @@ -8034,7 +8034,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]); connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); + expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCHandlingType::FailedPayment { payment_hash: failed_payment_hash }]); match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} _ => panic!("Unexpected event"), @@ -8315,7 +8315,7 @@ pub fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_2 }]); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -8958,7 +8958,7 @@ pub fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment{ payment_hash: $payment_hash }]); check_added_monitors!(nodes[1], 1); // We should fail the payment back @@ -10085,7 +10085,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // additional block built on top of the current chain. 
nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -10108,7 +10108,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // avoid the A<->B channel closing (even though it already has). This will generate a // spurious HTLCHandlingFailed event. expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]); + vec![HTLCHandlingType::NextHopChannel { node_id: Some(node_c_id), channel_id }]); } } @@ -10169,8 +10169,8 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[1].node.fail_htlc_backwards(&our_payment_hash); let expected_destinations = vec![ - HTLCDestination::FailedPayment { payment_hash: our_payment_hash }, - HTLCDestination::FailedPayment { payment_hash: our_payment_hash }, + HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }, + HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations); nodes[1].node.process_pending_htlc_forwards(); @@ -10191,7 +10191,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { if let Event::PaymentFailed { .. 
} = failure_events[3] {} else { panic!(); } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -10299,7 +10299,7 @@ pub fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -10308,7 +10308,7 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -10365,8 +10365,8 @@ pub fn test_double_partial_claim() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later let failed_destinations = vec![ - 
HTLCDestination::FailedPayment { payment_hash }, - HTLCDestination::FailedPayment { payment_hash }, + HTLCHandlingType::FailedPayment { payment_hash }, + HTLCHandlingType::FailedPayment { payment_hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); @@ -10575,7 +10575,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats @@ -10736,7 +10736,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", 2535000, 2530000), 1); @@ -10789,7 +10789,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let node_id_1 = nodes[1].node.get_our_node_id(); expect_htlc_handling_failed_destinations!( 
nodes[0].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }] ); let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id()); @@ -10895,7 +10895,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); nodes[1].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", expected_dust_exposure_msat, expected_dust_exposure_msat - 1), 1); @@ -11188,7 +11188,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); } else { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index adb79b94356..bdb95bb0d62 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -14,7 +14,7 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATEN use crate::chain::transaction::OutPoint; use 
crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; -use crate::events::{Event, ClosureReason, HTLCDestination}; +use crate::events::{Event, ClosureReason, HTLCHandlingType}; use crate::ln::channel; use crate::ln::types::ChannelId; use crate::ln::chan_utils; @@ -86,7 +86,7 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1217,7 +1217,7 @@ fn test_no_preimage_inbound_htlc_balances() { assert_eq!(as_htlc_timeout_claim.len(), 1); check_spends!(as_htlc_timeout_claim[0], as_txn[0]); expect_pending_htlcs_forwardable_conditions!(nodes[0], - [HTLCDestination::FailedPayment { payment_hash: to_a_failed_payment_hash }]); + [HTLCHandlingType::FailedPayment { payment_hash: to_a_failed_payment_hash }]); assert_eq!(as_pre_spend_claims, sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); @@ -1235,7 +1235,7 @@ fn test_no_preimage_inbound_htlc_balances() { nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1)); expect_pending_htlcs_forwardable_conditions!(nodes[1], - [HTLCDestination::FailedPayment { payment_hash: to_b_failed_payment_hash }]); + [HTLCHandlingType::FailedPayment { payment_hash: to_b_failed_payment_hash 
}]); let bs_htlc_timeout_claim = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_timeout_claim.len(), 1); check_spends!(bs_htlc_timeout_claim[0], as_txn[0]); @@ -1417,12 +1417,12 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ .iter().map(|a| *a).collect(); events.retain(|ev| { match ev { - Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::NextHopChannel { node_id, channel_id }, .. } => { + Event::HTLCHandlingFailed { handling_type: HTLCHandlingType::NextHopChannel { node_id, channel_id }, .. } => { assert_eq!(*channel_id, chan_id); assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id())); false }, - Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::FailedPayment { payment_hash }, .. } => { + Event::HTLCHandlingFailed { handling_type: HTLCHandlingType::FailedPayment { payment_hash }, .. } => { assert!(failed_payments.remove(payment_hash)); false }, @@ -1737,7 +1737,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { // pinnable claims, which the remainder of the test assumes. 
connect_blocks(&nodes[0], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], - [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); + [HTLCHandlingType::FailedPayment { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index e01f8d847b6..f3b9e6d5bf7 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -47,7 +47,7 @@ use crate::blinded_path::IntroductionNode; use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext}; use crate::blinded_path::message::{MessageContext, OffersContext}; -use crate::events::{ClosureReason, Event, HTLCDestination, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCHandlingType, PaymentFailureReason, PaymentPurpose}; use crate::ln::channelmanager::{Bolt12PaymentError, MAX_SHORT_LIVED_RELATIVE_EXPIRY, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; use crate::types::features::Bolt12InvoiceFeatures; use crate::ln::functional_test_utils::*; @@ -2310,7 +2310,7 @@ fn rejects_keysend_to_non_static_invoice_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(payment_preimage) - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); do_pass_along_path(args); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/onion_route_tests.rs 
b/lightning/src/ln/onion_route_tests.rs index eac65d095ca..633b05529c3 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -13,7 +13,7 @@ use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::{EntropySource, NodeSigner, Recipient}; -use crate::events::{Event, HTLCDestination, PathFailure, PaymentFailureReason}; +use crate::events::{Event, HTLCHandlingType, PathFailure, PaymentFailureReason}; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields}; @@ -51,11 +51,11 @@ use crate::ln::onion_utils::{construct_trampoline_onion_keys, construct_trampoli use super::msgs::OnionErrorPacket; -fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option) +fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_type: Option) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: FnMut(), { - run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, payment_secret, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update, expected_short_channel_id, expected_htlc_destination); + 
run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, payment_secret, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update, expected_short_channel_id, expected_htlc_type); } // test_case @@ -70,7 +70,7 @@ fn run_onion_failure_test_with_fail_intercept( payment_secret: &PaymentSecret, mut callback_msg: F1, mut callback_fail: F2, mut callback_node: F3, expected_retryable: bool, expected_error_reason: Option, expected_channel_update: Option, expected_short_channel_id: Option, - expected_htlc_destination: Option, + expected_htlc_type: Option, ) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: for <'a> FnMut(&'a mut msgs::UpdateFailHTLC), @@ -113,7 +113,7 @@ fn run_onion_failure_test_with_fail_intercept( let update_1_0 = match test_case { 0|100 => { // intermediate node failure; fail backward to 0 expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[expected_htlc_destination.clone().unwrap()]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[expected_htlc_type.clone().unwrap()]); check_added_monitors(&nodes[1], 1); let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1_0.update_fail_htlcs.len()+update_1_0.update_fail_malformed_htlcs.len()==1 && (update_1_0.update_fail_htlcs.len()==1 || update_1_0.update_fail_malformed_htlcs.len()==1)); @@ -144,10 +144,10 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[2]); expect_event!(&nodes[2], Event::PaymentClaimable); callback_node(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() }]); } else if 
test_case == 1 || test_case == 3 { expect_htlc_forward!(&nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_htlc_destination.clone().unwrap()]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_htlc_type.clone().unwrap()]); } check_added_monitors!(&nodes[2], 1); @@ -313,7 +313,7 @@ fn test_fee_failures() { run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), - Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); + Some(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment. @@ -359,7 +359,7 @@ fn test_onion_failure() { // positive case send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; + let next_hop_failure = HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; // intermediate node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -378,7 +378,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus. 
new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, true, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCHandlingType::InvalidOnion)); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -397,7 +397,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, false, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCHandlingType::InvalidOnion)); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -473,13 +473,13 @@ fn test_onion_failure() { // the UpdateAddHTLC that we sent. 
let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(LocalHTLCFailureReason::InvalidOnionVersion), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionVersion), None, Some(short_channel_id), Some(HTLCHandlingType::InvalidOnion)); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(LocalHTLCFailureReason::InvalidOnionHMAC), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionHMAC), None, Some(short_channel_id), Some(HTLCHandlingType::InvalidOnion)); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(LocalHTLCFailureReason::InvalidOnionKey), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionKey), None, Some(short_channel_id), Some(HTLCHandlingType::InvalidOnion)); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -538,7 +538,7 @@ fn test_onion_failure() { bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::UnknownNextPeer), - Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id })); + Some(NetworkUpdate::ChannelFailure{short_channel_id, 
is_permanent:true}), Some(short_channel_id), Some(HTLCHandlingType::UnknownNextHop { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) @@ -594,7 +594,7 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); + }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCHandlingType::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -607,7 +607,7 @@ fn test_onion_failure() { } } } - }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingType::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -621,7 +621,7 @@ fn test_onion_failure() { } } } - }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingType::FailedPayment { payment_hash })); let short_channel_id = 
channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { @@ -906,7 +906,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { run_onion_failure_test( name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_reason), Some(network_update), Some(short_channel_id), - Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), + Some(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; @@ -1362,7 +1362,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, payment_amount); nodes[1].node.fail_htlc_backwards_with_reason(&payment_hash, failure_code); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1497,7 +1497,7 @@ fn test_phantom_onion_hmac_failure() { } }; nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1574,7 +1574,7 @@ fn test_phantom_invalid_onion_payload() { } } nodes[1].node.process_pending_htlc_forwards(); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1632,7 +1632,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1679,7 +1679,7 @@ fn test_phantom_failure_too_low_cltv() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1730,7 +1730,7 @@ fn test_phantom_failure_modified_cltv() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1779,7 +1779,7 @@ 
fn test_phantom_failure_expires_too_soon() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1826,7 +1826,7 @@ fn test_phantom_failure_too_low_recv_amt() { nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1884,7 +1884,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1933,7 +1933,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_amt_msat, None, route.paths[0].hops.last().unwrap().pubkey); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], 
vec![HTLCHandlingType::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 27693bb5cac..dea18f9fc02 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -14,7 +14,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::EntropySource; -use crate::events::{ClosureReason, Event, HTLCDestination, PathFailure, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCHandlingType, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; @@ -128,7 +128,7 @@ fn mpp_retry() { // Attempt to forward the payment and complete the 2nd path's failure. 
expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -233,7 +233,7 @@ fn mpp_retry_overpay() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }] ); @@ -329,7 +329,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingType::FailedPayment { payment_hash }]); let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); @@ -337,7 +337,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), 
channel_id: chan_3_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); @@ -565,14 +565,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); check_added_monitors!(nodes[2], 1); let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -663,7 +663,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: 
Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] ); check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected @@ -914,14 +914,14 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // previous hop channel is already on-chain, but it makes nodes[2] willing to see additional // incoming HTLCs with the same payment hash later. nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + [HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the HTLC-Timeout transaction beyond 1 conf). 
For dust HTLCs, the HTLC is considered resolved @@ -1199,7 +1199,7 @@ fn test_fulfill_restart_failure() { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); @@ -1657,7 +1657,7 @@ fn abandoned_send_payment_idempotent() { check_send_rejected!(); nodes[1].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::FailedPayment { payment_hash: first_payment_hash }]); // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the // PaymentId. @@ -1939,7 +1939,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. 
nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::UnknownNextHop { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -2015,7 +2015,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::InvalidForward { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::InvalidForward { requested_forward_scid: intercept_scid }]); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); @@ -2231,7 +2231,7 @@ fn do_automatic_retries(test: AutoRetry) { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: $failing_channel_id, }]); @@ -2715,7 +2715,7 @@ fn fails_paying_after_rejected_by_payee() { expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], 
[HTLCHandlingType::FailedPayment { payment_hash }]); pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, PaymentFailureReason::RecipientRejected); } @@ -3022,7 +3022,7 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); check_added_monitors(&nodes[1], 1); @@ -3213,7 +3213,7 @@ fn test_simple_partial_retry() { commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); check_added_monitors(&nodes[1], 2); @@ -3406,7 +3406,7 @@ fn test_threaded_payment_retries() { nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] + &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] ); check_added_monitors(&nodes[1], 1); @@ -3621,7 +3621,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { if fail_payment { // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. 
We go ahead // and expire both immediately, though, by connecting another 4 blocks. - let reason = HTLCDestination::FailedPayment { payment_hash }; + let reason = HTLCHandlingType::FailedPayment { payment_hash }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]); connect_blocks(&nodes[3], 4); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); @@ -3768,7 +3768,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { }, (false, true) => { nodes[1].node.claim_funds(our_payment_preimage); - let expected_destinations = vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]; + let expected_destinations = vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], expected_destinations); pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, our_payment_hash, PaymentFailureReason::RecipientRejected); } @@ -3816,7 +3816,7 @@ fn test_retry_custom_tlvs() { // Attempt to forward the payment and complete the path's failure. 
expect_pending_htlcs_forwardable!(&nodes[1]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2_id }]); @@ -3994,7 +3994,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true); } else { // Expect fail back - let expected_destinations = vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]; + let expected_destinations = vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); @@ -4003,7 +4003,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ - HTLCDestination::NextHopChannel { + HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); @@ -4089,7 +4089,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] ); check_added_monitors(&nodes[2], 1); @@ -4147,7 +4147,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); 
expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(), - &[HTLCDestination::FailedPayment {payment_hash}]); + &[HTLCHandlingType::FailedPayment {payment_hash}]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); @@ -4156,7 +4156,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); @@ -4237,7 +4237,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // The forwarding node should reject forwarding it as expected. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); @@ -4404,7 +4404,7 @@ fn test_non_strict_forwarding() { }; // The failure to forward will refer to the channel given in the onion. 
expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index dfa0e8817ed..79ddeef1ab8 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,7 +12,7 @@ //! LSP). use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCDestination}; +use crate::events::{ClosureReason, Event, HTLCHandlingType}; use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields}; use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::routing::gossip::RoutingFees; @@ -75,7 +75,7 @@ fn test_priv_forwarding_rejection() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] ); check_added_monitors(&nodes[1], 1); @@ -445,7 +445,7 @@ fn test_inbound_scid_privacy() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: 
last_hop[0].channel_id }] ); check_added_monitors(&nodes[1], 1); @@ -504,7 +504,7 @@ fn test_scid_alias_returned() { commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -530,7 +530,7 @@ fn test_scid_alias_returned() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] ); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 15df7f3293e..561ec7750bf 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -1,5 +1,5 @@ use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{Event, HTLCDestination}; +use crate::events::{Event, HTLCHandlingType}; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channelmanager::PaymentId; use crate::ln::channelmanager::RecipientOnionFields; @@ -142,7 +142,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { expect_pending_htlcs_forwardable!(remote_node); expect_htlc_handling_failed_destinations!( remote_node.node.get_and_clear_pending_events(), - 
&[HTLCDestination::FailedPayment { payment_hash }] + &[HTLCHandlingType::FailedPayment { payment_hash }] ); check_added_monitors(remote_node, 1); @@ -338,7 +338,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // `stfu`, the `update_fail/fulfill` will go into the holding cell. if fail_htlc { nodes[1].node.fail_htlc_backwards(&payment_hash2); - let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash2 }; + let failed_payment = HTLCHandlingType::FailedPayment { payment_hash: payment_hash2 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failed_payment]); } else { nodes[1].node.claim_funds(payment_preimage2); @@ -388,7 +388,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // Have nodes[0] fail/claim nodes[1]'s payment. if fail_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash1); - let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash1 }; + let failed_payment = HTLCHandlingType::FailedPayment { payment_hash: payment_hash1 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], vec![failed_payment]); } else { nodes[0].node.claim_funds(payment_preimage1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index e5f4bc20018..b0556c36d7f 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -15,7 +15,7 @@ use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep}; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; -use crate::events::{ClosureReason, Event, HTLCDestination}; +use crate::events::{ClosureReason, Event, HTLCHandlingType}; use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -1112,7 +1112,7 @@ fn 
do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } if !claim_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } @@ -1178,7 +1178,7 @@ fn removed_payment_no_manager_persistence() { let node_encoded = nodes[1].node.encode(); nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingType::FailedPayment { payment_hash }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1210,7 +1210,7 @@ fn removed_payment_no_manager_persistence() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1324,7 +1324,7 @@ fn test_htlc_localremoved_persistence() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], 
&updates.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash: mismatch_payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); // Save the update_fail_htlc message for later comparison. diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 934ca0d5fdc..018ea36776d 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -13,7 +13,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::Confirm; -use crate::events::{Event, ClosureReason, HTLCDestination}; +use crate::events::{Event, ClosureReason, HTLCHandlingType}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent}; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; @@ -130,7 +130,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { } else { // Confirm the timeout tx and check that we fail the HTLC backwards connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new())); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); } check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index b14e2bf06a8..c93d51385a8 100644 --- 
a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -13,7 +13,7 @@ use crate::sign::{EntropySource, SignerProvider}; use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; -use crate::events::{Event, HTLCDestination, ClosureReason}; +use crate::events::{Event, HTLCHandlingType, ClosureReason}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; @@ -468,7 +468,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] + &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] ); check_added_monitors(&nodes[1], 1); @@ -1336,7 +1336,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { if use_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash_opt.unwrap()); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[0], - [HTLCDestination::FailedPayment { payment_hash: payment_hash_opt.unwrap() }]); + [HTLCHandlingType::FailedPayment { payment_hash: payment_hash_opt.unwrap() }]); } else { *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() *= 10; nodes[0].node.timer_tick_occurred(); From 8727ffbba32e0a6638a96fa0f3d822145dedc29b Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 2 Apr 2025 08:58:32 -0400 Subject: [PATCH 09/12] ln+events+liquidity/refactor: rename FailedPayment to ReceiveFailed Rename variant to be more specific to the current context - a FailedPayment could be a payment that we failed to dispatch or one that we rejected on receive. 
--- lightning/src/events/mod.rs | 4 +- lightning/src/ln/async_payments_tests.rs | 10 ++-- lightning/src/ln/blinded_payment_tests.rs | 16 ++--- lightning/src/ln/chanmon_update_fail_tests.rs | 6 +- lightning/src/ln/channelmanager.rs | 26 ++++----- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/functional_tests.rs | 58 +++++++++---------- lightning/src/ln/monitor_tests.rs | 8 +-- lightning/src/ln/offers_tests.rs | 2 +- lightning/src/ln/onion_route_tests.rs | 22 +++---- lightning/src/ln/payment_tests.rs | 20 +++---- lightning/src/ln/quiescence_tests.rs | 6 +- lightning/src/ln/reload_tests.rs | 4 +- lightning/src/ln/shutdown_tests.rs | 2 +- 14 files changed, 94 insertions(+), 94 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 9dab9545fba..2f9a5267d0e 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -501,7 +501,7 @@ pub enum HTLCHandlingType { /// * The counterparty node modified the HTLC in transit, /// * A probing attack where an intermediary node is trying to detect if we are the ultimate /// recipient for a payment. - FailedPayment { + ReceiveFailed { /// The payment hash of the payment we attempted to process. 
payment_hash: PaymentHash }, @@ -519,7 +519,7 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingType, (0, requested_forward_scid, required), }, (3, InvalidOnion) => {}, - (4, FailedPayment) => { + (4, ReceiveFailed) => { (0, payment_hash, required), }, ); diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 87fab52050d..c67ff8ddef3 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -172,7 +172,7 @@ fn invalid_keysend_payment_secret() { PassAlongPathArgs::new(&nodes[0], &expected_route[0], amt_msat, payment_hash, ev.clone()) .with_payment_secret(invalid_payment_secret) .with_payment_preimage(keysend_preimage) - .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::ReceiveFailed { payment_hash }); do_pass_along_path(args); let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -698,7 +698,7 @@ fn amount_doesnt_match_invreq() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::ReceiveFailed { payment_hash }); do_pass_along_path(args); // Modify the invoice request stored in our outbounds to be the correct one, to make sure the @@ -914,7 +914,7 @@ fn invalid_async_receive_with_retry( nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingType::FailedPayment { payment_hash }], + &[HTLCHandlingType::ReceiveFailed { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); @@ -934,7 +934,7 @@ fn invalid_async_receive_with_retry( let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) 
.with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::ReceiveFailed { payment_hash }); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); @@ -1100,7 +1100,7 @@ fn expired_static_invoice_payment_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::ReceiveFailed { payment_hash }); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], false); nodes[2].logger.assert_log_contains( diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 46e80caebf4..e1d5ee68ed0 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -830,7 +830,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }] + nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -958,7 +958,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ); nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }] + nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); @@ -998,7 +998,7 @@ fn 
do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ChannelCheck => { @@ -1014,7 +1014,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ProcessPendingHTLCsCheck => { @@ -1024,7 +1024,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2], - vec![HTLCHandlingType::FailedPayment { payment_hash }]); + vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { @@ -1032,7 +1032,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); 
expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors(&nodes[2], 1); } } @@ -1121,7 +1121,7 @@ fn blinded_path_retries() { ($intro_node: expr) => { nodes[3].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }] + nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -2435,7 +2435,7 @@ fn test_trampoline_forward_rejection() { let args = PassAlongPathArgs::new(&nodes[0], route, amt_msat, payment_hash, first_message_event) .with_payment_preimage(payment_preimage) .without_claimable_event() - .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::ReceiveFailed { payment_hash }); do_pass_along_path(args); { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index eed842cdc96..4065ed443c5 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -824,7 +824,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_1 }]); 
check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1730,7 +1730,7 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_1 }]); check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -2511,7 +2511,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); } else { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 65cea4a72d3..bc21883e643 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5756,7 +5756,7 @@ where }, } } else { - HTLCHandlingType::FailedPayment { payment_hash } + HTLCHandlingType::ReceiveFailed { payment_hash } } }; @@ -5929,7 +5929,7 @@ where let reason = if $next_hop_unknown { HTLCHandlingType::UnknownNextHop { requested_forward_scid: short_chan_id } } else { - HTLCHandlingType::FailedPayment{ payment_hash } + HTLCHandlingType::ReceiveFailed{ payment_hash } }; failed_forwards.push((htlc_source, payment_hash, @@ -6268,7 +6268,7 @@ where cltv_expiry: Some(cltv_expiry), }), 
payment_hash, HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data), - HTLCHandlingType::FailedPayment { payment_hash: $payment_hash }, + HTLCHandlingType::ReceiveFailed { payment_hash: $payment_hash }, )); continue 'next_forwardable_htlc; } @@ -6826,7 +6826,7 @@ where let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); let failure_reason = LocalHTLCFailureReason::MPPTimeout; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingType::FailedPayment { payment_hash: htlc_source.1 }; + let receiver = HTLCHandlingType::ReceiveFailed { payment_hash: htlc_source.1 }; self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } @@ -6891,7 +6891,7 @@ where for htlc in payment.htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let receiver = HTLCHandlingType::FailedPayment { payment_hash: *payment_hash }; + let receiver = HTLCHandlingType::ReceiveFailed { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } @@ -7119,7 +7119,7 @@ where for htlc in htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let receiver = HTLCHandlingType::FailedPayment { payment_hash }; + let receiver = HTLCHandlingType::ReceiveFailed { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } return; @@ -7218,7 +7218,7 @@ where htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); let source = HTLCSource::PreviousHopData(htlc.prev_hop); let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data); - let receiver = HTLCHandlingType::FailedPayment { payment_hash }; + let receiver = 
HTLCHandlingType::ReceiveFailed { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); @@ -11799,7 +11799,7 @@ where let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::reason(reason, htlc_msat_height_data), - HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() })); + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash.clone() })); false } else { true } }); @@ -15037,7 +15037,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15218,7 +15218,7 @@ mod tests { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15263,7 +15263,7 @@ mod tests { 
check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15310,7 +15310,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15367,7 +15367,7 @@ mod tests { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash: mismatch_payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/functional_test_utils.rs 
b/lightning/src/ln/functional_test_utils.rs index de0d94a1f89..d1f7b817054 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2841,7 +2841,7 @@ pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expect fail_payment_along_path(nodes_to_fail_payment.as_slice()); expect_htlc_handling_failed_destinations!( path.last().unwrap().node.get_and_clear_pending_events(), - &[HTLCHandlingType::FailedPayment { payment_hash: *payment_hash }] + &[HTLCHandlingType::ReceiveFailed { payment_hash: *payment_hash }] ); } } @@ -3152,7 +3152,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash); - let expected_destinations: Vec = repeat(HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); + let expected_destinations: Vec = repeat(HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations); pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash, PaymentFailureReason::RecipientRejected); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index eeee5577dd0..d66de60e993 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1604,7 +1604,7 @@ pub fn test_fee_spike_violation_fails_htlc() { }; nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + 
expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2437,7 +2437,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac PostFailBackAction::FailOffChain => { nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], - vec![HTLCHandlingType::FailedPayment { payment_hash }]); + vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[2], 1); let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_fail = commitment_update.update_fail_htlcs[0].clone(); @@ -3474,7 +3474,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors!(nodes[2], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash.clone() }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -3659,7 +3659,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let (_, third_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: first_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3672,7 +3672,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: second_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: second_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3689,7 +3689,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[2], 1); nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: third_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: third_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -4801,7 +4801,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { connect_block(&nodes[1], &block); } - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -5700,10 +5700,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[4], 0); let failed_destinations = vec![ - HTLCHandlingType::FailedPayment { payment_hash: payment_hash_1 }, - HTLCHandlingType::FailedPayment { payment_hash: payment_hash_3 }, - HTLCHandlingType::FailedPayment { payment_hash: payment_hash_5 }, - HTLCHandlingType::FailedPayment { payment_hash: payment_hash_6 }, + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_1 }, + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_3 }, + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_5 }, + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_6 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); check_added_monitors!(nodes[4], 1); @@ -5721,8 +5721,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[5], 0); let failed_destinations_2 = vec![ - HTLCHandlingType::FailedPayment { payment_hash: payment_hash_2 }, - HTLCHandlingType::FailedPayment { payment_hash: payment_hash_4 }, + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_2 }, + HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_4 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); check_added_monitors!(nodes[5], 1); @@ -6171,7 +6171,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 
}; let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7489,7 +7489,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_2 }]); check_added_monitors!(nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7753,7 +7753,7 @@ pub fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. 
@@ -8034,7 +8034,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]); connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCHandlingType::FailedPayment { payment_hash: failed_payment_hash }]); + expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCHandlingType::ReceiveFailed { payment_hash: failed_payment_hash }]); match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} _ => panic!("Unexpected event"), @@ -8315,7 +8315,7 @@ pub fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_2 }]); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -8958,7 +8958,7 @@ pub fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment{ payment_hash: $payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed{ payment_hash: $payment_hash }]); check_added_monitors!(nodes[1], 1); // We should fail the payment back @@ -10169,8 +10169,8 @@ fn 
do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[1].node.fail_htlc_backwards(&our_payment_hash); let expected_destinations = vec![ - HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }, - HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }, + HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }, + HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations); nodes[1].node.process_pending_htlc_forwards(); @@ -10191,7 +10191,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -10299,7 +10299,7 @@ pub fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -10365,8 +10365,8 @@ pub fn test_double_partial_claim() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later let failed_destinations = vec![ - HTLCHandlingType::FailedPayment { payment_hash }, - 
HTLCHandlingType::FailedPayment { payment_hash }, + HTLCHandlingType::ReceiveFailed { payment_hash }, + HTLCHandlingType::ReceiveFailed { payment_hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); @@ -10575,7 +10575,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats @@ -10736,7 +10736,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", 2535000, 2530000), 1); @@ -10895,7 +10895,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], 
payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[1].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", expected_dust_exposure_msat, expected_dust_exposure_msat - 1), 1); @@ -11188,7 +11188,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); } else { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index bdb95bb0d62..3ae72331537 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -1217,7 +1217,7 @@ fn test_no_preimage_inbound_htlc_balances() { assert_eq!(as_htlc_timeout_claim.len(), 1); check_spends!(as_htlc_timeout_claim[0], as_txn[0]); expect_pending_htlcs_forwardable_conditions!(nodes[0], - [HTLCHandlingType::FailedPayment { payment_hash: to_a_failed_payment_hash }]); + [HTLCHandlingType::ReceiveFailed { payment_hash: to_a_failed_payment_hash }]); assert_eq!(as_pre_spend_claims, sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); @@ -1235,7 +1235,7 @@ fn test_no_preimage_inbound_htlc_balances() { nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); connect_blocks(&nodes[1], TEST_FINAL_CLTV - 
(ANTI_REORG_DELAY - 1)); expect_pending_htlcs_forwardable_conditions!(nodes[1], - [HTLCHandlingType::FailedPayment { payment_hash: to_b_failed_payment_hash }]); + [HTLCHandlingType::ReceiveFailed { payment_hash: to_b_failed_payment_hash }]); let bs_htlc_timeout_claim = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_timeout_claim.len(), 1); check_spends!(bs_htlc_timeout_claim[0], as_txn[0]); @@ -1422,7 +1422,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id())); false }, - Event::HTLCHandlingFailed { handling_type: HTLCHandlingType::FailedPayment { payment_hash }, .. } => { + Event::HTLCHandlingFailed { handling_type: HTLCHandlingType::ReceiveFailed { payment_hash }, .. } => { assert!(failed_payments.remove(payment_hash)); false }, @@ -1737,7 +1737,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { // pinnable claims, which the remainder of the test assumes. 
connect_blocks(&nodes[0], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], - [HTLCHandlingType::FailedPayment { payment_hash: failed_payment_hash }]); + [HTLCHandlingType::ReceiveFailed { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index f3b9e6d5bf7..fd573de1738 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2310,7 +2310,7 @@ fn rejects_keysend_to_non_static_invoice_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(payment_preimage) - .expect_failure(HTLCHandlingType::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingType::ReceiveFailed { payment_hash }); do_pass_along_path(args); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 633b05529c3..6348a494da9 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -144,7 +144,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[2]); expect_event!(&nodes[2], Event::PaymentClaimable); callback_node(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash.clone() }]); } else if test_case == 1 || test_case == 3 { expect_htlc_forward!(&nodes[2]); 
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_htlc_type.clone().unwrap()]); @@ -594,7 +594,7 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCHandlingType::FailedPayment { payment_hash })); + }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCHandlingType::ReceiveFailed { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -607,7 +607,7 @@ fn test_onion_failure() { } } } - }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingType::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingType::ReceiveFailed { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -621,7 +621,7 @@ fn test_onion_failure() { } } } - }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingType::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingType::ReceiveFailed { payment_hash })); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { @@ -1362,7 
+1362,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, payment_amount); nodes[1].node.fail_htlc_backwards_with_reason(&payment_hash, failure_code); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1497,7 +1497,7 @@ fn test_phantom_onion_hmac_failure() { } }; nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1574,7 +1574,7 @@ fn test_phantom_invalid_onion_payload() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1632,7 +1632,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + 
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1679,7 +1679,7 @@ fn test_phantom_failure_too_low_cltv() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1826,7 +1826,7 @@ fn test_phantom_failure_too_low_recv_amt() { nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash.clone() }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1933,7 +1933,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_amt_msat, None, route.paths[0].hops.last().unwrap().pubkey); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::FailedPayment { 
payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index dea18f9fc02..1da3d0bc46b 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -329,7 +329,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); @@ -565,7 +565,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] @@ -914,7 +914,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // previous hop channel is already on-chain, but it makes nodes[2] willing to see additional // incoming HTLCs with the same payment hash later. 
nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1199,7 +1199,7 @@ fn test_fulfill_restart_failure() { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); @@ -1657,7 +1657,7 @@ fn abandoned_send_payment_idempotent() { check_send_rejected!(); nodes[1].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::FailedPayment { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::ReceiveFailed { payment_hash: first_payment_hash }]); // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the // PaymentId. 
@@ -2715,7 +2715,7 @@ fn fails_paying_after_rejected_by_payee() { expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::ReceiveFailed { payment_hash }]); pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, PaymentFailureReason::RecipientRejected); } @@ -3621,7 +3621,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { if fail_payment { // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead // and expire both immediately, though, by connecting another 4 blocks. - let reason = HTLCHandlingType::FailedPayment { payment_hash }; + let reason = HTLCHandlingType::ReceiveFailed { payment_hash }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]); connect_blocks(&nodes[3], 4); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); @@ -3768,7 +3768,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { }, (false, true) => { nodes[1].node.claim_funds(our_payment_preimage); - let expected_destinations = vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]; + let expected_destinations = vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], expected_destinations); pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, our_payment_hash, PaymentFailureReason::RecipientRejected); } @@ -3994,7 +3994,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true); } else { // Expect fail back - let expected_destinations = 
vec![HTLCHandlingType::FailedPayment { payment_hash: our_payment_hash }]; + let expected_destinations = vec![HTLCHandlingType::ReceiveFailed { payment_hash: our_payment_hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); @@ -4147,7 +4147,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(), - &[HTLCHandlingType::FailedPayment {payment_hash}]); + &[HTLCHandlingType::ReceiveFailed {payment_hash}]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 561ec7750bf..d64225547ac 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -142,7 +142,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { expect_pending_htlcs_forwardable!(remote_node); expect_htlc_handling_failed_destinations!( remote_node.node.get_and_clear_pending_events(), - &[HTLCHandlingType::FailedPayment { payment_hash }] + &[HTLCHandlingType::ReceiveFailed { payment_hash }] ); check_added_monitors(remote_node, 1); @@ -338,7 +338,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // `stfu`, the `update_fail/fulfill` will go into the holding cell. 
if fail_htlc { nodes[1].node.fail_htlc_backwards(&payment_hash2); - let failed_payment = HTLCHandlingType::FailedPayment { payment_hash: payment_hash2 }; + let failed_payment = HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash2 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failed_payment]); } else { nodes[1].node.claim_funds(payment_preimage2); @@ -388,7 +388,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // Have nodes[0] fail/claim nodes[1]'s payment. if fail_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash1); - let failed_payment = HTLCHandlingType::FailedPayment { payment_hash: payment_hash1 }; + let failed_payment = HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash1 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], vec![failed_payment]); } else { nodes[0].node.claim_funds(payment_preimage1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index b0556c36d7f..c65a3e5276a 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1178,7 +1178,7 @@ fn removed_payment_no_manager_persistence() { let node_encoded = nodes[1].node.encode(); nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingType::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingType::ReceiveFailed { payment_hash }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1324,7 +1324,7 @@ fn test_htlc_localremoved_persistence() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[1]); - 
expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::FailedPayment { payment_hash: mismatch_payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingType::ReceiveFailed { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); // Save the update_fail_htlc message for later comparison. diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index c93d51385a8..e3454c47ba7 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -1336,7 +1336,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { if use_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash_opt.unwrap()); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[0], - [HTLCHandlingType::FailedPayment { payment_hash: payment_hash_opt.unwrap() }]); + [HTLCHandlingType::ReceiveFailed { payment_hash: payment_hash_opt.unwrap() }]); } else { *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() *= 10; nodes[0].node.timer_tick_occurred(); From 6e2f22a5cd2eb47064397c7c8eee546991135b35 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 2 Apr 2025 09:22:50 -0400 Subject: [PATCH 10/12] ln+events+liquidity/refactor: NextHopChannel renamed ForwardFailed Standardize naming within the HTLCHandlingType struct to present more consistent API terminology. 
--- lightning-liquidity/src/lsps2/service.rs | 6 ++- lightning/src/events/mod.rs | 4 +- lightning/src/ln/blinded_payment_tests.rs | 8 ++-- lightning/src/ln/chanmon_update_fail_tests.rs | 8 ++-- lightning/src/ln/channelmanager.rs | 20 ++++----- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/functional_tests.rs | 44 +++++++++---------- lightning/src/ln/monitor_tests.rs | 4 +- lightning/src/ln/onion_route_tests.rs | 6 +-- lightning/src/ln/payment_tests.rs | 30 ++++++------- lightning/src/ln/priv_short_conf_tests.rs | 8 ++-- lightning/src/ln/reload_tests.rs | 4 +- lightning/src/ln/reorg_tests.rs | 2 +- lightning/src/ln/shutdown_tests.rs | 2 +- 14 files changed, 76 insertions(+), 74 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 68732fa2238..a4a68cdd87d 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -879,8 +879,10 @@ where /// or if the payment queue is empty /// /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed - pub fn htlc_handling_failed(&self, handling_type: HTLCHandlingType) -> Result<(), APIError> { - if let HTLCHandlingType::NextHopChannel { channel_id, .. } = handling_type { + pub fn htlc_handling_failed( + &self, handling_type: HTLCHandlingType, + ) -> Result<(), APIError> { + if let HTLCHandlingType::ForwardFailed { channel_id, .. } = handling_type { let peer_by_channel_id = self.peer_by_channel_id.read().unwrap(); if let Some(counterparty_node_id) = peer_by_channel_id.get(&channel_id) { let outer_state_lock = self.per_peer_state.read().unwrap(); diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 2f9a5267d0e..0f74a210b66 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -470,7 +470,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason, pub enum HTLCHandlingType { /// We tried forwarding to a channel but failed to do so. 
An example of such an instance is when /// there is insufficient capacity in our outbound channel. - NextHopChannel { + ForwardFailed { /// The `node_id` of the next node. For backwards compatibility, this field is /// marked as optional, versions prior to 0.0.110 may not always be able to provide /// counterparty node information. @@ -508,7 +508,7 @@ pub enum HTLCHandlingType { } impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingType, - (0, NextHopChannel) => { + (0, ForwardFailed) => { (0, node_id, required), (2, channel_id, required), }, diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index e1d5ee68ed0..bbea42964fd 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -428,7 +428,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { ForwardCheckFail::InboundOnionCheck => HTLCHandlingType::InvalidOnion, ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, }; expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -459,7 +459,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { let failed_destination = match check { ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, }; expect_htlc_handling_failed_destinations!( 
nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -606,7 +606,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); expect_pending_htlcs_forwardable!($curr_node); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCHandlingType::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); + vec![HTLCHandlingType::ForwardFailed { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); }, ProcessPendingHTLCsCheck::FwdChannelClosed => { // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards, @@ -1243,7 +1243,7 @@ fn min_htlc() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] ); check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 4065ed443c5..7a78fa28f7c 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -905,7 +905,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { 
node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1752,7 +1752,7 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -2159,7 +2159,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -2549,7 +2549,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let mut reconnect_args = ReconnectArgs::new(&nodes[1], 
&nodes[2]); reconnect_args.pending_htlc_fails.0 = 1; reconnect_nodes(reconnect_args); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_claims.0 = 1; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bc21883e643..e5125ed67a5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3298,7 +3298,7 @@ macro_rules! handle_monitor_update_completion { } $self.finalize_claims(updates.finalized_claimed_htlcs); for failure in updates.failed_htlcs.drain(..) { - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(counterparty_node_id), channel_id }; $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } } } @@ -3918,7 +3918,7 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::DroppedPending; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -4042,7 +4042,7 @@ where let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::DroppedPending; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { @@ -5747,7 +5747,7 @@ where if let Some(outgoing_scid) = outgoing_scid_opt { match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) { Some((outgoing_counterparty_node_id, outgoing_channel_id)) => - HTLCHandlingType::NextHopChannel { + HTLCHandlingType::ForwardFailed { node_id: Some(*outgoing_counterparty_node_id), channel_id: *outgoing_channel_id, }, @@ -6114,7 +6114,7 @@ where let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCHandlingType::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCHandlingType::ForwardFailed { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); } else { forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); @@ -6970,7 +6970,7 @@ where for (htlc_src, payment_hash) 
in htlcs_to_fail.drain(..) { let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(counterparty_node_id.clone()), channel_id }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } @@ -8755,7 +8755,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } for htlc_source in dropped_htlcs.drain(..) { - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::DroppedPending); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -9581,7 +9581,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); let failure_reason = LocalHTLCFailureReason::ChannelClosed; - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(counterparty_node_id), channel_id }; let reason = HTLCFailReason::from_failure_code(failure_reason); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } @@ -11675,7 +11675,7 @@ where let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; let data = self.get_htlc_inbound_temp_fail_data(reason); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCHandlingType::NextHopChannel { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); + HTLCHandlingType::ForwardFailed { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); } let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); if let Some(channel_ready) = channel_ready_opt { @@ -14872,7 +14872,7 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::DroppedPending; - let receiver = HTLCHandlingType::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingType::ForwardFailed { node_id: Some(counterparty_node_id), channel_id }; let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d1f7b817054..0ff28e82876 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2130,7 +2130,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' if fail_backwards { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a, - vec![crate::events::HTLCHandlingType::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); + vec![crate::events::HTLCHandlingType::ForwardFailed{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); check_added_monitors!(node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); @@ -3194,7 +3194,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe node.node.handle_update_fail_htlc(prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node); if !update_next_node { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCHandlingType::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCHandlingType::ForwardFailed { node_id: 
Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); } } let events = node.node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index d66de60e993..f695211130b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1277,7 +1277,7 @@ pub fn holding_cell_htlc_counting() { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2389,7 +2389,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac // Check that nodes[1] fails the HTLC upstream expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingType::NextHopChannel { + vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); @@ -2411,7 +2411,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[1], ANTI_REORG_DELAY); // Expect handling another fail back event, but the HTLC is already gone expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingType::NextHopChannel { + vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); @@ -3529,7 +3529,7 @@ fn 
do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3595,7 +3595,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4865,7 +4865,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -5534,7 +5534,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Mine 
the HTLC timeout transaction on node B. mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -5736,12 +5736,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events let failed_destinations_3 = vec![ - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - HTLCHandlingType::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::ForwardFailed { node_id: 
Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); check_added_monitors!(nodes[3], 1); @@ -5794,13 +5794,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - let expected_destinations: Vec = repeat(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); + let expected_destinations: Vec = repeat(HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); } else { let expected_destinations: Vec = if announce_latest { - repeat(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() + repeat(HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() } else { - repeat(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() + repeat(HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); @@ -7356,7 +7356,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7425,7 +7425,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { } expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingType::NextHopChannel { + vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -10085,7 +10085,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // additional block built on top of the current chain. nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -10108,7 +10108,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // avoid the A<->B channel closing (even though it already has). This will generate a // spurious HTLCHandlingFailed event. 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingType::NextHopChannel { node_id: Some(node_c_id), channel_id }]); + vec![HTLCHandlingType::ForwardFailed { node_id: Some(node_c_id), channel_id }]); } } @@ -10308,7 +10308,7 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -10789,7 +10789,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let node_id_1 = nodes[1].node.get_our_node_id(); expect_htlc_handling_failed_destinations!( nodes[0].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(node_id_1), channel_id: chan_id_1 }] ); let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 3ae72331537..d242d7c1536 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -86,7 +86,7 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: 
Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1417,7 +1417,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ .iter().map(|a| *a).collect(); events.retain(|ev| { match ev { - Event::HTLCHandlingFailed { handling_type: HTLCHandlingType::NextHopChannel { node_id, channel_id }, .. } => { + Event::HTLCHandlingFailed { handling_type: HTLCHandlingType::ForwardFailed { node_id, channel_id }, .. } => { assert_eq!(*channel_id, chan_id); assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id())); false diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 6348a494da9..e42130b0db8 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -313,7 +313,7 @@ fn test_fee_failures() { run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), - Some(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); + Some(HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment. 
@@ -359,7 +359,7 @@ fn test_onion_failure() { // positive case send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000); - let next_hop_failure = HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; + let next_hop_failure = HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; // intermediate node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -906,7 +906,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { run_onion_failure_test( name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_reason), Some(network_update), Some(short_channel_id), - Some(HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), + Some(HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 1da3d0bc46b..ac03a18279d 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -128,7 +128,7 @@ fn mpp_retry() { // Attempt to forward the payment and complete the 2nd path's failure. 
expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -233,7 +233,7 @@ fn mpp_retry_overpay() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], - vec![HTLCHandlingType::NextHopChannel { + vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }] ); @@ -337,7 +337,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); @@ -572,7 +572,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let update_fail_0 = get_htlc_update_msgs!(&nodes[3], 
&nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); check_added_monitors!(nodes[2], 1); let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -663,7 +663,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] ); check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected @@ -921,7 +921,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + [HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the 
HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved @@ -2231,7 +2231,7 @@ fn do_automatic_retries(test: AutoRetry) { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], - vec![HTLCHandlingType::NextHopChannel { + vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: $failing_channel_id, }]); @@ -3022,7 +3022,7 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); check_added_monitors(&nodes[1], 1); @@ -3213,7 +3213,7 @@ fn test_simple_partial_retry() { commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); check_added_monitors(&nodes[1], 2); @@ -3816,7 +3816,7 @@ fn test_retry_custom_tlvs() { // Attempt to forward the payment and complete the path's failure. 
expect_pending_htlcs_forwardable!(&nodes[1]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], - vec![HTLCHandlingType::NextHopChannel { + vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2_id }]); @@ -4003,7 +4003,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ - HTLCHandlingType::NextHopChannel { + HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); @@ -4089,7 +4089,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] ); check_added_monitors(&nodes[2], 1); @@ -4156,7 +4156,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); @@ -4237,7 +4237,7 @@ fn 
test_htlc_forward_considers_anchor_outputs_value() { // The forwarding node should reject forwarding it as expected. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingType::NextHopChannel { + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); @@ -4404,7 +4404,7 @@ fn test_non_strict_forwarding() { }; // The failure to forward will refer to the channel given in the onion. expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 79ddeef1ab8..8a3293c5601 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -75,7 +75,7 @@ fn test_priv_forwarding_rejection() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] ); check_added_monitors(&nodes[1], 1); @@ -445,7 +445,7 @@ fn test_inbound_scid_privacy() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - 
&[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }] ); check_added_monitors(&nodes[1], 1); @@ -504,7 +504,7 @@ fn test_scid_alias_returned() { commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -530,7 +530,7 @@ fn test_scid_alias_returned() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] ); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c65a3e5276a..e8268ca9ad4 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1112,7 +1112,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } if !claim_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } @@ -1210,7 +1210,7 @@ fn removed_payment_no_manager_persistence() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 018ea36776d..525c7b64504 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -130,7 +130,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { } else { // Confirm the timeout tx and check that we fail the HTLC backwards connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new())); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); } check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index e3454c47ba7..56ac43f256d 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ 
b/lightning/src/ln/shutdown_tests.rs @@ -468,7 +468,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] + &[HTLCHandlingType::ForwardFailed { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] ); check_added_monitors(&nodes[1], 1); From 310110d3e659e0321d4a055cd3b829f633cfbb7a Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 2 Apr 2025 09:44:28 -0400 Subject: [PATCH 11/12] ln+events: add htlc failure reason to HTLCHandlingFailed --- lightning/src/events/mod.rs | 31 ++++++++++++++++++++++++++++-- lightning/src/ln/channelmanager.rs | 16 +++++++++------ lightning/src/ln/onion_utils.rs | 18 +++++++++++++++++ 3 files changed, 57 insertions(+), 8 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 0f74a210b66..bb182d5c186 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -24,7 +24,7 @@ use crate::chain::transaction; use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; use crate::types::features::ChannelTypeFeatures; -use crate::ln::msgs; +use crate::ln::{msgs, LocalHTLCFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; use crate::offers::invoice::Bolt12Invoice; @@ -524,6 +524,25 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingType, }, ); +/// The reason for HTLC failures in [`Event::HTLCHandlingFailed`]. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum HTLCHandlingFailureReason { + /// The forwarded HTLC was failed back by the downstream node with an encrypted error reason. + Downstream, + /// The HTLC was failed locally by our node. 
+ Local { + /// The reason that our node chose to fail the HTLC. + reason: LocalHTLCFailureReason, + }, +} + +impl_writeable_tlv_based_enum!(HTLCHandlingFailureReason, + (0, Downstream) => {}, + (1, Local) => { + (0, reason, required), + }, +); + /// Will be used in [`Event::HTLCIntercepted`] to identify the next hop in the HTLC's path. /// Currently only used in serialization for the sake of maintaining compatibility. More variants /// will be added for general-purpose HTLC forward intercepts as well as trampoline forward @@ -1449,6 +1468,10 @@ pub enum Event { prev_channel_id: ChannelId, /// The type of HTLC that was handled. handling_type: HTLCHandlingType, + /// The reason that the HTLC failed. + /// + /// This field will be `None` only for objects serialized prior to LDK 0.2.0. + handling_failure: Option }, /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event /// requires confirmed external funds to be readily available to spend. @@ -1752,10 +1775,11 @@ impl Writeable for Event { (8, path.blinded_tail, option), }) }, - &Event::HTLCHandlingFailed { ref prev_channel_id, ref handling_type } => { + &Event::HTLCHandlingFailed { ref prev_channel_id, ref handling_type, ref handling_failure } => { 25u8.write(writer)?; write_tlv_fields!(writer, { (0, prev_channel_id, required), + (1, handling_failure, option), (2, handling_type, required), }) }, @@ -2201,14 +2225,17 @@ impl MaybeReadable for Event { 25u8 => { let mut f = || { let mut prev_channel_id = ChannelId::new_zero(); + let mut handling_failure = None; let mut handling_type_opt = UpgradableRequired(None); read_tlv_fields!(reader, { (0, prev_channel_id, required), + (1, handling_failure, option), (2, handling_type_opt, upgradable_required), }); Ok(Some(Event::HTLCHandlingFailed { prev_channel_id, handling_type: _init_tlv_based_struct_field!(handling_type_opt, upgradable_required), + handling_failure, })) }; f() diff --git a/lightning/src/ln/channelmanager.rs 
b/lightning/src/ln/channelmanager.rs index e5125ed67a5..1d80fa07876 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5786,8 +5786,8 @@ where &update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx ) { Ok(decoded_onion) => decoded_onion, - Err((htlc_fail, _)) => { - htlc_fails.push((htlc_fail, HTLCHandlingType::InvalidOnion)); + Err((htlc_fail, reason)) => { + htlc_fails.push((htlc_fail, HTLCHandlingType::InvalidOnion, reason.into())); continue; }, }; @@ -5815,7 +5815,7 @@ where is_intro_node_blinded_forward, &shared_secret, ); let handling_type = get_failed_htlc_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, handling_type)); + htlc_fails.push((htlc_fail, handling_type, reason.into())); continue; }, // The incoming channel no longer exists, HTLCs should be resolved onchain instead. @@ -5832,7 +5832,7 @@ where is_intro_node_blinded_forward, &shared_secret, ); let handling_type = get_failed_htlc_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, handling_type)); + htlc_fails.push((htlc_fail, handling_type, reason.into())); continue; } } @@ -5844,7 +5844,9 @@ where Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), Err(inbound_err) => { let handling_type = get_failed_htlc_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err), handling_type)); + let htlc_failure = inbound_err.reason.into(); + htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, + shared_secret, inbound_err), handling_type, htlc_failure)); }, } } @@ -5856,7 +5858,7 @@ where incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect() ); self.forward_htlcs_without_forward_event(&mut [pending_forwards]); - for (htlc_fail, handling_type) in htlc_fails.drain(..) 
{ + for (htlc_fail, handling_type, handling_failure) in htlc_fails.drain(..) { let failure = match htlc_fail { HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC { htlc_id: fail_htlc.htlc_id, @@ -5872,6 +5874,7 @@ where self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed { prev_channel_id: incoming_channel_id, handling_type, + handling_failure: Some(handling_failure), }, None)); } } @@ -7055,6 +7058,7 @@ where pending_events.push_back((events::Event::HTLCHandlingFailed { prev_channel_id: *channel_id, handling_type, + handling_failure: Some(onion_error.into()), }, None)); }, } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 196895188ae..2190c766d77 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -11,6 +11,7 @@ use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; use crate::crypto::streams::ChaChaReader; +use crate::events::HTLCHandlingFailureReason; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; use crate::ln::msgs; @@ -1627,6 +1628,12 @@ impl Into for u16 { } } +impl Into for LocalHTLCFailureReason { + fn into(self) -> HTLCHandlingFailureReason { + HTLCHandlingFailureReason::Local { reason: self } + } +} + impl_writeable_tlv_based_enum!(LocalHTLCFailureReason, (0, TemporaryNodeFailure) => {}, (2, PermanentNodeFailure) => {}, @@ -1891,6 +1898,17 @@ impl HTLCFailReason { } } +impl Into for &HTLCFailReason { + fn into(self) -> HTLCHandlingFailureReason { + match self.0 { + HTLCFailReasonRepr::LightningError { .. } => HTLCHandlingFailureReason::Downstream, + HTLCFailReasonRepr::Reason { reason, .. } => { + HTLCHandlingFailureReason::Local { reason } + }, + } + } +} + /// Allows `decode_next_hop` to return the next hop packet bytes for either payments or onion /// message forwards. 
pub(crate) trait NextPacketBytes: AsMut<[u8]> { From cbf32608e51a878e7c17d5ddc12c262bc9f22f36 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 2 Apr 2025 10:18:39 -0400 Subject: [PATCH 12/12] ln+events: deprecate UnknownNextHop in HTLCHandlingType This variant of HTLCHandlingType contains information about the failure cause along with its type - an UnknownNextHop is just an InvalidForward that has the failure reason UnknownNextPeer. This commit deprecates the variant's use, while still writing it to disk to allow the option to downgrade. --- lightning-liquidity/src/lsps2/service.rs | 4 +- lightning/src/events/mod.rs | 48 +++++++++++++++++++++-- lightning/src/ln/blinded_payment_tests.rs | 4 +- lightning/src/ln/channelmanager.rs | 4 +- lightning/src/ln/onion_route_tests.rs | 8 ++-- lightning/src/ln/payment_tests.rs | 4 +- 6 files changed, 56 insertions(+), 16 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index a4a68cdd87d..1631ffffb91 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -879,9 +879,7 @@ where /// or if the payment queue is empty /// /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed - pub fn htlc_handling_failed( - &self, handling_type: HTLCHandlingType, - ) -> Result<(), APIError> { + pub fn htlc_handling_failed(&self, handling_type: HTLCHandlingType) -> Result<(), APIError> { if let HTLCHandlingType::ForwardFailed { channel_id, ..
} = handling_type { let peer_by_channel_id = self.peer_by_channel_id.read().unwrap(); if let Some(counterparty_node_id) = peer_by_channel_id.get(&channel_id) { diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index bb182d5c186..47cf68b90d1 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -479,12 +479,16 @@ pub enum HTLCHandlingType { channel_id: ChannelId, }, /// Scenario where we are unsure of the next node to forward the HTLC to. + /// + /// Deprecated: will only be used in versions before LDK v0.2.0. UnknownNextHop { /// Short channel id we are requesting to forward an HTLC to. requested_forward_scid: u64, }, /// We couldn't forward to the outgoing scid. An example would be attempting to send a duplicate /// intercept HTLC. + /// + /// In LDK v0.2.0 and greater, this variant replaces [`Self::UnknownNextHop`]. InvalidForward { /// Short channel id we are requesting to forward an HTLC to. requested_forward_scid: u64 @@ -1777,10 +1781,27 @@ impl Writeable for Event { }, &Event::HTLCHandlingFailed { ref prev_channel_id, ref handling_type, ref handling_failure } => { 25u8.write(writer)?; + + // The [`HTLCHandlingType::UnknownNextHop`] variant is deprecated, but we want to + // continue writing it to allow downgrading. Detect the case where we're + // representing it as [`HTLCHandlingType::InvalidForward`] and + // [`LocalHTLCFailureReason::UnknownNextPeer`] and write the old variant instead.
+ let downgradable_type = match (handling_type, handling_failure) { + (HTLCHandlingType::InvalidForward { requested_forward_scid }, + Some(HTLCHandlingFailureReason::Local { reason })) + if *reason == LocalHTLCFailureReason::UnknownNextPeer => + { + HTLCHandlingType::UnknownNextHop { + requested_forward_scid: *requested_forward_scid, + } + } + _ => handling_type.clone() + }; + write_tlv_fields!(writer, { (0, prev_channel_id, required), (1, handling_failure, option), - (2, handling_type, required), + (2, downgradable_type, required), }) }, &Event::BumpTransaction(ref event)=> { @@ -2232,11 +2253,32 @@ impl MaybeReadable for Event { (1, handling_failure, option), (2, handling_type_opt, upgradable_required), }); - Ok(Some(Event::HTLCHandlingFailed { + + let mut event = Event::HTLCHandlingFailed { prev_channel_id, handling_type: _init_tlv_based_struct_field!(handling_type_opt, upgradable_required), handling_failure, - })) + }; + + // The [`HTLCHandlingType::UnknownNextHop`] variant is deprecated, but we + // continue writing it to allow downgrading. If it was written, upgrade + // it to its new representation of [`HTLCHandlingType::InvalidForward`] and + // [`LocalHTLCFailureReason::UnknownNextPeer`]. This will cover the case + // where we read a legacy event + match event { Event::HTLCHandlingFailed { ref handling_type, ..
} => { + if let HTLCHandlingType::UnknownNextHop { requested_forward_scid } = handling_type { + event = Event::HTLCHandlingFailed { + prev_channel_id, + handling_type: HTLCHandlingType::InvalidForward { requested_forward_scid: *requested_forward_scid }, + handling_failure: Some(LocalHTLCFailureReason::UnknownNextPeer.into()), + } + } + } + _ => panic!("HTLCHandlingFailed wrong type") + } + + Ok(Some(event)) }; f() }, diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index bbea42964fd..42c0a41d57b 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -626,7 +626,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCHandlingType::UnknownNextHop { requested_forward_scid: $failed_scid }]); + vec![HTLCHandlingType::InvalidForward { requested_forward_scid: $failed_scid }]); $curr_node.node.process_pending_htlc_forwards(); }, } @@ -725,7 +725,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { if intercept_node_fails { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::UnknownNextHop { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1d80fa07876..d1bc078abd8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5732,7 +5732,7 @@ 
where }); let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer); - let destination = HTLCHandlingType::UnknownNextHop { requested_forward_scid: short_channel_id }; + let destination = HTLCHandlingType::InvalidForward { requested_forward_scid: short_channel_id }; self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &reason, destination); } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted @@ -5751,7 +5751,7 @@ where node_id: Some(*outgoing_counterparty_node_id), channel_id: *outgoing_channel_id, }, - None => HTLCHandlingType::UnknownNextHop { + None => HTLCHandlingType::InvalidForward { requested_forward_scid: outgoing_scid, }, } diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index e42130b0db8..5baf7cd96e6 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -538,7 +538,7 @@ fn test_onion_failure() { bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::UnknownNextPeer), - Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCHandlingType::UnknownNextHop { requested_forward_scid: short_channel_id })); + Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCHandlingType::InvalidForward { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) @@ -1730,7 +1730,7 @@ fn test_phantom_failure_modified_cltv() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( 
nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingType::InvalidForward { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1779,7 +1779,7 @@ fn test_phantom_failure_expires_too_soon() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingType::InvalidForward { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1884,7 +1884,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingType::InvalidForward { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index ac03a18279d..ddc8f528aab 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -1939,7 +1939,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. 
nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::UnknownNextHop { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -3406,7 +3406,7 @@ fn test_threaded_payment_retries() { nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingType::UnknownNextHop { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] + &[HTLCHandlingType::InvalidForward { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] ); check_added_monitors(&nodes[1], 1);