diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ef567983962..10f2b9d76e4 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6137,26 +6137,30 @@ where /// /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is /// disconnected). - #[rustfmt::skip] - pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy - (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) - where L::Target: Logger { + pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy( + &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L, + ) where + L::Target: Logger, + { // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` // (see equivalent if condition there). assert!(!self.context.channel_state.can_generate_new_commitment()); let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update - let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger); + let fulfill_resp = + self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger); self.context.latest_monitor_update_id = mon_update_id; if let UpdateFulfillFetch::NewClaim { update_blocked, .. } = fulfill_resp { assert!(update_blocked); // The HTLC must have ended up in the holding cell. } } - #[rustfmt::skip] fn get_update_fulfill_htlc( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, payment_info: Option, logger: &L, - ) -> UpdateFulfillFetch where L::Target: Logger { + ) -> UpdateFulfillFetch + where + L::Target: Logger, + { // Either ChannelReady got set (which means it won't be unset) or there is no way any // caller thought we could have something claimed (cause we wouldn't have accepted in an // incoming HTLC anyway). 
If we got to ShutdownComplete, callers aren't allowed to call us, @@ -6173,23 +6177,33 @@ where let mut htlc_value_msat = 0; for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { - debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array())); - log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}", - htlc.htlc_id, htlc.payment_hash, payment_preimage_arg); + let expected_hash = + PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()); + debug_assert_eq!(htlc.payment_hash, expected_hash); + log_debug!( + logger, + "Claiming inbound HTLC id {} with payment hash {} with preimage {}", + htlc.htlc_id, + htlc.payment_hash, + payment_preimage_arg + ); match htlc.state { InboundHTLCState::Committed => {}, InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill(_) = reason { } else { log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id()); - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); + debug_assert!( + false, + "Tried to fulfill an HTLC that was already failed" + ); } return UpdateFulfillFetch::DuplicateClaim {}; }, _ => { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); // Don't return in release mode here so that we can update channel_monitor - } + }, } pending_idx = idx; htlc_value_msat = htlc.amount_msat; @@ -6228,53 +6242,85 @@ where return UpdateFulfillFetch::DuplicateClaim {}; } }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } | - &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => - { + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } + | &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id()); // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: true }; + debug_assert!( + false, + "Tried to fulfill an HTLC that was already failed" + ); + return UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + update_blocked: true, + }; } }, - _ => {} + _ => {}, } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32()); + log_trace!( + logger, + "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", + &self.context.channel_id(), + self.context.channel_state.to_u32() + ); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { - payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, + payment_preimage: payment_preimage_arg, + htlc_id: htlc_id_arg, }); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: true }; + return UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + update_blocked: true, + }; } { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; if let InboundHTLCState::Committed = htlc.state { } else { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: true }; + debug_assert!( + false, + "Have an inbound HTLC we tried to claim before it was fully committed to" + ); + return UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + update_blocked: true, + }; } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id); - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); + log_trace!( + logger, + "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", + &htlc.payment_hash, + &self.context.channel_id + ); + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill( + payment_preimage_arg.clone(), + )); } - UpdateFulfillFetch::NewClaim { - monitor_update, - htlc_value_msat, - update_blocked: false, - } + UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: false } } - #[rustfmt::skip] pub fn get_update_fulfill_htlc_and_commit( &mut self, htlc_id: u64, payment_preimage: PaymentPreimage, payment_info: Option, logger: &L, - ) -> UpdateFulfillCommitFetch where L::Target: Logger { + ) -> UpdateFulfillCommitFetch + where + L::Target: Logger, + { let release_cs_monitor = self.context.blocked_monitor_updates.is_empty(); match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) { - UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, update_blocked } => { + UpdateFulfillFetch::NewClaim { + mut monitor_update, + htlc_value_msat, + update_blocked, + } => { // Even if we aren't supposed to let new monitor updates with commitment state // updates run, we still need to push the preimage ChannelMonitorUpdateStep no // matter what. 
Sadly, to push a new monitor update which flies before others @@ -6287,8 +6333,10 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } else { - let new_mon_id = self.context.blocked_monitor_updates.get(0) - .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id); + let blocked_upd = self.context.blocked_monitor_updates.get(0); + let new_mon_id = blocked_upd + .map(|upd| upd.update.update_id) + .unwrap_or(monitor_update.update_id); monitor_update.update_id = new_mon_id; for held_update in self.context.blocked_monitor_updates.iter_mut() { held_update.update.update_id += 1; @@ -6296,14 +6344,21 @@ where if !update_blocked { debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); let update = self.build_commitment_no_status_check(logger); - self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate { - update, - }); + self.context + .blocked_monitor_updates + .push(PendingChannelMonitorUpdate { update }); } } - self.monitor_updating_paused(false, !update_blocked, false, Vec::new(), Vec::new(), Vec::new()); - UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, } + self.monitor_updating_paused( + false, + !update_blocked, + false, + Vec::new(), + Vec::new(), + Vec::new(), + ); + UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } }, UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, } @@ -6590,19 +6645,30 @@ where Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) } - #[rustfmt::skip] - pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option), ChannelError> { - if self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() { - return Err(ChannelError::WarnAndDisconnect("Got fulfill HTLC message while quiescent".to_owned())); + pub fn update_fulfill_htlc( + &mut self, msg: &msgs::UpdateFulfillHTLC, + ) -> Result<(HTLCSource, u64, Option), ChannelError> { + if self.context.channel_state.is_remote_stfu_sent() + || self.context.channel_state.is_quiescent() + { + return Err(ChannelError::WarnAndDisconnect( + "Got fulfill HTLC message while quiescent".to_owned(), + )); } if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { - return Err(ChannelError::close("Got fulfill HTLC message when channel was not in an operational state".to_owned())); + return Err(ChannelError::close( + "Got fulfill HTLC message when channel was not in an operational state".to_owned(), + )); } if self.context.channel_state.is_peer_disconnected() { - return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned())); + return Err(ChannelError::close( + "Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned(), + )); } - self.mark_outbound_htlc_removed(msg.htlc_id, OutboundHTLCOutcome::Success(msg.payment_preimage)).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat)) + let outcome = OutboundHTLCOutcome::Success(msg.payment_preimage); + self.mark_outbound_htlc_removed(msg.htlc_id, outcome) + .map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat)) } #[rustfmt::skip] @@ -6911,11 +6977,17 @@ where Ok(()) } - #[rustfmt::skip] - fn commitment_signed_update_monitor(&mut self, mut update: ChannelMonitorUpdateStep, logger: &L) -> Result, ChannelError> - where 
L::Target: Logger + fn commitment_signed_update_monitor( + &mut self, mut update: ChannelMonitorUpdateStep, logger: &L, + ) -> Result, ChannelError> + where + L::Target: Logger, { - if self.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { + if self + .holder_commitment_point + .advance(&self.context.holder_signer, &self.context.secp_ctx, logger) + .is_err() + { // We only fail to advance our commitment point/number if we're currently // waiting for our signer to unblock and provide a commitment point. // During post-funding channel operation, we only advance our point upon @@ -6942,7 +7014,8 @@ where if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); + htlc.state = + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); need_commitment = true; } } @@ -6970,13 +7043,15 @@ where match &mut update { ChannelMonitorUpdateStep::LatestHolderCommitment { - claimed_htlcs: ref mut update_claimed_htlcs, .. + claimed_htlcs: ref mut update_claimed_htlcs, + .. } => { debug_assert!(update_claimed_htlcs.is_empty()); *update_claimed_htlcs = claimed_htlcs.clone(); }, ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { - claimed_htlcs: ref mut update_claimed_htlcs, .. + claimed_htlcs: ref mut update_claimed_htlcs, + .. } => { debug_assert!(update_claimed_htlcs.is_empty()); *update_claimed_htlcs = claimed_htlcs.clone(); @@ -7016,21 +7091,31 @@ where return Ok(self.push_ret_blockable_mon_update(monitor_update)); } - let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() { - // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - - // we'll send one right away when we get the revoke_and_ack when we - // free_holding_cell_htlcs(). - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - true - } else { false }; + let need_commitment_signed = + if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() { + // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - + // we'll send one right away when we get the revoke_and_ack when we + // free_holding_cell_htlcs(). + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them to be + // strictly increasing by one, so decrement it here. 
+ self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + true + } else { + false + }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" }); - self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + true, + need_commitment_signed, + false, + Vec::new(), + Vec::new(), + Vec::new(), + ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -7055,18 +7140,30 @@ where /// Frees any pending commitment updates in the holding cell, generating the relevant messages /// for our counterparty. - #[rustfmt::skip] fn free_holding_cell_htlcs( - &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L + &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> (Option, Vec<(HTLCSource, PaymentHash)>) - where F::Target: FeeEstimator, L::Target: Logger + where + F::Target: FeeEstimator, + L::Target: Logger, { assert!(matches!(self.context.channel_state, ChannelState::ChannelReady(_))); assert!(!self.context.channel_state.is_monitor_update_in_progress()); assert!(!self.context.channel_state.is_quiescent()); - if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { - log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), - if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id()); + if self.context.holding_cell_htlc_updates.len() != 0 + || self.context.holding_cell_update_fee.is_some() + { + log_trace!( + logger, + "Freeing holding cell with {} HTLC updates{} in channel {}", + self.context.holding_cell_htlc_updates.len(), + if self.context.holding_cell_update_fee.is_some() { + " and a fee update" + } else { + "" + }, + &self.context.channel_id() + ); let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! @@ -7088,12 +7185,26 @@ where // to rebalance channels. let fail_htlc_res = match &htlc_update { &HTLCUpdateAwaitingACK::AddHTLC { - amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, - skimmed_fee_msat, blinding_point, .. + amount_msat, + cltv_expiry, + ref payment_hash, + ref source, + ref onion_routing_packet, + skimmed_fee_msat, + blinding_point, + .. } => { match self.send_htlc( - amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), - false, skimmed_fee_msat, blinding_point, fee_estimator, logger + amount_msat, + *payment_hash, + cltv_expiry, + source.clone(), + onion_routing_packet.clone(), + false, + skimmed_fee_msat, + blinding_point, + fee_estimator, + logger, ) { Ok(update_add_msg_opt) => { // `send_htlc` only returns `Ok(None)`, when an update goes into @@ -7113,7 +7224,7 @@ where // successfully forwarded/failed/fulfilled, causing our // counterparty to eventually close on us. htlcs_to_fail.push((source.clone(), *payment_hash)); - } + }, } None }, @@ -7128,22 +7239,30 @@ where // `ChannelMonitorUpdate` to the user, making this one redundant, however // there's no harm in including the extra `ChannelMonitorUpdateStep` here. // We do not bother to track and include `payment_info` here, however. 
+ let fulfill = + self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger); let mut additional_monitor_update = - if let UpdateFulfillFetch::NewClaim { monitor_update, .. } = - self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger) - { monitor_update } else { unreachable!() }; + if let UpdateFulfillFetch::NewClaim { monitor_update, .. } = fulfill { + monitor_update + } else { + unreachable!() + }; update_fulfill_count += 1; monitor_update.updates.append(&mut additional_monitor_update.updates); None }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => { - Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger) - .map(|fail_msg_opt| fail_msg_opt.map(|_| ()))) - }, - &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { - Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger) - .map(|fail_msg_opt| fail_msg_opt.map(|_| ()))) - } + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => Some( + self.fail_htlc(htlc_id, err_packet.clone(), false, logger) + .map(|fail_msg_opt| fail_msg_opt.map(|_| ())), + ), + &HTLCUpdateAwaitingACK::FailMalformedHTLC { + htlc_id, + failure_code, + sha256_of_onion, + } => Some( + self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger) + .map(|fail_msg_opt| fail_msg_opt.map(|_| ())), + ), }; if let Some(res) = fail_htlc_res { match res { @@ -7163,9 +7282,16 @@ where } } } - let update_fee = self.context.holding_cell_update_fee.take().and_then(|feerate| self.send_update_fee(feerate, false, fee_estimator, logger)); + let update_fee = + self.context.holding_cell_update_fee.take().and_then(|feerate| { + self.send_update_fee(feerate, false, fee_estimator, logger) + }); - if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && update_fee.is_none() { + if update_add_count == 0 + && update_fulfill_count == 0 + && update_fail_count == 0 + && update_fee.is_none() + { return (None, htlcs_to_fail); } @@ -8152,8 +8278,12 @@ where } /// Gets the last commitment update for immediate sending to our peer. 
- #[rustfmt::skip] - fn get_last_commitment_update_for_send(&mut self, logger: &L) -> Result where L::Target: Logger { + fn get_last_commitment_update_for_send( + &mut self, logger: &L, + ) -> Result + where + L::Target: Logger, + { let mut update_add_htlcs = Vec::new(); let mut update_fulfill_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -8185,7 +8315,10 @@ where attribution_data: err_packet.attribution_data.clone(), }); }, - &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => { + &InboundHTLCRemovalReason::FailMalformed(( + ref sha256_of_onion, + ref failure_code, + )) => { update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC { channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, @@ -8204,31 +8337,42 @@ where } } - let update_fee = if self.funding.is_outbound() && self.context.pending_update_fee.is_some() { + let update_fee = if self.funding.is_outbound() && self.context.pending_update_fee.is_some() + { Some(msgs::UpdateFee { channel_id: self.context.channel_id(), feerate_per_kw: self.context.pending_update_fee.unwrap().0, }) - } else { None }; + } else { + None + }; log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); - let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger) { - if self.context.signer_pending_commitment_update { - log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update"); - self.context.signer_pending_commitment_update = false; - } - update - } else { - if !self.context.signer_pending_commitment_update { - log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update"); - self.context.signer_pending_commitment_update = true; - } - return Err(()); - }; + let commitment_signed = + if let Ok(update) = self.send_commitment_no_state_update(logger) { + if self.context.signer_pending_commitment_update { + log_trace!( + logger, + "Commitment update generated: clearing signer_pending_commitment_update" + ); + self.context.signer_pending_commitment_update = false; + } + update + } else { + if !self.context.signer_pending_commitment_update { + log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = true; + } + return Err(()); + }; Ok(msgs::CommitmentUpdate { - update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, + update_add_htlcs, + update_fulfill_htlcs, + update_fail_htlcs, + update_fail_malformed_htlcs, + update_fee, commitment_signed, }) } @@ -12216,7 +12360,6 @@ impl Writeable for FundedChannel where SP::Target: SignerProvider, { - #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. @@ -12256,7 +12399,8 @@ where // Write out the old serialization for shutdown_pubkey for backwards compatibility, if // deserialized from that format. 
- match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) { + let shutdown_scriptpubkey = self.context.shutdown_scriptpubkey.as_ref(); + match shutdown_scriptpubkey.and_then(|script| script.as_legacy_pubkey()) { Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?, None => [0u8; PUBLIC_KEY_SIZE].write(writer)?, } @@ -12298,7 +12442,10 @@ where &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; match removal_reason { - InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { data, attribution_data }) => { + InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { + data, + attribution_data, + }) => { 0u8.write(writer)?; data.write(writer)?; removed_htlc_failure_attribution_data.push(&attribution_data); @@ -12349,7 +12496,7 @@ where } let reason: Option<&HTLCFailReason> = outcome.into(); reason.write(writer)?; - } + }, &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => { 4u8.write(writer)?; if let OutboundHTLCOutcome::Success(preimage) = outcome { @@ -12357,24 +12504,32 @@ where } let reason: Option<&HTLCFailReason> = outcome.into(); reason.write(writer)?; - } + }, } pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat); pending_outbound_blinding_points.push(htlc.blinding_point); } let holding_cell_htlc_update_count = self.context.holding_cell_htlc_updates.len(); - let mut holding_cell_skimmed_fees: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); - let mut holding_cell_blinding_points: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); - let mut holding_cell_failure_attribution_data: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_skimmed_fees: Vec> = + Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_blinding_points: Vec> = + Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_failure_attribution_data: Vec> = + Vec::with_capacity(holding_cell_htlc_update_count); // Vec of (htlc_id, failure_code, sha256_of_onion) let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new(); (holding_cell_htlc_update_count as u64).write(writer)?; for update in self.context.holding_cell_htlc_updates.iter() { match update { &HTLCUpdateAwaitingACK::AddHTLC { - ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, - blinding_point, skimmed_fee_msat, + ref amount_msat, + ref cltv_expiry, + ref payment_hash, + ref source, + ref onion_routing_packet, + blinding_point, + skimmed_fee_msat, } => { 0u8.write(writer)?; amount_msat.write(writer)?; @@ -12397,10 +12552,13 @@ where err_packet.data.write(writer)?; // Store the attribution data for later writing. - holding_cell_failure_attribution_data.push(err_packet.attribution_data.as_ref()); - } + holding_cell_failure_attribution_data + .push(err_packet.attribution_data.as_ref()); + }, &HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id, failure_code, sha256_of_onion + htlc_id, + failure_code, + sha256_of_onion, } => { // We don't want to break downgrading by adding a new variant, so write a dummy // `::FailHTLC` variant and write the real malformed error as an optional TLV. @@ -12413,7 +12571,7 @@ where // Push 'None' attribution data for FailMalformedHTLC, because FailMalformedHTLC uses the same // type 2 and is deserialized as a FailHTLC. 
holding_cell_failure_attribution_data.push(None); - } + }, } } @@ -12433,7 +12591,9 @@ where } (self.context.monitor_pending_failures.len() as u64).write(writer)?; - for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() { + for &(ref htlc_source, ref payment_hash, ref fail_reason) in + self.context.monitor_pending_failures.iter() + { htlc_source.write(writer)?; payment_hash.write(writer)?; fail_reason.write(writer)?; @@ -12441,7 +12601,9 @@ where if self.funding.is_outbound() { self.context.pending_update_fee.map(|(a, _)| a).write(writer)?; - } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee { + } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = + self.context.pending_update_fee + { Some(feerate).write(writer)?; } else { // As for inbound HTLCs, if the update was only announced and never committed in a @@ -12486,7 +12648,7 @@ where info.fee_proportional_millionths.write(writer)?; info.cltv_expiry_delta.write(writer)?; }, - None => 0u8.write(writer)? + None => 0u8.write(writer)?, } self.funding.channel_transaction_parameters.write(writer)?; @@ -12506,33 +12668,58 @@ where // older clients fail to deserialize this channel at all. If the type is // only-static-remote-key, we simply consider it "default" and don't write the channel type // out at all. - let chan_type = if self.funding.get_channel_type() != &ChannelTypeFeatures::only_static_remote_key() { - Some(self.funding.get_channel_type()) } else { None }; + let chan_type = + if self.funding.get_channel_type() != &ChannelTypeFeatures::only_static_remote_key() { + Some(self.funding.get_channel_type()) + } else { + None + }; // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to // a different percentage of the channel value then 10%, which older versions of LDK used // to set it to before the percentage was made configurable. 
+ let legacy_reserve_satoshis = get_legacy_default_holder_selected_channel_reserve_satoshis( + self.funding.get_value_satoshis(), + ); let serialized_holder_selected_reserve = - if self.funding.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.funding.get_value_satoshis()) - { Some(self.funding.holder_selected_channel_reserve_satoshis) } else { None }; + if self.funding.holder_selected_channel_reserve_satoshis != legacy_reserve_satoshis { + Some(self.funding.holder_selected_channel_reserve_satoshis) + } else { + None + }; let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config; - old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY; + old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = + MAX_IN_FLIGHT_PERCENT_LEGACY; + let max_in_flight_msat = get_holder_max_htlc_value_in_flight_msat( + self.funding.get_value_satoshis(), + &old_max_in_flight_percent_config, + ); let serialized_holder_htlc_max_in_flight = - if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.funding.get_value_satoshis(), &old_max_in_flight_percent_config) - { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None }; + if self.context.holder_max_htlc_value_in_flight_msat != max_in_flight_msat { + Some(self.context.holder_max_htlc_value_in_flight_msat) + } else { + None + }; let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted); - let initial_channel_ready_event_emitted = Some(self.context.initial_channel_ready_event_emitted); - let funding_tx_broadcast_safe_event_emitted = Some(self.context.funding_tx_broadcast_safe_event_emitted); + let initial_channel_ready_event_emitted = + Some(self.context.initial_channel_ready_event_emitted); + let funding_tx_broadcast_safe_event_emitted = + Some(self.context.funding_tx_broadcast_safe_event_emitted); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore, // we write the high bytes as an option here. let user_id_high_opt = Some((self.context.user_id >> 64) as u64); - let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) }; + let holder_max_accepted_htlcs = + if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { + None + } else { + Some(self.context.holder_max_accepted_htlcs) + }; let mut monitor_pending_update_adds = None; if !self.context.monitor_pending_update_adds.is_empty() { @@ -12603,8 +12790,9 @@ where ES::Target: EntropySource, SP::Target: SignerProvider, { - #[rustfmt::skip] - fn read(reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures)) -> Result { + fn read( + reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), + ) -> Result { let (entropy_source, signer_provider, our_supported_features) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); if ver <= 2 { @@ -12623,7 +12811,8 @@ where } let channel_id: ChannelId = Readable::read(reader)?; - let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?; + let channel_state = ChannelState::from_u32(Readable::read(reader)?) 
+ .map_err(|_| DecodeError::InvalidValue)?; let channel_value_satoshis = Readable::read(reader)?; let latest_monitor_update_id = Readable::read(reader)?; @@ -12641,7 +12830,10 @@ where let pending_inbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( + pending_inbound_htlc_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..pending_inbound_htlc_count { pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -12651,7 +12843,9 @@ where state: match ::read(reader)? { 1 => { let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? } + InboundHTLCResolution::Resolved { + pending_htlc_status: Readable::read(reader)?, + } } else { Readable::read(reader)? }; @@ -12659,7 +12853,9 @@ where }, 2 => { let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? } + InboundHTLCResolution::Resolved { + pending_htlc_status: Readable::read(reader)?, + } } else { Readable::read(reader)? }; @@ -12684,7 +12880,10 @@ where } let pending_outbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min( + pending_outbound_htlc_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..pending_outbound_htlc_count { pending_outbound_htlcs.push(OutboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -12731,7 +12930,10 @@ where } let holding_cell_htlc_update_count: u64 = Readable::read(reader)?; - let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2)); + let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min( + holding_cell_htlc_update_count as usize, + DEFAULT_MAX_HTLCS as usize * 2, + )); for _ in 0..holding_cell_htlc_update_count { holding_cell_htlc_updates.push(match ::read(reader)? 
{ 0 => HTLCUpdateAwaitingACK::AddHTLC { @@ -12769,15 +12971,25 @@ where let monitor_pending_commitment_signed = Readable::read(reader)?; let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut monitor_pending_forwards = Vec::with_capacity(cmp::min( + monitor_pending_forwards_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..monitor_pending_forwards_count { monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; - let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut monitor_pending_failures = Vec::with_capacity(cmp::min( + monitor_pending_failures_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..monitor_pending_failures_count { - monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)); + monitor_pending_failures.push(( + Readable::read(reader)?, + Readable::read(reader)?, + Readable::read(reader)?, + )); } let pending_update_fee_value: Option = Readable::read(reader)?; @@ -12835,7 +13047,8 @@ where _ => return Err(DecodeError::InvalidValue), }; - let channel_parameters: ChannelTransactionParameters = ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; + let channel_parameters: ChannelTransactionParameters = + ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; let funding_transaction: Option = Readable::read(reader)?; let counterparty_cur_commitment_point = Readable::read(reader)?; @@ -12849,11 +13062,14 @@ where let channel_update_status = Readable::read(reader)?; let pending_update_fee = if let Some(feerate) = pending_update_fee_value { - Some((feerate, if channel_parameters.is_outbound_from_holder { - FeeUpdateState::Outbound - } else { - FeeUpdateState::AwaitingRemoteRevokeToAnnounce - })) + Some(( + feerate, + if channel_parameters.is_outbound_from_holder { + FeeUpdateState::Outbound + } else { + FeeUpdateState::AwaitingRemoteRevokeToAnnounce + }, + )) } else { None }; @@ -12861,8 +13077,14 @@ where let mut announcement_sigs = None; let mut target_closing_feerate_sats_per_kw = None; let mut monitor_pending_finalized_fulfills = Some(Vec::new()); - let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis)); - let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config)); + let mut holder_selected_channel_reserve_satoshis = Some( + get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis), + ); + let mut holder_max_htlc_value_in_flight_msat = + Some(get_holder_max_htlc_value_in_flight_msat( + channel_value_satoshis, + &UserConfig::default().channel_handshake_config, + )); // Prior to supporting channel type negotiation, all of our channels were static_remotekey // only, so we default to that if none was written. 
let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key()); @@ -12962,19 +13184,23 @@ where let mut iter = preimages.into_iter(); for htlc in pending_outbound_htlcs.iter_mut() { match &mut htlc.state { - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(ref mut preimage)) => { + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success( + ref mut preimage, + )) => { // This variant was initialized like this further above debug_assert_eq!(preimage, &PaymentPreimage([0u8; 32])); // Flatten and unwrap the preimage; they are always set starting in 0.2. *preimage = iter.next().flatten().ok_or(DecodeError::InvalidValue)?; - } - OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(ref mut preimage)) => { + }, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success( + ref mut preimage, + )) => { // This variant was initialized like this further above debug_assert_eq!(preimage, &PaymentPreimage([0u8; 32])); // Flatten and unwrap the preimage; they are always set starting in 0.2. *preimage = iter.next().flatten().ok_or(DecodeError::InvalidValue)?; - } - _ => {} + }, + _ => {}, } } // We expect all preimages to be consumed above @@ -12983,7 +13209,9 @@ where } let chan_features = channel_type.unwrap(); - if chan_features.supports_any_optional_bits() || chan_features.requires_unknown_bits_from(&our_supported_features) { + if chan_features.supports_any_optional_bits() + || chan_features.requires_unknown_bits_from(&our_supported_features) + { // If the channel was written by a new version and negotiated with features we don't // understand yet, refuse to read it. return Err(DecodeError::UnknownRequiredFeature); @@ -13009,7 +13237,9 @@ where htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?; } // We expect all skimmed fees to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt { let mut iter = skimmed_fees.into_iter(); @@ -13019,7 +13249,9 @@ where } } // We expect all skimmed fees to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(blinding_pts) = pending_outbound_blinding_points_opt { let mut iter = blinding_pts.into_iter(); @@ -13027,7 +13259,9 @@ where htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?; } // We expect all blinding points to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(blinding_pts) = holding_cell_blinding_points_opt { let mut iter = blinding_pts.into_iter(); @@ -13037,74 +13271,112 @@ where } } // We expect all blinding points to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(attribution_data_list) = removed_htlc_failure_attribution_data { let mut removed_htlc_relay_failures = - pending_inbound_htlcs.iter_mut().filter_map(|status| - if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(ref mut packet)) = &mut status.state { + pending_inbound_htlcs.iter_mut().filter_map(|status| { + if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay( + ref mut packet, + )) = 
&mut status.state + { Some(&mut packet.attribution_data) } else { None } - ); + }); for attribution_data in attribution_data_list { - *removed_htlc_relay_failures.next().ok_or(DecodeError::InvalidValue)? = attribution_data; + *removed_htlc_relay_failures.next().ok_or(DecodeError::InvalidValue)? = + attribution_data; + } + if removed_htlc_relay_failures.next().is_some() { + return Err(DecodeError::InvalidValue); } - if removed_htlc_relay_failures.next().is_some() { return Err(DecodeError::InvalidValue); } } if let Some(attribution_data_list) = holding_cell_failure_attribution_data { let mut holding_cell_failures = - holding_cell_htlc_updates.iter_mut().filter_map(|upd| - if let HTLCUpdateAwaitingACK::FailHTLC { err_packet: OnionErrorPacket { ref mut attribution_data, .. }, .. } = upd { + holding_cell_htlc_updates.iter_mut().filter_map(|upd| { + if let HTLCUpdateAwaitingACK::FailHTLC { + err_packet: OnionErrorPacket { ref mut attribution_data, .. }, + .. + } = upd + { Some(attribution_data) } else { None } - ); + }); for attribution_data in attribution_data_list { *holding_cell_failures.next().ok_or(DecodeError::InvalidValue)? = attribution_data; } - if holding_cell_failures.next().is_some() { return Err(DecodeError::InvalidValue); } + if holding_cell_failures.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(malformed_htlcs) = malformed_htlcs { for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs { - let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| { - if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc { - let matches = *htlc_id == malformed_htlc_id; - if matches { debug_assert!(err_packet.data.is_empty()) } - matches - } else { false } - }).ok_or(DecodeError::InvalidValue)?; + let htlc_idx = holding_cell_htlc_updates + .iter() + .position(|htlc| { + if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc { + let matches = *htlc_id == malformed_htlc_id; + if matches { + debug_assert!(err_packet.data.is_empty()) + } + matches + } else { + false + } + }) + .ok_or(DecodeError::InvalidValue)?; let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id: malformed_htlc_id, failure_code, sha256_of_onion + htlc_id: malformed_htlc_id, + failure_code, + sha256_of_onion, }; - let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc); + let _ = + core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc); } } // If we're restoring this channel for the first time after an upgrade, then we require that the // signer be available so that we can immediately populate the current commitment point. Channel // restoration will fail if this is not possible. 
- let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) { + let holder_commitment_point = match ( + cur_holder_commitment_point_opt, + next_holder_commitment_point_opt, + ) { (Some(current), Some(next)) => HolderCommitmentPoint::Available { - transaction_number: cur_holder_commitment_transaction_number, current, next + transaction_number: cur_holder_commitment_transaction_number, + current, + next, }, (Some(current), _) => HolderCommitmentPoint::PendingNext { - transaction_number: cur_holder_commitment_transaction_number, current, + transaction_number: cur_holder_commitment_transaction_number, + current, }, (_, _) => { let current = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx) .expect("Must be able to derive the current commitment point upon channel restoration"); - let next = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx) - .expect("Must be able to derive the next commitment point upon channel restoration"); + let next = holder_signer + .get_per_commitment_point( + cur_holder_commitment_transaction_number - 1, + &secp_ctx, + ) + .expect( + "Must be able to derive the next commitment point upon channel restoration", + ); HolderCommitmentPoint::Available { - transaction_number: cur_holder_commitment_transaction_number, current, next, + transaction_number: cur_holder_commitment_transaction_number, + current, + next, } }, }; @@ -13113,7 +13385,8 @@ where funding: FundingScope { value_to_self_msat, counterparty_selected_channel_reserve_satoshis, - holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(), + holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis + .unwrap(), #[cfg(debug_assertions)] holder_max_commitment_tx_output: Mutex::new((0, 0)), @@ -13230,9 +13503,11 @@ where outbound_scid_alias, historical_scids: historical_scids.unwrap(), - funding_tx_broadcast_safe_event_emitted: funding_tx_broadcast_safe_event_emitted.unwrap_or(false), + funding_tx_broadcast_safe_event_emitted: funding_tx_broadcast_safe_event_emitted + .unwrap_or(false), channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), - initial_channel_ready_event_emitted: initial_channel_ready_event_emitted.unwrap_or(true), + initial_channel_ready_event_emitted: initial_channel_ready_event_emitted + .unwrap_or(true), channel_keys_id, @@ -13887,7 +14162,6 @@ mod tests { } #[test] - #[rustfmt::skip] fn blinding_point_skimmed_fee_malformed_ser() { // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized // properly. 
@@ -13900,23 +14174,72 @@ mod tests { let best_block = BestBlock::from_network(network); let keys_provider = TestKeysInterface::new(&seed, network); - let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); + let node_b_node_id = + PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); let features = channelmanager::provided_init_features(&config); let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new( - &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger - ).unwrap(); + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &features, + 10000000, + 100000, + 42, + &config, + 0, + 42, + None, + &logger, + ) + .unwrap(); + let open_channel_msg = &outbound_chan + .get_open_channel(ChainHash::using_genesis_block(network), &&logger) + .unwrap(); let mut inbound_chan = InboundV1Channel::<&TestKeysInterface>::new( - &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), - &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network), &&logger).unwrap(), 7, &config, 0, &&logger, false - ).unwrap(); - outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(&&logger).unwrap(), &config.channel_handshake_limits, &features).unwrap(); - let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: Amount::from_sat(10000000), script_pubkey: outbound_chan.funding.get_funding_redeemscript(), - }]}; - let funding_outpoint = OutPoint{ txid: tx.compute_txid(), index: 0 }; - let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap(); - let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) { + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &channelmanager::provided_channel_type_features(&config), + &features, + open_channel_msg, + 7, + &config, + 0, + &&logger, + false, + ) + .unwrap(); + outbound_chan + .accept_channel( + &inbound_chan.get_accept_channel_message(&&logger).unwrap(), + &config.channel_handshake_limits, + &features, + ) + .unwrap(); + let tx = Transaction { + version: Version::ONE, + lock_time: LockTime::ZERO, + input: Vec::new(), + output: vec![TxOut { + value: Amount::from_sat(10000000), + script_pubkey: outbound_chan.funding.get_funding_redeemscript(), + }], + }; + let funding_outpoint = OutPoint { txid: tx.compute_txid(), index: 0 }; + let funding_created = outbound_chan + .get_funding_created(tx.clone(), funding_outpoint, false, &&logger) + .map_err(|_| ()) + .unwrap() + .unwrap(); + let mut chan = match inbound_chan.funding_created( + &funding_created, + best_block, + &&keys_provider, + &&logger, + ) { Ok((chan, _, _)) => chan, Err((_, e)) => panic!("{}", e), }; @@ -13924,11 +14247,15 @@ mod tests { let dummy_htlc_source = HTLCSource::OutboundRoute { path: Path { hops: vec![RouteHop { - pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(), - node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0, - cltv_expiry_delta: 0, maybe_announced_channel: false, + pubkey: test_utils::pubkey(2), + channel_features: ChannelFeatures::empty(), + node_features: NodeFeatures::empty(), + short_channel_id: 0, + fee_msat: 0, + cltv_expiry_delta: 0, + 
maybe_announced_channel: false, }], - blinded_tail: None + blinded_tail: None, }, session_priv: test_utils::privkey(42), first_hop_htlc_msat: 0, @@ -13965,8 +14292,8 @@ mod tests { onion_routing_packet: msgs::OnionPacket { version: 0, public_key: Ok(test_utils::pubkey(1)), - hop_data: [0; 20*65], - hmac: [0; 32] + hop_data: [0; 20 * 65], + hmac: [0; 32], }, skimmed_fee_msat: None, blinding_point: None, @@ -13976,12 +14303,18 @@ mod tests { htlc_id: 0, }; let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC { - htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } - }; - let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id, failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - sha256_of_onion: [0; 32], + htlc_id, + err_packet: msgs::OnionErrorPacket { + data: vec![42], + attribution_data: Some(AttributionData::new()), + }, }; + let dummy_holding_cell_malformed_htlc = + |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { + htlc_id, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], + }; let mut holding_cell_htlc_updates = Vec::with_capacity(12); for i in 0..12 { if i % 5 == 0 { @@ -13991,11 +14324,16 @@ mod tests { } else if i % 5 == 2 { let mut dummy_add = dummy_holding_cell_add_htlc.clone(); if let HTLCUpdateAwaitingACK::AddHTLC { - ref mut blinding_point, ref mut skimmed_fee_msat, .. - } = &mut dummy_add { + ref mut blinding_point, + ref mut skimmed_fee_msat, + .. + } = &mut dummy_add + { *blinding_point = Some(test_utils::pubkey(42 + i)); *skimmed_fee_msat = Some(42); - } else { panic!() } + } else { + panic!() + } holding_cell_htlc_updates.push(dummy_add); } else if i % 5 == 3 { holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64)); @@ -14008,9 +14346,12 @@ mod tests { // Encode and decode the channel and ensure that the HTLCs within are the same. 
let encoded_chan = chan.encode(); let mut s = crate::io::Cursor::new(&encoded_chan); - let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); + let mut reader = + crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)).unwrap(); + let decoded_chan = + FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) + .unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index aef2be93553..a5127c46826 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7811,7 +7811,6 @@ where self.claim_payment_internal(payment_preimage, true); } - #[rustfmt::skip] fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); @@ -7819,7 +7818,10 @@ where let (sources, claiming_payment) = { let res = self.claimable_payments.lock().unwrap().begin_claiming_payment( - payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret, + payment_hash, + &self.node_signer, + &self.logger, + &self.inbound_payment_id_secret, custom_tlvs_known, ); @@ -7827,13 +7829,21 @@ where Ok((htlcs, payment_info)) => (htlcs, payment_info), Err(htlcs) => { for htlc in htlcs { - let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); + let reason = self.get_htlc_fail_reason_from_failure_code( + FailureCode::InvalidOnionPayload(None), + &htlc, + ); let source = HTLCSource::PreviousHopData(htlc.prev_hop); let receiver = HTLCHandlingFailureType::Receive { payment_hash }; - self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); + self.fail_htlc_backwards_internal( + &source, + &payment_hash, + &reason, + receiver, + ); } return; - } + }, } }; debug_assert!(!sources.is_empty()); @@ -7869,7 +7879,10 @@ where mem::drop(per_peer_state); if sources.is_empty() || expected_amt_msat.is_none() { self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); - log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); + log_info!( + self.logger, + "Attempted to claim an incomplete payment which no longer had any available HTLCs!" 
+ ); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { @@ -7879,18 +7892,21 @@ where return; } if valid_mpp { - let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| { - if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { - Some(MPPClaimHTLCSource { - counterparty_node_id: cp_id, - funding_txo: htlc.prev_hop.outpoint, - channel_id: htlc.prev_hop.channel_id, - htlc_id: htlc.prev_hop.htlc_id, - }) - } else { - None - } - }).collect(); + let mpp_parts: Vec<_> = sources + .iter() + .filter_map(|htlc| { + if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + Some(MPPClaimHTLCSource { + counterparty_node_id: cp_id, + funding_txo: htlc.prev_hop.outpoint, + channel_id: htlc.prev_hop.channel_id, + htlc_id: htlc.prev_hop.htlc_id, + }) + } else { + None + } + }) + .collect(); let pending_mpp_claim_ptr_opt = if sources.len() > 1 { let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len()); for part in mpp_parts.iter() { @@ -7908,32 +7924,48 @@ where }; let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment }); for htlc in sources { - let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| - if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { - let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); - Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) - } else { - None - } - ); + let this_mpp_claim = + pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| { + if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); + Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) + } else { + None + } + }); let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| { RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)), } }); self.claim_funds_from_hop( - htlc.prev_hop, payment_preimage, payment_info.clone(), + htlc.prev_hop, + payment_preimage, + payment_info.clone(), |_, definitely_duplicate| { - debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); - (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker) - } + debug_assert!( + !definitely_duplicate, + "We shouldn't claim duplicatively from a payment" + ); + ( + Some(MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim: this_mpp_claim, + }), + raa_blocker, + ) + }, ); } } else { for htlc in sources { - let err_data = invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height); + let err_data = + invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, err_data); + let reason = HTLCFailReason::reason( + LocalHTLCFailureReason::IncorrectPaymentDetails, + err_data, + ); let receiver = HTLCHandlingFailureType::Receive { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -7970,9 +8002,11 @@ where self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action) } - #[rustfmt::skip] fn claim_mpp_part< - ComplFunc: FnOnce(Option, bool) -> (Option, Option) + ComplFunc: FnOnce( + Option, + bool, + ) -> (Option, Option), >( &self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage, payment_info: Option, completion_action: 
@@ -7997,33 +8031,58 @@ where
         // Note here that `peer_state_opt` is always `Some` if `prev_hop.counterparty_node_id` is
         // `Some`. This is relied on in the closed-channel case below.
-        let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
-            |counterparty_node_id| per_peer_state.get(counterparty_node_id)
-                .map(|peer_mutex| peer_mutex.lock().unwrap())
-                .expect(MISSING_MON_ERROR)
-        );
+        let mut peer_state_opt =
+            prev_hop.counterparty_node_id.as_ref().map(|counterparty_node_id| {
+                per_peer_state
+                    .get(counterparty_node_id)
+                    .map(|peer_mutex| peer_mutex.lock().unwrap())
+                    .expect(MISSING_MON_ERROR)
+            });
         if let Some(peer_state_lock) = peer_state_opt.as_mut() {
             let peer_state = &mut **peer_state_lock;
-            if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(chan_id) {
+            if let hash_map::Entry::Occupied(mut chan_entry) =
+                peer_state.channel_by_id.entry(chan_id)
+            {
                 if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
                     let logger = WithChannelContext::from(&self.logger, &chan.context, None);
-                    let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
+                    let fulfill_res = chan.get_update_fulfill_htlc_and_commit(
+                        prev_hop.htlc_id,
+                        payment_preimage,
+                        payment_info,
+                        &&logger,
+                    );
                     match fulfill_res {
                         UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
-                            let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false);
+                            let (action_opt, raa_blocker_opt) =
+                                completion_action(Some(htlc_value_msat), false);
                             if let Some(action) = action_opt {
                                 log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
                                     chan_id, action);
-                                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+                                peer_state
+                                    .monitor_update_blocked_actions
+                                    .entry(chan_id)
+                                    .or_insert(Vec::new())
+                                    .push(action);
                             }
                             if let Some(raa_blocker) = raa_blocker_opt {
-                                peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
+                                peer_state
+                                    .actions_blocking_raa_monitor_updates
+                                    .entry(chan_id)
+                                    .or_insert_with(Vec::new)
+                                    .push(raa_blocker);
                             }
-                            handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
-                                peer_state, per_peer_state, chan);
-                        }
+                            handle_new_monitor_update!(
+                                self,
+                                prev_hop.funding_txo,
+                                monitor_update,
+                                peer_state_opt,
+                                peer_state,
+                                per_peer_state,
+                                chan
+                            );
+                        },
                         UpdateFulfillCommitFetch::DuplicateClaim {} => {
                             let (action_opt, raa_blocker_opt) = completion_action(None, true);
                             if let Some(raa_blocker) = raa_blocker_opt {
@@ -8035,8 +8094,14 @@ where
                                 //
                                 // In any other case, the RAA blocker must still be present and
                                 // blocking RAAs.
-                                debug_assert!(during_init ||
-                                    peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
+                                debug_assert!(
+                                    during_init
+                                        || peer_state
+                                            .actions_blocking_raa_monitor_updates
+                                            .get(&chan_id)
+                                            .unwrap()
+                                            .contains(&raa_blocker)
+                                );
                             }
                             let action = if let Some(action) = action_opt {
                                 action
@@ -8051,8 +8116,10 @@ where
                             if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                 downstream_counterparty_node_id: node_id,
                                 downstream_funding_outpoint: _,
-                                blocking_action: blocker, downstream_channel_id: channel_id,
-                            } = action {
+                                blocking_action: blocker,
+                                downstream_channel_id: channel_id,
+                            } = action
+                            {
                                 if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
                                     let mut peer_state = peer_state_mtx.lock().unwrap();
                                     if let Some(blockers) = peer_state
@@ -8065,7 +8132,9 @@ where
                                         // which case we need to only remove the one
                                         // blocker which was added duplicatively.
                                         let first_blocker = !found_blocker;
-                                        if *iter == blocker { found_blocker = true; }
+                                        if *iter == blocker {
+                                            found_blocker = true;
+                                        }
                                         *iter != blocker || !first_blocker
                                     });
                                     debug_assert!(found_blocker);
@@ -8073,7 +8142,10 @@ where
                                 } else {
                                     debug_assert!(false);
                                 }
-                            } else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
+                            } else if matches!(
+                                action,
+                                MonitorUpdateCompletionAction::PaymentClaimed { .. }
+                            ) {
                                 debug_assert!(during_init,
                                     "Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
                                 mem::drop(per_peer_state);
@@ -8083,7 +8155,7 @@ where
                                     "Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
                                 return;
                             };
-                        }
+                        },
                     }
                 }
                 return;
@@ -8098,10 +8170,14 @@ where
                 payment_preimage,
             );
         }
-        let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
-        let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
+        let counterparty_node_id =
+            prev_hop.counterparty_node_id.expect("Checked immediately above");
+        let mut peer_state = peer_state_opt
+            .expect("peer_state_opt is always Some when the counterparty_node_id is Some");
 
-        let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
+        let update_id = if let Some(latest_update_id) =
+            peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id)
+        {
             *latest_update_id = latest_update_id.saturating_add(1);
             *latest_update_id
         } else {
@@ -8129,7 +8205,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         let (action_opt, raa_blocker_opt) = completion_action(None, false);
         if let Some(raa_blocker) = raa_blocker_opt {
-            peer_state.actions_blocking_raa_monitor_updates
+            peer_state
+                .actions_blocking_raa_monitor_updates
                 .entry(prev_hop.channel_id)
                 .or_default()
                 .push(raa_blocker);
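The hunk above records RAA-blocking actions in a per-channel map via `entry(..).or_default().push(..)`, and an earlier hunk removes exactly one matching blocker with a `retain` closure that tracks whether the first match has been seen. A small sketch of both patterns using simplified stand-ins (`ChannelId` and `Blocker` here are placeholders, not the LDK types):

use std::collections::HashMap;

type ChannelId = [u8; 32];

#[derive(Debug, PartialEq)]
struct Blocker(u64);

fn main() {
    let mut actions_blocking_raa_monitor_updates: HashMap<ChannelId, Vec<Blocker>> =
        HashMap::new();
    let chan_id = [7u8; 32];

    // First insertion creates the Vec, later insertions append to it.
    actions_blocking_raa_monitor_updates.entry(chan_id).or_default().push(Blocker(1));
    actions_blocking_raa_monitor_updates.entry(chan_id).or_default().push(Blocker(2));
    assert_eq!(actions_blocking_raa_monitor_updates[&chan_id].len(), 2);

    // Releasing a blocker later removes only the first matching entry, mirroring
    // the `retain`-with-`first_blocker` logic in the hunk above.
    let blockers = actions_blocking_raa_monitor_updates.get_mut(&chan_id).unwrap();
    let mut found = false;
    blockers.retain(|b| {
        let first = !found;
        if *b == Blocker(2) {
            found = true;
        }
        *b != Blocker(2) || !first
    });
    assert!(found);
    assert_eq!(blockers.len(), 1);
}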
@@ -8138,17 +8215,37 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         // Given the fact that we're in a bit of a weird edge case, its worth hashing the preimage
         // to include the `payment_hash` in the log metadata here.
         let payment_hash = payment_preimage.into();
-        let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
+        let logger = WithContext::from(
+            &self.logger,
+            Some(counterparty_node_id),
+            Some(chan_id),
+            Some(payment_hash),
+        );
 
         if let Some(action) = action_opt {
-            log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-                chan_id, action);
-            peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+            log_trace!(
+                logger,
+                "Tracking monitor update completion action for closed channel {}: {:?}",
+                chan_id,
+                action
+            );
+            peer_state
+                .monitor_update_blocked_actions
+                .entry(chan_id)
+                .or_insert(Vec::new())
+                .push(action);
         }
 
         handle_new_monitor_update!(
-            self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
-            counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
+            self,
+            prev_hop.funding_txo,
+            preimage_update,
+            peer_state,
+            peer_state,
+            per_peer_state,
+            counterparty_node_id,
+            chan_id,
+            POST_CHANNEL_CLOSE
         );
     }
 
@@ -8156,37 +8253,53 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
     }
 
-    #[rustfmt::skip]
-    fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
+    fn claim_funds_internal(
+        &self, source: HTLCSource, payment_preimage: PaymentPreimage,
         forwarded_htlc_value_msat: Option, skimmed_fee_msat: Option, from_onchain: bool,
         startup_replay: bool, next_channel_counterparty_node_id: PublicKey,
-        next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option,
+        next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
+        next_user_channel_id: Option,
     ) {
         match source {
-            HTLCSource::OutboundRoute { session_priv, payment_id, path, bolt12_invoice, .. } => {
+            HTLCSource::OutboundRoute {
+                session_priv, payment_id, path, bolt12_invoice, ..
+            } => {
                 debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
                     "We don't support claim_htlc claims during startup - monitors may not be available yet");
                 debug_assert_eq!(next_channel_counterparty_node_id, path.hops[0].pubkey);
                 let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                    channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
+                    channel_funding_outpoint: next_channel_outpoint,
+                    channel_id: next_channel_id,
                     counterparty_node_id: path.hops[0].pubkey,
                 };
-                self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, bolt12_invoice,
-                    session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
-                    &self.logger);
+                self.pending_outbound_payments.claim_htlc(
+                    payment_id,
+                    payment_preimage,
+                    bolt12_invoice,
+                    session_priv,
+                    path,
+                    from_onchain,
+                    ev_completion_action,
+                    &self.pending_events,
+                    &self.logger,
+                );
             },
             HTLCSource::PreviousHopData(hop_data) => {
                 let prev_channel_id = hop_data.channel_id;
                 let prev_user_channel_id = hop_data.user_channel_id;
                 let prev_node_id = hop_data.counterparty_node_id;
-                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
-                self.claim_funds_from_hop(hop_data, payment_preimage, None,
+                let completed_blocker =
+                    RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
+                self.claim_funds_from_hop(
+                    hop_data,
+                    payment_preimage,
+                    None,
                     |htlc_claim_value_msat, definitely_duplicate| {
                         let chan_to_release = Some(EventUnblockedChannel {
                             counterparty_node_id: next_channel_counterparty_node_id,
                             funding_txo: next_channel_outpoint,
                             channel_id: next_channel_id,
-                            blocking_action: completed_blocker
+                            blocking_action: completed_blocker,
                         });
 
                         if definitely_duplicate && startup_replay {
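`claim_funds_from_hop`, as reformatted above, takes a completion callback that is handed the claimed value (if any) plus a definitely-duplicate flag and returns an optional completion action together with an optional RAA blocker. A minimal sketch of that callback shape; `Action`, `Blocker`, and `claim_from_hop` are hypothetical stand-ins, not the LDK definitions:

#[derive(Debug)]
enum Action {
    PaymentClaimed { amount_msat: u64 },
}

#[derive(Debug)]
struct Blocker;

fn claim_from_hop<F>(claimed_value_msat: Option<u64>, duplicate: bool, completion: F)
where
    F: FnOnce(Option<u64>, bool) -> (Option<Action>, Option<Blocker>),
{
    let (action, blocker) = completion(claimed_value_msat, duplicate);
    // In the real code the action is queued against the channel and the blocker
    // is recorded so revoke_and_ack handling waits for the monitor update.
    println!("action: {:?}, blocker: {:?}", action, blocker);
}

fn main() {
    claim_from_hop(Some(42_000), false, |value, definitely_duplicate| {
        debug_assert!(!definitely_duplicate);
        (value.map(|amount_msat| Action::PaymentClaimed { amount_msat }), Some(Blocker))
    });
}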
@@ -8203,32 +8316,45 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                                     downstream_channel_id: other_chan.channel_id,
                                     blocking_action: other_chan.blocking_action,
                                 }), None)
-                            } else { (None, None) }
+                            } else {
+                                (None, None)
+                            }
                         } else {
-                            let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-                                if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-                                    Some(claimed_htlc_value - forwarded_htlc_value)
-                                } else { None }
-                            } else { None };
-                            debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
-                                "skimmed_fee_msat must always be included in total_fee_earned_msat");
-                            (Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
-                                event: events::Event::PaymentForwarded {
-                                    prev_channel_id: Some(prev_channel_id),
-                                    next_channel_id: Some(next_channel_id),
-                                    prev_user_channel_id,
-                                    next_user_channel_id,
-                                    prev_node_id,
-                                    next_node_id: Some(next_channel_counterparty_node_id),
-                                    total_fee_earned_msat,
-                                    skimmed_fee_msat,
-                                    claim_from_onchain_tx: from_onchain,
-                                    outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-                                },
-                                downstream_counterparty_and_funding_outpoint: chan_to_release,
-                            }), None)
+                            let total_fee_earned_msat =
+                                if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                    if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                        Some(claimed_htlc_value - forwarded_htlc_value)
+                                    } else {
+                                        None
+                                    }
+                                } else {
+                                    None
+                                };
+                            debug_assert!(
+                                skimmed_fee_msat <= total_fee_earned_msat,
+                                "skimmed_fee_msat must always be included in total_fee_earned_msat"
+                            );
+                            (
+                                Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+                                    event: events::Event::PaymentForwarded {
+                                        prev_channel_id: Some(prev_channel_id),
+                                        next_channel_id: Some(next_channel_id),
+                                        prev_user_channel_id,
+                                        next_user_channel_id,
+                                        prev_node_id,
+                                        next_node_id: Some(next_channel_counterparty_node_id),
+                                        total_fee_earned_msat,
+                                        skimmed_fee_msat,
+                                        claim_from_onchain_tx: from_onchain,
+                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+                                    },
+                                    downstream_counterparty_and_funding_outpoint: chan_to_release,
+                                }),
+                                None,
+                            )
                         }
-                    });
+                    },
+                );
             },
         }
     }
@@ -9721,17 +9847,23 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         Ok(())
     }
 
-    #[rustfmt::skip]
-    fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
+    fn internal_update_fulfill_htlc(
+        &self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC,
+    ) -> Result<(), MsgHandleErrInternal> {
         let funding_txo;
         let next_user_channel_id;
         let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
             let per_peer_state = self.per_peer_state.read().unwrap();
-            let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                .ok_or_else(|| {
-                    debug_assert!(false);
-                    MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-                })?;
+            let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
+                debug_assert!(false);
+                MsgHandleErrInternal::send_err_msg_no_close(
+                    format!(
+                        "Can't find a peer matching the passed counterparty node_id {}",
+                        counterparty_node_id
+                    ),
+                    msg.channel_id,
+                )
+            })?;
             let mut peer_state_lock = peer_state_mutex.lock().unwrap();
             let peer_state = &mut *peer_state_lock;
             match peer_state.channel_by_id.entry(msg.channel_id) {
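The `internal_update_fulfill_htlc` hunk above looks up the peer's state with `per_peer_state.get(..).ok_or_else(..)?` and only then locks the per-peer mutex, reporting an error to the caller rather than panicking when the peer is unknown. A minimal sketch of that lookup-then-lock pattern using plain std types (the map layout here is a simplification, not LDK's actual structures):

use std::collections::HashMap;
use std::sync::Mutex;

struct PeerState {
    channels: Vec<u64>,
}

fn handle_msg(
    per_peer_state: &HashMap<u64, Mutex<PeerState>>, counterparty_node_id: u64,
) -> Result<(), String> {
    // Missing peers become an error value for the caller instead of a panic.
    let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| {
        format!(
            "Can't find a peer matching the passed counterparty node_id {}",
            counterparty_node_id
        )
    })?;
    let peer_state = peer_state_mutex.lock().unwrap();
    println!("peer {} has {} channel(s)", counterparty_node_id, peer_state.channels.len());
    Ok(())
}

fn main() {
    let mut per_peer_state = HashMap::new();
    per_peer_state.insert(1u64, Mutex::new(PeerState { channels: vec![42] }));
    handle_msg(&per_peer_state, 1).unwrap();
    assert!(handle_msg(&per_peer_state, 2).is_err());
}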
@@ -9764,9 +9896,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
             }
         };
-        self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
-            Some(forwarded_htlc_value), skimmed_fee_msat, false, false, *counterparty_node_id,
-            funding_txo, msg.channel_id, Some(next_user_channel_id),
+        self.claim_funds_internal(
+            htlc_source,
+            msg.payment_preimage.clone(),
+            Some(forwarded_htlc_value),
+            skimmed_fee_msat,
+            false,
+            false,
+            *counterparty_node_id,
+            funding_txo,
+            msg.channel_id,
+            Some(next_user_channel_id),
         );
 
         Ok(())
@@ -10636,56 +10776,102 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
     }
 
     /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
-    #[rustfmt::skip]
     fn process_pending_monitor_events(&self) -> bool {
         debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
         let mut failed_channels: Vec<(Result, _)> = Vec::new();
         let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
         let has_pending_monitor_events = !pending_monitor_events.is_empty();
-        for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+        for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in
+            pending_monitor_events.drain(..)
+        {
             for monitor_event in monitor_events.drain(..) {
                 match monitor_event {
                     MonitorEvent::HTLCEvent(htlc_update) => {
-                        let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
+                        let logger = WithContext::from(
+                            &self.logger,
+                            Some(counterparty_node_id),
+                            Some(channel_id),
+                            Some(htlc_update.payment_hash),
+                        );
                         if let Some(preimage) = htlc_update.payment_preimage {
-                            log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+                            log_trace!(
+                                logger,
+                                "Claiming HTLC with preimage {} from our monitor",
+                                preimage
+                            );
                             self.claim_funds_internal(
-                                htlc_update.source, preimage,
-                                htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-                                false, counterparty_node_id, funding_outpoint, channel_id, None,
+                                htlc_update.source,
+                                preimage,
+                                htlc_update.htlc_value_satoshis.map(|v| v * 1000),
+                                None,
+                                true,
+                                false,
+                                counterparty_node_id,
+                                funding_outpoint,
+                                channel_id,
+                                None,
                             );
                         } else {
-                            log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
+                            log_trace!(
+                                logger,
+                                "Failing HTLC with hash {} from our monitor",
+                                &htlc_update.payment_hash
+                            );
                             let failure_reason = LocalHTLCFailureReason::OnChainTimeout;
-                            let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
+                            let receiver = HTLCHandlingFailureType::Forward {
+                                node_id: Some(counterparty_node_id),
+                                channel_id,
+                            };
                             let reason = HTLCFailReason::from_failure_code(failure_reason);
-                            self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
+                            self.fail_htlc_backwards_internal(
+                                &htlc_update.source,
+                                &htlc_update.payment_hash,
+                                &reason,
+                                receiver,
+                            );
                         }
                     },
-                    MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
+                    MonitorEvent::HolderForceClosed(_)
+                    | MonitorEvent::HolderForceClosedWithInfo { .. } => {
                         let per_peer_state = self.per_peer_state.read().unwrap();
                         if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                             let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                             let peer_state = &mut *peer_state_lock;
-                            if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(channel_id) {
-                                let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+                            if let hash_map::Entry::Occupied(chan_entry) =
+                                peer_state.channel_by_id.entry(channel_id)
+                            {
+                                let reason = if let MonitorEvent::HolderForceClosedWithInfo {
+                                    reason,
+                                    ..
+                                } = monitor_event
+                                {
                                     reason
                                 } else {
                                     ClosureReason::HolderForceClosed {
                                         broadcasted_latest_txn: Some(true),
-                                        message: "Legacy ChannelMonitor closure".to_owned()
+                                        message: "Legacy ChannelMonitor closure".to_owned(),
                                     }
                                 };
                                 let err = ChannelError::Close((reason.to_string(), reason));
                                 let mut chan = chan_entry.remove();
-                                let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan, &channel_id);
+                                let (_, e) = convert_channel_err!(
+                                    self,
+                                    peer_state,
+                                    err,
+                                    &mut chan,
+                                    &channel_id
+                                );
                                 failed_channels.push((Err(e), counterparty_node_id));
                             }
                         }
                     },
                     MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
-                        self.channel_monitor_updated(&channel_id, monitor_update_id, &counterparty_node_id);
+                        self.channel_monitor_updated(
+                            &channel_id,
+                            monitor_update_id,
+                            &counterparty_node_id,
+                        );
                     },
                 }
             }
@@ -15240,13 +15426,9 @@ where
             }
 
             for short_channel_id in channel.context.historical_scids() {
-                short_to_chan_info.insert(
-                    *short_channel_id,
-                    (
-                        channel.context.get_counterparty_node_id(),
-                        channel.context.channel_id(),
-                    ),
-                );
+                let cp_id = channel.context.get_counterparty_node_id();
+                let chan_id = channel.context.channel_id();
+                short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id));
             }
 
             per_peer_state
@@ -16075,10 +16257,8 @@ where
                 .into_iter()
                 .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
             {
-                let existing_payment = claimable_payments.insert(
-                    payment_hash,
-                    ClaimablePayment { purpose, htlcs, onion_fields: onion },
-                );
+                let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion };
+                let existing_payment = claimable_payments.insert(payment_hash, claimable);
                 if existing_payment.is_some() {
                     return Err(DecodeError::InvalidValue);
                 }
@@ -16087,10 +16267,8 @@ where
             for (purpose, (payment_hash, htlcs)) in
                 purposes.into_iter().zip(claimable_htlcs_list.into_iter())
             {
-                let existing_payment = claimable_payments.insert(
-                    payment_hash,
-                    ClaimablePayment { purpose, htlcs, onion_fields: None },
-                );
+                let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None };
+                let existing_payment = claimable_payments.insert(payment_hash, claimable);
                 if existing_payment.is_some() {
                     return Err(DecodeError::InvalidValue);
                 }
@@ -16211,13 +16389,9 @@ where
                     return Err(DecodeError::InvalidValue);
                 }
                 if funded_chan.context.is_usable() {
-                    if short_to_chan_info
-                        .insert(
-                            funded_chan.context.outbound_scid_alias(),
-                            (funded_chan.context.get_counterparty_node_id(), *chan_id),
-                        )
-                        .is_some()
-                    {
+                    let alias = funded_chan.context.outbound_scid_alias();
+                    let cp_id = funded_chan.context.get_counterparty_node_id();
+                    if short_to_chan_info.insert(alias, (cp_id, *chan_id)).is_some() {
                         // Note that in rare cases its possible to hit this while reading an older
                         // channel if we just happened to pick a colliding outbound alias above.
                         log_error!(
@@ -16606,21 +16780,17 @@ where
             let mut pending_events = channel_manager.pending_events.lock().unwrap();
             let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
+            let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
+            let sender_intended_total_msat =
+                payment.htlcs.first().map(|htlc| htlc.total_msat);
             pending_events.push_back((
                 events::Event::PaymentClaimed {
                     receiver_node_id,
                     payment_hash,
                     purpose: payment.purpose,
                     amount_msat: claimable_amt_msat,
-                    htlcs: payment
-                        .htlcs
-                        .iter()
-                        .map(events::ClaimedHTLC::from)
-                        .collect(),
-                    sender_intended_total_msat: payment
-                        .htlcs
-                        .first()
-                        .map(|htlc| htlc.total_msat),
+                    htlcs,
+                    sender_intended_total_msat,
                     onion_fields: payment.onion_fields,
                     payment_id: Some(payment_id),
                 },