From 4441a06dee6967fd9667f8d07a666b3ed7ed1a3a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 11 Jun 2020 15:34:28 -0400 Subject: [PATCH 1/9] Reorder struct definitions so that they are in dependency order. There are a few cases where the upcoming C bindings don't know how to handle something which depends on something defined later in the file. Instead of adding another pass to the C bindings generator, it is much simpler to just reorder structs. --- lightning/src/routing/network_graph.rs | 14 ++++---- lightning/src/util/config.rs | 48 +++++++++++++------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 89c19b84896..0dafc105bd2 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -33,6 +33,13 @@ use std::collections::btree_map::Entry as BtreeEntry; use std::ops::Deref; use bitcoin::hashes::hex::ToHex; +/// Represents the network as nodes and channels between them +#[derive(PartialEq)] +pub struct NetworkGraph { + channels: BTreeMap, + nodes: BTreeMap, +} + /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. /// This network graph is then used for routing payments. @@ -443,13 +450,6 @@ impl Readable for NodeInfo { } } -/// Represents the network as nodes and channels between them -#[derive(PartialEq)] -pub struct NetworkGraph { - channels: BTreeMap, - nodes: BTreeMap, -} - impl Writeable for NetworkGraph { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { (self.channels.len() as u64).write(writer)?; diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index c3726365996..712b5937bab 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -12,30 +12,6 @@ use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; -/// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. -/// -/// Default::default() provides sane defaults for most configurations -/// (but currently with 0 relay fees!) -#[derive(Clone, Debug)] -pub struct UserConfig { - /// Channel config that we propose to our counterparty. - pub own_channel_config: ChannelHandshakeConfig, - /// Limits applied to our counterparty's proposed channel config settings. - pub peer_channel_config_limits: ChannelHandshakeLimits, - /// Channel config which affects behavior during channel lifetime. - pub channel_options: ChannelConfig, -} - -impl Default for UserConfig { - fn default() -> Self { - UserConfig { - own_channel_config: ChannelHandshakeConfig::default(), - peer_channel_config_limits: ChannelHandshakeLimits::default(), - channel_options: ChannelConfig::default(), - } - } -} - /// Configuration we set when applicable. /// /// Default::default() provides sane defaults. @@ -228,3 +204,27 @@ impl_writeable!(ChannelConfig, 8+1+1, { announced_channel, commit_upfront_shutdown_pubkey }); + +/// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. +/// +/// Default::default() provides sane defaults for most configurations +/// (but currently with 0 relay fees!) +#[derive(Clone, Debug)] +pub struct UserConfig { + /// Channel config that we propose to our counterparty. + pub own_channel_config: ChannelHandshakeConfig, + /// Limits applied to our counterparty's proposed channel config settings. + pub peer_channel_config_limits: ChannelHandshakeLimits, + /// Channel config which affects behavior during channel lifetime. 
+ pub channel_options: ChannelConfig, +} + +impl Default for UserConfig { + fn default() -> Self { + UserConfig { + own_channel_config: ChannelHandshakeConfig::default(), + peer_channel_config_limits: ChannelHandshakeLimits::default(), + channel_options: ChannelConfig::default(), + } + } +} From a05b3fa8974c80a4959179346b55c254ddf509b8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 30 May 2020 23:16:29 -0400 Subject: [PATCH 2/9] Always refer to Deref types with where clauses instead of direct This makes it a little easier to write C bindings generation as we only have to handle one case instead of both. --- lightning/src/chain/chaininterface.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 644c3214aca..fe3914545c6 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -245,13 +245,15 @@ pub type BlockNotifierRef<'a, C> = BlockNotifier<'a, &'a ChainListener, C>; /// or a BlockNotifierRef for conciseness. See their documentation for more details, but essentially /// you should default to using a BlockNotifierRef, and use a BlockNotifierArc instead when you /// require ChainListeners with static lifetimes, such as when you're using lightning-net-tokio. -pub struct BlockNotifier<'a, CL: Deref + 'a, C: Deref> where C::Target: ChainWatchInterface { +pub struct BlockNotifier<'a, CL: Deref + 'a, C: Deref> + where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface { listeners: Mutex>, chain_monitor: C, phantom: PhantomData<&'a ()>, } -impl<'a, CL: Deref + 'a, C: Deref> BlockNotifier<'a, CL, C> where C::Target: ChainWatchInterface { +impl<'a, CL: Deref + 'a, C: Deref> BlockNotifier<'a, CL, C> + where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface { /// Constructs a new BlockNotifier without any listeners. pub fn new(chain_monitor: C) -> BlockNotifier<'a, CL, C> { BlockNotifier { From bce202536d0de69cc12b833fa9c7bc9cbba94b93 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 30 May 2020 23:20:17 -0400 Subject: [PATCH 3/9] Refer to generic types by importing them instead of a super-mod. This avoids one case the bindings generation hasn't bothered to handle by simply importing types that are referred to. --- lightning/src/chain/keysinterface.rs | 6 +++--- lightning/src/ln/chan_utils.rs | 3 ++- lightning/src/ln/channelmanager.rs | 16 +++++++++------- lightning/src/ln/channelmonitor.rs | 14 ++++++++------ lightning/src/routing/network_graph.rs | 7 ++++--- 5 files changed, 26 insertions(+), 20 deletions(-) diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs index fbc6c9bc6e6..ebe7e070156 100644 --- a/lightning/src/chain/keysinterface.rs +++ b/lightning/src/chain/keysinterface.rs @@ -33,7 +33,7 @@ use util::ser::{Writeable, Writer, Readable}; use ln::chan_utils; use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, LocalCommitmentTransaction, PreCalculatedTxCreationKeys}; -use ln::msgs; +use ln::msgs::UnsignedChannelAnnouncement; use std::sync::atomic::{AtomicUsize, Ordering}; use std::io::Error; @@ -316,7 +316,7 @@ pub trait ChannelKeys : Send+Clone { /// Note that if this fails or is rejected, the channel will not be publicly announced and /// our counterparty may (though likely will not) close the channel on us for violating the /// protocol. 
- fn sign_channel_announcement(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result; + fn sign_channel_announcement(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result; /// Set the remote channel basepoints and remote/local to_self_delay. /// This is done immediately on incoming channels and as soon as the channel is accepted on outgoing channels. @@ -584,7 +584,7 @@ impl ChannelKeys for InMemoryChannelKeys { Ok(secp_ctx.sign(&sighash, &self.funding_key)) } - fn sign_channel_announcement(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result { + fn sign_channel_announcement(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result { let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); Ok(secp_ctx.sign(&msghash, &self.funding_key)) } diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index e315398d523..6c7ed11139b 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -30,6 +30,7 @@ use util::byte_utils; use bitcoin::secp256k1::key::{SecretKey, PublicKey}; use bitcoin::secp256k1::{Secp256k1, Signature}; +use bitcoin::secp256k1::Error as SecpError; use bitcoin::secp256k1; use std::{cmp, mem}; @@ -357,7 +358,7 @@ impl_writeable!(ChannelPublicKeys, 33*5, { impl TxCreationKeys { /// Create a new TxCreationKeys from channel base points and the per-commitment point - pub fn new(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result { + pub fn new(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result { Ok(TxCreationKeys { per_commitment_point: per_commitment_point.clone(), revocation_key: derive_public_revocation_key(&secp_ctx, &per_commitment_point, &b_revocation_base)?, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 010505ef10a..83746d01a94 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -42,10 +42,12 @@ use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpd use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; use ln::msgs; +use ln::msgs::NetAddress; use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField}; use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys}; use util::config::UserConfig; +use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; use util::{byte_utils, events}; use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; @@ -312,7 +314,7 @@ pub(super) struct ChannelHolder { claimable_htlcs: HashMap<(PaymentHash, Option), Vec>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). - pub(super) pending_msg_events: Vec, + pub(super) pending_msg_events: Vec, } /// State we hold per-peer. In the future we should put channels in here, but for now we only hold @@ -1483,7 +1485,7 @@ impl // be absurd. 
We ensure this by checking that at least 500 (our stated public contract on when // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB // message... - const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2; + const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2; #[deny(const_err)] #[allow(dead_code)] // ...by failing to compile if the number of addresses that would be half of a message is @@ -1503,7 +1505,7 @@ impl /// only Tor Onion addresses. /// /// Panics if addresses is absurdly large (more than 500). - pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec) { + pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec) { let _ = self.total_consistency_lock.read().unwrap(); if addresses.len() > 500 { @@ -3010,14 +3012,14 @@ impl } } -impl events::MessageSendEventsProvider for ChannelManager +impl MessageSendEventsProvider for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { - fn get_and_clear_pending_msg_events(&self) -> Vec { + fn get_and_clear_pending_msg_events(&self) -> Vec { //TODO: This behavior should be documented. It's non-intuitive that we query // ChannelMonitors when clearing other events. self.process_pending_monitor_events(); @@ -3029,14 +3031,14 @@ impl } } -impl events::EventsProvider for ChannelManager +impl EventsProvider for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { - fn get_and_clear_pending_events(&self) -> Vec { + fn get_and_clear_pending_events(&self) -> Vec { //TODO: This behavior should be documented. It's non-intuitive that we query // ChannelMonitors when clearing other events. self.process_pending_monitor_events(); diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs index 060e88faa20..c743f647ccd 100644 --- a/lightning/src/ln/channelmonitor.rs +++ b/lightning/src/ln/channelmonitor.rs @@ -47,11 +47,13 @@ use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys}; use util::logger::Logger; use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48}; use util::{byte_utils, events}; +use util::events::Event; use std::collections::{HashMap, hash_map}; use std::sync::Mutex; use std::{hash,cmp, mem}; use std::ops::Deref; +use std::io::Error; /// An update generated by the underlying Channel itself which contains some new information the /// ChannelMonitor should be made aware of. @@ -317,7 +319,7 @@ impl Vec { + fn get_and_clear_pending_events(&self) -> Vec { let mut pending_events = Vec::new(); for chan in self.monitors.lock().unwrap().values_mut() { pending_events.append(&mut chan.get_and_clear_pending_events()); @@ -795,7 +797,7 @@ pub struct ChannelMonitor { payment_preimages: HashMap, pending_monitor_events: Vec, - pending_events: Vec, + pending_events: Vec, // Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which // we have to take actions once they reach enough confs. 
Key is a block height timer, i.e we enforce @@ -946,7 +948,7 @@ impl ChannelMonitor { /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the /// returned block hash and the the current chain and then reconnecting blocks to get to the /// best chain) upon deserializing the object! - pub fn write_for_disk(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + pub fn write_for_disk(&self, writer: &mut W) -> Result<(), Error> { //TODO: We still write out all the serialization here manually instead of using the fancy //serialization framework we have, we should migrate things over to it. writer.write_all(&[SERIALIZATION_VERSION; 1])?; @@ -1452,7 +1454,7 @@ impl ChannelMonitor { /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do /// no internal locking in ChannelMonitors. - pub fn get_and_clear_pending_events(&mut self) -> Vec { + pub fn get_and_clear_pending_events(&mut self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut ret, &mut self.pending_events); ret @@ -1957,7 +1959,7 @@ impl ChannelMonitor { }, OnchainEvent::MaturingOutput { descriptor } => { log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); - self.pending_events.push(events::Event::SpendableOutputs { + self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor] }); } @@ -2437,7 +2439,7 @@ impl Readable for (BlockHash, ChannelMonitor } let pending_events_len: u64 = Readable::read(reader)?; - let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::())); + let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::())); for _ in 0..pending_events_len { if let Some(event) = MaybeReadable::read(reader)? 
{ pending_events.push(event); diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 0dafc105bd2..44f2ed237bf 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -20,7 +20,8 @@ use bitcoin::blockdata::opcodes; use chain::chaininterface::{ChainError, ChainWatchInterface}; use ln::features::{ChannelFeatures, NodeFeatures}; -use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, OptionalField, MAX_VALUE_MSAT}; +use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; +use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; use util::logger::Logger; @@ -154,7 +155,7 @@ impl RoutingMessageHandler for N self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx)) } - fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option, Option)> { + fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = network_graph.get_channels().range(starting_point..); @@ -182,7 +183,7 @@ impl RoutingMessageHandler for N result } - fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { + fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = if let Some(pubkey) = starting_point { From f65765872e70bb273c7a73fc1f6bdc59fec3646d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 30 May 2020 23:22:16 -0400 Subject: [PATCH 4/9] Refer to return types by the trait that they're defined via Instead of using the explicit type which is being returned, refer to them as Self::AssociatedType, to make clear to the bindings what type of thing is being returned. --- lightning/src/chain/keysinterface.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs index ebe7e070156..dbe0a8621ce 100644 --- a/lightning/src/chain/keysinterface.rs +++ b/lightning/src/chain/keysinterface.rs @@ -815,7 +815,7 @@ impl KeysInterface for KeysManager { self.shutdown_pubkey.clone() } - fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> InMemoryChannelKeys { + fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> Self::ChanKeySigner { let child_ix = self.channel_child_index.fetch_add(1, Ordering::AcqRel); let ix_and_nanos: u64 = (child_ix as u64) << 32 | (self.starting_time_nanos as u64); self.derive_channel_keys(channel_value_satoshis, ix_and_nanos, self.starting_time_secs) From de8c5dc76dbfc64071763ddf2376b09a9f65479b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 23 Jul 2020 16:10:29 -0400 Subject: [PATCH 5/9] Use slices to references not slices of concrete objects in pub API Because the C bindings maps objects into new structs which contain only a pointer to the underlying (immovable) Rust type, it cannot create a list of Rust types which are contiguous in memory. 
Thus, in order to allow C clients to call certain Rust functions, we have to use &[&Type] not &[Type]. This commit fixes this issue for the get_route function. --- fuzz/src/router.rs | 5 ++++- lightning/src/ln/functional_tests.rs | 8 ++++++-- lightning/src/routing/router.rs | 20 ++++++++++---------- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 38e29de516f..ffc4608376b 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -236,7 +236,10 @@ pub fn do_test(data: &[u8], out: Out) { } &last_hops_vec[..] }; - let _ = get_route(&our_pubkey, &net_graph_msg_handler.network_graph.read().unwrap(), &target, first_hops, last_hops, slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger)); + let _ = get_route(&our_pubkey, &net_graph_msg_handler.network_graph.read().unwrap(), &target, + first_hops.map(|c| c.iter().collect::>()).as_ref().map(|a| a.as_slice()), + &last_hops.iter().collect::>(), + slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger)); }, _ => return, } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 7a410c8bf72..4b5453351b4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3625,7 +3625,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { let logger = test_utils::TestLogger::new(); let payment_event = { let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), + &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), + &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap(); check_added_monitors!(nodes[0], 1); @@ -3800,7 +3802,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { // Channel should still work fine... 
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), + &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), + &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000); } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 7730fd8f2ff..23232a0d0fb 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -14,7 +14,7 @@ use bitcoin::secp256k1::key::PublicKey; -use ln::channelmanager; +use ln::channelmanager::ChannelDetails; use ln::features::{ChannelFeatures, NodeFeatures}; use ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT}; use routing::network_graph::{NetworkGraph, RoutingFees}; @@ -169,8 +169,8 @@ struct DummyDirectionalChannelInfo { /// The fees on channels from us to next-hops are ignored (as they are assumed to all be /// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat /// *is* checked as they may change based on the receiving node. -pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>, - last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result where L::Target: Logger { +pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[&ChannelDetails]>, + last_hops: &[&RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result where L::Target: Logger { // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by // uptime/success in using a node in the past. 
if *target == *our_node_id { @@ -907,7 +907,7 @@ mod tests { inbound_capacity_msat: 0, is_live: true, }]; - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[7]); @@ -954,7 +954,7 @@ mod tests { inbound_capacity_msat: 0, is_live: true, }]; - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[7]); @@ -1018,7 +1018,7 @@ mod tests { inbound_capacity_msat: 0, is_live: true, }]; - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[7]); @@ -1071,7 +1071,7 @@ mod tests { let (_, our_id, _, nodes) = get_nodes(&secp_ctx); // Simple test across 2, 3, 5, and 4 via a last_hop channel - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes).iter().collect::>(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 5); assert_eq!(route.paths[0][0].pubkey, nodes[1]); @@ -1130,7 +1130,7 @@ mod tests { is_live: true, }]; let mut last_hops = last_hops(&nodes); - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], Some(&our_chans), &last_hops, 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], Some(&our_chans.iter().collect::>()), &last_hops.iter().collect::>(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[3]); @@ -1150,7 +1150,7 @@ mod tests { last_hops[0].fees.base_msat = 1000; // Revert to via 6 as the fee on 8 goes up - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops, 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops.iter().collect::>(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 4); assert_eq!(route.paths[0][0].pubkey, nodes[1]); @@ -1184,7 +1184,7 @@ mod tests { assert_eq!(route.paths[0][3].channel_features.le_flags(), &Vec::::new()); // We can't learn any flags from invoices, sadly // ...but still use 8 for larger payments as 6 has a variable feerate - let route = get_route(&our_id, 
&net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops, 2000, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops.iter().collect::>(), 2000, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 5); assert_eq!(route.paths[0][0].pubkey, nodes[1]); From 6df9129ace609bfb5c7f08ae0f41175126d05b1b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 30 Jul 2020 13:19:11 -0400 Subject: [PATCH 6/9] Use ln OutPoints not bitcoin ones in SpendableOutputDescriptors Lightning OutPoints only have 16 bits to express the output index instead of Bitcoin's 32 bits, implying that some outputs are possibly not expressible as lightning OutPoints. However, such OutPoints can never be hit within the lightning protocol, and must be on-chain spam sent by a third party wishing to donate us money. Still, in order to do so, the third party would need to fill nearly an entire block with garbage, so this case should be relatively safe. A new comment in channelmonitor explains the reasoning a bit further. --- lightning/src/chain/keysinterface.rs | 3 ++- lightning/src/ln/channelmonitor.rs | 22 ++++++++++++++++++---- lightning/src/ln/functional_tests.rs | 6 +++--- lightning/src/util/macro_logger.rs | 6 +++--- 4 files changed, 26 insertions(+), 11 deletions(-) diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs index dbe0a8621ce..c23d23b1a3e 100644 --- a/lightning/src/chain/keysinterface.rs +++ b/lightning/src/chain/keysinterface.rs @@ -11,7 +11,7 @@ //! spendable on-chain outputs which the user owns and is responsible for using just as any other //! on-chain output which is theirs. -use bitcoin::blockdata::transaction::{Transaction, OutPoint, TxOut}; +use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; use bitcoin::network::constants::Network; @@ -31,6 +31,7 @@ use bitcoin::secp256k1; use util::byte_utils; use util::ser::{Writeable, Writer, Readable}; +use chain::transaction::OutPoint; use ln::chan_utils; use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, LocalCommitmentTransaction, PreCalculatedTxCreationKeys}; use ln::msgs::UnsignedChannelAnnouncement; diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs index c743f647ccd..570982fb70c 100644 --- a/lightning/src/ln/channelmonitor.rs +++ b/lightning/src/ln/channelmonitor.rs @@ -2201,16 +2201,30 @@ impl ChannelMonitor { fn is_paying_spendable_output(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger { let mut spendable_output = None; for (i, outp) in tx.output.iter().enumerate() { // There is max one spendable output for any channel tx, including ones generated by us + if i > ::std::u16::MAX as usize { + // While it is possible that an output exists on chain which is greater than the + // 2^16th output in a given transaction, this is only possible if the output is not + // in a lightning transaction and was instead placed there by some third party who + // wishes to give us money for no reason. + // Namely, any lightning transactions which we pre-sign will never have anywhere + // near 2^16 outputs both because such transactions must have ~2^16 outputs who's + // scripts are not longer than one byte in length and because they are inherently + // non-standard due to their size. 
+ // Thus, it is completely safe to ignore such outputs, and while it may result in + // us ignoring non-lightning fund to us, that is only possible if someone fills + // nearly a full block with garbage just to hit this case. + continue; + } if outp.script_pubkey == self.destination_script { spendable_output = Some(SpendableOutputDescriptor::StaticOutput { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), }); break; } else if let Some(ref broadcasted_local_revokable_script) = self.broadcasted_local_revokable_script { if broadcasted_local_revokable_script.0 == outp.script_pubkey { spendable_output = Some(SpendableOutputDescriptor::DynamicOutputP2WSH { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, per_commitment_point: broadcasted_local_revokable_script.1, to_self_delay: self.on_local_tx_csv, output: outp.clone(), @@ -2221,14 +2235,14 @@ impl ChannelMonitor { } } else if self.remote_payment_script == outp.script_pubkey { spendable_output = Some(SpendableOutputDescriptor::StaticOutputRemotePayment { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), key_derivation_params: self.keys.key_derivation_params(), }); break; } else if outp.script_pubkey == self.shutdown_script { spendable_output = Some(SpendableOutputDescriptor::StaticOutput { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), }); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 4b5453351b4..f44a8d59e09 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -4672,7 +4672,7 @@ macro_rules! check_spendable_outputs { match *outp { SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, ref output, ref key_derivation_params } => { let input = TxIn { - previous_output: outpoint.clone(), + previous_output: outpoint.into_bitcoin_outpoint(), script_sig: Script::new(), sequence: 0, witness: Vec::new(), @@ -4700,7 +4700,7 @@ macro_rules! check_spendable_outputs { }, SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref remote_revocation_pubkey } => { let input = TxIn { - previous_output: outpoint.clone(), + previous_output: outpoint.into_bitcoin_outpoint(), script_sig: Script::new(), sequence: *to_self_delay as u32, witness: Vec::new(), @@ -4733,7 +4733,7 @@ macro_rules! check_spendable_outputs { SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { let secp_ctx = Secp256k1::new(); let input = TxIn { - previous_output: outpoint.clone(), + previous_output: outpoint.into_bitcoin_outpoint(), script_sig: Script::new(), sequence: 0, witness: Vec::new(), diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index 85484aceae3..e565c19ccab 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -136,13 +136,13 @@ impl<'a> std::fmt::Display for DebugSpendable<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self.0 { &SpendableOutputDescriptor::StaticOutput { ref outpoint, .. 
} => { - write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.vout)?; + write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.index)?; } &SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, .. } => { - write!(f, "DynamicOutputP2WSH {}:{} marked for spending", outpoint.txid, outpoint.vout)?; + write!(f, "DynamicOutputP2WSH {}:{} marked for spending", outpoint.txid, outpoint.index)?; } &SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, .. } => { - write!(f, "DynamicOutputP2WPKH {}:{} marked for spending", outpoint.txid, outpoint.vout)?; + write!(f, "DynamicOutputP2WPKH {}:{} marked for spending", outpoint.txid, outpoint.index)?; } } Ok(()) From 2ff4ae782eccb84e09f86e7d37c426e6153ea961 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 7 Aug 2020 16:27:26 -0400 Subject: [PATCH 7/9] Give ChannelManagerReadArgs HashMap-of-monitors ownership Its somewhat awkward that ChannelManagerReadArgs requires a mutable reference to a HashMap of ChannelMonitors, forcing the callsite to define a scope for the HashMap which they almost certainly won't use after deserializing the ChannelManager. Worse, to map the current version to C bindings, we'd need to also create a HashMap binding, which is overkill for just this one use. Instead, we just give the ReadArgs struct ownership of the HashMap and add a constructor which fills the HashMap for you. --- fuzz/src/chanmon_consistency.rs | 2 +- lightning/src/ln/channelmanager.rs | 24 +++++++++++++++++++++-- lightning/src/ln/functional_test_utils.rs | 2 +- lightning/src/ln/functional_tests.rs | 12 ++++++------ 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 0fc77c58f56..609ce996a18 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -242,7 +242,7 @@ pub fn do_test(data: &[u8], out: Out) { tx_broadcaster: broadcast.clone(), logger, default_config: config, - channel_monitors: &mut monitor_refs, + channel_monitors: monitor_refs, }; (<(BlockHash, ChannelManager, Arc, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 83746d01a94..dfa12e1c336 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3775,7 +3775,27 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: /// /// In such cases the latest local transactions will be sent to the tx_broadcaster included in /// this struct. - pub channel_monitors: &'a mut HashMap>, + pub channel_monitors: HashMap>, +} + +impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L> + where M::Target: ManyChannelMonitor, + T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor + /// HashMap for you. This is primarily useful for C bindings where it is not practical to + /// populate a HashMap directly from C. 
+ pub fn new(keys_manager: K, fee_estimator: F, monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig, + mut channel_monitors: Vec<&'a mut ChannelMonitor>) -> Self { + Self { + keys_manager, fee_estimator, monitor, tx_broadcaster, logger, default_config, + channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect() + } + } } // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the @@ -3802,7 +3822,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De F::Target: FeeEstimator, L::Target: Logger, { - fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result { + fn read(reader: &mut R, mut args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result { let _ver: u8 = Readable::read(reader)?; let min_ver: u8 = Readable::read(reader)?; if min_ver > SERIALIZATION_VERSION { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 90421209559..b3d5e8c9e9d 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -169,7 +169,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { monitor: self.chan_monitor, tx_broadcaster: self.tx_broadcaster.clone(), logger: &test_utils::TestLogger::new(), - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap(); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index f44a8d59e09..9087b17a819 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -4318,7 +4318,7 @@ fn test_no_txn_manager_serialize_deserialize() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap() }; nodes_0_deserialized = nodes_0_deserialized_tmp; @@ -4426,7 +4426,7 @@ fn test_manager_serialize_deserialize_events() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap() }; nodes_0_deserialized = nodes_0_deserialized_tmp; @@ -4516,7 +4516,7 @@ fn test_simple_manager_serialize_deserialize() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap() }; nodes_0_deserialized = nodes_0_deserialized_tmp; @@ -4606,7 +4606,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -4620,7 +4620,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), }).unwrap(); nodes_0_deserialized = 
nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); @@ -7891,7 +7891,7 @@ fn test_data_loss_protect() { logger: &logger, tx_broadcaster: &tx_broadcaster, default_config: UserConfig::default(), - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap().1 }; nodes[0].node = &node_state_0; From c6bae1fdb023c4982cc01f53507b3bc1b63dbac6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 13 Aug 2020 14:45:34 -0400 Subject: [PATCH 8/9] Rename TxCreationKeys::new to not conflict w/ auto-gen'd C bindings The C bindings automatically create a _new() function for structs which contain only pub fields which we know how to map. This conflicts with the actual TxCreationKeys::new() function, so we simply rename it to capture its nature as a derivation function. --- lightning/src/ln/chan_utils.rs | 2 +- lightning/src/ln/channel.rs | 6 +++--- lightning/src/ln/functional_tests.rs | 2 +- lightning/src/ln/onchaintx.rs | 4 ++-- lightning/src/util/enforcing_trait_impls.rs | 12 ++++++------ 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 6c7ed11139b..998ac9cdcde 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -358,7 +358,7 @@ impl_writeable!(ChannelPublicKeys, 33*5, { impl TxCreationKeys { /// Create a new TxCreationKeys from channel base points and the per-commitment point - pub fn new(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result { + pub fn derive_new(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result { Ok(TxCreationKeys { per_commitment_point: per_commitment_point.clone(), revocation_key: derive_public_revocation_key(&secp_ctx, &per_commitment_point, &b_revocation_base)?, diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9d4ccac896c..5e9c07e1d0e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1126,7 +1126,7 @@ impl Channel { let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint; let their_pubkeys = self.their_pubkeys.as_ref().unwrap(); - Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned())) + Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned())) } #[inline] @@ -1140,7 +1140,7 @@ impl Channel { let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint; let their_pubkeys = self.their_pubkeys.as_ref().unwrap(); - Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned())) + Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned())) } /// Gets the redeemscript for 
the funding transaction output (ie the funding transaction output @@ -4674,7 +4674,7 @@ mod tests { let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret); let htlc_basepoint = &chan.local_keys.pubkeys().htlc_basepoint; - let keys = TxCreationKeys::new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint).unwrap(); + let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint).unwrap(); chan.their_pubkeys = Some(their_pubkeys); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 9087b17a819..2bf73f5646a 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1636,7 +1636,7 @@ fn test_fee_spike_violation_fails_htlc() { // Assemble the set of keys we can use for signatures for our commitment_signed message. let commitment_secret = SecretKey::from_slice(&remote_secret1).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &commitment_secret); - let commit_tx_keys = chan_utils::TxCreationKeys::new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint, + let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint, &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); // Build the remote commitment transaction so we can sign it, and then later use the diff --git a/lightning/src/ln/onchaintx.rs b/lightning/src/ln/onchaintx.rs index e21e8fb2ddf..37c7f3964a2 100644 --- a/lightning/src/ln/onchaintx.rs +++ b/lightning/src/ln/onchaintx.rs @@ -583,7 +583,7 @@ impl OnchainTxHandler { for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() { match per_outp_material { &InputMaterial::Revoked { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_remote_tx_csv } => { - if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { + if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { let witness_script = if let Some(ref htlc) = *htlc { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key) @@ -607,7 +607,7 @@ impl OnchainTxHandler { } }, &InputMaterial::RemoteHTLC { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref preimage, ref htlc } => { - if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { + if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, 
remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key); if !preimage.is_some() { bumped_tx.lock_time = htlc.cltv_expiry }; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation diff --git a/lightning/src/util/enforcing_trait_impls.rs b/lightning/src/util/enforcing_trait_impls.rs index 0d20f7ad422..2127cc1a4da 100644 --- a/lightning/src/util/enforcing_trait_impls.rs +++ b/lightning/src/util/enforcing_trait_impls.rs @@ -46,12 +46,12 @@ impl EnforcingChannelKeys { keys: &TxCreationKeys) { let remote_points = self.inner.remote_pubkeys(); - let keys_expected = TxCreationKeys::new(secp_ctx, - &keys.per_commitment_point, - &remote_points.delayed_payment_basepoint, - &remote_points.htlc_basepoint, - &self.inner.pubkeys().revocation_basepoint, - &self.inner.pubkeys().htlc_basepoint).unwrap(); + let keys_expected = TxCreationKeys::derive_new(secp_ctx, + &keys.per_commitment_point, + &remote_points.delayed_payment_basepoint, + &remote_points.htlc_basepoint, + &self.inner.pubkeys().revocation_basepoint, + &self.inner.pubkeys().htlc_basepoint).unwrap(); if keys != &keys_expected { panic!("derived different per-tx keys") } } } From d224c1def44f167afd711515484207f2f4966d2d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 24 Aug 2020 14:14:05 -0400 Subject: [PATCH 9/9] Add a C-bindings-compatible read lock type for NetworkGraph In order to calculate a route, it is likely that users need to take a read()-lock on NetGraphMsgHandler::network_graph. This is not possible naively from C bindings, as Rust's native RwLock is not exposed. Thus, we provide a simple wrapper around the RwLockReadGuard and expose simple accessor methods. --- lightning/src/routing/network_graph.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 44f2ed237bf..c46781d11b5 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -27,7 +27,7 @@ use util::ser::{Writeable, Readable, Writer}; use util::logger::Logger; use std::{cmp, fmt}; -use std::sync::RwLock; +use std::sync::{RwLock, RwLockReadGuard}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::BTreeMap; use std::collections::btree_map::Entry as BtreeEntry; @@ -41,6 +41,11 @@ pub struct NetworkGraph { nodes: BTreeMap, } +/// A simple newtype for RwLockReadGuard<'a, NetworkGraph>. +/// This exists only to make accessing a RwLock possible from +/// the C bindings, as it can be done directly in Rust code. +pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>); + /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. /// This network graph is then used for routing payments. @@ -85,6 +90,21 @@ impl NetGraphMsgHandler where C::Target: ChainWatchInt logger, } } + + /// Take a read lock on the network_graph and return it in the C-bindings + /// newtype helper. This is likely only useful when called via the C + /// bindings as you can call `self.network_graph.read().unwrap()` in Rust + /// yourself. 
+ pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> { + LockedNetworkGraph(self.network_graph.read().unwrap()) + } +} + +impl<'a> LockedNetworkGraph<'a> { + /// Get a reference to the NetworkGraph which this read-lock contains. + pub fn graph(&self) -> &NetworkGraph { + &*self.0 + } }
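
A minimal usage sketch for the read-lock wrapper added in PATCH 9/9 (not itself part of any patch above). It assumes the NetGraphMsgHandler generic bounds shown in the diff and the standard lightning crate import paths, so exact signatures may differ by crate version:

use std::ops::Deref;

use lightning::chain::chaininterface::ChainWatchInterface;
use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
use lightning::util::logger::Logger;

// Count known channels by borrowing the NetworkGraph through the new
// C-bindings-friendly wrapper rather than calling
// `handler.network_graph.read().unwrap()` directly.
fn channel_count<C: Deref, L: Deref>(handler: &NetGraphMsgHandler<C, L>) -> usize
	where C::Target: ChainWatchInterface, L::Target: Logger
{
	// Take the read lock via the accessor added in this patch...
	let locked = handler.read_locked_graph();
	// ...then reach the underlying graph through LockedNetworkGraph::graph().
	let graph: &NetworkGraph = locked.graph();
	// get_channels() returns the BTreeMap of channels (assumed accessor,
	// consistent with its use in get_next_channel_announcements above).
	graph.get_channels().len()
}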