Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 41 additions & 3 deletions channeldb/graph.go
Original file line number Diff line number Diff line change
Expand Up @@ -2143,6 +2143,21 @@ func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
zombieIndex, scid,
)

// TODO(ziggie): Make sure that for the strict
// pruning case we compare the pubkeys and
// whether the right timestamp is not older than
// the `ChannelPruneExpiry`.
//
// NOTE: The timestamp data has no verification
// attached to it in the `ReplyChannelRange` msg
// so we are trusting this data at this point.
// However it is not critical because we are
// just removing the channel from the db when
// the timestamps are more recent. During the
// querying of the gossip msg verification
// happens as usual.
// However, we should also consider penalizing
// peers that don't provide us with honest data.
Comment on lines +2159 to +2160
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah ideally but it is tricky because:

  1. we query them by channel ID, so it could be that by the time they reply, they have a new channel update and will send that new one, so the timestamp will not match (but it should at least be greater than the old one).
  2. Where we currently send this query and where we receive the requested messages is completely disjoint at the moment. So will require some coupling between this and the gossiper.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We could do strict validation for every p2p message. In some cases it's useful, in some cases it's not. I'm not totally convinced that we should add logic for this unless not doing so presents a real problem.

isStillZombie := isZombieChan(
info.Node1UpdateTimestamp,
info.Node2UpdateTimestamp,
Expand Down Expand Up @@ -2211,6 +2226,29 @@ type ChannelUpdateInfo struct {
Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure we initialize the
// timestamps with the zero-seconds unix timestamp, which equals
// `January 1, 1970, 00:00:00 UTC`, in case the value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
	node2Timestamp time.Time) ChannelUpdateInfo {

	// Substitute the unix epoch for any zero-valued timestamp so that
	// downstream consumers always see a concrete instant rather than the
	// Go zero value.
	normalize := func(ts time.Time) time.Time {
		if ts.IsZero() {
			return time.Unix(0, 0)
		}

		return ts
	}

	return ChannelUpdateInfo{
		ShortChannelID:       scid,
		Node1UpdateTimestamp: normalize(node1Timestamp),
		Node2UpdateTimestamp: normalize(node2Timestamp),
	}
}

// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
// Height is the height of the block all of the channels below were
Expand Down Expand Up @@ -2284,9 +2322,9 @@ func (c *ChannelGraph) FilterChannelRange(startHeight,
rawCid := byteOrder.Uint64(k)
cid := lnwire.NewShortChanIDFromInt(rawCid)

chanInfo := ChannelUpdateInfo{
ShortChannelID: cid,
}
chanInfo := NewChannelUpdateInfo(
cid, time.Time{}, time.Time{},
)

if !withTimestamps {
channelsPerBlock[cid.BlockHeight] = append(
Expand Down
68 changes: 39 additions & 29 deletions channeldb/graph_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1980,9 +1980,9 @@ func TestFilterKnownChanIDs(t *testing.T) {
t.Fatalf("unable to create channel edge: %v", err)
}

chanIDs = append(chanIDs, ChannelUpdateInfo{
ShortChannelID: chanID,
})
chanIDs = append(chanIDs, NewChannelUpdateInfo(
chanID, time.Time{}, time.Time{},
))
}

const numZombies = 5
Expand Down Expand Up @@ -2024,20 +2024,28 @@ func TestFilterKnownChanIDs(t *testing.T) {
// should get the same set back.
{
queryIDs: []ChannelUpdateInfo{
{ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 99,
}},
{ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 100,
}},
{
ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 99,
},
},
{
ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 100,
},
},
},
resp: []ChannelUpdateInfo{
{ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 99,
}},
{ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 100,
}},
{
ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 99,
},
},
{
ShortChannelID: lnwire.ShortChannelID{
BlockHeight: 100,
},
},
},
},

Expand Down Expand Up @@ -2419,7 +2427,7 @@ func TestFilterChannelRange(t *testing.T) {
)
)

updateTimeSeed := int64(1)
updateTimeSeed := time.Now().Unix()
maybeAddPolicy := func(chanID uint64, node *LightningNode,
node2 bool) time.Time {

Expand All @@ -2428,7 +2436,7 @@ func TestFilterChannelRange(t *testing.T) {
chanFlags = lnwire.ChanUpdateDirection
}

var updateTime time.Time
var updateTime = time.Unix(0, 0)
if rand.Int31n(2) == 0 {
updateTime = time.Unix(updateTimeSeed, 0)
err = graph.UpdateEdgePolicy(&models.ChannelEdgePolicy{
Expand Down Expand Up @@ -2456,11 +2464,16 @@ func TestFilterChannelRange(t *testing.T) {
)
require.NoError(t, graph.AddChannelEdge(&channel2))

chanInfo1 := NewChannelUpdateInfo(
chanID1, time.Time{}, time.Time{},
)
chanInfo2 := NewChannelUpdateInfo(
chanID2, time.Time{}, time.Time{},
)
channelRanges = append(channelRanges, BlockChannelRange{
Height: chanHeight,
Channels: []ChannelUpdateInfo{
{ShortChannelID: chanID1},
{ShortChannelID: chanID2},
chanInfo1, chanInfo2,
},
})

Expand All @@ -2471,20 +2484,17 @@ func TestFilterChannelRange(t *testing.T) {
time4 = maybeAddPolicy(channel2.ChannelID, node2, true)
)

chanInfo1 = NewChannelUpdateInfo(
chanID1, time1, time2,
)
chanInfo2 = NewChannelUpdateInfo(
chanID2, time3, time4,
)
channelRangesWithTimestamps = append(
channelRangesWithTimestamps, BlockChannelRange{
Height: chanHeight,
Channels: []ChannelUpdateInfo{
{
ShortChannelID: chanID1,
Node1UpdateTimestamp: time1,
Node2UpdateTimestamp: time2,
},
{
ShortChannelID: chanID2,
Node1UpdateTimestamp: time3,
Node2UpdateTimestamp: time4,
},
chanInfo1, chanInfo2,
},
},
)
Expand Down
16 changes: 16 additions & 0 deletions discovery/gossiper.go
Original file line number Diff line number Diff line change
Expand Up @@ -2745,6 +2745,22 @@ func (d *AuthenticatedGossiper) handleChanUpdate(nMsg *networkMsg,
return nil, true
}

// Check that the ChanUpdate is not too far into the future. This could
// indicate a faulty implementation, therefore we log an error.
if time.Until(timestamp) > graph.DefaultChannelPruneExpiry {
log.Errorf("Skewed timestamp (%v) for edge policy of "+
"short_chan_id(%v), timestamp too far in the future: "+
"peer=%v, msg=%s, is_remote=%v", timestamp.Unix(),
shortChanID, nMsg.peer, nMsg.msg.MsgType(),
nMsg.isRemote,
)

nMsg.err <- fmt.Errorf("skewed timestamp of edge policy, "+
"timestamp too far in the future: %v", timestamp.Unix())

return nil, false
}

// Get the node pub key as far since we don't have it in the channel
// update announcement message. We'll need this to properly verify the
// message's signature.
Expand Down
2 changes: 1 addition & 1 deletion discovery/sync_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -561,7 +561,7 @@ func (m *SyncManager) removeGossipSyncer(peer route.Vertex) {
return
}

log.Debugf("Replaced active GossipSyncer(%x) with GossipSyncer(%x)",
log.Debugf("Replaced active GossipSyncer(%v) with GossipSyncer(%x)",
peer, newActiveSyncer.cfg.peerPub)
}

Expand Down
43 changes: 40 additions & 3 deletions discovery/syncer.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (

"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/graph"
"github.com/lightningnetwork/lnd/lnpeer"
"github.com/lightningnetwork/lnd/lnwire"
"golang.org/x/time/rate"
Expand Down Expand Up @@ -787,6 +788,16 @@ func isLegacyReplyChannelRange(query *lnwire.QueryChannelRange,
// reply to the initial range query to discover new channels that it didn't
// previously know of.
func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) error {
// isStale returns whether the timestamp is too far into the past.
isStale := func(timestamp time.Time) bool {
return time.Since(timestamp) > graph.DefaultChannelPruneExpiry
}

// isSkewed returns whether the timestamp is too far into the future.
isSkewed := func(timestamp time.Time) bool {
return time.Until(timestamp) > graph.DefaultChannelPruneExpiry
}

// If we're not communicating with a legacy node, we'll apply some
// further constraints on their reply to ensure it satisfies our query.
if !isLegacyReplyChannelRange(g.curQueryRangeMsg, msg) {
Expand Down Expand Up @@ -838,16 +849,42 @@ func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) erro
}

for i, scid := range msg.ShortChanIDs {
info := channeldb.ChannelUpdateInfo{
ShortChannelID: scid,
}
info := channeldb.NewChannelUpdateInfo(
scid, time.Time{}, time.Time{},
)

if len(msg.Timestamps) != 0 {
t1 := time.Unix(int64(msg.Timestamps[i].Timestamp1), 0)
info.Node1UpdateTimestamp = t1

t2 := time.Unix(int64(msg.Timestamps[i].Timestamp2), 0)
info.Node2UpdateTimestamp = t2

// Sort out all channels with outdated or skewed
// timestamps. Both timestamps need to be out of
// boundaries for us to skip the channel and not query
// it later on.
switch {
case isStale(info.Node1UpdateTimestamp) &&
isStale(info.Node2UpdateTimestamp):

continue

case isSkewed(info.Node1UpdateTimestamp) &&
isSkewed(info.Node2UpdateTimestamp):

continue

case isStale(info.Node1UpdateTimestamp) &&
isSkewed(info.Node2UpdateTimestamp):

continue

case isStale(info.Node2UpdateTimestamp) &&
isSkewed(info.Node1UpdateTimestamp):

continue
}
}

g.bufferedChanRangeReplies = append(
Expand Down
Loading