diff --git a/discovery/gossiper.go b/discovery/gossiper.go
index 357068617f..caf5c4125c 100644
--- a/discovery/gossiper.go
+++ b/discovery/gossiper.go
@@ -256,6 +256,10 @@ type Config struct {
 	// here?
 	AnnSigner lnwallet.MessageSigner
 
+	// ScidCloser is an instance of GossipScidCloser that helps the
+	// gossiper cut down on spam channel announcements.
+	ScidCloser GossipScidCloser
+
 	// NumActiveSyncers is the number of peers for which we should have
 	// active syncers with. After reaching NumActiveSyncers, any future
 	// gossip syncers will be passive.
@@ -434,6 +438,9 @@ type AuthenticatedGossiper struct {
 	// ChannelAnnouncement for the channel is received.
 	prematureChannelUpdates *lru.Cache[uint64, *cachedNetworkMsg]
 
+	// banman tracks our peers' ban status.
+	banman *banman
+
 	// networkMsgs is a channel that carries new network broadcasted
 	// message from outside the gossiper service to be processed by the
 	// networkHandler.
@@ -512,6 +519,7 @@ func New(cfg Config, selfKeyDesc *keychain.KeyDescriptor) *AuthenticatedGossiper
 			maxRejectedUpdates,
 		),
 		chanUpdateRateLimiter: make(map[uint64][2]*rate.Limiter),
+		banman:                newBanman(),
 	}
 
 	gossiper.syncMgr = newSyncManager(&SyncManagerCfg{
@@ -606,6 +614,8 @@ func (d *AuthenticatedGossiper) start() error {
 
 	d.syncMgr.Start()
 
+	d.banman.start()
+
 	// Start receiving blocks in its dedicated goroutine.
 	d.wg.Add(2)
 	go d.syncBlockHeight()
@@ -762,6 +772,8 @@ func (d *AuthenticatedGossiper) stop() {
 
 	d.syncMgr.Stop()
 
+	d.banman.stop()
+
 	close(d.quit)
 	d.wg.Wait()
 
@@ -2438,6 +2450,29 @@ func (d *AuthenticatedGossiper) handleChanAnnouncement(nMsg *networkMsg,
 		return nil, false
 	}
 
+	// If this peer is banned and the channel announcement is for a channel
+	// that we are not a part of, ignore the announcement.
+	if nMsg.isRemote && d.isBanned(nMsg.peer.PubKey()) {
+		chanPeer, err := d.isChannelPeer(nMsg.peer.IdentityKey())
+		if err != nil {
+			log.Errorf("failed to check if peer %x is a channel "+
+				"peer: %v", nMsg.peer.PubKey(), err)
+			nMsg.err <- err
+			return nil, false
+		}
+
+		if !chanPeer {
+			nMsg.peer.Disconnect(ErrPeerBanned)
+		}
+
+		err = fmt.Errorf("ignored channel announcement for "+
+			"channel=%v since peer is banned", scid)
+		log.Tracef(err.Error())
+
+		nMsg.err <- err
+		return nil, false
+	}
+
 	// If the advertised inclusionary block is beyond our knowledge of the
 	// chain tip, then we'll ignore it for now.
 	d.Lock()
@@ -2488,6 +2523,23 @@ func (d *AuthenticatedGossiper) handleChanAnnouncement(nMsg *networkMsg,
 		}
 	}
 
+	// Check if the channel is already closed in which case we can ignore
+	// it.
+	closed, err := d.cfg.ScidCloser.IsClosedScid(scid)
+	if err != nil {
+		log.Errorf("failed to check if scid %v is closed: %v", scid,
+			err)
+		nMsg.err <- err
+		return nil, false
+	}
+
+	if closed {
+		err = fmt.Errorf("ignoring closed channel %v", scid)
+		log.Error(err)
+		nMsg.err <- err
+		return nil, false
+	}
+
 	// With the proof validated (if necessary), we can now store it within
 	// the database for our path finding and syncing needs.
 	var featureBuf bytes.Buffer
@@ -2570,6 +2622,47 @@ func (d *AuthenticatedGossiper) handleChanAnnouncement(nMsg *networkMsg,
 
 		nMsg.err <- nil
 		return anns, true
+	} else if graph.IsError(err, graph.ErrNoFundingTransaction) ||
+		graph.IsError(err, graph.ErrInvalidFundingOutput) {
+
+		key := newRejectCacheKey(
+			scid.ToUint64(),
+			sourceToPub(nMsg.source),
+		)
+		_, _ = d.recentRejects.Put(key, &cachedReject{})
+
+		// Increment the peer's ban score. We check isRemote
+		// so we don't accidentally ban ourselves in case of a
+		// bug.
+		if nMsg.isRemote {
+			d.banman.incrementBanScore(nMsg.peer.PubKey())
+		}
+	} else if graph.IsError(err, graph.ErrChannelSpent) {
+		key := newRejectCacheKey(
+			scid.ToUint64(),
+			sourceToPub(nMsg.source),
+		)
+		_, _ = d.recentRejects.Put(key, &cachedReject{})
+
+		// Since this channel has already been closed, we'll
+		// add it to the graph's closed channel index such that
+		// we won't attempt to do expensive validation checks
+		// on it again.
+		dbErr := d.cfg.ScidCloser.MarkClosedScid(scid)
+		if dbErr != nil {
+			log.Errorf("failed to mark scid(%v) as "+
+				"closed: %v", scid, dbErr)
+
+			nMsg.err <- dbErr
+			return nil, false
+		}
+
+		// Increment the peer's ban score. We check isRemote
+		// so we don't accidentally ban ourselves in case of a
+		// bug.
+		if nMsg.isRemote {
+			d.banman.incrementBanScore(nMsg.peer.PubKey())
+		}
 	} else {
 		// Otherwise, this is just a regular rejected edge.
 		key := newRejectCacheKey(
@@ -2579,6 +2672,31 @@ func (d *AuthenticatedGossiper) handleChanAnnouncement(nMsg *networkMsg,
 		_, _ = d.recentRejects.Put(key, &cachedReject{})
 	}
 
+	if !nMsg.isRemote {
+		nMsg.err <- err
+		return nil, false
+	}
+
+	// If the peer is banned, check if it's a channel peer. We do
+	// the isBanned check first to not incur the cost of a db
+	// lookup.
+	if d.isBanned(nMsg.peer.PubKey()) {
+		chanPeer, dbErr := d.isChannelPeer(
+			nMsg.peer.IdentityKey(),
+		)
+		if dbErr != nil {
+			log.Errorf("failed to check if peer %x is a "+
+				"channel peer: %v", nMsg.peer.PubKey(),
+				dbErr)
+			nMsg.err <- dbErr
+			return nil, false
+		}
+
+		if !chanPeer {
+			nMsg.peer.Disconnect(ErrPeerBanned)
+		}
+	}
+
 	nMsg.err <- err
 	return nil, false
 }
@@ -3368,3 +3486,22 @@ func (d *AuthenticatedGossiper) handleAnnSig(nMsg *networkMsg,
 	nMsg.err <- nil
 	return announcements, true
 }
+
+// isBanned returns true if the peer identified by pubkey is banned for sending
+// invalid channel announcements.
+func (d *AuthenticatedGossiper) isBanned(pubkey [33]byte) bool {
+	return d.banman.isBanned(pubkey)
+}
+
+// isChannelPeer returns true if the peer denoted by pubkey is one we have a
+// channel with.
+func (d *AuthenticatedGossiper) isChannelPeer(pubkey *btcec.PublicKey) (bool, + error) { + + chanPeer, err := d.cfg.ScidCloser.IsChannelPeer(pubkey) + if err != nil { + return false, err + } + + return chanPeer, nil +} diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go index 7cfc7bce8f..aef018c133 100644 --- a/discovery/gossiper_test.go +++ b/discovery/gossiper_test.go @@ -25,6 +25,7 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/models" + "github.com/lightningnetwork/lnd/fn" "github.com/lightningnetwork/lnd/graph" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/kvdb" @@ -90,12 +91,13 @@ func makeTestDB(t *testing.T) (*channeldb.DB, error) { type mockGraphSource struct { bestHeight uint32 - mu sync.Mutex - nodes []channeldb.LightningNode - infos map[uint64]models.ChannelEdgeInfo - edges map[uint64][]models.ChannelEdgePolicy - zombies map[uint64][][33]byte - chansToReject map[uint64]struct{} + mu sync.Mutex + nodes []channeldb.LightningNode + infos map[uint64]models.ChannelEdgeInfo + edges map[uint64][]models.ChannelEdgePolicy + zombies map[uint64][][33]byte + chansToReject map[uint64]struct{} + addEdgeErrCode fn.Option[graph.ErrorCode] } func newMockRouter(height uint32) *mockGraphSource { @@ -126,6 +128,12 @@ func (r *mockGraphSource) AddEdge(info *models.ChannelEdgeInfo, r.mu.Lock() defer r.mu.Unlock() + if r.addEdgeErrCode.IsSome() { + return graph.NewErrf( + r.addEdgeErrCode.UnsafeFromSome(), "received error", + ) + } + if _, ok := r.infos[info.ChannelID]; ok { return errors.New("info already exist") } @@ -138,6 +146,14 @@ func (r *mockGraphSource) AddEdge(info *models.ChannelEdgeInfo, return nil } +func (r *mockGraphSource) resetAddEdgeErrCode() { + r.addEdgeErrCode = fn.None[graph.ErrorCode]() +} + +func (r *mockGraphSource) setAddEdgeErrCode(code graph.ErrorCode) { + r.addEdgeErrCode = fn.Some[graph.ErrorCode](code) +} + func (r *mockGraphSource) queueValidationFail(chanID uint64) { r.mu.Lock() defer r.mu.Unlock() @@ -707,7 +723,9 @@ type testCtx struct { broadcastedMessage chan msgWithSenders } -func createTestCtx(t *testing.T, startHeight uint32) (*testCtx, error) { +func createTestCtx(t *testing.T, startHeight uint32, chanPeer bool) (*testCtx, + error) { + // Next we'll initialize an instance of the channel router with mock // versions of the chain and channel notifier. 
As we don't need to test // any p2p functionality, the peer send and switch send, @@ -765,7 +783,7 @@ func createTestCtx(t *testing.T, startHeight uint32) (*testCtx, error) { peerChan chan<- lnpeer.Peer) { pk, _ := btcec.ParsePubKey(target[:]) - peerChan <- &mockPeer{pk, nil, nil} + peerChan <- &mockPeer{pk, nil, nil, false} }, NotifyWhenOffline: func(_ [33]byte) <-chan struct{} { c := make(chan struct{}) @@ -803,6 +821,7 @@ func createTestCtx(t *testing.T, startHeight uint32) (*testCtx, error) { FindBaseByAlias: findBaseByAlias, GetAlias: getAlias, FindChannel: mockFindChannel, + ScidCloser: newMockScidCloser(chanPeer), }, selfKeyDesc) if err := gossiper.Start(); err != nil { @@ -831,7 +850,7 @@ func TestProcessAnnouncement(t *testing.T) { t.Parallel() timestamp := testTimestamp - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") assertSenderExistence := func(sender *btcec.PublicKey, msg msgWithSenders) { @@ -843,7 +862,7 @@ func TestProcessAnnouncement(t *testing.T) { } } - nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} // First, we'll craft a valid remote channel announcement and send it to // the gossiper so that it can be processed. @@ -947,13 +966,13 @@ func TestPrematureAnnouncement(t *testing.T) { timestamp := testTimestamp - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") _, err = createNodeAnnouncement(remoteKeyPriv1, timestamp) require.NoError(t, err, "can't create node announcement") - nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} // Pretending that we receive the valid channel announcement from // remote side, but block height of this announcement is greater than @@ -978,7 +997,7 @@ func TestPrematureAnnouncement(t *testing.T) { func TestSignatureAnnouncementLocalFirst(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") // Set up a channel that we can use to inspect the messages sent @@ -990,7 +1009,9 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) { pk, _ := btcec.ParsePubKey(target[:]) select { - case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}: + case peerChan <- &mockPeer{ + pk, sentMsgs, ctx.gossiper.quit, false, + }: case <-ctx.gossiper.quit: } } @@ -1000,7 +1021,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) { remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:]) require.NoError(t, err, "unable to parse pubkey") - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} + remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit, false} // Recreate lightning network topology. Initialize router with channel // between two nodes. 
@@ -1150,7 +1171,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) { func TestOrphanSignatureAnnouncement(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") // Set up a channel that we can use to inspect the messages sent @@ -1162,7 +1183,9 @@ func TestOrphanSignatureAnnouncement(t *testing.T) { pk, _ := btcec.ParsePubKey(target[:]) select { - case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}: + case peerChan <- &mockPeer{ + pk, sentMsgs, ctx.gossiper.quit, false, + }: case <-ctx.gossiper.quit: } } @@ -1172,7 +1195,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) { remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:]) require.NoError(t, err, "unable to parse pubkey") - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} + remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit, false} // Pretending that we receive local channel announcement from funding // manager, thereby kick off the announcement exchange process, in @@ -1333,7 +1356,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) { func TestSignatureAnnouncementRetryAtStartup(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") batch, err := createLocalAnnouncements(0) @@ -1344,7 +1367,9 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) { // Set up a channel to intercept the messages sent to the remote peer. sentToPeer := make(chan lnwire.Message, 1) - remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit} + remotePeer := &mockPeer{ + remoteKey, sentToPeer, ctx.gossiper.quit, false, + } // Since the reliable send to the remote peer of the local channel proof // requires a notification when the peer comes online, we'll capture the @@ -1566,7 +1591,7 @@ out: func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") batch, err := createLocalAnnouncements(0) @@ -1578,7 +1603,9 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) { // Set up a channel we can use to inspect messages sent by the // gossiper to the remote peer. sentToPeer := make(chan lnwire.Message, 1) - remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit} + remotePeer := &mockPeer{ + remoteKey, sentToPeer, ctx.gossiper.quit, false, + } // Override NotifyWhenOnline to return the remote peer which we expect // meesages to be sent to. @@ -1772,7 +1799,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) { ca, err := createRemoteChannelAnnouncement(0) require.NoError(t, err, "can't create remote channel announcement") - nodePeer := &mockPeer{bitcoinKeyPub2, nil, nil} + nodePeer := &mockPeer{bitcoinKeyPub2, nil, nil, false} announcements.AddMsgs(networkMsg{ msg: ca, peer: nodePeer, @@ -2004,7 +2031,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) { timestamp = 123456 ) - ctx, err := createTestCtx(t, startingHeight) + ctx, err := createTestCtx(t, startingHeight, false) require.NoError(t, err, "can't create context") // We'll start off by processing a channel announcement without a proof @@ -2058,7 +2085,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) { // process it. 
remoteChanAnn, err := createRemoteChannelAnnouncement(startingHeight - 1) require.NoError(t, err, "unable to create remote channel announcement") - peer := &mockPeer{pubKey, nil, nil} + peer := &mockPeer{pubKey, nil, nil, false} select { case err := <-ctx.gossiper.ProcessRemoteAnnouncement(remoteChanAnn, peer): @@ -2103,7 +2130,7 @@ func TestRejectZombieEdge(t *testing.T) { // We'll start by creating our test context with a batch of // announcements. - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "unable to create test context") batch, err := createRemoteAnnouncements(0) @@ -2204,7 +2231,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) { // We'll start by creating our test context with a batch of // announcements. - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "unable to create test context") batch, err := createRemoteAnnouncements(0) @@ -2361,7 +2388,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) { func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") batch, err := createLocalAnnouncements(0) @@ -2373,7 +2400,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { // Set up a channel that we can use to inspect the messages sent // directly from the gossiper. sentMsgs := make(chan lnwire.Message, 10) - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} + remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit, false} // Override NotifyWhenOnline to return the remote peer which we expect // messages to be sent to. @@ -2558,10 +2585,10 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { func TestExtraDataChannelAnnouncementValidation(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") - remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} // We'll now create an announcement that contains an extra set of bytes // that we don't know of ourselves, but should still include in the @@ -2589,10 +2616,10 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) { t.Parallel() timestamp := testTimestamp - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") - remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} // In this scenario, we'll create two announcements, one regular // channel announcement, and another channel update announcement, that @@ -2640,10 +2667,10 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) { func TestExtraDataNodeAnnouncementValidation(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") - remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} timestamp := testTimestamp // We'll create a node announcement that includes a set of opaque data @@ -2708,7 +2735,7 @@ func assertProcessAnnouncement(t *testing.T, result chan error) { func TestRetransmit(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) 
require.NoError(t, err, "can't create context") batch, err := createLocalAnnouncements(0) @@ -2716,7 +2743,7 @@ func TestRetransmit(t *testing.T) { remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:]) require.NoError(t, err, "unable to parse pubkey") - remotePeer := &mockPeer{remoteKey, nil, nil} + remotePeer := &mockPeer{remoteKey, nil, nil, false} // Process a local channel announcement, channel update and node // announcement. No messages should be broadcasted yet, since no proof @@ -2814,7 +2841,7 @@ func TestRetransmit(t *testing.T) { func TestNodeAnnouncementNoChannels(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") batch, err := createRemoteAnnouncements(0) @@ -2822,7 +2849,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) { remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:]) require.NoError(t, err, "unable to parse pubkey") - remotePeer := &mockPeer{remoteKey, nil, nil} + remotePeer := &mockPeer{remoteKey, nil, nil, false} // Process the remote node announcement. select { @@ -2899,14 +2926,14 @@ func TestNodeAnnouncementNoChannels(t *testing.T) { func TestOptionalFieldsChannelUpdateValidation(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") processRemoteAnnouncement := ctx.gossiper.ProcessRemoteAnnouncement chanUpdateHeight := uint32(0) timestamp := uint32(123456) - nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} // In this scenario, we'll test whether the message flags field in a // channel update is properly handled. @@ -2998,7 +3025,7 @@ func TestSendChannelUpdateReliably(t *testing.T) { // We'll start by creating our test context and a batch of // announcements. - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "unable to create test context") batch, err := createLocalAnnouncements(0) @@ -3013,7 +3040,9 @@ func TestSendChannelUpdateReliably(t *testing.T) { // Set up a channel we can use to inspect messages sent by the // gossiper to the remote peer. sentToPeer := make(chan lnwire.Message, 1) - remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit} + remotePeer := &mockPeer{ + remoteKey, sentToPeer, ctx.gossiper.quit, false, + } // Since we first wait to be notified of the peer before attempting to // send the message, we'll overwrite NotifyWhenOnline and @@ -3350,7 +3379,7 @@ func TestPropagateChanPolicyUpdate(t *testing.T) { // First, we'll make out test context and add 3 random channels to the // graph. 
startingHeight := uint32(10) - ctx, err := createTestCtx(t, startingHeight) + ctx, err := createTestCtx(t, startingHeight, false) require.NoError(t, err, "unable to create test context") const numChannels = 3 @@ -3367,7 +3396,7 @@ func TestPropagateChanPolicyUpdate(t *testing.T) { remoteKey := remoteKeyPriv1.PubKey() sentMsgs := make(chan lnwire.Message, 10) - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} + remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit, false} // The forced code path for sending the private ChannelUpdate to the // remote peer will be hit, forcing it to request a notification that @@ -3529,7 +3558,7 @@ func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) { // We'll start by creating our test context and a set of test channel // announcements. - ctx, err := createTestCtx(t, 0) + ctx, err := createTestCtx(t, 0, false) require.NoError(t, err, "unable to create test context") chanAnn1 := createAnnouncementWithoutProof( @@ -3590,7 +3619,7 @@ func assertMessage(t *testing.T, expected, got lnwire.Message) { func TestSplitAnnouncementsCorrectSubBatches(t *testing.T) { // Create our test harness. const blockHeight = 100 - ctx, err := createTestCtx(t, blockHeight) + ctx, err := createTestCtx(t, blockHeight, false) require.NoError(t, err, "can't create context") const subBatchSize = 10 @@ -3702,7 +3731,7 @@ func (m *SyncManager) markGraphSyncing() { func TestBroadcastAnnsAfterGraphSynced(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, 10) + ctx, err := createTestCtx(t, 10, false) require.NoError(t, err, "can't create context") // We'll mark the graph as not synced. This should prevent us from @@ -3715,7 +3744,7 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) { t.Helper() - nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} var errChan chan error if isRemote { errChan = ctx.gossiper.ProcessRemoteAnnouncement( @@ -3775,7 +3804,7 @@ func TestRateLimitChannelUpdates(t *testing.T) { // Create our test harness. 
const blockHeight = 100 - ctx, err := createTestCtx(t, blockHeight) + ctx, err := createTestCtx(t, blockHeight, false) require.NoError(t, err, "can't create context") ctx.gossiper.cfg.RebroadcastInterval = time.Hour ctx.gossiper.cfg.MaxChannelUpdateBurst = 5 @@ -3791,7 +3820,7 @@ func TestRateLimitChannelUpdates(t *testing.T) { batch, err := createRemoteAnnouncements(blockHeight) require.NoError(t, err) - nodePeer1 := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil} + nodePeer1 := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} select { case err := <-ctx.gossiper.ProcessRemoteAnnouncement( batch.chanAnn, nodePeer1, @@ -3810,7 +3839,7 @@ func TestRateLimitChannelUpdates(t *testing.T) { t.Fatal("remote announcement not processed") } - nodePeer2 := &mockPeer{remoteKeyPriv2.PubKey(), nil, nil} + nodePeer2 := &mockPeer{remoteKeyPriv2.PubKey(), nil, nil, false} select { case err := <-ctx.gossiper.ProcessRemoteAnnouncement( batch.chanUpdAnn2, nodePeer2, @@ -3921,7 +3950,7 @@ func TestRateLimitChannelUpdates(t *testing.T) { func TestIgnoreOwnAnnouncement(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") batch, err := createLocalAnnouncements(0) @@ -3929,7 +3958,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) { remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:]) require.NoError(t, err, "unable to parse pubkey") - remotePeer := &mockPeer{remoteKey, nil, nil} + remotePeer := &mockPeer{remoteKey, nil, nil, false} // Try to let the remote peer tell us about the channel we are part of. select { @@ -4065,7 +4094,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) { func TestRejectCacheChannelAnn(t *testing.T) { t.Parallel() - ctx, err := createTestCtx(t, proofMatureDelta) + ctx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") // First, we create a channel announcement to send over to our test @@ -4075,7 +4104,7 @@ func TestRejectCacheChannelAnn(t *testing.T) { remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:]) require.NoError(t, err, "unable to parse pubkey") - remotePeer := &mockPeer{remoteKey, nil, nil} + remotePeer := &mockPeer{remoteKey, nil, nil, false} // Before sending over the announcement, we'll modify it such that we // know it will always fail. @@ -4139,3 +4168,135 @@ func TestFutureMsgCacheEviction(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 2, item.height, "should be the second item") } + +// TestChanAnnBanningNonChanPeer asserts that non-channel peers who send bogus +// channel announcements are banned properly. +func TestChanAnnBanningNonChanPeer(t *testing.T) { + t.Parallel() + + ctx, err := createTestCtx(t, 1000, false) + require.NoError(t, err, "can't create context") + + nodePeer1 := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, false} + nodePeer2 := &mockPeer{remoteKeyPriv2.PubKey(), nil, nil, false} + + ctx.router.setAddEdgeErrCode(graph.ErrInvalidFundingOutput) + + // Loop 100 times to get nodePeer banned. + for i := 0; i < 100; i++ { + // Craft a valid channel announcement for a channel we don't + // have. We will ensure that it fails validation by modifying + // the router. 
+		ca, err := createRemoteChannelAnnouncement(uint32(i))
+		require.NoError(t, err, "can't create channel announcement")
+
+		select {
+		case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+			ca, nodePeer1,
+		):
+			require.NotNil(t, err)
+
+		case <-time.After(2 * time.Second):
+			t.Fatalf("remote announcement not processed")
+		}
+	}
+
+	// The peer should be banned now.
+	require.True(t, ctx.gossiper.isBanned(nodePeer1.PubKey()))
+
+	// Assert that nodePeer has been disconnected.
+	require.True(t, nodePeer1.disconnected)
+
+	// Reset the error code to show that a new announcement will be
+	// ignored since nodePeer1 is banned.
+	ctx.router.resetAddEdgeErrCode()
+
+	ca, err := createRemoteChannelAnnouncement(101)
+	require.NoError(t, err, "can't create channel announcement")
+
+	select {
+	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer1):
+		require.NotNil(t, err)
+
+	case <-time.After(2 * time.Second):
+		t.Fatalf("remote announcement not processed")
+	}
+
+	// Set the error to ErrChannelSpent so that we can test that the
+	// gossiper ignores closed channels.
+	ctx.router.setAddEdgeErrCode(graph.ErrChannelSpent)
+
+	select {
+	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer2):
+		require.NotNil(t, err)
+
+	case <-time.After(2 * time.Second):
+		t.Fatalf("remote announcement not processed")
+	}
+
+	// Check that the announcement's scid is marked as closed.
+	isClosed, err := ctx.gossiper.cfg.ScidCloser.IsClosedScid(
+		ca.ShortChannelID,
+	)
+	require.Nil(t, err)
+	require.True(t, isClosed)
+
+	// Remove the scid from the reject cache.
+	key := newRejectCacheKey(
+		ca.ShortChannelID.ToUint64(),
+		sourceToPub(nodePeer2.IdentityKey()),
+	)
+
+	ctx.gossiper.recentRejects.Delete(key)
+
+	// Reset the AddEdge error and pass the same announcement again. An
+	// error should be returned even though AddEdge won't fail.
+	ctx.router.resetAddEdgeErrCode()
+
+	select {
+	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer2):
+		require.NotNil(t, err)
+
+	case <-time.After(2 * time.Second):
+		t.Fatalf("remote announcement not processed")
+	}
+}
+
+// TestChanAnnBanningChanPeer asserts that channel peers that are banned don't
+// get disconnected.
+func TestChanAnnBanningChanPeer(t *testing.T) {
+	t.Parallel()
+
+	ctx, err := createTestCtx(t, 1000, true)
+	require.NoError(t, err, "can't create context")
+
+	nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil, true}
+
+	ctx.router.setAddEdgeErrCode(graph.ErrInvalidFundingOutput)
+
+	// Loop 100 times to get nodePeer banned.
+	for i := 0; i < 100; i++ {
+		// Craft a valid channel announcement for a channel we don't
+		// have. We will ensure that it fails validation by modifying
+		// the router.
+		ca, err := createRemoteChannelAnnouncement(uint32(i))
+		require.NoError(t, err, "can't create channel announcement")
+
+		select {
+		case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+			ca, nodePeer,
+		):
+			require.NotNil(t, err)
+
+		case <-time.After(2 * time.Second):
+			t.Fatalf("remote announcement not processed")
+		}
+	}
+
+	// The peer should be banned now.
+	require.True(t, ctx.gossiper.isBanned(nodePeer.PubKey()))
+
+	// Assert that the peer wasn't disconnected.
+	require.False(t, nodePeer.disconnected)
+
+}
diff --git a/discovery/mock_test.go b/discovery/mock_test.go
index 94d336db0a..325d364dd9 100644
--- a/discovery/mock_test.go
+++ b/discovery/mock_test.go
@@ -160,3 +160,38 @@ func (s *mockMessageStore) MessagesForPeer(pubKey [33]byte) ([]lnwire.Message, e
 
 	return msgs, nil
 }
+
+type mockScidCloser struct {
+	m           map[lnwire.ShortChannelID]struct{}
+	channelPeer bool
+
+	sync.Mutex
+}
+
+func newMockScidCloser(channelPeer bool) *mockScidCloser {
+	return &mockScidCloser{
+		m:           make(map[lnwire.ShortChannelID]struct{}),
+		channelPeer: channelPeer,
+	}
+}
+
+func (m *mockScidCloser) MarkClosedScid(scid lnwire.ShortChannelID) error {
+	m.Lock()
+	m.m[scid] = struct{}{}
+	m.Unlock()
+	return nil
+}
+
+func (m *mockScidCloser) IsClosedScid(scid lnwire.ShortChannelID) (bool,
+	error) {
+
+	m.Lock()
+	defer m.Unlock()
+
+	_, ok := m.m[scid]
+	return ok, nil
+}
+
+func (m *mockScidCloser) IsChannelPeer(pubkey *btcec.PublicKey) (bool, error) {
+	return m.channelPeer, nil
+}
diff --git a/discovery/reliable_sender_test.go b/discovery/reliable_sender_test.go
index d1e69b11fb..0c129d1a6d 100644
--- a/discovery/reliable_sender_test.go
+++ b/discovery/reliable_sender_test.go
@@ -74,7 +74,7 @@ func TestReliableSenderFlow(t *testing.T) {
 	// Create a mock peer to send the messages to.
 	pubKey := randPubKey(t)
 	msgsSent := make(chan lnwire.Message)
-	peer := &mockPeer{pubKey, msgsSent, reliableSender.quit}
+	peer := &mockPeer{pubKey, msgsSent, reliableSender.quit, false}
 
 	// Override NotifyWhenOnline and NotifyWhenOffline to provide the
 	// notification channels so that we can control when notifications get
@@ -193,7 +193,7 @@ func TestReliableSenderStaleMessages(t *testing.T) {
 	// Create a mock peer to send the messages to.
 	pubKey := randPubKey(t)
 	msgsSent := make(chan lnwire.Message)
-	peer := &mockPeer{pubKey, msgsSent, reliableSender.quit}
+	peer := &mockPeer{pubKey, msgsSent, reliableSender.quit, false}
 
 	// Override NotifyWhenOnline to provide the notification channel so that
 	// we can control when notifications get dispatched.
diff --git a/server.go b/server.go
index 555ee26274..e7cb1e63e4 100644
--- a/server.go
+++ b/server.go
@@ -1021,6 +1021,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		return nil, err
 	}
 
+	scidCloserMan := discovery.NewScidCloserMan(s.graphDB, s.chanStateDB)
+
 	s.authGossiper = discovery.New(discovery.Config{
 		Graph:                 s.graphBuilder,
 		Notifier:              s.cc.ChainNotifier,
@@ -1058,6 +1060,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		GetAlias:              s.aliasMgr.GetPeerAlias,
 		FindChannel:           s.findChannel,
 		IsStillZombieChannel:  s.graphBuilder.IsZombieChannel,
+		ScidCloser:            scidCloserMan,
 	}, nodeKeyDesc)
 
 	//nolint:lll
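
Note: the GossipScidCloser interface itself and the discovery.NewScidCloserMan constructor used in server.go are not part of this diff. The following is only a sketch of the interface shape implied by the call sites above (IsClosedScid and MarkClosedScid in handleChanAnnouncement and the mock, IsChannelPeer in the ban check); the real declaration in the discovery package may differ.

// Sketch only: inferred from the call sites in this diff, not the actual
// declaration shipped with lnd.
package discovery

import (
	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/lightningnetwork/lnd/lnwire"
)

// GossipScidCloser gives the gossiper a cheap way to reject announcements
// for channels it already knows are closed, and to check whether a banned
// peer still has a channel with us before deciding to disconnect it.
type GossipScidCloser interface {
	// IsClosedScid returns true if the SCID was previously marked closed.
	IsClosedScid(scid lnwire.ShortChannelID) (bool, error)

	// MarkClosedScid records the SCID as closed so that future
	// announcements for it can be skipped without expensive validation.
	MarkClosedScid(scid lnwire.ShortChannelID) error

	// IsChannelPeer returns true if we have a channel with this peer.
	IsChannelPeer(pubkey *btcec.PublicKey) (bool, error)
}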