fix memory leak cherry pick #9893


Merged

28 commits; the diff below shows the changes from one commit.
54f6b16 macaroons: remove context.TODO() in tests (ellemouton, Apr 7, 2025)
76d2bec kvdb/etcd: remove context.TODO() from test helpers (ellemouton, Apr 7, 2025)
4fca1f3 lnd: pass context to `newServer` and `server.Start` (ellemouton, Apr 7, 2025)
e2c184f discovery: thread context through to gossiper (ellemouton, Apr 7, 2025)
5430157 discovery: pass context through to reliable sender (ellemouton, Apr 7, 2025)
a1a7d77 discovery: thread contexts to syncer (ellemouton, Apr 7, 2025)
1a5821a discovery: thread contexts through sync manager (ellemouton, Apr 7, 2025)
1a8e758 discovery: pass context to ProcessRemoteAnnouncement (ellemouton, Apr 7, 2025)
6b95b79 discovery: pass context through to bootstrapper SampleNodeAddrs (ellemouton, Apr 8, 2025)
f2fb482 discovery: remove unnecessary context.Background() calls (ellemouton, Apr 9, 2025)
0ab61e0 discovery: listen on ctx in any select (ellemouton, Apr 9, 2025)
1b26b59 graph/db: test clean-up (ellemouton, Apr 5, 2025)
29ce762 graph/db: remove kvdb param from test helper (ellemouton, Apr 5, 2025)
861606d graph/db: remove kvdb.Backend from test helpers (ellemouton, Apr 5, 2025)
ca52834 graph/db: use only exported KVStore ForEachNode method in tests (ellemouton, Apr 9, 2025)
cd4a590 autopilot: start threading contexts through (ellemouton, Apr 9, 2025)
46f09ba autopilot: continue threading context (ellemouton, Apr 9, 2025)
1507a7f autopilot: update AttachmentHeuristics with context (ellemouton, Apr 9, 2025)
9c3c2b9 graph/db: remove unused Wipe method (ellemouton, Mar 29, 2025)
c15740b graph/db: introduce ForEachSourceNodeChannel (ellemouton, Mar 26, 2025)
9aab68a graph/db: unexport various methods that expose `kvdb.RTx` (ellemouton, Mar 27, 2025)
de11997 graph/db: use only exported KVStore methods in tests (ellemouton, Apr 5, 2025)
2f99706 multi: remove kvdb.RTx from ForEachNodeChannel (ellemouton, Mar 26, 2025)
6202597 discovery: revert passing ctx through to Start methods (ellemouton, Apr 11, 2025)
fe3a862 autopilot: revert passing ctx to Start methods (ellemouton, Apr 11, 2025)
af47604 fn: add comment to context create fn (ziggie1984, Jun 3, 2025)
45ebb9b discovery: add comments to the ctx creation (ziggie1984, Jun 3, 2025)
7477a91 docs: add release-notes 19.1 (ziggie1984, Jun 4, 2025)
discovery: remove unnecessary context.Background() calls
ellemouton authored and ziggie1984 committed Jun 4, 2025
commit f2fb4827c741fd811c70eb76ce81bed14b8e0884
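The pattern removed by this commit derived a fresh context at each send site via the fn package's context guard, `ctx, _ := g.cg.Create(context.Background())`, discarding the cancel function; the commit threads the caller's context through instead. A minimal, self-contained sketch of the before and after, with hypothetical names standing in for lnd's types:

```go
package sketch

import "context"

// sendFunc stands in for the syncer's sendToPeer hook; the name and
// signature here are hypothetical, not lnd's actual API.
type sendFunc func(ctx context.Context, msg string) error

// sendOld mirrors the old pattern in the diff below: every send derives
// a new context from context.Background() and discards the cancel
// function, so each derived context stays live until shutdown. Done
// once per gossip message, these can accumulate over time.
func sendOld(send sendFunc, msg string) error {
	ctx, _ := context.WithCancel(context.Background()) // cancel discarded
	return send(ctx, msg)
}

// sendNew mirrors the new pattern: the caller's context is threaded
// through, so cancellation flows from one place and nothing extra is
// created per send.
func sendNew(ctx context.Context, send sendFunc, msg string) error {
	return send(ctx, msg)
}
```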
33 changes: 13 additions & 20 deletions discovery/syncer.go
@@ -536,7 +536,7 @@ func (g *GossipSyncer) channelGraphSyncer(ctx context.Context) {
// First, we'll attempt to continue our channel
// synchronization by continuing to send off another
// query chunk.
-done := g.synchronizeChanIDs()
+done := g.synchronizeChanIDs(ctx)

// If this wasn't our last query, then we'll need to
// transition to our waiting state.
@@ -596,7 +596,7 @@ func (g *GossipSyncer) channelGraphSyncer(ctx context.Context) {
syncType.IsActiveSync() {

err := g.sendGossipTimestampRange(
-time.Now(), math.MaxUint32,
+ctx, time.Now(), math.MaxUint32,
)
if err != nil {
log.Errorf("Unable to send update "+
@@ -616,7 +616,7 @@ func (g *GossipSyncer) channelGraphSyncer(ctx context.Context) {
case syncerIdle:
select {
case req := <-g.syncTransitionReqs:
-req.errChan <- g.handleSyncTransition(req)
+req.errChan <- g.handleSyncTransition(ctx, req)

case req := <-g.historicalSyncReqs:
g.handleHistoricalSync(req)
@@ -662,8 +662,8 @@ func (g *GossipSyncer) replyHandler(ctx context.Context) {

// sendGossipTimestampRange constructs and sets a GossipTimestampRange for the
// syncer and sends it to the remote peer.
-func (g *GossipSyncer) sendGossipTimestampRange(firstTimestamp time.Time,
-timestampRange uint32) error {
+func (g *GossipSyncer) sendGossipTimestampRange(ctx context.Context,
+firstTimestamp time.Time, timestampRange uint32) error {

endTimestamp := firstTimestamp.Add(
time.Duration(timestampRange) * time.Second,
@@ -678,7 +678,6 @@ func (g *GossipSyncer) sendGossipTimestampRange(firstTimestamp time.Time,
TimestampRange: timestampRange,
}

-ctx, _ := g.cg.Create(context.Background())
if err := g.cfg.sendToPeer(ctx, localUpdateHorizon); err != nil {
return err
}
@@ -698,7 +697,7 @@ func (g *GossipSyncer) sendGossipTimestampRange(firstTimestamp time.Time,
// been queried for with a response received. We'll chunk our requests as
// required to ensure they fit into a single message. We may re-enter this
// state in the case that chunking is required.
-func (g *GossipSyncer) synchronizeChanIDs() bool {
+func (g *GossipSyncer) synchronizeChanIDs(ctx context.Context) bool {
// If we're in this state yet there are no more new channels to query
// for, then we'll transition to our final synced state and return true
// to signal that we're fully synchronized.
@@ -735,7 +734,6 @@ func (g *GossipSyncer) synchronizeChanIDs() bool {

// With our chunk obtained, we'll send over our next query, then return
// false indicating that we're not yet fully synced.
-ctx, _ := g.cg.Create(context.Background())
err := g.cfg.sendToPeer(ctx, &lnwire.QueryShortChanIDs{
ChainHash: g.cfg.chainHash,
EncodingType: lnwire.EncodingSortedPlain,
@@ -1037,7 +1035,7 @@ func (g *GossipSyncer) replyPeerQueries(ctx context.Context,
// meet the channel range, then chunk our responses to the remote node. We also
// ensure that our final fragment carries the "complete" bit to indicate the
// end of our streaming response.
-func (g *GossipSyncer) replyChanRangeQuery(_ context.Context,
+func (g *GossipSyncer) replyChanRangeQuery(ctx context.Context,
query *lnwire.QueryChannelRange) error {

// Before responding, we'll check to ensure that the remote peer is
@@ -1049,8 +1047,6 @@ func (g *GossipSyncer) replyChanRangeQuery(_ context.Context,
"chain=%v, we're on chain=%v", query.ChainHash,
g.cfg.chainHash)

-ctx, _ := g.cg.Create(context.Background())
-
return g.cfg.sendToPeerSync(ctx, &lnwire.ReplyChannelRange{
ChainHash: query.ChainHash,
FirstBlockHeight: query.FirstBlockHeight,
@@ -1124,8 +1120,6 @@ func (g *GossipSyncer) replyChanRangeQuery(_ context.Context,
)
}

-ctx, _ := g.cg.Create(context.Background())
-
return g.cfg.sendToPeerSync(ctx, &lnwire.ReplyChannelRange{
ChainHash: query.ChainHash,
NumBlocks: numBlocks,
@@ -1263,7 +1257,6 @@ func (g *GossipSyncer) replyShortChanIDs(ctx context.Context,
// each one individually and synchronously to throttle the sends and
// perform buffering of responses in the syncer as opposed to the peer.
for _, msg := range replyMsgs {
-ctx, _ := g.cg.Create(context.Background())
err := g.cfg.sendToPeerSync(ctx, msg)
if err != nil {
return err
@@ -1281,7 +1274,7 @@ func (g *GossipSyncer) replyShortChanIDs(ctx context.Context,
// ApplyGossipFilter applies a gossiper filter sent by the remote node to the
// state machine. Once applied, we'll ensure that we don't forward any messages
// to the peer that aren't within the time range of the filter.
-func (g *GossipSyncer) ApplyGossipFilter(_ context.Context,
+func (g *GossipSyncer) ApplyGossipFilter(ctx context.Context,
filter *lnwire.GossipTimestampRange) error {

g.Lock()
@@ -1340,7 +1333,6 @@ func (g *GossipSyncer) ApplyGossipFilter(_ context.Context,
defer returnSema()

for _, msg := range newUpdatestoSend {
-ctx, _ := g.cg.Create(context.Background())
err := g.cfg.sendToPeerSync(ctx, msg)
switch {
case err == ErrGossipSyncerExiting:
@@ -1362,7 +1354,7 @@ func (g *GossipSyncer) ApplyGossipFilter(_ context.Context,
// FilterGossipMsgs takes a set of gossip messages, and only sends a message to
// a peer iff it is within the bounds of their set gossip filter. If the peer
// doesn't have a gossip filter set, then no messages will be forwarded.
-func (g *GossipSyncer) FilterGossipMsgs(_ context.Context,
+func (g *GossipSyncer) FilterGossipMsgs(ctx context.Context,
msgs ...msgWithSenders) {

// If the peer doesn't have an update horizon set, then we won't send
@@ -1485,7 +1477,6 @@ func (g *GossipSyncer) FilterGossipMsgs(_ context.Context,
return
}

-ctx, _ := g.cg.Create(context.Background())
if err = g.cfg.sendToPeer(ctx, msgsToSend...); err != nil {
log.Errorf("unable to send gossip msgs: %v", err)
}
@@ -1586,7 +1577,9 @@ func (g *GossipSyncer) ProcessSyncTransition(newSyncType SyncerType) error {
//
// NOTE: The gossip syncer might have another sync state as a result of this
// transition.
-func (g *GossipSyncer) handleSyncTransition(req *syncTransitionReq) error {
+func (g *GossipSyncer) handleSyncTransition(ctx context.Context,
+req *syncTransitionReq) error {
+
// Return early from any NOP sync transitions.
syncType := g.SyncType()
if syncType == req.newSyncType {
@@ -1621,7 +1614,7 @@ func (g *GossipSyncer) handleSyncTransition(req *syncTransitionReq) error {
req.newSyncType)
}

-err := g.sendGossipTimestampRange(firstTimestamp, timestampRange)
+err := g.sendGossipTimestampRange(ctx, firstTimestamp, timestampRange)
if err != nil {
return fmt.Errorf("unable to send local update horizon: %w",
err)
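Worth reading alongside this commit is 0ab61e0 ("discovery: listen on ctx in any select"), which makes the syncer's loops watch the threaded-through context so that cancelling the caller unblocks pending work (in lnd the selects also watch the syncer's quit signal). A minimal sketch of that shape, continuing the hypothetical example above:

```go
// sendAll forwards messages one at a time, but gives up as soon as the
// threaded-through context is cancelled instead of blocking forever.
// It reuses the hypothetical sendFunc type from the earlier sketch.
func sendAll(ctx context.Context, msgs []string, send sendFunc) error {
	for _, msg := range msgs {
		select {
		case <-ctx.Done():
			// The parent (e.g. the syncer's run loop) stopped.
			return ctx.Err()
		default:
		}

		if err := send(ctx, msg); err != nil {
			return err
		}
	}

	return nil
}
```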
4 changes: 2 additions & 2 deletions discovery/syncer_test.go
@@ -1495,7 +1495,7 @@ func TestGossipSyncerSynchronizeChanIDs(t *testing.T) {

for i := 0; i < chunkSize*2; i += 2 {
// With our setup complete, we'll request a sync of chan IDs.
-done := syncer.synchronizeChanIDs()
+done := syncer.synchronizeChanIDs(context.Background())

// At this point, we shouldn't yet be done as only 2 items
// should have been queried for.
@@ -1542,7 +1542,7 @@ func TestGossipSyncerSynchronizeChanIDs(t *testing.T) {
}

// If we issue another query, the syncer should tell us that it's done.
-done := syncer.synchronizeChanIDs()
+done := syncer.synchronizeChanIDs(context.Background())
if done {
t.Fatalf("syncer should be finished!")
}
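The test-side change is mechanical: the call sites just pass context.Background(). Once the parameter exists, tests can also exercise cancellation directly; a sketch continuing the hypothetical sendAll example above (on Go 1.24+, t.Context() is an alternative to context.Background() in tests):

```go
package sketch

import (
	"context"
	"testing"
)

// TestSendAllRespectsCancel drives the sendAll sketch above with a
// cancelled context and expects it to bail out before sending.
func TestSendAllRespectsCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel up front: sendAll should return without sending

	err := sendAll(ctx, []string{"msg"}, func(context.Context, string) error {
		t.Fatal("send should not run after cancellation")
		return nil
	})
	if err == nil {
		t.Fatal("expected a context error, got nil")
	}
}
```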