diff --git a/autopilot/manager.go b/autopilot/manager.go index dba4cc6cc5..0463f98d99 100644 --- a/autopilot/manager.go +++ b/autopilot/manager.go @@ -6,7 +6,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd/graph" + graphdb "github.com/lightningnetwork/lnd/graph/db" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" ) @@ -36,7 +36,7 @@ type ManagerCfg struct { // SubscribeTopology is used to get a subscription for topology changes // on the network. - SubscribeTopology func() (*graph.TopologyClient, error) + SubscribeTopology func() (*graphdb.TopologyClient, error) } // Manager is struct that manages an autopilot agent, making it possible to diff --git a/autopilot/prefattach_test.go b/autopilot/prefattach_test.go index 784d1a0f8c..f20c3a480b 100644 --- a/autopilot/prefattach_test.go +++ b/autopilot/prefattach_test.go @@ -46,9 +46,14 @@ func newDiskChanGraph(t *testing.T) (testGraph, error) { }) require.NoError(t, err) - graphDB, err := graphdb.NewChannelGraph(backend) + graphDB, err := graphdb.NewChannelGraph(&graphdb.Config{KVDB: backend}) require.NoError(t, err) + require.NoError(t, graphDB.Start()) + t.Cleanup(func() { + require.NoError(t, graphDB.Stop()) + }) + return &testDBGraph{ db: graphDB, databaseChannelGraph: databaseChannelGraph{ diff --git a/config_builder.go b/config_builder.go index 43c9e4a68f..f0b65f7130 100644 --- a/config_builder.go +++ b/config_builder.go @@ -1026,26 +1026,30 @@ func (d *DefaultDatabaseBuilder) BuildDatabase( "instances") } - graphDBOptions := []graphdb.OptionModifier{ + graphDBOptions := []graphdb.KVStoreOptionModifier{ graphdb.WithRejectCacheSize(cfg.Caches.RejectCacheSize), graphdb.WithChannelCacheSize(cfg.Caches.ChannelCacheSize), graphdb.WithBatchCommitInterval(cfg.DB.BatchCommitInterval), + } + + chanGraphOpts := []graphdb.ChanGraphOption{ graphdb.WithUseGraphCache(!cfg.DB.NoGraphCache), } // We want to pre-allocate the channel graph cache according to what we // expect for mainnet to speed up memory allocation. if cfg.ActiveNetParams.Name == chaincfg.MainNetParams.Name { - graphDBOptions = append( - graphDBOptions, graphdb.WithPreAllocCacheNumNodes( + chanGraphOpts = append( + chanGraphOpts, graphdb.WithPreAllocCacheNumNodes( graphdb.DefaultPreAllocCacheNumNodes, ), ) } - dbs.GraphDB, err = graphdb.NewChannelGraph( - databaseBackends.GraphDB, graphDBOptions..., - ) + dbs.GraphDB, err = graphdb.NewChannelGraph(&graphdb.Config{ + KVDB: databaseBackends.GraphDB, + KVStoreOpts: graphDBOptions, + }, chanGraphOpts...) if err != nil { cleanUp() diff --git a/docs/release-notes/release-notes-0.20.0.md b/docs/release-notes/release-notes-0.20.0.md index 1ee2e05fab..1e528bc8da 100644 --- a/docs/release-notes/release-notes-0.20.0.md +++ b/docs/release-notes/release-notes-0.20.0.md @@ -69,6 +69,15 @@ ## Code Health +* Graph abstraction and refactoring work: + - Move the [graph cache out of the graph + CRUD](https://github.com/lightningnetwork/lnd/pull/9544) layer. + - Move [topology + subscription](https://github.com/lightningnetwork/lnd/pull/9577) and + notification handling from the graph.Builder to the ChannelGraph. 
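The `config_builder.go` and `prefattach_test.go` hunks above show the shape of the new API: the former `graphdb.OptionModifier` list is split into KVStore-level options carried inside `graphdb.Config` and cache-level `ChanGraphOption`s, and the returned `ChannelGraph` now has an explicit `Start`/`Stop` lifecycle. A minimal sketch of the new call pattern, assuming an already-open `kvdb.Backend` and purely illustrative option values (the `openChannelGraph` helper is hypothetical, not part of this change):

```go
package example

import (
	"time"

	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/kvdb"
)

// openChannelGraph builds a ChannelGraph against an existing kvdb backend
// using the post-refactor constructor: KVStore options travel inside
// graphdb.Config, while cache behaviour is configured via ChanGraphOptions.
func openChannelGraph(backend kvdb.Backend) (*graphdb.ChannelGraph, error) {
	graphDB, err := graphdb.NewChannelGraph(
		&graphdb.Config{
			KVDB: backend,
			KVStoreOpts: []graphdb.KVStoreOptionModifier{
				// Illustrative value only.
				graphdb.WithBatchCommitInterval(500 * time.Millisecond),
			},
		},
		graphdb.WithUseGraphCache(true),
	)
	if err != nil {
		return nil, err
	}

	// The ChannelGraph is now a managed subsystem: Start populates the
	// optional in-memory graph cache and launches the topology
	// notification handler; callers are expected to call Stop on
	// shutdown.
	if err := graphDB.Start(); err != nil {
		return nil, err
	}

	return graphDB, nil
}
```

This split mirrors the stated goal of the refactor: the KVStore keeps only CRUD responsibilities, while the graph cache and topology notifications live in the `ChannelGraph` layer above it.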
+ ## Tooling and Documentation # Contributors (Alphabetical Order) + +* Elle Mouton diff --git a/graph/builder.go b/graph/builder.go index 3e11155535..f92b523b00 100644 --- a/graph/builder.go +++ b/graph/builder.go @@ -109,8 +109,7 @@ type Builder struct { started atomic.Bool stopped atomic.Bool - ntfnClientCounter atomic.Uint64 - bestHeight atomic.Uint32 + bestHeight atomic.Uint32 cfg *Config @@ -123,22 +122,6 @@ type Builder struct { // of our currently known best chain are sent over. staleBlocks <-chan *chainview.FilteredBlock - // topologyUpdates is a channel that carries new topology updates - // messages from outside the Builder to be processed by the - // networkHandler. - topologyUpdates chan any - - // topologyClients maps a client's unique notification ID to a - // topologyClient client that contains its notification dispatch - // channel. - topologyClients *lnutils.SyncMap[uint64, *topologyClient] - - // ntfnClientUpdates is a channel that's used to send new updates to - // topology notification clients to the Builder. Updates either - // add a new notification client, or cancel notifications for an - // existing client. - ntfnClientUpdates chan *topologyClientUpdate - // channelEdgeMtx is a mutex we use to make sure we process only one // ChannelEdgePolicy at a time for a given channelID, to ensure // consistency between the various database accesses. @@ -163,14 +146,11 @@ var _ ChannelGraphSource = (*Builder)(nil) // NewBuilder constructs a new Builder. func NewBuilder(cfg *Config) (*Builder, error) { return &Builder{ - cfg: cfg, - topologyUpdates: make(chan any), - topologyClients: &lnutils.SyncMap[uint64, *topologyClient]{}, - ntfnClientUpdates: make(chan *topologyClientUpdate), - channelEdgeMtx: multimutex.NewMutex[uint64](), - statTicker: ticker.New(defaultStatInterval), - stats: new(builderStats), - quit: make(chan struct{}), + cfg: cfg, + channelEdgeMtx: multimutex.NewMutex[uint64](), + statTicker: ticker.New(defaultStatInterval), + stats: new(builderStats), + quit: make(chan struct{}), }, nil } @@ -656,28 +636,6 @@ func (b *Builder) pruneZombieChans() error { return nil } -// handleTopologyUpdate is responsible for sending any topology changes -// notifications to registered clients. -// -// NOTE: must be run inside goroutine. -func (b *Builder) handleTopologyUpdate(update any) { - defer b.wg.Done() - - topChange := &TopologyChange{} - err := addToTopologyChange(b.cfg.Graph, topChange, update) - if err != nil { - log.Errorf("unable to update topology change notification: %v", - err) - return - } - - if topChange.isEmpty() { - return - } - - b.notifyTopologyChange(topChange) -} - // networkHandler is the primary goroutine for the Builder. The roles of // this goroutine include answering queries related to the state of the // network, pruning the graph on new block notification, applying network @@ -701,16 +659,6 @@ func (b *Builder) networkHandler() { } select { - // A new fully validated topology update has just arrived. - // We'll notify any registered clients. - case update := <-b.topologyUpdates: - b.wg.Add(1) - go b.handleTopologyUpdate(update) - - // TODO(roasbeef): remove all unconnected vertexes - // after N blocks pass with no corresponding - // announcements. - case chainUpdate, ok := <-b.staleBlocks: // If the channel has been closed, then this indicates // the daemon is shutting down, so we exit ourselves. @@ -783,31 +731,6 @@ func (b *Builder) networkHandler() { " processed.", chainUpdate.Height) } - // A new notification client update has arrived. 
We're either - // gaining a new client, or cancelling notifications for an - // existing client. - case ntfnUpdate := <-b.ntfnClientUpdates: - clientID := ntfnUpdate.clientID - - if ntfnUpdate.cancel { - client, ok := b.topologyClients.LoadAndDelete( - clientID, - ) - if ok { - close(client.exit) - client.wg.Wait() - - close(client.ntfnChan) - } - - continue - } - - b.topologyClients.Store(clientID, &topologyClient{ - ntfnChan: ntfnUpdate.ntfnChan, - exit: make(chan struct{}), - }) - // The graph prune ticker has ticked, so we'll examine the // state of the known graph to filter out any zombie channels // for pruning. @@ -934,16 +857,6 @@ func (b *Builder) updateGraphWithClosedChannels( log.Infof("Block %v (height=%v) closed %v channels", chainUpdate.Hash, blockHeight, len(chansClosed)) - if len(chansClosed) == 0 { - return err - } - - // Notify all currently registered clients of the newly closed channels. - closeSummaries := createCloseSummaries(blockHeight, chansClosed...) - b.notifyTopologyChange(&TopologyChange{ - ClosedChannels: closeSummaries, - }) - return nil } @@ -1067,12 +980,6 @@ func (b *Builder) AddNode(node *models.LightningNode, return err } - select { - case b.topologyUpdates <- node: - case <-b.quit: - return ErrGraphBuilderShuttingDown - } - return nil } @@ -1117,12 +1024,6 @@ func (b *Builder) AddEdge(edge *models.ChannelEdgeInfo, return err } - select { - case b.topologyUpdates <- edge: - case <-b.quit: - return ErrGraphBuilderShuttingDown - } - return nil } @@ -1224,12 +1125,6 @@ func (b *Builder) UpdateEdge(update *models.ChannelEdgePolicy, return err } - select { - case b.topologyUpdates <- update: - case <-b.quit: - return ErrGraphBuilderShuttingDown - } - return nil } diff --git a/graph/db/graph.go b/graph/db/graph.go index 0a643144e0..9e35e58dd5 100644 --- a/graph/db/graph.go +++ b/graph/db/graph.go @@ -1,584 +1,215 @@ package graphdb import ( - "bytes" - "crypto/sha256" - "encoding/binary" "errors" "fmt" - "io" - "math" - "net" - "sort" "sync" - "testing" + "sync/atomic" "time" - "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcwallet/walletdb" - "github.com/lightningnetwork/lnd/aliasmgr" "github.com/lightningnetwork/lnd/batch" "github.com/lightningnetwork/lnd/graph/db/models" - "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" ) -var ( - // nodeBucket is a bucket which houses all the vertices or nodes within - // the channel graph. This bucket has a single-sub bucket which adds an - // additional index from pubkey -> alias. Within the top-level of this - // bucket, the key space maps a node's compressed public key to the - // serialized information for that node. Additionally, there's a - // special key "source" which stores the pubkey of the source node. The - // source node is used as the starting point for all graph/queries and - // traversals. The graph is formed as a star-graph with the source node - // at the center. - // - // maps: pubKey -> nodeInfo - // maps: source -> selfPubKey - nodeBucket = []byte("graph-node") +// ErrChanGraphShuttingDown indicates that the ChannelGraph has shutdown or is +// busy shutting down. +var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down") - // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. 
This bucket - // will be used to quickly look up the "freshness" of a node's last - // update to the network. The bucket only contains keys, and no values, - // it's mapping: - // - // maps: updateTime || nodeID -> nil - nodeUpdateIndexBucket = []byte("graph-node-update-index") +// Config is a struct that holds all the necessary dependencies for a +// ChannelGraph. +type Config struct { + // KVDB is the kvdb.Backend that will be used for initializing the + // KVStore CRUD layer. + KVDB kvdb.Backend - // sourceKey is a special key that resides within the nodeBucket. The - // sourceKey maps a key to the public key of the "self node". - sourceKey = []byte("source") - - // aliasIndexBucket is a sub-bucket that's nested within the main - // nodeBucket. This bucket maps the public key of a node to its - // current alias. This bucket is provided as it can be used within a - // future UI layer to add an additional degree of confirmation. - aliasIndexBucket = []byte("alias") - - // edgeBucket is a bucket which houses all of the edge or channel - // information within the channel graph. This bucket essentially acts - // as an adjacency list, which in conjunction with a range scan, can be - // used to iterate over all the incoming and outgoing edges for a - // particular node. Key in the bucket use a prefix scheme which leads - // with the node's public key and sends with the compact edge ID. - // For each chanID, there will be two entries within the bucket, as the - // graph is directed: nodes may have different policies w.r.t to fees - // for their respective directions. - // - // maps: pubKey || chanID -> channel edge policy for node - edgeBucket = []byte("graph-edge") - - // unknownPolicy is represented as an empty slice. It is - // used as the value in edgeBucket for unknown channel edge policies. - // Unknown policies are still stored in the database to enable efficient - // lookup of incoming channel edges. - unknownPolicy = []byte{} - - // chanStart is an array of all zero bytes which is used to perform - // range scans within the edgeBucket to obtain all of the outgoing - // edges for a particular node. - chanStart [8]byte - - // edgeIndexBucket is an index which can be used to iterate all edges - // in the bucket, grouping them according to their in/out nodes. - // Additionally, the items in this bucket also contain the complete - // edge information for a channel. The edge information includes the - // capacity of the channel, the nodes that made the channel, etc. This - // bucket resides within the edgeBucket above. Creation of an edge - // proceeds in two phases: first the edge is added to the edge index, - // afterwards the edgeBucket can be updated with the latest details of - // the edge as they are announced on the network. - // - // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo - edgeIndexBucket = []byte("edge-index") - - // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This - // bucket contains an index which allows us to gauge the "freshness" of - // a channel's last updates. - // - // maps: updateTime || chanID -> nil - edgeUpdateIndexBucket = []byte("edge-update-index") - - // channelPointBucket maps a channel's full outpoint (txid:index) to - // its short 8-byte channel ID. This bucket resides within the - // edgeBucket above, and can be used to quickly remove an edge due to - // the outpoint being spent, or to query for existence of a channel. 
- // - // maps: outPoint -> chanID - channelPointBucket = []byte("chan-index") - - // zombieBucket is a sub-bucket of the main edgeBucket bucket - // responsible for maintaining an index of zombie channels. Each entry - // exists within the bucket as follows: - // - // maps: chanID -> pubKey1 || pubKey2 - // - // The chanID represents the channel ID of the edge that is marked as a - // zombie and is used as the key, which maps to the public keys of the - // edge's participants. - zombieBucket = []byte("zombie-index") - - // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket - // bucket responsible for maintaining an index of disabled edge - // policies. Each entry exists within the bucket as follows: - // - // maps: -> []byte{} - // - // The chanID represents the channel ID of the edge and the direction is - // one byte representing the direction of the edge. The main purpose of - // this index is to allow pruning disabled channels in a fast way without - // the need to iterate all over the graph. - disabledEdgePolicyBucket = []byte("disabled-edge-policy-index") - - // graphMetaBucket is a top-level bucket which stores various meta-deta - // related to the on-disk channel graph. Data stored in this bucket - // includes the block to which the graph has been synced to, the total - // number of channels, etc. - graphMetaBucket = []byte("graph-meta") - - // pruneLogBucket is a bucket within the graphMetaBucket that stores - // a mapping from the block height to the hash for the blocks used to - // prune the graph. - // Once a new block is discovered, any channels that have been closed - // (by spending the outpoint) can safely be removed from the graph, and - // the block is added to the prune log. We need to keep such a log for - // the case where a reorg happens, and we must "rewind" the state of the - // graph by removing channels that were previously confirmed. In such a - // case we'll remove all entries from the prune log with a block height - // that no longer exists. - pruneLogBucket = []byte("prune-log") + // KVStoreOpts is a list of functional options that will be used when + // initializing the KVStore. + KVStoreOpts []KVStoreOptionModifier +} - // closedScidBucket is a top-level bucket that stores scids for - // channels that we know to be closed. This is used so that we don't - // need to perform expensive validation checks if we receive a channel - // announcement for the channel again. - // - // maps: scid -> []byte{} - closedScidBucket = []byte("closed-scid") -) +// ChannelGraph is a layer above the graph's CRUD layer. +// +// NOTE: currently, this is purely a pass-through layer directly to the backing +// KVStore. Upcoming commits will move the graph cache out of the KVStore and +// into this layer so that the KVStore is only responsible for CRUD operations. +type ChannelGraph struct { + started atomic.Bool + stopped atomic.Bool -const ( - // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that - // we'll permit to be written to disk. We limit this as otherwise, it - // would be possible for a node to create a ton of updates and slowly - // fill our disk, and also waste bandwidth due to relaying. - MaxAllowedExtraOpaqueBytes = 10000 -) + // cacheMu guards any writes to the graphCache. It should be held + // across the DB write call and the graphCache update to make the + // two updates as atomic as possible. + cacheMu sync.Mutex -// ChannelGraph is a persistent, on-disk graph representation of the Lightning -// Network. 
This struct can be used to implement path finding algorithms on top -// of, and also to update a node's view based on information received from the -// p2p network. Internally, the graph is stored using a modified adjacency list -// representation with some added object interaction possible with each -// serialized edge/node. The graph is stored is directed, meaning that are two -// edges stored for each channel: an inbound/outbound edge for each node pair. -// Nodes, edges, and edge information can all be added to the graph -// independently. Edge removal results in the deletion of all edge information -// for that edge. -type ChannelGraph struct { - db kvdb.Backend + graphCache *GraphCache - // cacheMu guards all caches (rejectCache, chanCache, graphCache). If - // this mutex will be acquired at the same time as the DB mutex then - // the cacheMu MUST be acquired first to prevent deadlock. - cacheMu sync.RWMutex - rejectCache *rejectCache - chanCache *channelCache - graphCache *GraphCache + *KVStore + *topologyManager - chanScheduler batch.Scheduler - nodeScheduler batch.Scheduler + quit chan struct{} + wg sync.WaitGroup } -// NewChannelGraph allocates a new ChannelGraph backed by a DB instance. The -// returned instance has its own unique reject cache and channel cache. -func NewChannelGraph(db kvdb.Backend, options ...OptionModifier) (*ChannelGraph, +// NewChannelGraph creates a new ChannelGraph instance with the given backend. +func NewChannelGraph(cfg *Config, options ...ChanGraphOption) (*ChannelGraph, error) { - opts := DefaultOptions() + opts := defaultChanGraphOptions() for _, o := range options { o(opts) } - if !opts.NoMigration { - if err := initChannelGraph(db); err != nil { - return nil, err - } + store, err := NewKVStore(cfg.KVDB, cfg.KVStoreOpts...) + if err != nil { + return nil, err } g := &ChannelGraph{ - db: db, - rejectCache: newRejectCache(opts.RejectCacheSize), - chanCache: newChannelCache(opts.ChannelCacheSize), + KVStore: store, + topologyManager: newTopologyManager(), + quit: make(chan struct{}), } - g.chanScheduler = batch.NewTimeScheduler( - db, &g.cacheMu, opts.BatchCommitInterval, - ) - g.nodeScheduler = batch.NewTimeScheduler( - db, nil, opts.BatchCommitInterval, - ) // The graph cache can be turned off (e.g. for mobile users) for a // speed/memory usage tradeoff. - if opts.UseGraphCache { - g.graphCache = NewGraphCache(opts.PreAllocCacheNumNodes) - startTime := time.Now() - log.Debugf("Populating in-memory channel graph, this might " + - "take a while...") - - err := g.ForEachNodeCacheable(func(node route.Vertex, - features *lnwire.FeatureVector) error { - - g.graphCache.AddNodeFeatures(node, features) - - return nil - }) - if err != nil { - return nil, err - } - - err = g.ForEachChannel(func(info *models.ChannelEdgeInfo, - policy1, policy2 *models.ChannelEdgePolicy) error { - - g.graphCache.AddChannel(info, policy1, policy2) - - return nil - }) - if err != nil { - return nil, err - } - - log.Debugf("Finished populating in-memory channel graph (took "+ - "%v, %s)", time.Since(startTime), g.graphCache.Stats()) + if opts.useGraphCache { + g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes) } return g, nil } -// channelMapKey is the key structure used for storing channel edge policies. -type channelMapKey struct { - nodeKey route.Vertex - chanID [8]byte -} - -// getChannelMap loads all channel edge policies from the database and stores -// them in a map. 
-func (c *ChannelGraph) getChannelMap(edges kvdb.RBucket) ( - map[channelMapKey]*models.ChannelEdgePolicy, error) { - - // Create a map to store all channel edge policies. - channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy) - - err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error { - // Skip embedded buckets. - if bytes.Equal(k, edgeIndexBucket) || - bytes.Equal(k, edgeUpdateIndexBucket) || - bytes.Equal(k, zombieBucket) || - bytes.Equal(k, disabledEdgePolicyBucket) || - bytes.Equal(k, channelPointBucket) { - - return nil - } - - // Validate key length. - if len(k) != 33+8 { - return fmt.Errorf("invalid edge key %x encountered", k) - } - - var key channelMapKey - copy(key.nodeKey[:], k[:33]) - copy(key.chanID[:], k[33:]) - - // No need to deserialize unknown policy. - if bytes.Equal(edgeBytes, unknownPolicy) { - return nil - } - - edgeReader := bytes.NewReader(edgeBytes) - edge, err := deserializeChanEdgePolicyRaw( - edgeReader, - ) - - switch { - // If the db policy was missing an expected optional field, we - // return nil as if the policy was unknown. - case err == ErrEdgePolicyOptionalFieldNotFound: - return nil - - case err != nil: - return err - } - - channelMap[key] = edge - +// Start kicks off any goroutines required for the ChannelGraph to function. +// If the graph cache is enabled, then it will be populated with the contents of +// the database. +func (c *ChannelGraph) Start() error { + if !c.started.CompareAndSwap(false, true) { return nil - }) - if err != nil { - return nil, err } + log.Debugf("ChannelGraph starting") + defer log.Debug("ChannelGraph started") - return channelMap, nil -} - -var graphTopLevelBuckets = [][]byte{ - nodeBucket, - edgeBucket, - graphMetaBucket, - closedScidBucket, -} - -// Wipe completely deletes all saved state within all used buckets within the -// database. The deletion is done in a single transaction, therefore this -// operation is fully atomic. -func (c *ChannelGraph) Wipe() error { - err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { - for _, tlb := range graphTopLevelBuckets { - err := tx.DeleteTopLevelBucket(tlb) - if err != nil && err != kvdb.ErrBucketNotFound { - return err - } + if c.graphCache != nil { + if err := c.populateCache(); err != nil { + return fmt.Errorf("could not populate the graph "+ + "cache: %w", err) } - return nil - }, func() {}) - if err != nil { - return err } - return initChannelGraph(c.db) -} - -// createChannelDB creates and initializes a fresh version of In -// the case that the target path has not yet been created or doesn't yet exist, -// then the path is created. Additionally, all required top-level buckets used -// within the database are created. 
-func initChannelGraph(db kvdb.Backend) error { - err := kvdb.Update(db, func(tx kvdb.RwTx) error { - for _, tlb := range graphTopLevelBuckets { - if _, err := tx.CreateTopLevelBucket(tlb); err != nil { - return err - } - } - - nodes := tx.ReadWriteBucket(nodeBucket) - _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket) - if err != nil { - return err - } - _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket) - if err != nil { - return err - } - - edges := tx.ReadWriteBucket(edgeBucket) - _, err = edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) - if err != nil { - return err - } - _, err = edges.CreateBucketIfNotExists(channelPointBucket) - if err != nil { - return err - } - _, err = edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - - graphMeta := tx.ReadWriteBucket(graphMetaBucket) - _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket) - return err - }, func() {}) - if err != nil { - return fmt.Errorf("unable to create new channel graph: %w", err) - } + c.wg.Add(1) + go c.handleTopologySubscriptions() return nil } -// AddrsForNode returns all known addresses for the target node public key that -// the graph DB is aware of. The returned boolean indicates if the given node is -// unknown to the graph DB or not. -// -// NOTE: this is part of the channeldb.AddrSource interface. -func (c *ChannelGraph) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr, - error) { - - pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed()) - if err != nil { - return false, nil, err +// Stop signals any active goroutines for a graceful closure. +func (c *ChannelGraph) Stop() error { + if !c.stopped.CompareAndSwap(false, true) { + return nil } - node, err := c.FetchLightningNode(pubKey) - // We don't consider it an error if the graph is unaware of the node. - switch { - case err != nil && !errors.Is(err, ErrGraphNodeNotFound): - return false, nil, err + log.Debugf("ChannelGraph shutting down...") + defer log.Debug("ChannelGraph shutdown complete") - case errors.Is(err, ErrGraphNodeNotFound): - return false, nil, nil - } + close(c.quit) + c.wg.Wait() - return true, node.Addresses, nil + return nil } -// ForEachChannel iterates through all the channel edges stored within the -// graph and invokes the passed callback for each edge. The callback takes two -// edges as since this is a directed graph, both the in/out edges are visited. -// If the callback returns an error, then the transaction is aborted and the -// iteration stops early. +// handleTopologySubscriptions ensures that topology client subscriptions, +// subscription cancellations and topology notifications are handled +// synchronously. // -// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer -// for that particular channel edge routing policy will be passed into the -// callback. -func (c *ChannelGraph) ForEachChannel(cb func(*models.ChannelEdgeInfo, - *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error { - - return c.db.View(func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - - // First, load all edges in memory indexed by node and channel - // id. 
- channelMap, err := c.getChannelMap(edges) - if err != nil { - return err - } - - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - // Load edge index, recombine each channel with the policies - // loaded above and invoke the callback. - return kvdb.ForAll( - edgeIndex, func(k, edgeInfoBytes []byte) error { - var chanID [8]byte - copy(chanID[:], k) - - edgeInfoReader := bytes.NewReader(edgeInfoBytes) - info, err := deserializeChanEdgeInfo( - edgeInfoReader, +// NOTE: this MUST be run in a goroutine. +func (c *ChannelGraph) handleTopologySubscriptions() { + defer c.wg.Done() + + for { + select { + // A new fully validated topology update has just arrived. + // We'll notify any registered clients. + case update := <-c.topologyUpdate: + // TODO(elle): change topology handling to be handled + // synchronously so that we can guarantee the order of + // notification delivery. + c.wg.Add(1) + go c.handleTopologyUpdate(update) + + // TODO(roasbeef): remove all unconnected vertexes + // after N blocks pass with no corresponding + // announcements. + + // A new notification client update has arrived. We're either + // gaining a new client, or cancelling notifications for an + // existing client. + case ntfnUpdate := <-c.ntfnClientUpdates: + clientID := ntfnUpdate.clientID + + if ntfnUpdate.cancel { + client, ok := c.topologyClients.LoadAndDelete( + clientID, ) - if err != nil { - return err + if ok { + close(client.exit) + client.wg.Wait() + + close(client.ntfnChan) } - policy1 := channelMap[channelMapKey{ - nodeKey: info.NodeKey1Bytes, - chanID: chanID, - }] + continue + } - policy2 := channelMap[channelMapKey{ - nodeKey: info.NodeKey2Bytes, - chanID: chanID, - }] + c.topologyClients.Store(clientID, &topologyClient{ + ntfnChan: ntfnUpdate.ntfnChan, + exit: make(chan struct{}), + }) - return cb(&info, policy1, policy2) - }, - ) - }, func() {}) + case <-c.quit: + return + } + } } -// forEachNodeDirectedChannel iterates through all channels of a given node, -// executing the passed callback on the directed edge representing the channel -// and its incoming policy. If the callback returns an error, then the iteration -// is halted with the error propagated back up to the caller. An optional read -// transaction may be provided. If none is provided, a new one will be created. +// populateCache loads the entire channel graph into the in-memory graph cache. // -// Unknown policies are passed into the callback as nil values. -func (c *ChannelGraph) forEachNodeDirectedChannel(tx kvdb.RTx, - node route.Vertex, cb func(channel *DirectedChannel) error) error { +// NOTE: This should only be called if the graphCache has been constructed. +func (c *ChannelGraph) populateCache() error { + startTime := time.Now() + log.Info("Populating in-memory channel graph, this might take a " + + "while...") - if c.graphCache != nil { - return c.graphCache.ForEachChannel(node, cb) - } + err := c.KVStore.ForEachNodeCacheable(func(node route.Vertex, + features *lnwire.FeatureVector) error { - // Fallback that uses the database. 
- toNodeCallback := func() route.Vertex { - return node - } - toNodeFeatures, err := c.fetchNodeFeatures(tx, node) + c.graphCache.AddNodeFeatures(node, features) + + return nil + }) if err != nil { return err } - dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1, - p2 *models.ChannelEdgePolicy) error { - - var cachedInPolicy *models.CachedEdgePolicy - if p2 != nil { - cachedInPolicy = models.NewCachedPolicy(p2) - cachedInPolicy.ToNodePubKey = toNodeCallback - cachedInPolicy.ToNodeFeatures = toNodeFeatures - } - - var inboundFee lnwire.Fee - if p1 != nil { - // Extract inbound fee. If there is a decoding error, - // skip this edge. - _, err := p1.ExtraOpaqueData.ExtractRecords(&inboundFee) - if err != nil { - return nil - } - } - - directedChannel := &DirectedChannel{ - ChannelID: e.ChannelID, - IsNode1: node == e.NodeKey1Bytes, - OtherNode: e.NodeKey2Bytes, - Capacity: e.Capacity, - OutPolicySet: p1 != nil, - InPolicy: cachedInPolicy, - InboundFee: inboundFee, - } - - if node == e.NodeKey2Bytes { - directedChannel.OtherNode = e.NodeKey1Bytes - } - - return cb(directedChannel) - } - return nodeTraversal(tx, node[:], c.db, dbCallback) -} + err = c.KVStore.ForEachChannel(func(info *models.ChannelEdgeInfo, + policy1, policy2 *models.ChannelEdgePolicy) error { -// fetchNodeFeatures returns the features of a given node. If no features are -// known for the node, an empty feature vector is returned. An optional read -// transaction may be provided. If none is provided, a new one will be created. -func (c *ChannelGraph) fetchNodeFeatures(tx kvdb.RTx, - node route.Vertex) (*lnwire.FeatureVector, error) { + c.graphCache.AddChannel(info, policy1, policy2) - if c.graphCache != nil { - return c.graphCache.GetFeatures(node), nil + return nil + }) + if err != nil { + return err } - // Fallback that uses the database. - targetNode, err := c.FetchLightningNodeTx(tx, node) - switch err { - // If the node exists and has features, return them directly. - case nil: - return targetNode.Features, nil + log.Infof("Finished populating in-memory channel graph (took %v, %s)", + time.Since(startTime), c.graphCache.Stats()) - // If we couldn't find a node announcement, populate a blank feature - // vector. - case ErrGraphNodeNotFound: - return lnwire.EmptyFeatureVector(), nil - - // Otherwise, bubble the error up. - default: - return nil, err - } + return nil } // ForEachNodeDirectedChannel iterates through all channels of a given node, @@ -591,10 +222,14 @@ func (c *ChannelGraph) fetchNodeFeatures(tx kvdb.RTx, // Unknown policies are passed into the callback as nil values. // // NOTE: this is part of the graphdb.NodeTraverser interface. -func (c *ChannelGraph) ForEachNodeDirectedChannel(nodePub route.Vertex, +func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex, cb func(channel *DirectedChannel) error) error { - return c.forEachNodeDirectedChannel(nil, nodePub, cb) + if c.graphCache != nil { + return c.graphCache.ForEachChannel(node, cb) + } + + return c.KVStore.ForEachNodeDirectedChannel(node, cb) } // FetchNodeFeatures returns the features of the given node. If no features are @@ -603,15 +238,30 @@ func (c *ChannelGraph) ForEachNodeDirectedChannel(nodePub route.Vertex, // features instead of the database. // // NOTE: this is part of the graphdb.NodeTraverser interface. 
-func (c *ChannelGraph) FetchNodeFeatures(nodePub route.Vertex) ( +func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) ( *lnwire.FeatureVector, error) { - return c.fetchNodeFeatures(nil, nodePub) + if c.graphCache != nil { + return c.graphCache.GetFeatures(node), nil + } + + return c.KVStore.FetchNodeFeatures(node) +} + +// GraphSession will provide the call-back with access to a NodeTraverser +// instance which can be used to perform queries against the channel graph. If +// the graph cache is not enabled, then the call-back will be provided with +// access to the graph via a consistent read-only transaction. +func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error { + if c.graphCache != nil { + return cb(c) + } + + return c.KVStore.GraphSession(cb) } -// ForEachNodeCached is similar to forEachNode, but it utilizes the channel -// graph cache instead. Note that this doesn't return all the information the -// regular forEachNode method does. +// ForEachNodeCached iterates through all the stored vertices/nodes in the +// graph, executing the passed callback with each node encountered. // // NOTE: The callback contents MUST not be modified. func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex, @@ -621,4216 +271,362 @@ func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex, return c.graphCache.ForEachNode(cb) } - // Otherwise call back to a version that uses the database directly. - // We'll iterate over each node, then the set of channels for each - // node, and construct a similar callback functiopn signature as the - // main funcotin expects. - return c.forEachNode(func(tx kvdb.RTx, - node *models.LightningNode) error { - - channels := make(map[uint64]*DirectedChannel) - - err := c.ForEachNodeChannelTx(tx, node.PubKeyBytes, - func(tx kvdb.RTx, e *models.ChannelEdgeInfo, - p1 *models.ChannelEdgePolicy, - p2 *models.ChannelEdgePolicy) error { - - toNodeCallback := func() route.Vertex { - return node.PubKeyBytes - } - toNodeFeatures, err := c.fetchNodeFeatures( - tx, node.PubKeyBytes, - ) - if err != nil { - return err - } - - var cachedInPolicy *models.CachedEdgePolicy - if p2 != nil { - cachedInPolicy = - models.NewCachedPolicy(p2) - cachedInPolicy.ToNodePubKey = - toNodeCallback - cachedInPolicy.ToNodeFeatures = - toNodeFeatures - } - - directedChannel := &DirectedChannel{ - ChannelID: e.ChannelID, - IsNode1: node.PubKeyBytes == - e.NodeKey1Bytes, - OtherNode: e.NodeKey2Bytes, - Capacity: e.Capacity, - OutPolicySet: p1 != nil, - InPolicy: cachedInPolicy, - } - - if node.PubKeyBytes == e.NodeKey2Bytes { - directedChannel.OtherNode = - e.NodeKey1Bytes - } - - channels[e.ChannelID] = directedChannel - - return nil - }) - if err != nil { - return err - } - - return cb(node.PubKeyBytes, channels) - }) + return c.KVStore.ForEachNodeCached(cb) } -// DisabledChannelIDs returns the channel ids of disabled channels. -// A channel is disabled when two of the associated ChanelEdgePolicies -// have their disabled bit on. -func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) { - var disabledChanIDs []uint64 - var chanEdgeFound map[uint64]struct{} +// AddLightningNode adds a vertex/node to the graph database. If the node is not +// in the database from before, this will add a new, unconnected one to the +// graph. If it is present from before, this will update that node's +// information. 
Note that this method is expected to only be called to update an +// already present node from a node announcement, or to insert a node found in a +// channel update. +func (c *ChannelGraph) AddLightningNode(node *models.LightningNode, + op ...batch.SchedulerOption) error { + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } + err := c.KVStore.AddLightningNode(node, op...) + if err != nil { + return err + } - disabledEdgePolicyIndex := edges.NestedReadBucket( - disabledEdgePolicyBucket, + if c.graphCache != nil { + c.graphCache.AddNodeFeatures( + node.PubKeyBytes, node.Features, ) - if disabledEdgePolicyIndex == nil { - return nil - } + } - // We iterate over all disabled policies and we add each channel - // that has more than one disabled policy to disabledChanIDs - // array. - return disabledEdgePolicyIndex.ForEach( - func(k, v []byte) error { - chanID := byteOrder.Uint64(k[:8]) - _, edgeFound := chanEdgeFound[chanID] - if edgeFound { - delete(chanEdgeFound, chanID) - disabledChanIDs = append( - disabledChanIDs, chanID, - ) + select { + case c.topologyUpdate <- node: + case <-c.quit: + return ErrChanGraphShuttingDown + } - return nil - } + return nil +} - chanEdgeFound[chanID] = struct{}{} +// DeleteLightningNode starts a new database transaction to remove a vertex/node +// from the database according to the node's public key. +func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - return nil - }, - ) - }, func() { - disabledChanIDs = nil - chanEdgeFound = make(map[uint64]struct{}) - }) + err := c.KVStore.DeleteLightningNode(nodePub) if err != nil { - return nil, err + return err } - return disabledChanIDs, nil -} - -// ForEachNode iterates through all the stored vertices/nodes in the graph, -// executing the passed callback with each node encountered. If the callback -// returns an error, then the transaction is aborted and the iteration stops -// early. Any operations performed on the NodeTx passed to the call-back are -// executed under the same read transaction and so, methods on the NodeTx object -// _MUST_ only be called from within the call-back. -func (c *ChannelGraph) ForEachNode(cb func(tx NodeRTx) error) error { - return c.forEachNode(func(tx kvdb.RTx, - node *models.LightningNode) error { + if c.graphCache != nil { + c.graphCache.RemoveNode(nodePub) + } - return cb(newChanGraphNodeTx(tx, c, node)) - }) + return nil } -// forEachNode iterates through all the stored vertices/nodes in the graph, -// executing the passed callback with each node encountered. If the callback -// returns an error, then the transaction is aborted and the iteration stops -// early. -// -// TODO(roasbeef): add iterator interface to allow for memory efficient graph -// traversal when graph gets mega. -func (c *ChannelGraph) forEachNode( - cb func(kvdb.RTx, *models.LightningNode) error) error { +// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An +// undirected edge from the two target nodes are created. The information stored +// denotes the static attributes of the channel, such as the channelID, the keys +// involved in creation of the channel, and the set of features that the channel +// supports. The chanPoint and chanID are used to uniquely identify the edge +// globally within the database. 
+func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo, + op ...batch.SchedulerOption) error { - traversal := func(tx kvdb.RTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - return nodes.ForEach(func(pubKey, nodeBytes []byte) error { - // If this is the source key, then we skip this - // iteration as the value for this key is a pubKey - // rather than raw node information. - if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { - return nil - } + err := c.KVStore.AddChannelEdge(edge, op...) + if err != nil { + return err + } - nodeReader := bytes.NewReader(nodeBytes) - node, err := deserializeLightningNode(nodeReader) - if err != nil { - return err - } + if c.graphCache != nil { + c.graphCache.AddChannel(edge, nil, nil) + } - // Execute the callback, the transaction will abort if - // this returns an error. - return cb(tx, &node) - }) + select { + case c.topologyUpdate <- edge: + case <-c.quit: + return ErrChanGraphShuttingDown } - return kvdb.View(c.db, traversal, func() {}) + return nil } -// ForEachNodeCacheable iterates through all the stored vertices/nodes in the -// graph, executing the passed callback with each node encountered. If the -// callback returns an error, then the transaction is aborted and the iteration -// stops early. -func (c *ChannelGraph) ForEachNodeCacheable(cb func(route.Vertex, - *lnwire.FeatureVector) error) error { - - traversal := func(tx kvdb.RTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - return nodes.ForEach(func(pubKey, nodeBytes []byte) error { - // If this is the source key, then we skip this - // iteration as the value for this key is a pubKey - // rather than raw node information. - if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { - return nil - } - - nodeReader := bytes.NewReader(nodeBytes) - node, features, err := deserializeLightningNodeCacheable( //nolint:ll - nodeReader, - ) - if err != nil { - return err - } - - // Execute the callback, the transaction will abort if - // this returns an error. - return cb(node, features) - }) - } - - return kvdb.View(c.db, traversal, func() {}) -} - -// SourceNode returns the source node of the graph. The source node is treated -// as the center node within a star-graph. This method may be used to kick off -// a path finding algorithm in order to explore the reachability of another -// node based off the source node. -func (c *ChannelGraph) SourceNode() (*models.LightningNode, error) { - var source *models.LightningNode - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - node, err := c.sourceNode(nodes) - if err != nil { - return err - } - source = node - - return nil - }, func() { - source = nil - }) - if err != nil { - return nil, err - } - - return source, nil -} - -// sourceNode uses an existing database transaction and returns the source node -// of the graph. The source node is treated as the center node within a -// star-graph. 
This method may be used to kick off a path finding algorithm in -// order to explore the reachability of another node based off the source node. -func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*models.LightningNode, - error) { - - selfPub := nodes.Get(sourceKey) - if selfPub == nil { - return nil, ErrSourceNodeNotSet - } - - // With the pubKey of the source node retrieved, we're able to - // fetch the full node information. - node, err := fetchLightningNode(nodes, selfPub) - if err != nil { - return nil, err - } - - return &node, nil -} - -// SetSourceNode sets the source node within the graph database. The source -// node is to be used as the center of a star-graph within path finding -// algorithms. -func (c *ChannelGraph) SetSourceNode(node *models.LightningNode) error { - nodePubBytes := node.PubKeyBytes[:] - - return kvdb.Update(c.db, func(tx kvdb.RwTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - // Next we create the mapping from source to the targeted - // public key. - if err := nodes.Put(sourceKey, nodePubBytes); err != nil { - return err - } - - // Finally, we commit the information of the lightning node - // itself. - return addLightningNode(tx, node) - }, func() {}) -} - -// AddLightningNode adds a vertex/node to the graph database. If the node is not -// in the database from before, this will add a new, unconnected one to the -// graph. If it is present from before, this will update that node's -// information. Note that this method is expected to only be called to update an -// already present node from a node announcement, or to insert a node found in a -// channel update. -// -// TODO(roasbeef): also need sig of announcement -func (c *ChannelGraph) AddLightningNode(node *models.LightningNode, - op ...batch.SchedulerOption) error { - - r := &batch.Request{ - Update: func(tx kvdb.RwTx) error { - if c.graphCache != nil { - c.graphCache.AddNodeFeatures( - node.PubKeyBytes, node.Features, - ) - } - - return addLightningNode(tx, node) - }, - } - - for _, f := range op { - f(r) - } - - return c.nodeScheduler.Execute(r) -} - -func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error { - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket) - if err != nil { - return err - } - - updateIndex, err := nodes.CreateBucketIfNotExists( - nodeUpdateIndexBucket, - ) - if err != nil { - return err - } - - return putLightningNode(nodes, aliases, updateIndex, node) -} - -// LookupAlias attempts to return the alias as advertised by the target node. -// TODO(roasbeef): currently assumes that aliases are unique... -func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) { - var alias string - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound - } - - aliases := nodes.NestedReadBucket(aliasIndexBucket) - if aliases == nil { - return ErrGraphNodesNotFound - } - - nodePub := pub.SerializeCompressed() - a := aliases.Get(nodePub) - if a == nil { - return ErrNodeAliasNotFound - } - - // TODO(roasbeef): should actually be using the utf-8 - // package... 
- alias = string(a) - return nil - }, func() { - alias = "" - }) - if err != nil { - return "", err - } - - return alias, nil -} - -// DeleteLightningNode starts a new database transaction to remove a vertex/node -// from the database according to the node's public key. -func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error { - // TODO(roasbeef): ensure dangling edges are removed... - return kvdb.Update(c.db, func(tx kvdb.RwTx) error { - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodeNotFound - } - - if c.graphCache != nil { - c.graphCache.RemoveNode(nodePub) - } - - return c.deleteLightningNode(nodes, nodePub[:]) - }, func() {}) -} - -// deleteLightningNode uses an existing database transaction to remove a -// vertex/node from the database according to the node's public key. -func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket, - compressedPubKey []byte) error { - - aliases := nodes.NestedReadWriteBucket(aliasIndexBucket) - if aliases == nil { - return ErrGraphNodesNotFound - } - - if err := aliases.Delete(compressedPubKey); err != nil { - return err - } - - // Before we delete the node, we'll fetch its current state so we can - // determine when its last update was to clear out the node update - // index. - node, err := fetchLightningNode(nodes, compressedPubKey) - if err != nil { - return err - } - - if err := nodes.Delete(compressedPubKey); err != nil { - return err - } - - // Finally, we'll delete the index entry for the node within the - // nodeUpdateIndexBucket as this node is no longer active, so we don't - // need to track its last update. - nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket) - if nodeUpdateIndex == nil { - return ErrGraphNodesNotFound - } - - // In order to delete the entry, we'll need to reconstruct the key for - // its last update. - updateUnix := uint64(node.LastUpdate.Unix()) - var indexKey [8 + 33]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - copy(indexKey[8:], compressedPubKey) - - return nodeUpdateIndex.Delete(indexKey[:]) -} - -// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An -// undirected edge from the two target nodes are created. The information stored -// denotes the static attributes of the channel, such as the channelID, the keys -// involved in creation of the channel, and the set of features that the channel -// supports. The chanPoint and chanID are used to uniquely identify the edge -// globally within the database. -func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo, - op ...batch.SchedulerOption) error { - - var alreadyExists bool - r := &batch.Request{ - Reset: func() { - alreadyExists = false - }, - Update: func(tx kvdb.RwTx) error { - err := c.addChannelEdge(tx, edge) - - // Silence ErrEdgeAlreadyExist so that the batch can - // succeed, but propagate the error via local state. - if err == ErrEdgeAlreadyExist { - alreadyExists = true - return nil - } - - return err - }, - OnCommit: func(err error) error { - switch { - case err != nil: - return err - case alreadyExists: - return ErrEdgeAlreadyExist - default: - c.rejectCache.remove(edge.ChannelID) - c.chanCache.remove(edge.ChannelID) - return nil - } - }, - } - - for _, f := range op { - if f == nil { - return fmt.Errorf("nil scheduler option was used") - } - - f(r) - } - - return c.chanScheduler.Execute(r) -} - -// addChannelEdge is the private form of AddChannelEdge that allows callers to -// utilize an existing db transaction. 
-func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, - edge *models.ChannelEdgeInfo) error { - - // Construct the channel's primary key which is the 8-byte channel ID. - var chanKey [8]byte - binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID) - - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket) - if err != nil { - return err - } - - // First, attempt to check if this edge has already been created. If - // so, then we can exit early as this method is meant to be idempotent. - if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil { - return ErrEdgeAlreadyExist - } - - if c.graphCache != nil { - c.graphCache.AddChannel(edge, nil, nil) - } - - // Before we insert the channel into the database, we'll ensure that - // both nodes already exist in the channel graph. If either node - // doesn't, then we'll insert a "shell" node that just includes its - // public key, so subsequent validation and queries can work properly. - _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:]) - switch { - case node1Err == ErrGraphNodeNotFound: - node1Shell := models.LightningNode{ - PubKeyBytes: edge.NodeKey1Bytes, - HaveNodeAnnouncement: false, - } - err := addLightningNode(tx, &node1Shell) - if err != nil { - return fmt.Errorf("unable to create shell node "+ - "for: %x: %w", edge.NodeKey1Bytes, err) - } - case node1Err != nil: - return node1Err - } - - _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:]) - switch { - case node2Err == ErrGraphNodeNotFound: - node2Shell := models.LightningNode{ - PubKeyBytes: edge.NodeKey2Bytes, - HaveNodeAnnouncement: false, - } - err := addLightningNode(tx, &node2Shell) - if err != nil { - return fmt.Errorf("unable to create shell node "+ - "for: %x: %w", edge.NodeKey2Bytes, err) - } - case node2Err != nil: - return node2Err - } - - // If the edge hasn't been created yet, then we'll first add it to the - // edge index in order to associate the edge between two nodes and also - // store the static components of the channel. - if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil { - return err - } - - // Mark edge policies for both sides as unknown. This is to enable - // efficient incoming channel lookup for a node. - keys := []*[33]byte{ - &edge.NodeKey1Bytes, - &edge.NodeKey2Bytes, - } - for _, key := range keys { - err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:]) - if err != nil { - return err - } - } - - // Finally we add it to the channel index which maps channel points - // (outpoints) to the shorter channel ID's. - var b bytes.Buffer - if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil { - return err - } - return chanIndex.Put(b.Bytes(), chanKey[:]) -} - -// HasChannelEdge returns true if the database knows of a channel edge with the -// passed channel ID, and false otherwise. If an edge with that ID is found -// within the graph, then two time stamps representing the last time the edge -// was updated for both directed edges are returned along with the boolean. If -// it is not found, then the zombie index is checked and its result is returned -// as the second boolean. 
-func (c *ChannelGraph) HasChannelEdge( - chanID uint64) (time.Time, time.Time, bool, bool, error) { - - var ( - upd1Time time.Time - upd2Time time.Time - exists bool - isZombie bool - ) - - // We'll query the cache with the shared lock held to allow multiple - // readers to access values in the cache concurrently if they exist. - c.cacheMu.RLock() - if entry, ok := c.rejectCache.get(chanID); ok { - c.cacheMu.RUnlock() - upd1Time = time.Unix(entry.upd1Time, 0) - upd2Time = time.Unix(entry.upd2Time, 0) - exists, isZombie = entry.flags.unpack() - return upd1Time, upd2Time, exists, isZombie, nil - } - c.cacheMu.RUnlock() - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - // The item was not found with the shared lock, so we'll acquire the - // exclusive lock and check the cache again in case another method added - // the entry to the cache while no lock was held. - if entry, ok := c.rejectCache.get(chanID); ok { - upd1Time = time.Unix(entry.upd1Time, 0) - upd2Time = time.Unix(entry.upd2Time, 0) - exists, isZombie = entry.flags.unpack() - return upd1Time, upd2Time, exists, isZombie, nil - } - - if err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - var channelID [8]byte - byteOrder.PutUint64(channelID[:], chanID) - - // If the edge doesn't exist, then we'll also check our zombie - // index. - if edgeIndex.Get(channelID[:]) == nil { - exists = false - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex != nil { - isZombie, _, _ = isZombieEdge( - zombieIndex, chanID, - ) - } - - return nil - } - - exists = true - isZombie = false - - // If the channel has been found in the graph, then retrieve - // the edges itself so we can return the last updated - // timestamps. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodeNotFound - } - - e1, e2, err := fetchChanEdgePolicies( - edgeIndex, edges, channelID[:], - ) - if err != nil { - return err - } - - // As we may have only one of the edges populated, only set the - // update time if the edge was found in the database. - if e1 != nil { - upd1Time = e1.LastUpdate - } - if e2 != nil { - upd2Time = e2.LastUpdate - } - - return nil - }, func() {}); err != nil { - return time.Time{}, time.Time{}, exists, isZombie, err - } - - c.rejectCache.insert(chanID, rejectCacheEntry{ - upd1Time: upd1Time.Unix(), - upd2Time: upd2Time.Unix(), - flags: packRejectFlags(exists, isZombie), - }) - - return upd1Time, upd2Time, exists, isZombie, nil -} - -// AddEdgeProof sets the proof of an existing edge in the graph database. -func (c *ChannelGraph) AddEdgeProof(chanID lnwire.ShortChannelID, - proof *models.ChannelAuthProof) error { - - // Construct the channel's primary key which is the 8-byte channel ID. 
- var chanKey [8]byte - binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64()) - - return kvdb.Update(c.db, func(tx kvdb.RwTx) error { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrEdgeNotFound - } - - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrEdgeNotFound - } - - edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:]) - if err != nil { - return err - } - - edge.AuthProof = proof - - return putChanEdgeInfo(edgeIndex, &edge, chanKey) - }, func() {}) -} - -const ( - // pruneTipBytes is the total size of the value which stores a prune - // entry of the graph in the prune log. The "prune tip" is the last - // entry in the prune log, and indicates if the channel graph is in - // sync with the current UTXO state. The structure of the value - // is: blockHash, taking 32 bytes total. - pruneTipBytes = 32 -) - -// PruneGraph prunes newly closed channels from the channel graph in response -// to a new block being solved on the network. Any transactions which spend the -// funding output of any known channels within he graph will be deleted. -// Additionally, the "prune tip", or the last block which has been used to -// prune the graph is stored so callers can ensure the graph is fully in sync -// with the current UTXO state. A slice of channels that have been closed by -// the target block are returned if the function succeeds without error. -func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, - blockHash *chainhash.Hash, blockHeight uint32) ( - []*models.ChannelEdgeInfo, error) { - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - var chansClosed []*models.ChannelEdgeInfo - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { - // First grab the edges bucket which houses the information - // we'd like to delete - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - - // Next grab the two edge indexes which will also need to be - // updated. - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - chanIndex, err := edges.CreateBucketIfNotExists( - channelPointBucket, - ) - if err != nil { - return err - } - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrSourceNodeNotSet - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - - // For each of the outpoints that have been spent within the - // block, we attempt to delete them from the graph as if that - // outpoint was a channel, then it has now been closed. - for _, chanPoint := range spentOutputs { - // TODO(roasbeef): load channel bloom filter, continue - // if NOT if filter - - var opBytes bytes.Buffer - err := WriteOutpoint(&opBytes, chanPoint) - if err != nil { - return err - } - - // First attempt to see if the channel exists within - // the database, if not, then we can exit early. - chanID := chanIndex.Get(opBytes.Bytes()) - if chanID == nil { - continue - } - - // However, if it does, then we'll read out the full - // version so we can add it to the set of deleted - // channels. - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - - // Attempt to delete the channel, an ErrEdgeNotFound - // will be returned if that outpoint isn't known to be - // a channel. If no error is returned, then a channel - // was successfully pruned. 
- err = c.delChannelEdgeUnsafe( - edges, edgeIndex, chanIndex, zombieIndex, - chanID, false, false, - ) - if err != nil && !errors.Is(err, ErrEdgeNotFound) { - return err - } - - chansClosed = append(chansClosed, &edgeInfo) - } - - metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) - if err != nil { - return err - } - - pruneBucket, err := metaBucket.CreateBucketIfNotExists( - pruneLogBucket, - ) - if err != nil { - return err - } - - // With the graph pruned, add a new entry to the prune log, - // which can be used to check if the graph is fully synced with - // the current UTXO state. - var blockHeightBytes [4]byte - byteOrder.PutUint32(blockHeightBytes[:], blockHeight) - - var newTip [pruneTipBytes]byte - copy(newTip[:], blockHash[:]) - - err = pruneBucket.Put(blockHeightBytes[:], newTip[:]) - if err != nil { - return err - } - - // Now that the graph has been pruned, we'll also attempt to - // prune any nodes that have had a channel closed within the - // latest block. - return c.pruneGraphNodes(nodes, edgeIndex) - }, func() { - chansClosed = nil - }) - if err != nil { - return nil, err - } - - for _, channel := range chansClosed { - c.rejectCache.remove(channel.ChannelID) - c.chanCache.remove(channel.ChannelID) - } - - if c.graphCache != nil { - log.Debugf("Pruned graph, cache now has %s", - c.graphCache.Stats()) - } - - return chansClosed, nil -} - -// PruneGraphNodes is a garbage collection method which attempts to prune out -// any nodes from the channel graph that are currently unconnected. This ensure -// that we only maintain a graph of reachable nodes. In the event that a pruned -// node gains more channels, it will be re-added back to the graph. -func (c *ChannelGraph) PruneGraphNodes() error { - return kvdb.Update(c.db, func(tx kvdb.RwTx) error { - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound - } - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrGraphNotFound - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - return c.pruneGraphNodes(nodes, edgeIndex) - }, func() {}) -} - -// pruneGraphNodes attempts to remove any nodes from the graph who have had a -// channel closed within the current block. If the node still has existing -// channels in the graph, this will act as a no-op. -func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket, - edgeIndex kvdb.RwBucket) error { - - log.Trace("Pruning nodes from graph with no open channels") - - // We'll retrieve the graph's source node to ensure we don't remove it - // even if it no longer has any open channels. - sourceNode, err := c.sourceNode(nodes) - if err != nil { - return err - } - - // We'll use this map to keep count the number of references to a node - // in the graph. A node should only be removed once it has no more - // references in the graph. - nodeRefCounts := make(map[[33]byte]int) - err = nodes.ForEach(func(pubKey, nodeBytes []byte) error { - // If this is the source key, then we skip this - // iteration as the value for this key is a pubKey - // rather than raw node information. - if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { - return nil - } - - var nodePub [33]byte - copy(nodePub[:], pubKey) - nodeRefCounts[nodePub] = 0 - - return nil - }) - if err != nil { - return err - } - - // To ensure we never delete the source node, we'll start off by - // bumping its ref count to 1. 
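The node pruning above boils down to a two-pass reference count, completed just below: every known node starts at zero, each edge bumps both endpoints, and anything still at zero is dropped, with the source node given an artificial reference so it is never deleted. A condensed, self-contained sketch of that idea with illustrative names:

```go
package main

import "fmt"

// pruneUnconnected sketches the reference-count pass: every known node
// starts at zero, each edge bumps both endpoints, and any node left at zero
// (other than the source, which gets an artificial reference) is pruned.
func pruneUnconnected(nodes map[[33]byte]struct{}, source [33]byte,
	edges [][2][33]byte) [][33]byte {

	refs := make(map[[33]byte]int, len(nodes))
	for pub := range nodes {
		refs[pub] = 0
	}

	// Never prune the source node.
	refs[source] = 1

	for _, e := range edges {
		refs[e[0]]++
		refs[e[1]]++
	}

	var pruned [][33]byte
	for pub, count := range refs {
		if count == 0 {
			pruned = append(pruned, pub)
		}
	}

	return pruned
}

func main() {
	var a, b, c [33]byte
	a[0], b[0], c[0] = 1, 2, 3

	nodes := map[[33]byte]struct{}{a: {}, b: {}, c: {}}
	edges := [][2][33]byte{{a, b}}

	// Only c has no channels, so only c is pruned.
	fmt.Println("pruned nodes:", len(pruneUnconnected(nodes, a, edges)))
}
```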
- nodeRefCounts[sourceNode.PubKeyBytes] = 1 - - // Next, we'll run through the edgeIndex which maps a channel ID to the - // edge info. We'll use this scan to populate our reference count map - // above. - err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error { - // The first 66 bytes of the edge info contain the pubkeys of - // the nodes that this edge attaches. We'll extract them, and - // add them to the ref count map. - var node1, node2 [33]byte - copy(node1[:], edgeInfoBytes[:33]) - copy(node2[:], edgeInfoBytes[33:]) - - // With the nodes extracted, we'll increase the ref count of - // each of the nodes. - nodeRefCounts[node1]++ - nodeRefCounts[node2]++ - - return nil - }) - if err != nil { - return err - } - - // Finally, we'll make a second pass over the set of nodes, and delete - // any nodes that have a ref count of zero. - var numNodesPruned int - for nodePubKey, refCount := range nodeRefCounts { - // If the ref count of the node isn't zero, then we can safely - // skip it as it still has edges to or from it within the - // graph. - if refCount != 0 { - continue - } - - if c.graphCache != nil { - c.graphCache.RemoveNode(nodePubKey) - } - - // If we reach this point, then there are no longer any edges - // that connect this node, so we can delete it. - err := c.deleteLightningNode(nodes, nodePubKey[:]) - if err != nil { - if errors.Is(err, ErrGraphNodeNotFound) || - errors.Is(err, ErrGraphNodesNotFound) { - - log.Warnf("Unable to prune node %x from the "+ - "graph: %v", nodePubKey, err) - continue - } - - return err - } - - log.Infof("Pruned unconnected node %x from channel graph", - nodePubKey[:]) - - numNodesPruned++ - } - - if numNodesPruned > 0 { - log.Infof("Pruned %v unconnected nodes from the channel graph", - numNodesPruned) - } - - return nil -} - -// DisconnectBlockAtHeight is used to indicate that the block specified -// by the passed height has been disconnected from the main chain. This -// will "rewind" the graph back to the height below, deleting channels -// that are no longer confirmed from the graph. The prune log will be -// set to the last prune height valid for the remaining chain. -// Channels that were removed from the graph resulting from the -// disconnected block are returned. -func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ( - []*models.ChannelEdgeInfo, error) { - - // Every channel having a ShortChannelID starting at 'height' - // will no longer be confirmed. - startShortChanID := lnwire.ShortChannelID{ - BlockHeight: height, - } - - // Delete everything after this height from the db up until the - // SCID alias range. - endShortChanID := aliasmgr.StartingAlias - - // The block height will be the 3 first bytes of the channel IDs. - var chanIDStart [8]byte - byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64()) - var chanIDEnd [8]byte - byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64()) - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - // Keep track of the channels that are removed from the graph. 
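The range bounds built above work because of how a short channel ID packs into a uint64: the block height occupies the top three bytes, so big-endian keys sort by height first and a cursor can scan everything confirmed at or after the disconnected height. A small sketch of that packing, with an illustrative helper name:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// scidKey packs a short channel ID the way the range scans above assume:
// block height in the top three bytes, then transaction index, then output
// index, so big-endian keys sort by confirmation height.
func scidKey(height, txIndex uint32, txPos uint16) [8]byte {
	scid := uint64(height)<<40 | uint64(txIndex)<<16 | uint64(txPos)

	var k [8]byte
	binary.BigEndian.PutUint64(k[:], scid)

	return k
}

func main() {
	start := scidKey(800_000, 0, 0) // first possible channel at this height
	k := scidKey(800_000, 1234, 1)

	fmt.Printf("start=%x key=%x\n", start, k)
}
```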
- var removedChans []*models.ChannelEdgeInfo - - if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - chanIndex, err := edges.CreateBucketIfNotExists( - channelPointBucket, - ) - if err != nil { - return err - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - - // Scan from chanIDStart to chanIDEnd, deleting every - // found edge. - // NOTE: we must delete the edges after the cursor loop, since - // modifying the bucket while traversing is not safe. - // NOTE: We use a < comparison in bytes.Compare instead of <= - // so that the StartingAlias itself isn't deleted. - var keys [][]byte - cursor := edgeIndex.ReadWriteCursor() - - //nolint:ll - for k, v := cursor.Seek(chanIDStart[:]); k != nil && - bytes.Compare(k, chanIDEnd[:]) < 0; k, v = cursor.Next() { - edgeInfoReader := bytes.NewReader(v) - edgeInfo, err := deserializeChanEdgeInfo(edgeInfoReader) - if err != nil { - return err - } - - keys = append(keys, k) - removedChans = append(removedChans, &edgeInfo) - } - - for _, k := range keys { - err = c.delChannelEdgeUnsafe( - edges, edgeIndex, chanIndex, zombieIndex, - k, false, false, - ) - if err != nil && !errors.Is(err, ErrEdgeNotFound) { - return err - } - } - - // Delete all the entries in the prune log having a height - // greater or equal to the block disconnected. - metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) - if err != nil { - return err - } - - pruneBucket, err := metaBucket.CreateBucketIfNotExists( - pruneLogBucket, - ) - if err != nil { - return err - } - - var pruneKeyStart [4]byte - byteOrder.PutUint32(pruneKeyStart[:], height) - - var pruneKeyEnd [4]byte - byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32) - - // To avoid modifying the bucket while traversing, we delete - // the keys in a second loop. - var pruneKeys [][]byte - pruneCursor := pruneBucket.ReadWriteCursor() - //nolint:ll - for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil && - bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() { - pruneKeys = append(pruneKeys, k) - } - - for _, k := range pruneKeys { - if err := pruneBucket.Delete(k); err != nil { - return err - } - } - - return nil - }, func() { - removedChans = nil - }); err != nil { - return nil, err - } - - for _, channel := range removedChans { - c.rejectCache.remove(channel.ChannelID) - c.chanCache.remove(channel.ChannelID) - } - - return removedChans, nil -} - -// PruneTip returns the block height and hash of the latest block that has been -// used to prune channels in the graph. Knowing the "prune tip" allows callers -// to tell if the graph is currently in sync with the current best known UTXO -// state. -func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, error) { - var ( - tipHash chainhash.Hash - tipHeight uint32 - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - graphMeta := tx.ReadBucket(graphMetaBucket) - if graphMeta == nil { - return ErrGraphNotFound - } - pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket) - if pruneBucket == nil { - return ErrGraphNeverPruned - } - - pruneCursor := pruneBucket.ReadCursor() - - // The prune key with the largest block height will be our - // prune tip. 
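Taking the cursor's last entry works here because prune-log keys are 4-byte big-endian block heights (with the 32-byte block hash as the value), so lexicographic order is height order. A stdlib sketch of the same idea, with a sorted slice standing in for the bucket cursor:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sort"
)

// pruneTip returns the height encoded in the lexicographically last key,
// which for big-endian height keys is the most recent pruning pass.
func pruneTip(keys [][4]byte) uint32 {
	sort.Slice(keys, func(i, j int) bool {
		return bytes.Compare(keys[i][:], keys[j][:]) < 0
	})

	last := keys[len(keys)-1]

	return binary.BigEndian.Uint32(last[:])
}

func main() {
	var k1, k2 [4]byte
	binary.BigEndian.PutUint32(k1[:], 799_999)
	binary.BigEndian.PutUint32(k2[:], 800_000)

	fmt.Println("prune tip height:", pruneTip([][4]byte{k2, k1}))
}
```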
- k, v := pruneCursor.Last() - if k == nil { - return ErrGraphNeverPruned - } - - // Once we have the prune tip, the value will be the block hash, - // and the key the block height. - copy(tipHash[:], v[:]) - tipHeight = byteOrder.Uint32(k[:]) - - return nil - }, func() {}) - if err != nil { - return nil, 0, err - } - - return &tipHash, tipHeight, nil -} - -// DeleteChannelEdges removes edges with the given channel IDs from the -// database and marks them as zombies. This ensures that we're unable to re-add -// it to our database once again. If an edge does not exist within the -// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is -// true, then when we mark these edges as zombies, we'll set up the keys such -// that we require the node that failed to send the fresh update to be the one -// that resurrects the channel from its zombie state. The markZombie bool -// denotes whether or not to mark the channel as a zombie. -func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool, - chanIDs ...uint64) error { - - // TODO(roasbeef): possibly delete from node bucket if node has no more - // channels - // TODO(roasbeef): don't delete both edges? - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrEdgeNotFound - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrEdgeNotFound - } - chanIndex := edges.NestedReadWriteBucket(channelPointBucket) - if chanIndex == nil { - return ErrEdgeNotFound - } - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodeNotFound - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - - var rawChanID [8]byte - for _, chanID := range chanIDs { - byteOrder.PutUint64(rawChanID[:], chanID) - err := c.delChannelEdgeUnsafe( - edges, edgeIndex, chanIndex, zombieIndex, - rawChanID[:], markZombie, strictZombiePruning, - ) - if err != nil { - return err - } - } - - return nil - }, func() {}) - if err != nil { - return err - } - - for _, chanID := range chanIDs { - c.rejectCache.remove(chanID) - c.chanCache.remove(chanID) - } - - return nil -} - -// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the -// passed channel point (outpoint). If the passed channel doesn't exist within -// the database, then ErrEdgeNotFound is returned. -func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) { - var chanID uint64 - if err := kvdb.View(c.db, func(tx kvdb.RTx) error { - var err error - chanID, err = getChanID(tx, chanPoint) - return err - }, func() { - chanID = 0 - }); err != nil { - return 0, err - } - - return chanID, nil -} - -// getChanID returns the assigned channel ID for a given channel point. -func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) { - var b bytes.Buffer - if err := WriteOutpoint(&b, chanPoint); err != nil { - return 0, err - } - - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return 0, ErrGraphNoEdgesFound - } - chanIndex := edges.NestedReadBucket(channelPointBucket) - if chanIndex == nil { - return 0, ErrGraphNoEdgesFound - } - - chanIDBytes := chanIndex.Get(b.Bytes()) - if chanIDBytes == nil { - return 0, ErrEdgeNotFound - } - - chanID := byteOrder.Uint64(chanIDBytes) - - return chanID, nil -} - -// TODO(roasbeef): allow updates to use Batch? 
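A pattern worth noting in the delete and prune paths above: the per-channel caches are only purged after the database transaction has succeeded, so a failed update leaves the caches consistent with what is on disk. A minimal sketch of that ordering, with maps standing in for the reject and channel caches and illustrative names throughout:

```go
package main

import (
	"fmt"
	"sync"
)

// evictAfterCommit runs the database update first and only drops the
// per-channel cache entries once it has succeeded.
func evictAfterCommit(mu *sync.Mutex, reject, chans map[uint64]struct{},
	chanIDs []uint64, update func() error) error {

	mu.Lock()
	defer mu.Unlock()

	if err := update(); err != nil {
		return err
	}

	for _, id := range chanIDs {
		delete(reject, id)
		delete(chans, id)
	}

	return nil
}

func main() {
	var mu sync.Mutex
	reject := map[uint64]struct{}{7: {}}
	chans := map[uint64]struct{}{7: {}}

	err := evictAfterCommit(&mu, reject, chans, []uint64{7}, func() error {
		return nil // stand-in for the kvdb.Update call
	})
	fmt.Println(err, len(reject), len(chans))
}
```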
- -// HighestChanID returns the "highest" known channel ID in the channel graph. -// This represents the "newest" channel from the PoV of the chain. This method -// can be used by peers to quickly determine if they're graphs are in sync. -func (c *ChannelGraph) HighestChanID() (uint64, error) { - var cid uint64 - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - // In order to find the highest chan ID, we'll fetch a cursor - // and use that to seek to the "end" of our known rage. - cidCursor := edgeIndex.ReadCursor() - - lastChanID, _ := cidCursor.Last() - - // If there's no key, then this means that we don't actually - // know of any channels, so we'll return a predicable error. - if lastChanID == nil { - return ErrGraphNoEdgesFound - } - - // Otherwise, we'll de serialize the channel ID and return it - // to the caller. - cid = byteOrder.Uint64(lastChanID) - return nil - }, func() { - cid = 0 - }) - if err != nil && err != ErrGraphNoEdgesFound { - return 0, err - } - - return cid, nil -} - -// ChannelEdge represents the complete set of information for a channel edge in -// the known channel graph. This struct couples the core information of the -// edge as well as each of the known advertised edge policies. -type ChannelEdge struct { - // Info contains all the static information describing the channel. - Info *models.ChannelEdgeInfo - - // Policy1 points to the "first" edge policy of the channel containing - // the dynamic information required to properly route through the edge. - Policy1 *models.ChannelEdgePolicy - - // Policy2 points to the "second" edge policy of the channel containing - // the dynamic information required to properly route through the edge. - Policy2 *models.ChannelEdgePolicy - - // Node1 is "node 1" in the channel. This is the node that would have - // produced Policy1 if it exists. - Node1 *models.LightningNode - - // Node2 is "node 2" in the channel. This is the node that would have - // produced Policy2 if it exists. - Node2 *models.LightningNode -} - -// ChanUpdatesInHorizon returns all the known channel edges which have at least -// one edge that has an update timestamp within the specified horizon. -func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, - endTime time.Time) ([]ChannelEdge, error) { - - // To ensure we don't return duplicate ChannelEdges, we'll use an - // additional map to keep track of the edges already seen to prevent - // re-adding it. - var edgesSeen map[uint64]struct{} - var edgesToCache map[uint64]ChannelEdge - var edgesInHorizon []ChannelEdge - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - var hits int - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket) - if edgeUpdateIndex == nil { - return ErrGraphNoEdgesFound - } - - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound - } - - // We'll now obtain a cursor to perform a range query within - // the index to find all channels within the horizon. 
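The horizon query below hinges on the edge update index key layout: an 8-byte big-endian unix timestamp followed by the 8-byte channel ID, so a cursor range between two timestamps visits every channel updated inside the horizon. A small sketch of that key construction, assuming big-endian to match the package's byteOrder:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"
)

// updateIndexKey builds the 16-byte key: big-endian unix timestamp followed
// by the channel ID, so keys sort by update time.
func updateIndexKey(updated time.Time, chanID uint64) [16]byte {
	var k [16]byte
	binary.BigEndian.PutUint64(k[:8], uint64(updated.Unix()))
	binary.BigEndian.PutUint64(k[8:], chanID)

	return k
}

func main() {
	now := time.Now()
	key := updateIndexKey(now, 42)

	// Range bounds only set the timestamp half, as the scan above does.
	var start, end [16]byte
	binary.BigEndian.PutUint64(start[:8], uint64(now.Add(-time.Hour).Unix()))
	binary.BigEndian.PutUint64(end[:8], uint64(now.Add(time.Hour).Unix()))

	inHorizon := bytes.Compare(key[:], start[:]) >= 0 &&
		bytes.Compare(key[:], end[:]) <= 0
	fmt.Println("in horizon:", inHorizon)
}
```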
- updateCursor := edgeUpdateIndex.ReadCursor() - - var startTimeBytes, endTimeBytes [8 + 8]byte - byteOrder.PutUint64( - startTimeBytes[:8], uint64(startTime.Unix()), - ) - byteOrder.PutUint64( - endTimeBytes[:8], uint64(endTime.Unix()), - ) - - // With our start and end times constructed, we'll step through - // the index collecting the info and policy of each update of - // each channel that has a last update within the time range. - // - //nolint:ll - for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil && - bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() { - - // We have a new eligible entry, so we'll slice of the - // chan ID so we can query it in the DB. - chanID := indexKey[8:] - - // If we've already retrieved the info and policies for - // this edge, then we can skip it as we don't need to do - // so again. - chanIDInt := byteOrder.Uint64(chanID) - if _, ok := edgesSeen[chanIDInt]; ok { - continue - } - - if channel, ok := c.chanCache.get(chanIDInt); ok { - hits++ - edgesSeen[chanIDInt] = struct{}{} - edgesInHorizon = append(edgesInHorizon, channel) - continue - } - - // First, we'll fetch the static edge information. - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - chanID := byteOrder.Uint64(chanID) - return fmt.Errorf("unable to fetch info for "+ - "edge with chan_id=%v: %v", chanID, err) - } - - // With the static information obtained, we'll now - // fetch the dynamic policy info. - edge1, edge2, err := fetchChanEdgePolicies( - edgeIndex, edges, chanID, - ) - if err != nil { - chanID := byteOrder.Uint64(chanID) - return fmt.Errorf("unable to fetch policies "+ - "for edge with chan_id=%v: %v", chanID, - err) - } - - node1, err := fetchLightningNode( - nodes, edgeInfo.NodeKey1Bytes[:], - ) - if err != nil { - return err - } - - node2, err := fetchLightningNode( - nodes, edgeInfo.NodeKey2Bytes[:], - ) - if err != nil { - return err - } - - // Finally, we'll collate this edge with the rest of - // edges to be returned. - edgesSeen[chanIDInt] = struct{}{} - channel := ChannelEdge{ - Info: &edgeInfo, - Policy1: edge1, - Policy2: edge2, - Node1: &node1, - Node2: &node2, - } - edgesInHorizon = append(edgesInHorizon, channel) - edgesToCache[chanIDInt] = channel - } - - return nil - }, func() { - edgesSeen = make(map[uint64]struct{}) - edgesToCache = make(map[uint64]ChannelEdge) - edgesInHorizon = nil - }) - switch { - case err == ErrGraphNoEdgesFound: - fallthrough - case err == ErrGraphNodesNotFound: - break - - case err != nil: - return nil, err - } - - // Insert any edges loaded from disk into the cache. - for chanid, channel := range edgesToCache { - c.chanCache.insert(chanid, channel) - } - - log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)", - float64(hits)/float64(len(edgesInHorizon)), hits, - len(edgesInHorizon)) - - return edgesInHorizon, nil -} - -// NodeUpdatesInHorizon returns all the known lightning node which have an -// update timestamp within the passed range. This method can be used by two -// nodes to quickly determine if they have the same set of up to date node -// announcements. 
-func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, - endTime time.Time) ([]models.LightningNode, error) { - - var nodesInHorizon []models.LightningNode - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound - } - - nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket) - if nodeUpdateIndex == nil { - return ErrGraphNodesNotFound - } - - // We'll now obtain a cursor to perform a range query within - // the index to find all node announcements within the horizon. - updateCursor := nodeUpdateIndex.ReadCursor() - - var startTimeBytes, endTimeBytes [8 + 33]byte - byteOrder.PutUint64( - startTimeBytes[:8], uint64(startTime.Unix()), - ) - byteOrder.PutUint64( - endTimeBytes[:8], uint64(endTime.Unix()), - ) - - // With our start and end times constructed, we'll step through - // the index collecting info for each node within the time - // range. - // - //nolint:ll - for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil && - bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() { - - nodePub := indexKey[8:] - node, err := fetchLightningNode(nodes, nodePub) - if err != nil { - return err - } - - nodesInHorizon = append(nodesInHorizon, node) - } - - return nil - }, func() { - nodesInHorizon = nil - }) - switch { - case err == ErrGraphNoEdgesFound: - fallthrough - case err == ErrGraphNodesNotFound: - break - - case err != nil: - return nil, err - } - - return nodesInHorizon, nil -} - -// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan -// ID's that we don't know and are not known zombies of the passed set. In other -// words, we perform a set difference of our set of chan ID's and the ones -// passed in. This method can be used by callers to determine the set of -// channels another peer knows of that we don't. -func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo, - isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) { - - var newChanIDs []uint64 - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - // Fetch the zombie index, it may not exist if no edges have - // ever been marked as zombies. If the index has been - // initialized, we will use it later to skip known zombie edges. - zombieIndex := edges.NestedReadBucket(zombieBucket) - - // We'll run through the set of chanIDs and collate only the - // set of channel that are unable to be found within our db. - var cidBytes [8]byte - for _, info := range chansInfo { - scid := info.ShortChannelID.ToUint64() - byteOrder.PutUint64(cidBytes[:], scid) - - // If the edge is already known, skip it. - if v := edgeIndex.Get(cidBytes[:]); v != nil { - continue - } - - // If the edge is a known zombie, skip it. - if zombieIndex != nil { - isZombie, _, _ := isZombieEdge( - zombieIndex, scid, - ) - - // TODO(ziggie): Make sure that for the strict - // pruning case we compare the pubkeys and - // whether the right timestamp is not older than - // the `ChannelPruneExpiry`. - // - // NOTE: The timestamp data has no verification - // attached to it in the `ReplyChannelRange` msg - // so we are trusting this data at this point. 
- // However it is not critical because we are - // just removing the channel from the db when - // the timestamps are more recent. During the - // querying of the gossip msg verification - // happens as usual. - // However we should start punishing peers when - // they don't provide us honest data ? - isStillZombie := isZombieChan( - info.Node1UpdateTimestamp, - info.Node2UpdateTimestamp, - ) - - switch { - // If the edge is a known zombie and if we - // would still consider it a zombie given the - // latest update timestamps, then we skip this - // channel. - case isZombie && isStillZombie: - continue - - // Otherwise, if we have marked it as a zombie - // but the latest update timestamps could bring - // it back from the dead, then we mark it alive, - // and we let it be added to the set of IDs to - // query our peer for. - case isZombie && !isStillZombie: - err := c.markEdgeLiveUnsafe(tx, scid) - if err != nil { - return err - } - } - } - - newChanIDs = append(newChanIDs, scid) - } - - return nil - }, func() { - newChanIDs = nil - }) - switch { - // If we don't know of any edges yet, then we'll return the entire set - // of chan IDs specified. - case err == ErrGraphNoEdgesFound: - ogChanIDs := make([]uint64, len(chansInfo)) - for i, info := range chansInfo { - ogChanIDs[i] = info.ShortChannelID.ToUint64() - } - - return ogChanIDs, nil - - case err != nil: - return nil, err - } - - return newChanIDs, nil -} - -// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the -// latest received channel updates for the channel. -type ChannelUpdateInfo struct { - // ShortChannelID is the SCID identifier of the channel. - ShortChannelID lnwire.ShortChannelID - - // Node1UpdateTimestamp is the timestamp of the latest received update - // from the node 1 channel peer. This will be set to zero time if no - // update has yet been received from this node. - Node1UpdateTimestamp time.Time - - // Node2UpdateTimestamp is the timestamp of the latest received update - // from the node 2 channel peer. This will be set to zero time if no - // update has yet been received from this node. - Node2UpdateTimestamp time.Time -} - -// NewChannelUpdateInfo is a constructor which makes sure we initialize the -// timestamps with zero seconds unix timestamp which equals -// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`. -func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp, - node2Timestamp time.Time) ChannelUpdateInfo { - - chanInfo := ChannelUpdateInfo{ - ShortChannelID: scid, - Node1UpdateTimestamp: node1Timestamp, - Node2UpdateTimestamp: node2Timestamp, - } - - if node1Timestamp.IsZero() { - chanInfo.Node1UpdateTimestamp = time.Unix(0, 0) - } - - if node2Timestamp.IsZero() { - chanInfo.Node2UpdateTimestamp = time.Unix(0, 0) - } - - return chanInfo -} - -// BlockChannelRange represents a range of channels for a given block height. -type BlockChannelRange struct { - // Height is the height of the block all of the channels below were - // included in. - Height uint32 - - // Channels is the list of channels identified by their short ID - // representation known to us that were included in the block height - // above. The list may include channel update timestamp information if - // requested. - Channels []ChannelUpdateInfo -} - -// FilterChannelRange returns the channel ID's of all known channels which were -// mined in a block height within the passed range. The channel IDs are grouped -// by their common block height. 
This method can be used to quickly share with a -// peer the set of channels we know of within a particular range to catch them -// up after a period of time offline. If withTimestamps is true then the -// timestamp info of the latest received channel update messages of the channel -// will be included in the response. -func (c *ChannelGraph) FilterChannelRange(startHeight, - endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) { - - startChanID := &lnwire.ShortChannelID{ - BlockHeight: startHeight, - } - - endChanID := lnwire.ShortChannelID{ - BlockHeight: endHeight, - TxIndex: math.MaxUint32 & 0x00ffffff, - TxPosition: math.MaxUint16, - } - - // As we need to perform a range scan, we'll convert the starting and - // ending height to their corresponding values when encoded using short - // channel ID's. - var chanIDStart, chanIDEnd [8]byte - byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64()) - byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64()) - - var channelsPerBlock map[uint32][]ChannelUpdateInfo - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - cursor := edgeIndex.ReadCursor() - - // We'll now iterate through the database, and find each - // channel ID that resides within the specified range. - // - //nolint:ll - for k, v := cursor.Seek(chanIDStart[:]); k != nil && - bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() { - // Don't send alias SCIDs during gossip sync. - edgeReader := bytes.NewReader(v) - edgeInfo, err := deserializeChanEdgeInfo(edgeReader) - if err != nil { - return err - } - - if edgeInfo.AuthProof == nil { - continue - } - - // This channel ID rests within the target range, so - // we'll add it to our returned set. - rawCid := byteOrder.Uint64(k) - cid := lnwire.NewShortChanIDFromInt(rawCid) - - chanInfo := NewChannelUpdateInfo( - cid, time.Time{}, time.Time{}, - ) - - if !withTimestamps { - channelsPerBlock[cid.BlockHeight] = append( - channelsPerBlock[cid.BlockHeight], - chanInfo, - ) - - continue - } - - node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo) - - rawPolicy := edges.Get(node1Key) - if len(rawPolicy) != 0 { - r := bytes.NewReader(rawPolicy) - - edge, err := deserializeChanEdgePolicyRaw(r) - if err != nil && !errors.Is( - err, ErrEdgePolicyOptionalFieldNotFound, - ) { - - return err - } - - chanInfo.Node1UpdateTimestamp = edge.LastUpdate - } - - rawPolicy = edges.Get(node2Key) - if len(rawPolicy) != 0 { - r := bytes.NewReader(rawPolicy) - - edge, err := deserializeChanEdgePolicyRaw(r) - if err != nil && !errors.Is( - err, ErrEdgePolicyOptionalFieldNotFound, - ) { - - return err - } - - chanInfo.Node2UpdateTimestamp = edge.LastUpdate - } - - channelsPerBlock[cid.BlockHeight] = append( - channelsPerBlock[cid.BlockHeight], chanInfo, - ) - } - - return nil - }, func() { - channelsPerBlock = make(map[uint32][]ChannelUpdateInfo) - }) - - switch { - // If we don't know of any channels yet, then there's nothing to - // filter, so we'll return an empty slice. - case err == ErrGraphNoEdgesFound || len(channelsPerBlock) == 0: - return nil, nil - - case err != nil: - return nil, err - } - - // Return the channel ranges in ascending block height order. 
- blocks := make([]uint32, 0, len(channelsPerBlock)) - for block := range channelsPerBlock { - blocks = append(blocks, block) - } - sort.Slice(blocks, func(i, j int) bool { - return blocks[i] < blocks[j] - }) - - channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock)) - for _, block := range blocks { - channelRanges = append(channelRanges, BlockChannelRange{ - Height: block, - Channels: channelsPerBlock[block], - }) - } - - return channelRanges, nil -} - -// FetchChanInfos returns the set of channel edges that correspond to the passed -// channel ID's. If an edge is the query is unknown to the database, it will -// skipped and the result will contain only those edges that exist at the time -// of the query. This can be used to respond to peer queries that are seeking to -// fill in gaps in their view of the channel graph. -func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) { - return c.fetchChanInfos(nil, chanIDs) -} - -// fetchChanInfos returns the set of channel edges that correspond to the passed -// channel ID's. If an edge is the query is unknown to the database, it will -// skipped and the result will contain only those edges that exist at the time -// of the query. This can be used to respond to peer queries that are seeking to -// fill in gaps in their view of the channel graph. -// -// NOTE: An optional transaction may be provided. If none is provided, then a -// new one will be created. -func (c *ChannelGraph) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) ( - []ChannelEdge, error) { - // TODO(roasbeef): sort cids? - - var ( - chanEdges []ChannelEdge - cidBytes [8]byte - ) - - fetchChanInfos := func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - for _, cid := range chanIDs { - byteOrder.PutUint64(cidBytes[:], cid) - - // First, we'll fetch the static edge information. If - // the edge is unknown, we will skip the edge and - // continue gathering all known edges. - edgeInfo, err := fetchChanEdgeInfo( - edgeIndex, cidBytes[:], - ) - switch { - case errors.Is(err, ErrEdgeNotFound): - continue - case err != nil: - return err - } - - // With the static information obtained, we'll now - // fetch the dynamic policy info. - edge1, edge2, err := fetchChanEdgePolicies( - edgeIndex, edges, cidBytes[:], - ) - if err != nil { - return err - } - - node1, err := fetchLightningNode( - nodes, edgeInfo.NodeKey1Bytes[:], - ) - if err != nil { - return err - } - - node2, err := fetchLightningNode( - nodes, edgeInfo.NodeKey2Bytes[:], - ) - if err != nil { - return err - } - - chanEdges = append(chanEdges, ChannelEdge{ - Info: &edgeInfo, - Policy1: edge1, - Policy2: edge2, - Node1: &node1, - Node2: &node2, - }) - } - return nil - } - - if tx == nil { - err := kvdb.View(c.db, fetchChanInfos, func() { - chanEdges = nil - }) - if err != nil { - return nil, err - } - - return chanEdges, nil - } - - err := fetchChanInfos(tx) - if err != nil { - return nil, err - } - - return chanEdges, nil -} - -func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64, - edge1, edge2 *models.ChannelEdgePolicy) error { - - // First, we'll fetch the edge update index bucket which currently - // stores an entry for the channel we're about to delete. 
- updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket) - if updateIndex == nil { - // No edges in bucket, return early. - return nil - } - - // Now that we have the bucket, we'll attempt to construct a template - // for the index key: updateTime || chanid. - var indexKey [8 + 8]byte - byteOrder.PutUint64(indexKey[8:], chanID) - - // With the template constructed, we'll attempt to delete an entry that - // would have been created by both edges: we'll alternate the update - // times, as one may had overridden the other. - if edge1 != nil { - byteOrder.PutUint64( - indexKey[:8], uint64(edge1.LastUpdate.Unix()), - ) - if err := updateIndex.Delete(indexKey[:]); err != nil { - return err - } - } - - // We'll also attempt to delete the entry that may have been created by - // the second edge. - if edge2 != nil { - byteOrder.PutUint64( - indexKey[:8], uint64(edge2.LastUpdate.Unix()), - ) - if err := updateIndex.Delete(indexKey[:]); err != nil { - return err - } - } - - return nil -} - -// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph -// cache. It then goes on to delete any policy info and edge info for this -// channel from the DB and finally, if isZombie is true, it will add an entry -// for this channel in the zombie index. -// -// NOTE: this method MUST only be called if the cacheMu has already been -// acquired. -func (c *ChannelGraph) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex, - zombieIndex kvdb.RwBucket, chanID []byte, isZombie, - strictZombie bool) error { - - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - - if c.graphCache != nil { - c.graphCache.RemoveChannel( - edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes, - edgeInfo.ChannelID, - ) - } - - // We'll also remove the entry in the edge update index bucket before - // we delete the edges themselves so we can access their last update - // times. - cid := byteOrder.Uint64(chanID) - edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID) - if err != nil { - return err - } - err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2) - if err != nil { - return err - } - - // The edge key is of the format pubKey || chanID. First we construct - // the latter half, populating the channel ID. - var edgeKey [33 + 8]byte - copy(edgeKey[33:], chanID) - - // With the latter half constructed, copy over the first public key to - // delete the edge in this direction, then the second to delete the - // edge in the opposite direction. - copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:]) - if edges.Get(edgeKey[:]) != nil { - if err := edges.Delete(edgeKey[:]); err != nil { - return err - } - } - copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:]) - if edges.Get(edgeKey[:]) != nil { - if err := edges.Delete(edgeKey[:]); err != nil { - return err - } - } - - // As part of deleting the edge we also remove all disabled entries - // from the edgePolicyDisabledIndex bucket. We do that for both - // directions. - err = updateEdgePolicyDisabledIndex(edges, cid, false, false) - if err != nil { - return err - } - err = updateEdgePolicyDisabledIndex(edges, cid, true, false) - if err != nil { - return err - } - - // With the edge data deleted, we can purge the information from the two - // edge indexes. 
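The two deletes above rely on the directed edge key layout, the advertising node's 33-byte public key followed by the 8-byte channel ID, which is why the same channel suffix is paired with each node key in turn. A minimal sketch of building both keys, with an illustrative helper name and big-endian assumed:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// edgePolicyKey builds the 41-byte key walked once per direction above: the
// advertising node's public key followed by the channel ID.
func edgePolicyKey(nodePub [33]byte, chanID uint64) [41]byte {
	var k [41]byte
	copy(k[:33], nodePub[:])
	binary.BigEndian.PutUint64(k[33:], chanID)

	return k
}

func main() {
	var node1, node2 [33]byte
	node1[0], node2[0] = 0x02, 0x03

	fmt.Printf("dir1=%x\n", edgePolicyKey(node1, 99))
	fmt.Printf("dir2=%x\n", edgePolicyKey(node2, 99))
}
```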
- if err := edgeIndex.Delete(chanID); err != nil { - return err - } - var b bytes.Buffer - if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil { - return err - } - if err := chanIndex.Delete(b.Bytes()); err != nil { - return err - } - - // Finally, we'll mark the edge as a zombie within our index if it's - // being removed due to the channel becoming a zombie. We do this to - // ensure we don't store unnecessary data for spent channels. - if !isZombie { - return nil - } - - nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes - if strictZombie { - nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2) - } - - return markEdgeZombie( - zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2, - ) -} - -// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a -// particular pair of channel policies. The return values are one of: -// 1. (pubkey1, pubkey2) -// 2. (pubkey1, blank) -// 3. (blank, pubkey2) -// -// A blank pubkey means that corresponding node will be unable to resurrect a -// channel on its own. For example, node1 may continue to publish recent -// updates, but node2 has fallen way behind. After marking an edge as a zombie, -// we don't want another fresh update from node1 to resurrect, as the edge can -// only become live once node2 finally sends something recent. -// -// In the case where we have neither update, we allow either party to resurrect -// the channel. If the channel were to be marked zombie again, it would be -// marked with the correct lagging channel since we received an update from only -// one side. -func makeZombiePubkeys(info *models.ChannelEdgeInfo, - e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) { - - switch { - // If we don't have either edge policy, we'll return both pubkeys so - // that the channel can be resurrected by either party. - case e1 == nil && e2 == nil: - return info.NodeKey1Bytes, info.NodeKey2Bytes - - // If we're missing edge1, or if both edges are present but edge1 is - // older, we'll return edge1's pubkey and a blank pubkey for edge2. This - // means that only an update from edge1 will be able to resurrect the - // channel. - case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)): - return info.NodeKey1Bytes, [33]byte{} - - // Otherwise, we're missing edge2 or edge2 is the older side, so we - // return a blank pubkey for edge1. In this case, only an update from - // edge2 can resurect the channel. - default: - return [33]byte{}, info.NodeKey2Bytes - } -} - -// UpdateEdgePolicy updates the edge routing policy for a single directed edge -// within the database for the referenced channel. The `flags` attribute within -// the ChannelEdgePolicy determines which of the directed edges are being -// updated. If the flag is 1, then the first node's information is being -// updated, otherwise it's the second node's information. The node ordering is -// determined by the lexicographical ordering of the identity public keys of the -// nodes on either side of the channel. 
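The strict-pruning key selection above can be summarised as: the side whose policy is missing or older is the lagging side, and only a fresh update from that side may resurrect the channel; with no policies at all, either side may. A simplified, self-contained sketch of that decision, with illustrative types and names and a nil timestamp standing in for a missing policy:

```go
package main

import (
	"fmt"
	"time"
)

// resurrectionKeys returns, for each side, either its pubkey (it may
// resurrect the channel) or a blank key (it may not).
func resurrectionKeys(key1, key2 [33]byte,
	upd1, upd2 *time.Time) ([33]byte, [33]byte) {

	switch {
	// Neither side has a policy, so either may resurrect.
	case upd1 == nil && upd2 == nil:
		return key1, key2

	// Side 1 is missing or stale, so only side 1 may resurrect.
	case upd1 == nil || (upd2 != nil && upd1.Before(*upd2)):
		return key1, [33]byte{}

	// Otherwise side 2 is the lagging side.
	default:
		return [33]byte{}, key2
	}
}

func main() {
	var k1, k2 [33]byte
	k1[0], k2[0] = 0x02, 0x03

	old := time.Now().Add(-24 * time.Hour)
	fresh := time.Now()

	a, b := resurrectionKeys(k1, k2, &old, &fresh)
	fmt.Printf("node1 can resurrect: %v, node2 can resurrect: %v\n",
		a != [33]byte{}, b != [33]byte{})
}
```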
-func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy, - op ...batch.SchedulerOption) error { - - var ( - isUpdate1 bool - edgeNotFound bool - ) - - r := &batch.Request{ - Reset: func() { - isUpdate1 = false - edgeNotFound = false - }, - Update: func(tx kvdb.RwTx) error { - var err error - isUpdate1, err = updateEdgePolicy( - tx, edge, c.graphCache, - ) - - if err != nil { - log.Errorf("UpdateEdgePolicy faild: %v", err) - } - - // Silence ErrEdgeNotFound so that the batch can - // succeed, but propagate the error via local state. - if errors.Is(err, ErrEdgeNotFound) { - edgeNotFound = true - return nil - } - - return err - }, - OnCommit: func(err error) error { - switch { - case err != nil: - return err - case edgeNotFound: - return ErrEdgeNotFound - default: - c.updateEdgeCache(edge, isUpdate1) - return nil - } - }, - } - - for _, f := range op { - f(r) - } - - return c.chanScheduler.Execute(r) -} - -func (c *ChannelGraph) updateEdgeCache(e *models.ChannelEdgePolicy, - isUpdate1 bool) { - - // If an entry for this channel is found in reject cache, we'll modify - // the entry with the updated timestamp for the direction that was just - // written. If the edge doesn't exist, we'll load the cache entry lazily - // during the next query for this edge. - if entry, ok := c.rejectCache.get(e.ChannelID); ok { - if isUpdate1 { - entry.upd1Time = e.LastUpdate.Unix() - } else { - entry.upd2Time = e.LastUpdate.Unix() - } - c.rejectCache.insert(e.ChannelID, entry) - } - - // If an entry for this channel is found in channel cache, we'll modify - // the entry with the updated policy for the direction that was just - // written. If the edge doesn't exist, we'll defer loading the info and - // policies and lazily read from disk during the next query. - if channel, ok := c.chanCache.get(e.ChannelID); ok { - if isUpdate1 { - channel.Policy1 = e - } else { - channel.Policy2 = e - } - c.chanCache.insert(e.ChannelID, channel) - } -} - -// updateEdgePolicy attempts to update an edge's policy within the relevant -// buckets using an existing database transaction. The returned boolean will be -// true if the updated policy belongs to node1, and false if the policy belonged -// to node2. -func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy, - graphCache *GraphCache) (bool, error) { - - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return false, ErrEdgeNotFound - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return false, ErrEdgeNotFound - } - - // Create the channelID key be converting the channel ID - // integer into a byte slice. - var chanID [8]byte - byteOrder.PutUint64(chanID[:], edge.ChannelID) - - // With the channel ID, we then fetch the value storing the two - // nodes which connect this channel edge. - nodeInfo := edgeIndex.Get(chanID[:]) - if nodeInfo == nil { - return false, ErrEdgeNotFound - } - - // Depending on the flags value passed above, either the first - // or second edge policy is being updated. - var fromNode, toNode []byte - var isUpdate1 bool - if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { - fromNode = nodeInfo[:33] - toNode = nodeInfo[33:66] - isUpdate1 = true - } else { - fromNode = nodeInfo[33:66] - toNode = nodeInfo[:33] - isUpdate1 = false - } - - // Finally, with the direction of the edge being updated - // identified, we update the on-disk edge representation. 
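At this point the direction bit of channel_flags has picked which half of the 66-byte edge index value (node1 pubkey followed by node2 pubkey) is the advertising node. A compact sketch of that selection; the constant is a stand-in for lnwire.ChanUpdateDirection:

```go
package main

import "fmt"

const chanUpdateDirection = 1 // stand-in for lnwire.ChanUpdateDirection

// policyDirection splits the 66-byte edge index value into the advertising
// and receiving node depending on the direction bit of channel_flags.
func policyDirection(nodeInfo []byte, channelFlags byte) (from, to []byte,
	isNode1 bool) {

	if channelFlags&chanUpdateDirection == 0 {
		return nodeInfo[:33], nodeInfo[33:66], true
	}

	return nodeInfo[33:66], nodeInfo[:33], false
}

func main() {
	nodeInfo := make([]byte, 66)
	nodeInfo[0], nodeInfo[33] = 0x02, 0x03

	_, _, isNode1 := policyDirection(nodeInfo, 0x01)
	fmt.Println("update belongs to node1:", isNode1)
}
```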
- err := putChanEdgePolicy(edges, edge, fromNode, toNode) - if err != nil { - return false, err - } - - var ( - fromNodePubKey route.Vertex - toNodePubKey route.Vertex - ) - copy(fromNodePubKey[:], fromNode) - copy(toNodePubKey[:], toNode) - - if graphCache != nil { - graphCache.UpdatePolicy( - edge, fromNodePubKey, toNodePubKey, isUpdate1, - ) - } - - return isUpdate1, nil -} - -// isPublic determines whether the node is seen as public within the graph from -// the source node's point of view. An existing database transaction can also be -// specified. -func (c *ChannelGraph) isPublic(tx kvdb.RTx, nodePub route.Vertex, - sourcePubKey []byte) (bool, error) { - - // In order to determine whether this node is publicly advertised within - // the graph, we'll need to look at all of its edges and check whether - // they extend to any other node than the source node. errDone will be - // used to terminate the check early. - nodeIsPublic := false - errDone := errors.New("done") - err := c.ForEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx, - info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy, - _ *models.ChannelEdgePolicy) error { - - // If this edge doesn't extend to the source node, we'll - // terminate our search as we can now conclude that the node is - // publicly advertised within the graph due to the local node - // knowing of the current edge. - if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) && - !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) { - - nodeIsPublic = true - return errDone - } - - // Since the edge _does_ extend to the source node, we'll also - // need to ensure that this is a public edge. - if info.AuthProof != nil { - nodeIsPublic = true - return errDone - } - - // Otherwise, we'll continue our search. - return nil - }) - if err != nil && err != errDone { - return false, err - } - - return nodeIsPublic, nil -} - -// FetchLightningNodeTx attempts to look up a target node by its identity -// public key. If the node isn't found in the database, then -// ErrGraphNodeNotFound is returned. An optional transaction may be provided. -// If none is provided, then a new one will be created. -func (c *ChannelGraph) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) ( - *models.LightningNode, error) { - - return c.fetchLightningNode(tx, nodePub) -} - -// FetchLightningNode attempts to look up a target node by its identity public -// key. If the node isn't found in the database, then ErrGraphNodeNotFound is -// returned. -func (c *ChannelGraph) FetchLightningNode(nodePub route.Vertex) ( - *models.LightningNode, error) { - - return c.fetchLightningNode(nil, nodePub) -} - -// fetchLightningNode attempts to look up a target node by its identity public -// key. If the node isn't found in the database, then ErrGraphNodeNotFound is -// returned. An optional transaction may be provided. If none is provided, then -// a new one will be created. -func (c *ChannelGraph) fetchLightningNode(tx kvdb.RTx, - nodePub route.Vertex) (*models.LightningNode, error) { - - var node *models.LightningNode - fetch := func(tx kvdb.RTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - // If a key for this serialized public key isn't found, then - // the target node doesn't exist within the database. 
- nodeBytes := nodes.Get(nodePub[:]) - if nodeBytes == nil { - return ErrGraphNodeNotFound - } - - // If the node is found, then we can de deserialize the node - // information to return to the user. - nodeReader := bytes.NewReader(nodeBytes) - n, err := deserializeLightningNode(nodeReader) - if err != nil { - return err - } - - node = &n - - return nil - } - - if tx == nil { - err := kvdb.View( - c.db, fetch, func() { - node = nil - }, - ) - if err != nil { - return nil, err - } - - return node, nil - } - - err := fetch(tx) - if err != nil { - return nil, err - } - - return node, nil -} - -// HasLightningNode determines if the graph has a vertex identified by the -// target node identity public key. If the node exists in the database, a -// timestamp of when the data for the node was lasted updated is returned along -// with a true boolean. Otherwise, an empty time.Time is returned with a false -// boolean. -func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, - error) { - - var ( - updateTime time.Time - exists bool - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - // If a key for this serialized public key isn't found, we can - // exit early. - nodeBytes := nodes.Get(nodePub[:]) - if nodeBytes == nil { - exists = false - return nil - } - - // Otherwise we continue on to obtain the time stamp - // representing the last time the data for this node was - // updated. - nodeReader := bytes.NewReader(nodeBytes) - node, err := deserializeLightningNode(nodeReader) - if err != nil { - return err - } - - exists = true - updateTime = node.LastUpdate - return nil - }, func() { - updateTime = time.Time{} - exists = false - }) - if err != nil { - return time.Time{}, exists, err - } - - return updateTime, exists, nil -} - -// nodeTraversal is used to traverse all channels of a node given by its -// public key and passes channel information into the specified callback. -func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend, - cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, - *models.ChannelEdgePolicy) error) error { - - traversal := func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNotFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - // In order to reach all the edges for this node, we take - // advantage of the construction of the key-space within the - // edge bucket. The keys are stored in the form: pubKey || - // chanID. Therefore, starting from a chanID of zero, we can - // scan forward in the bucket, grabbing all the edges for the - // node. Once the prefix no longer matches, then we know we're - // done. - var nodeStart [33 + 8]byte - copy(nodeStart[:], nodePub) - copy(nodeStart[33:], chanStart[:]) - - // Starting from the key pubKey || 0, we seek forward in the - // bucket until the retrieved key no longer has the public key - // as its prefix. This indicates that we've stepped over into - // another node's edges, so we can terminate our scan. - edgeCursor := edges.ReadCursor() - for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll - // If the prefix still matches, the channel id is - // returned in nodeEdge. 
Channel id is used to lookup - // the node at the other end of the channel and both - // edge policies. - chanID := nodeEdge[33:] - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - - outgoingPolicy, err := fetchChanEdgePolicy( - edges, chanID, nodePub, - ) - if err != nil { - return err - } - - otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub) - if err != nil { - return err - } - - incomingPolicy, err := fetchChanEdgePolicy( - edges, chanID, otherNode[:], - ) - if err != nil { - return err - } - - // Finally, we execute the callback. - err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy) - if err != nil { - return err - } - } - - return nil - } - - // If no transaction was provided, then we'll create a new transaction - // to execute the transaction within. - if tx == nil { - return kvdb.View(db, traversal, func() {}) - } - - // Otherwise, we re-use the existing transaction to execute the graph - // traversal. - return traversal(tx) -} - -// ForEachNodeChannel iterates through all channels of the given node, -// executing the passed callback with an edge info structure and the policies -// of each end of the channel. The first edge policy is the outgoing edge *to* -// the connecting node, while the second is the incoming edge *from* the -// connecting node. If the callback returns an error, then the iteration is -// halted with the error propagated back up to the caller. -// -// Unknown policies are passed into the callback as nil values. -func (c *ChannelGraph) ForEachNodeChannel(nodePub route.Vertex, - cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, - *models.ChannelEdgePolicy) error) error { - - return nodeTraversal(nil, nodePub[:], c.db, cb) -} - -// ForEachNodeChannelTx iterates through all channels of the given node, -// executing the passed callback with an edge info structure and the policies -// of each end of the channel. The first edge policy is the outgoing edge *to* -// the connecting node, while the second is the incoming edge *from* the -// connecting node. If the callback returns an error, then the iteration is -// halted with the error propagated back up to the caller. -// -// Unknown policies are passed into the callback as nil values. -// -// If the caller wishes to re-use an existing boltdb transaction, then it -// should be passed as the first argument. Otherwise, the first argument should -// be nil and a fresh transaction will be created to execute the graph -// traversal. -func (c *ChannelGraph) ForEachNodeChannelTx(tx kvdb.RTx, - nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo, - *models.ChannelEdgePolicy, - *models.ChannelEdgePolicy) error) error { - - return nodeTraversal(tx, nodePub[:], c.db, cb) -} - -// FetchOtherNode attempts to fetch the full LightningNode that's opposite of -// the target node in the channel. This is useful when one knows the pubkey of -// one of the nodes, and wishes to obtain the full LightningNode for the other -// end of the channel. -func (c *ChannelGraph) FetchOtherNode(tx kvdb.RTx, - channel *models.ChannelEdgeInfo, thisNodeKey []byte) ( - *models.LightningNode, error) { - - // Ensure that the node passed in is actually a member of the channel. 
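The traversal above works because edge keys are pubkey||chanID held in sorted order: seeking to the node's public key and stopping once the prefix changes visits exactly that node's channels. A stdlib sketch with a sorted slice standing in for the bucket cursor and illustrative names:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// channelsOfNode seeks to the first key at or after nodePub and collects
// keys until the prefix no longer matches, mirroring the cursor scan above.
func channelsOfNode(sortedKeys [][]byte, nodePub []byte) [][]byte {
	start := sort.Search(len(sortedKeys), func(i int) bool {
		return bytes.Compare(sortedKeys[i], nodePub) >= 0
	})

	var out [][]byte
	for i := start; i < len(sortedKeys); i++ {
		if !bytes.HasPrefix(sortedKeys[i], nodePub) {
			break
		}
		out = append(out, sortedKeys[i])
	}

	return out
}

func main() {
	keys := [][]byte{
		append([]byte("nodeA"), 0, 0, 0, 1),
		append([]byte("nodeA"), 0, 0, 0, 2),
		append([]byte("nodeB"), 0, 0, 0, 1),
	}
	sort.Slice(keys, func(i, j int) bool {
		return bytes.Compare(keys[i], keys[j]) < 0
	})

	fmt.Println("nodeA channels:", len(channelsOfNode(keys, []byte("nodeA"))))
}
```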
- var targetNodeBytes [33]byte - switch { - case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey): - targetNodeBytes = channel.NodeKey2Bytes - case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey): - targetNodeBytes = channel.NodeKey1Bytes - default: - return nil, fmt.Errorf("node not participating in this channel") - } - - var targetNode *models.LightningNode - fetchNodeFunc := func(tx kvdb.RTx) error { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - node, err := fetchLightningNode(nodes, targetNodeBytes[:]) - if err != nil { - return err - } - - targetNode = &node - - return nil - } - - // If the transaction is nil, then we'll need to create a new one, - // otherwise we can use the existing db transaction. - var err error - if tx == nil { - err = kvdb.View(c.db, fetchNodeFunc, func() { - targetNode = nil - }) - } else { - err = fetchNodeFunc(tx) - } - - return targetNode, err -} - -// computeEdgePolicyKeys is a helper function that can be used to compute the -// keys used to index the channel edge policy info for the two nodes of the -// edge. The keys for node 1 and node 2 are returned respectively. -func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) { - var ( - node1Key [33 + 8]byte - node2Key [33 + 8]byte - ) - - copy(node1Key[:], info.NodeKey1Bytes[:]) - copy(node2Key[:], info.NodeKey2Bytes[:]) - - byteOrder.PutUint64(node1Key[33:], info.ChannelID) - byteOrder.PutUint64(node2Key[33:], info.ChannelID) - - return node1Key[:], node2Key[:] -} - -// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for -// the channel identified by the funding outpoint. If the channel can't be -// found, then ErrEdgeNotFound is returned. A struct which houses the general -// information for the channel itself is returned as well as two structs that -// contain the routing policies for the channel in either direction. -func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint) ( - *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, - *models.ChannelEdgePolicy, error) { - - var ( - edgeInfo *models.ChannelEdgeInfo - policy1 *models.ChannelEdgePolicy - policy2 *models.ChannelEdgePolicy - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - // First, grab the node bucket. This will be used to populate - // the Node pointers in each edge read from disk. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - // Next, grab the edge bucket which stores the edges, and also - // the index itself so we can group the directed edges together - // logically. - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - // If the channel's outpoint doesn't exist within the outpoint - // index, then the edge does not exist. - chanIndex := edges.NestedReadBucket(channelPointBucket) - if chanIndex == nil { - return ErrGraphNoEdgesFound - } - var b bytes.Buffer - if err := WriteOutpoint(&b, op); err != nil { - return err - } - chanID := chanIndex.Get(b.Bytes()) - if chanID == nil { - return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op) - } - - // If the channel is found to exists, then we'll first retrieve - // the general information for the channel. 
- edge, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return fmt.Errorf("%w: chanID=%x", err, chanID) - } - edgeInfo = &edge - - // Once we have the information about the channels' parameters, - // we'll fetch the routing policies for each for the directed - // edges. - e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID) - if err != nil { - return fmt.Errorf("failed to find policy: %w", err) - } - - policy1 = e1 - policy2 = e2 - return nil - }, func() { - edgeInfo = nil - policy1 = nil - policy2 = nil - }) - if err != nil { - return nil, nil, nil, err - } - - return edgeInfo, policy1, policy2, nil -} - -// FetchChannelEdgesByID attempts to lookup the two directed edges for the -// channel identified by the channel ID. If the channel can't be found, then -// ErrEdgeNotFound is returned. A struct which houses the general information -// for the channel itself is returned as well as two structs that contain the -// routing policies for the channel in either direction. -// -// ErrZombieEdge an be returned if the edge is currently marked as a zombie -// within the database. In this case, the ChannelEdgePolicy's will be nil, and -// the ChannelEdgeInfo will only include the public keys of each node. -func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64) ( - *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, - *models.ChannelEdgePolicy, error) { - - var ( - edgeInfo *models.ChannelEdgeInfo - policy1 *models.ChannelEdgePolicy - policy2 *models.ChannelEdgePolicy - channelID [8]byte - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - // First, grab the node bucket. This will be used to populate - // the Node pointers in each edge read from disk. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound - } - - // Next, grab the edge bucket which stores the edges, and also - // the index itself so we can group the directed edges together - // logically. - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - byteOrder.PutUint64(channelID[:], chanID) - - // Now, attempt to fetch edge. - edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:]) - - // If it doesn't exist, we'll quickly check our zombie index to - // see if we've previously marked it as so. - if errors.Is(err, ErrEdgeNotFound) { - // If the zombie index doesn't exist, or the edge is not - // marked as a zombie within it, then we'll return the - // original ErrEdgeNotFound error. - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex == nil { - return ErrEdgeNotFound - } - - isZombie, pubKey1, pubKey2 := isZombieEdge( - zombieIndex, chanID, - ) - if !isZombie { - return ErrEdgeNotFound - } - - // Otherwise, the edge is marked as a zombie, so we'll - // populate the edge info with the public keys of each - // party as this is the only information we have about - // it and return an error signaling so. - edgeInfo = &models.ChannelEdgeInfo{ - NodeKey1Bytes: pubKey1, - NodeKey2Bytes: pubKey2, - } - return ErrZombieEdge - } - - // Otherwise, we'll just return the error if any. - if err != nil { - return err - } - - edgeInfo = &edge - - // Then we'll attempt to fetch the accompanying policies of this - // edge. 
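// describeChannel is a hypothetical caller-side sketch illustrating the error
// contract described above: FetchChannelEdgesByID distinguishes a channel we
// have never seen (ErrEdgeNotFound) from one we deliberately marked as a
// zombie (ErrZombieEdge). In the zombie case both policies are nil and only
// the two node keys in the returned edge info are populated.
func describeChannel(graph *ChannelGraph, chanID uint64) (string, error) {
	info, p1, p2, err := graph.FetchChannelEdgesByID(chanID)
	switch {
	case errors.Is(err, ErrZombieEdge):
		return fmt.Sprintf("zombie channel between %x and %x",
			info.NodeKey1Bytes, info.NodeKey2Bytes), nil

	case errors.Is(err, ErrEdgeNotFound):
		return "unknown channel", nil

	case err != nil:
		return "", err
	}

	return fmt.Sprintf("channel %d (policy1 known=%v, policy2 known=%v)",
		info.ChannelID, p1 != nil, p2 != nil), nil
}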
- e1, e2, err := fetchChanEdgePolicies( - edgeIndex, edges, channelID[:], - ) - if err != nil { - return err - } - - policy1 = e1 - policy2 = e2 - return nil - }, func() { - edgeInfo = nil - policy1 = nil - policy2 = nil - }) - if err == ErrZombieEdge { - return edgeInfo, nil, nil, err - } - if err != nil { - return nil, nil, nil, err - } - - return edgeInfo, policy1, policy2, nil -} - -// IsPublicNode is a helper method that determines whether the node with the -// given public key is seen as a public node in the graph from the graph's -// source node's point of view. -func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, error) { - var nodeIsPublic bool - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound - } - ourPubKey := nodes.Get(sourceKey) - if ourPubKey == nil { - return ErrSourceNodeNotSet - } - node, err := fetchLightningNode(nodes, pubKey[:]) - if err != nil { - return err - } - - nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey) - return err - }, func() { - nodeIsPublic = false - }) - if err != nil { - return false, err - } - - return nodeIsPublic, nil -} - -// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys. -func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) { - witnessScript, err := input.GenMultiSigScript(aPub, bPub) - if err != nil { - return nil, err - } - - // With the witness script generated, we'll now turn it into a p2wsh - // script: - // * OP_0 - bldr := txscript.NewScriptBuilder( - txscript.WithScriptAllocSize(input.P2WSHSize), - ) - bldr.AddOp(txscript.OP_0) - scriptHash := sha256.Sum256(witnessScript) - bldr.AddData(scriptHash[:]) - - return bldr.Script() -} - -// EdgePoint couples the outpoint of a channel with the funding script that it -// creates. The FilteredChainView will use this to watch for spends of this -// edge point on chain. We require both of these values as depending on the -// concrete implementation, either the pkScript, or the out point will be used. -type EdgePoint struct { - // FundingPkScript is the p2wsh multi-sig script of the target channel. - FundingPkScript []byte - - // OutPoint is the outpoint of the target channel. - OutPoint wire.OutPoint -} - -// String returns a human readable version of the target EdgePoint. We return -// the outpoint directly as it is enough to uniquely identify the edge point. -func (e *EdgePoint) String() string { - return e.OutPoint.String() -} - -// ChannelView returns the verifiable edge information for each active channel -// within the known channel graph. The set of UTXO's (along with their scripts) -// returned are the ones that need to be watched on chain to detect channel -// closes on the resident blockchain. -func (c *ChannelGraph) ChannelView() ([]EdgePoint, error) { - var edgePoints []EdgePoint - if err := kvdb.View(c.db, func(tx kvdb.RTx) error { - // We're going to iterate over the entire channel index, so - // we'll need to fetch the edgeBucket to get to the index as - // it's a sub-bucket. 
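// watchOutpoints is a hypothetical sketch of how ChannelView output tends to
// be consumed: each EdgePoint carries both the funding outpoint and its p2wsh
// script, and a chain watcher may key off either one depending on the
// backend. The caller and its chain filter are assumptions, not part of this
// change.
func watchOutpoints(graph *ChannelGraph) ([]wire.OutPoint, error) {
	edgePoints, err := graph.ChannelView()
	if err != nil {
		return nil, err
	}

	outpoints := make([]wire.OutPoint, 0, len(edgePoints))
	for _, ep := range edgePoints {
		// ep.FundingPkScript would be handed to a script-based filter
		// instead, if that is what the chain backend expects.
		outpoints = append(outpoints, ep.OutPoint)
	}

	return outpoints, nil
}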
- edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - chanIndex := edges.NestedReadBucket(channelPointBucket) - if chanIndex == nil { - return ErrGraphNoEdgesFound - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound - } - - // Once we have the proper bucket, we'll range over each key - // (which is the channel point for the channel) and decode it, - // accumulating each entry. - return chanIndex.ForEach( - func(chanPointBytes, chanID []byte) error { - chanPointReader := bytes.NewReader( - chanPointBytes, - ) - - var chanPoint wire.OutPoint - err := ReadOutpoint(chanPointReader, &chanPoint) - if err != nil { - return err - } - - edgeInfo, err := fetchChanEdgeInfo( - edgeIndex, chanID, - ) - if err != nil { - return err - } - - pkScript, err := genMultiSigP2WSH( - edgeInfo.BitcoinKey1Bytes[:], - edgeInfo.BitcoinKey2Bytes[:], - ) - if err != nil { - return err - } - - edgePoints = append(edgePoints, EdgePoint{ - FundingPkScript: pkScript, - OutPoint: chanPoint, - }) - - return nil - }, - ) - }, func() { - edgePoints = nil - }); err != nil { - return nil, err - } - - return edgePoints, nil -} - -// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a -// zombie. This method is used on an ad-hoc basis, when channels need to be -// marked as zombies outside the normal pruning cycle. -func (c *ChannelGraph) MarkEdgeZombie(chanID uint64, - pubKey1, pubKey2 [33]byte) error { - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return fmt.Errorf("unable to create zombie "+ - "bucket: %w", err) - } - - if c.graphCache != nil { - c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID) - } - - return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2) - }) - if err != nil { - return err - } - - c.rejectCache.remove(chanID) - c.chanCache.remove(chanID) - - return nil -} - -// markEdgeZombie marks an edge as a zombie within our zombie index. The public -// keys should represent the node public keys of the two parties involved in the -// edge. -func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1, - pubKey2 [33]byte) error { - - var k [8]byte - byteOrder.PutUint64(k[:], chanID) - - var v [66]byte - copy(v[:33], pubKey1[:]) - copy(v[33:], pubKey2[:]) - - return zombieIndex.Put(k[:], v[:]) -} - -// MarkEdgeLive clears an edge from our zombie index, deeming it as live. -func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error { - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - return c.markEdgeLiveUnsafe(nil, chanID) -} - -// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be -// called with an existing kvdb.RwTx or the argument can be set to nil in which -// case a new transaction will be created. -// -// NOTE: this method MUST only be called if the cacheMu has already been -// acquired. 
-func (c *ChannelGraph) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error { - dbFn := func(tx kvdb.RwTx) error { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - zombieIndex := edges.NestedReadWriteBucket(zombieBucket) - if zombieIndex == nil { - return nil - } - - var k [8]byte - byteOrder.PutUint64(k[:], chanID) - - if len(zombieIndex.Get(k[:])) == 0 { - return ErrZombieEdgeNotFound - } - - return zombieIndex.Delete(k[:]) - } - - // If the transaction is nil, we'll create a new one. Otherwise, we use - // the existing transaction - var err error - if tx == nil { - err = kvdb.Update(c.db, dbFn, func() {}) - } else { - err = dbFn(tx) - } - if err != nil { - return err - } - - c.rejectCache.remove(chanID) - c.chanCache.remove(chanID) - - // We need to add the channel back into our graph cache, otherwise we - // won't use it for path finding. - if c.graphCache != nil { - edgeInfos, err := c.fetchChanInfos(tx, []uint64{chanID}) - if err != nil { - return err - } - - for _, edgeInfo := range edgeInfos { - c.graphCache.AddChannel( - edgeInfo.Info, edgeInfo.Policy1, - edgeInfo.Policy2, - ) - } - } - - return nil -} - -// IsZombieEdge returns whether the edge is considered zombie. If it is a -// zombie, then the two node public keys corresponding to this edge are also -// returned. -func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) { - var ( - isZombie bool - pubKey1, pubKey2 [33]byte - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound - } - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex == nil { - return nil - } - - isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID) - return nil - }, func() { - isZombie = false - pubKey1 = [33]byte{} - pubKey2 = [33]byte{} - }) - if err != nil { - return false, [33]byte{}, [33]byte{} - } - - return isZombie, pubKey1, pubKey2 -} - -// isZombieEdge returns whether an entry exists for the given channel in the -// zombie index. If an entry exists, then the two node public keys corresponding -// to this edge are also returned. -func isZombieEdge(zombieIndex kvdb.RBucket, - chanID uint64) (bool, [33]byte, [33]byte) { - - var k [8]byte - byteOrder.PutUint64(k[:], chanID) - - v := zombieIndex.Get(k[:]) - if v == nil { - return false, [33]byte{}, [33]byte{} - } - - var pubKey1, pubKey2 [33]byte - copy(pubKey1[:], v[:33]) - copy(pubKey2[:], v[33:]) - - return true, pubKey1, pubKey2 -} - -// NumZombies returns the current number of zombie channels in the graph. -func (c *ChannelGraph) NumZombies() (uint64, error) { - var numZombies uint64 - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return nil - } - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex == nil { - return nil - } - - return zombieIndex.ForEach(func(_, _ []byte) error { - numZombies++ - return nil - }) - }, func() { - numZombies = 0 - }) - if err != nil { - return 0, err - } - - return numZombies, nil -} - -// PutClosedScid stores a SCID for a closed channel in the database. This is so -// that we can ignore channel announcements that we know to be closed without -// having to validate them and fetch a block. 
-func (c *ChannelGraph) PutClosedScid(scid lnwire.ShortChannelID) error { - return kvdb.Update(c.db, func(tx kvdb.RwTx) error { - closedScids, err := tx.CreateTopLevelBucket(closedScidBucket) - if err != nil { - return err - } - - var k [8]byte - byteOrder.PutUint64(k[:], scid.ToUint64()) - - return closedScids.Put(k[:], []byte{}) - }, func() {}) -} - -// IsClosedScid checks whether a channel identified by the passed in scid is -// closed. This helps avoid having to perform expensive validation checks. -// TODO: Add an LRU cache to cut down on disc reads. -func (c *ChannelGraph) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) { - var isClosed bool - err := kvdb.View(c.db, func(tx kvdb.RTx) error { - closedScids := tx.ReadBucket(closedScidBucket) - if closedScids == nil { - return ErrClosedScidsNotFound - } - - var k [8]byte - byteOrder.PutUint64(k[:], scid.ToUint64()) - - if closedScids.Get(k[:]) != nil { - isClosed = true - return nil - } - - return nil - }, func() { - isClosed = false - }) - if err != nil { - return false, err - } - - return isClosed, nil -} - -// GraphSession will provide the call-back with access to a NodeTraverser -// instance which can be used to perform queries against the channel graph. If -// the graph cache is not enabled, then the call-back will be provided with -// access to the graph via a consistent read-only transaction. -func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error { - if c.graphCache != nil { - return cb(&nodeTraverserSession{db: c}) - } - - return c.db.View(func(tx walletdb.ReadTx) error { - return cb(&nodeTraverserSession{ - db: c, - tx: tx, - }) - }, func() {}) -} - -// nodeTraverserSession implements the NodeTraverser interface but with a -// backing read only transaction for a consistent view of the graph in the case -// where the graph Cache has not been enabled. -type nodeTraverserSession struct { - tx kvdb.RTx - db *ChannelGraph -} - -// ForEachNodeDirectedChannel calls the callback for every channel of the given -// node. -// -// NOTE: Part of the NodeTraverser interface. -func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex, - cb func(channel *DirectedChannel) error) error { - - return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb) -} - -// FetchNodeFeatures returns the features of the given node. If the node is -// unknown, assume no additional features are supported. -// -// NOTE: Part of the NodeTraverser interface. -func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) ( - *lnwire.FeatureVector, error) { - - return c.db.fetchNodeFeatures(c.tx, nodePub) -} - -func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, // nolint:dupl - updateIndex kvdb.RwBucket, node *models.LightningNode) error { - - var ( - scratch [16]byte - b bytes.Buffer - ) - - pub, err := node.PubKey() - if err != nil { - return err - } - nodePub := pub.SerializeCompressed() - - // If the node has the update time set, write it, else write 0. - updateUnix := uint64(0) - if node.LastUpdate.Unix() > 0 { - updateUnix = uint64(node.LastUpdate.Unix()) - } - - byteOrder.PutUint64(scratch[:8], updateUnix) - if _, err := b.Write(scratch[:8]); err != nil { - return err - } - - if _, err := b.Write(nodePub); err != nil { - return err - } - - // If we got a node announcement for this node, we will have the rest - // of the data available. If not we don't have more data to write. - if !node.HaveNodeAnnouncement { - // Write HaveNodeAnnouncement=0. 
- byteOrder.PutUint16(scratch[:2], 0) - if _, err := b.Write(scratch[:2]); err != nil { - return err - } - - return nodeBucket.Put(nodePub, b.Bytes()) - } - - // Write HaveNodeAnnouncement=1. - byteOrder.PutUint16(scratch[:2], 1) - if _, err := b.Write(scratch[:2]); err != nil { - return err - } - - if err := binary.Write(&b, byteOrder, node.Color.R); err != nil { - return err - } - if err := binary.Write(&b, byteOrder, node.Color.G); err != nil { - return err - } - if err := binary.Write(&b, byteOrder, node.Color.B); err != nil { - return err - } - - if err := wire.WriteVarString(&b, 0, node.Alias); err != nil { - return err - } - - if err := node.Features.Encode(&b); err != nil { - return err - } - - numAddresses := uint16(len(node.Addresses)) - byteOrder.PutUint16(scratch[:2], numAddresses) - if _, err := b.Write(scratch[:2]); err != nil { - return err - } - - for _, address := range node.Addresses { - if err := SerializeAddr(&b, address); err != nil { - return err - } - } - - sigLen := len(node.AuthSigBytes) - if sigLen > 80 { - return fmt.Errorf("max sig len allowed is 80, had %v", - sigLen) - } - - err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes) - if err != nil { - return err - } - - if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData)) - } - err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData) - if err != nil { - return err - } - - if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil { - return err - } - - // With the alias bucket updated, we'll now update the index that - // tracks the time series of node updates. - var indexKey [8 + 33]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - copy(indexKey[8:], nodePub) - - // If there was already an old index entry for this node, then we'll - // delete the old one before we write the new entry. - if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil { - // Extract out the old update time to we can reconstruct the - // prior index key to delete it from the index. - oldUpdateTime := nodeBytes[:8] - - var oldIndexKey [8 + 33]byte - copy(oldIndexKey[:8], oldUpdateTime) - copy(oldIndexKey[8:], nodePub) - - if err := updateIndex.Delete(oldIndexKey[:]); err != nil { - return err - } - } - - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } - - return nodeBucket.Put(nodePub, b.Bytes()) -} - -func fetchLightningNode(nodeBucket kvdb.RBucket, - nodePub []byte) (models.LightningNode, error) { - - nodeBytes := nodeBucket.Get(nodePub) - if nodeBytes == nil { - return models.LightningNode{}, ErrGraphNodeNotFound - } - - nodeReader := bytes.NewReader(nodeBytes) - return deserializeLightningNode(nodeReader) -} - -func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex, - *lnwire.FeatureVector, error) { - - var ( - pubKey route.Vertex - features = lnwire.EmptyFeatureVector() - nodeScratch [8]byte - ) - - // Skip ahead: - // - LastUpdate (8 bytes) - if _, err := r.Read(nodeScratch[:]); err != nil { - return pubKey, nil, err - } - - if _, err := io.ReadFull(r, pubKey[:]); err != nil { - return pubKey, nil, err - } - - // Read the node announcement flag. - if _, err := r.Read(nodeScratch[:2]); err != nil { - return pubKey, nil, err - } - hasNodeAnn := byteOrder.Uint16(nodeScratch[:2]) - - // The rest of the data is optional, and will only be there if we got a - // node announcement for this node. 
- if hasNodeAnn == 0 { - return pubKey, features, nil - } - - // We did get a node announcement for this node, so we'll have the rest - // of the data available. - var rgb uint8 - if err := binary.Read(r, byteOrder, &rgb); err != nil { - return pubKey, nil, err - } - if err := binary.Read(r, byteOrder, &rgb); err != nil { - return pubKey, nil, err - } - if err := binary.Read(r, byteOrder, &rgb); err != nil { - return pubKey, nil, err - } - - if _, err := wire.ReadVarString(r, 0); err != nil { - return pubKey, nil, err - } - - if err := features.Decode(r); err != nil { - return pubKey, nil, err - } - - return pubKey, features, nil -} - -func deserializeLightningNode(r io.Reader) (models.LightningNode, error) { - var ( - node models.LightningNode - scratch [8]byte - err error - ) - - // Always populate a feature vector, even if we don't have a node - // announcement and short circuit below. - node.Features = lnwire.EmptyFeatureVector() - - if _, err := r.Read(scratch[:]); err != nil { - return models.LightningNode{}, err - } - - unix := int64(byteOrder.Uint64(scratch[:])) - node.LastUpdate = time.Unix(unix, 0) - - if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil { - return models.LightningNode{}, err - } - - if _, err := r.Read(scratch[:2]); err != nil { - return models.LightningNode{}, err - } - - hasNodeAnn := byteOrder.Uint16(scratch[:2]) - if hasNodeAnn == 1 { - node.HaveNodeAnnouncement = true - } else { - node.HaveNodeAnnouncement = false - } - - // The rest of the data is optional, and will only be there if we got a - // node announcement for this node. - if !node.HaveNodeAnnouncement { - return node, nil - } - - // We did get a node announcement for this node, so we'll have the rest - // of the data available. - if err := binary.Read(r, byteOrder, &node.Color.R); err != nil { - return models.LightningNode{}, err - } - if err := binary.Read(r, byteOrder, &node.Color.G); err != nil { - return models.LightningNode{}, err - } - if err := binary.Read(r, byteOrder, &node.Color.B); err != nil { - return models.LightningNode{}, err - } - - node.Alias, err = wire.ReadVarString(r, 0) - if err != nil { - return models.LightningNode{}, err - } - - err = node.Features.Decode(r) - if err != nil { - return models.LightningNode{}, err - } - - if _, err := r.Read(scratch[:2]); err != nil { - return models.LightningNode{}, err - } - numAddresses := int(byteOrder.Uint16(scratch[:2])) - - var addresses []net.Addr - for i := 0; i < numAddresses; i++ { - address, err := DeserializeAddr(r) - if err != nil { - return models.LightningNode{}, err - } - addresses = append(addresses, address) - } - node.Addresses = addresses - - node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") - if err != nil { - return models.LightningNode{}, err - } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the node as is. 
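// peekNodeRecord is an illustrative in-package helper (not used by the store)
// that mirrors the fixed prefix of the serialized node record handled above:
// an 8-byte LastUpdate timestamp, the 33-byte compressed public key, and a
// 2-byte "have node announcement" flag. Everything after the flag is only
// present when the flag is set to 1.
func peekNodeRecord(nodeBytes []byte) (time.Time, route.Vertex, bool, error) {
	const prefixLen = 8 + 33 + 2
	if len(nodeBytes) < prefixLen {
		return time.Time{}, route.Vertex{}, false, fmt.Errorf(
			"node record too short: %d bytes", len(nodeBytes),
		)
	}

	lastUpdate := time.Unix(int64(byteOrder.Uint64(nodeBytes[:8])), 0)

	var pub route.Vertex
	copy(pub[:], nodeBytes[8:41])

	haveAnn := byteOrder.Uint16(nodeBytes[41:43]) == 1

	return lastUpdate, pub, haveAnn, nil
}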
- node.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case err == io.ErrUnexpectedEOF: - case err == io.EOF: - case err != nil: - return models.LightningNode{}, err - } - - return node, nil -} - -func putChanEdgeInfo(edgeIndex kvdb.RwBucket, - edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error { - - var b bytes.Buffer - - if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil { - return err - } - if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil { - return err - } - if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil { - return err - } - if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil { - return err - } - - if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil { - return err - } - - authProof := edgeInfo.AuthProof - var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte - if authProof != nil { - nodeSig1 = authProof.NodeSig1Bytes - nodeSig2 = authProof.NodeSig2Bytes - bitcoinSig1 = authProof.BitcoinSig1Bytes - bitcoinSig2 = authProof.BitcoinSig2Bytes - } - - if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil { - return err - } - if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil { - return err - } - if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil { - return err - } - if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil { - return err - } - - if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil { - return err - } - err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity)) - if err != nil { - return err - } - if _, err := b.Write(chanID[:]); err != nil { - return err - } - if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil { - return err - } - - if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData)) - } - err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData) - if err != nil { - return err - } - - return edgeIndex.Put(chanID[:], b.Bytes()) -} - -func fetchChanEdgeInfo(edgeIndex kvdb.RBucket, - chanID []byte) (models.ChannelEdgeInfo, error) { - - edgeInfoBytes := edgeIndex.Get(chanID) - if edgeInfoBytes == nil { - return models.ChannelEdgeInfo{}, ErrEdgeNotFound - } - - edgeInfoReader := bytes.NewReader(edgeInfoBytes) - return deserializeChanEdgeInfo(edgeInfoReader) -} - -func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) { - var ( - err error - edgeInfo models.ChannelEdgeInfo - ) - - if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err - } - if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err - } - if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err - } - if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil { - return models.ChannelEdgeInfo{}, err - } - - edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features") - if err != nil { - return models.ChannelEdgeInfo{}, err - } - - proof := &models.ChannelAuthProof{} - - proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return models.ChannelEdgeInfo{}, err - } - proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return models.ChannelEdgeInfo{}, err - } - proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return models.ChannelEdgeInfo{}, err - } - proof.BitcoinSig2Bytes, err = 
wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return models.ChannelEdgeInfo{}, err - } - - if !proof.IsEmpty() { - edgeInfo.AuthProof = proof - } - - edgeInfo.ChannelPoint = wire.OutPoint{} - if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil { - return models.ChannelEdgeInfo{}, err - } - if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil { - return models.ChannelEdgeInfo{}, err - } - if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil { - return models.ChannelEdgeInfo{}, err - } - - if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil { - return models.ChannelEdgeInfo{}, err - } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the edge as is. - edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case err == io.ErrUnexpectedEOF: - case err == io.EOF: - case err != nil: - return models.ChannelEdgeInfo{}, err - } - - return edgeInfo, nil -} - -func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy, - from, to []byte) error { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], edge.ChannelID) - - var b bytes.Buffer - if err := serializeChanEdgePolicy(&b, edge, to); err != nil { - return err - } - - // Before we write out the new edge, we'll create a new entry in the - // update index in order to keep it fresh. - updateUnix := uint64(edge.LastUpdate.Unix()) - var indexKey [8 + 8]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - byteOrder.PutUint64(indexKey[8:], edge.ChannelID) +// MarkEdgeLive clears an edge from our zombie index, deeming it as live. +// If the cache is enabled, the edge will be added back to the graph cache if +// we still have a record of this channel in the DB. +func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) + err := c.KVStore.MarkEdgeLive(chanID) if err != nil { return err } - // If there was already an entry for this edge, then we'll need to - // delete the old one to ensure we don't leave around any after-images. - // An unknown policy value does not have a update time recorded, so - // it also does not need to be removed. - if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil && - !bytes.Equal(edgeBytes[:], unknownPolicy) { - - // In order to delete the old entry, we'll need to obtain the - // *prior* update time in order to delete it. To do this, we'll - // need to deserialize the existing policy within the database - // (now outdated by the new one), and delete its corresponding - // entry within the update index. We'll ignore any - // ErrEdgePolicyOptionalFieldNotFound error, as we only need - // the channel ID and update time to delete the entry. - // TODO(halseth): get rid of these invalid policies in a - // migration. - oldEdgePolicy, err := deserializeChanEdgePolicy( - bytes.NewReader(edgeBytes), - ) - if err != nil && err != ErrEdgePolicyOptionalFieldNotFound { + if c.graphCache != nil { + // We need to add the channel back into our graph cache, + // otherwise we won't use it for path finding. 
+ infos, err := c.KVStore.FetchChanInfos([]uint64{chanID}) + if err != nil { return err } - oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix()) - - var oldIndexKey [8 + 8]byte - byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime) - byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID) - - if err := updateIndex.Delete(oldIndexKey[:]); err != nil { - return err + if len(infos) == 0 { + return nil } - } - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } + info := infos[0] - err = updateEdgePolicyDisabledIndex( - edges, edge.ChannelID, - edge.ChannelFlags&lnwire.ChanUpdateDirection > 0, - edge.IsDisabled(), - ) - if err != nil { - return err + c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2) } - return edges.Put(edgeKey[:], b.Bytes()[:]) + return nil } -// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex -// bucket by either add a new disabled ChannelEdgePolicy or remove an existing -// one. -// The direction represents the direction of the edge and disabled is used for -// deciding whether to remove or add an entry to the bucket. -// In general a channel is disabled if two entries for the same chanID exist -// in this bucket. -// Maintaining the bucket this way allows a fast retrieval of disabled -// channels, for example when prune is needed. -func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64, - direction bool, disabled bool) error { +// DeleteChannelEdges removes edges with the given channel IDs from the +// database and marks them as zombies. This ensures that we're unable to re-add +// it to our database once again. If an edge does not exist within the +// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is +// true, then when we mark these edges as zombies, we'll set up the keys such +// that we require the node that failed to send the fresh update to be the one +// that resurrects the channel from its zombie state. The markZombie bool +// denotes whether to mark the channel as a zombie. +func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool, + chanIDs ...uint64) error { - var disabledEdgeKey [8 + 1]byte - byteOrder.PutUint64(disabledEdgeKey[0:], chanID) - if direction { - disabledEdgeKey[8] = 1 - } + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists( - disabledEdgePolicyBucket, + infos, err := c.KVStore.DeleteChannelEdges( + strictZombiePruning, markZombie, chanIDs..., ) if err != nil { return err } - if disabled { - return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{}) + if c.graphCache != nil { + for _, info := range infos { + c.graphCache.RemoveChannel( + info.NodeKey1Bytes, info.NodeKey2Bytes, + info.ChannelID, + ) + } } - return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:]) + return err } -// putChanEdgePolicyUnknown marks the edge policy as unknown -// in the edges bucket. -func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64, - from []byte) error { +// DisconnectBlockAtHeight is used to indicate that the block specified +// by the passed height has been disconnected from the main chain. This +// will "rewind" the graph back to the height below, deleting channels +// that are no longer confirmed from the graph. The prune log will be +// set to the last prune height valid for the remaining chain. +// Channels that were removed from the graph resulting from the +// disconnected block are returned. 
+func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ( + []*models.ChannelEdgeInfo, error) { - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], channelID) + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - if edges.Get(edgeKey[:]) != nil { - return fmt.Errorf("cannot write unknown policy for channel %v "+ - " when there is already a policy present", channelID) + edges, err := c.KVStore.DisconnectBlockAtHeight(height) + if err != nil { + return nil, err } - return edges.Put(edgeKey[:], unknownPolicy) -} - -func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte, - nodePub []byte) (*models.ChannelEdgePolicy, error) { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], nodePub) - copy(edgeKey[33:], chanID[:]) - - edgeBytes := edges.Get(edgeKey[:]) - if edgeBytes == nil { - return nil, ErrEdgeNotFound + if c.graphCache != nil { + for _, edge := range edges { + c.graphCache.RemoveChannel( + edge.NodeKey1Bytes, edge.NodeKey2Bytes, + edge.ChannelID, + ) + } } - // No need to deserialize unknown policy. - if bytes.Equal(edgeBytes[:], unknownPolicy) { - return nil, nil - } + return edges, nil +} - edgeReader := bytes.NewReader(edgeBytes) +// PruneGraph prunes newly closed channels from the channel graph in response +// to a new block being solved on the network. Any transactions which spend the +// funding output of any known channels within he graph will be deleted. +// Additionally, the "prune tip", or the last block which has been used to +// prune the graph is stored so callers can ensure the graph is fully in sync +// with the current UTXO state. A slice of channels that have been closed by +// the target block are returned if the function succeeds without error. +func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, + blockHash *chainhash.Hash, blockHeight uint32) ( + []*models.ChannelEdgeInfo, error) { - ep, err := deserializeChanEdgePolicy(edgeReader) - switch { - // If the db policy was missing an expected optional field, we return - // nil as if the policy was unknown. - case err == ErrEdgePolicyOptionalFieldNotFound: - return nil, nil + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - case err != nil: + edges, nodes, err := c.KVStore.PruneGraph( + spentOutputs, blockHash, blockHeight, + ) + if err != nil { return nil, err } - return ep, nil -} - -func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket, - chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy, - error) { + if c.graphCache != nil { + for _, edge := range edges { + c.graphCache.RemoveChannel( + edge.NodeKey1Bytes, edge.NodeKey2Bytes, + edge.ChannelID, + ) + } - edgeInfo := edgeIndex.Get(chanID) - if edgeInfo == nil { - return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound, - chanID) - } + for _, node := range nodes { + c.graphCache.RemoveNode(node) + } - // The first node is contained within the first half of the edge - // information. We only propagate the error here and below if it's - // something other than edge non-existence. - node1Pub := edgeInfo[:33] - edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub) - if err != nil { - return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound, - node1Pub) + log.Debugf("Pruned graph, cache now has %s", + c.graphCache.Stats()) } - // Similarly, the second node is contained within the latter - // half of the edge information. 
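// handleReorg is a hypothetical sketch of how the two wrapper methods above
// are driven from chain events: a reorged-out block is first rewound with
// DisconnectBlockAtHeight, and the newly connected block's spends are then
// pruned via PruneGraph. The spentOutputs, newHash and newHeight values are
// assumed to come from the caller's chain view.
func handleReorg(graph *ChannelGraph, staleHeight uint32,
	spentOutputs []*wire.OutPoint, newHash *chainhash.Hash,
	newHeight uint32) error {

	removed, err := graph.DisconnectBlockAtHeight(staleHeight)
	if err != nil {
		return err
	}
	log.Debugf("Reorg removed %d channels from the graph", len(removed))

	closed, err := graph.PruneGraph(spentOutputs, newHash, newHeight)
	if err != nil {
		return err
	}
	log.Debugf("Pruned %d closed channels at height %d", len(closed),
		newHeight)

	return nil
}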
- node2Pub := edgeInfo[33:66] - edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub) - if err != nil { - return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound, - node2Pub) + if len(edges) != 0 { + // Notify all currently registered clients of the newly closed + // channels. + closeSummaries := createCloseSummaries( + blockHeight, edges..., + ) + c.notifyTopologyChange(&TopologyChange{ + ClosedChannels: closeSummaries, + }) } - return edge1, edge2, nil + return edges, nil } -func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy, - to []byte) error { - - err := wire.WriteVarBytes(w, 0, edge.SigBytes) - if err != nil { - return err - } - - if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil { - return err - } - - var scratch [8]byte - updateUnix := uint64(edge.LastUpdate.Unix()) - byteOrder.PutUint64(scratch[:], updateUnix) - if _, err := w.Write(scratch[:]); err != nil { - return err - } +// PruneGraphNodes is a garbage collection method which attempts to prune out +// any nodes from the channel graph that are currently unconnected. This ensure +// that we only maintain a graph of reachable nodes. In the event that a pruned +// node gains more channels, it will be re-added back to the graph. +func (c *ChannelGraph) PruneGraphNodes() error { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil { - return err - } - if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil { - return err - } - if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil { - return err - } - if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil { - return err - } - err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat)) - if err != nil { - return err - } - err = binary.Write( - w, byteOrder, uint64(edge.FeeProportionalMillionths), - ) + nodes, err := c.KVStore.PruneGraphNodes() if err != nil { return err } - if _, err := w.Write(to); err != nil { - return err - } - - // If the max_htlc field is present, we write it. To be compatible with - // older versions that wasn't aware of this field, we write it as part - // of the opaque data. - // TODO(halseth): clean up when moving to TLV. - var opaqueBuf bytes.Buffer - if edge.MessageFlags.HasMaxHtlc() { - err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC)) - if err != nil { - return err + if c.graphCache != nil { + for _, node := range nodes { + c.graphCache.RemoveNode(node) } } - if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData)) - } - if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil { - return err - } - - if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil { - return err - } return nil } -func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) { - // Deserialize the policy. Note that in case an optional field is not - // found, both an error and a populated policy object are returned. - edge, deserializeErr := deserializeChanEdgePolicyRaw(r) - if deserializeErr != nil && - deserializeErr != ErrEdgePolicyOptionalFieldNotFound { - - return nil, deserializeErr - } - - return edge, deserializeErr -} - -func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy, - error) { - - edge := &models.ChannelEdgePolicy{} +// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan +// ID's that we don't know and are not known zombies of the passed set. 
In other +// words, we perform a set difference of our set of chan ID's and the ones +// passed in. This method can be used by callers to determine the set of +// channels another peer knows of that we don't. +func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo, + isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) { - var err error - edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") + unknown, knownZombies, err := c.KVStore.FilterKnownChanIDs(chansInfo) if err != nil { return nil, err } - if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil { - return nil, err - } - - var scratch [8]byte - if _, err := r.Read(scratch[:]); err != nil { - return nil, err - } - unix := int64(byteOrder.Uint64(scratch[:])) - edge.LastUpdate = time.Unix(unix, 0) - - if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil { - return nil, err - } - if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil { - return nil, err - } - if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil { - return nil, err - } - - var n uint64 - if err := binary.Read(r, byteOrder, &n); err != nil { - return nil, err - } - edge.MinHTLC = lnwire.MilliSatoshi(n) - - if err := binary.Read(r, byteOrder, &n); err != nil { - return nil, err - } - edge.FeeBaseMSat = lnwire.MilliSatoshi(n) - - if err := binary.Read(r, byteOrder, &n); err != nil { - return nil, err - } - edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n) - - if _, err := r.Read(edge.ToNode[:]); err != nil { - return nil, err - } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the edge as is. - edge.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case err == io.ErrUnexpectedEOF: - case err == io.EOF: - case err != nil: - return nil, err - } - - // See if optional fields are present. - if edge.MessageFlags.HasMaxHtlc() { - // The max_htlc field should be at the beginning of the opaque - // bytes. - opq := edge.ExtraOpaqueData + for _, info := range knownZombies { + // TODO(ziggie): Make sure that for the strict pruning case we + // compare the pubkeys and whether the right timestamp is not + // older than the `ChannelPruneExpiry`. + // + // NOTE: The timestamp data has no verification attached to it + // in the `ReplyChannelRange` msg so we are trusting this data + // at this point. However it is not critical because we are just + // removing the channel from the db when the timestamps are more + // recent. During the querying of the gossip msg verification + // happens as usual. However we should start punishing peers + // when they don't provide us honest data ? + isStillZombie := isZombieChan( + info.Node1UpdateTimestamp, info.Node2UpdateTimestamp, + ) - // If the max_htlc field is not present, it might be old data - // stored before this field was validated. We'll return the - // edge along with an error. - if len(opq) < 8 { - return edge, ErrEdgePolicyOptionalFieldNotFound + if isStillZombie { + continue } - maxHtlc := byteOrder.Uint64(opq[:8]) - edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc) - - // Exclude the parsed field from the rest of the opaque data. - edge.ExtraOpaqueData = opq[8:] + // If we have marked it as a zombie but the latest update + // timestamps could bring it back from the dead, then we mark it + // alive, and we let it be added to the set of IDs to query our + // peer for. 
+ err := c.KVStore.MarkEdgeLive( + info.ShortChannelID.ToUint64(), + ) + // Since there is a chance that the edge could have been marked + // as "live" between the FilterKnownChanIDs call and the + // MarkEdgeLive call, we ignore the error if the edge is already + // marked as live. + if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) { + return nil, err + } } - return edge, nil -} - -// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the -// ChannelGraph and a kvdb.RTx. -type chanGraphNodeTx struct { - tx kvdb.RTx - db *ChannelGraph - node *models.LightningNode + return unknown, nil } -// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx -// interface. -var _ NodeRTx = (*chanGraphNodeTx)(nil) - -func newChanGraphNodeTx(tx kvdb.RTx, db *ChannelGraph, - node *models.LightningNode) *chanGraphNodeTx { - - return &chanGraphNodeTx{ - tx: tx, - db: db, - node: node, - } -} +// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a +// zombie. This method is used on an ad-hoc basis, when channels need to be +// marked as zombies outside the normal pruning cycle. +func (c *ChannelGraph) MarkEdgeZombie(chanID uint64, + pubKey1, pubKey2 [33]byte) error { -// Node returns the raw information of the node. -// -// NOTE: This is a part of the NodeRTx interface. -func (c *chanGraphNodeTx) Node() *models.LightningNode { - return c.node -} + c.cacheMu.Lock() + defer c.cacheMu.Unlock() -// FetchNode fetches the node with the given pub key under the same transaction -// used to fetch the current node. The returned node is also a NodeRTx and any -// operations on that NodeRTx will also be done under the same transaction. -// -// NOTE: This is a part of the NodeRTx interface. -func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) { - node, err := c.db.FetchLightningNodeTx(c.tx, nodePub) + err := c.KVStore.MarkEdgeZombie(chanID, pubKey1, pubKey2) if err != nil { - return nil, err + return err } - return newChanGraphNodeTx(c.tx, c.db, node), nil -} - -// ForEachChannel can be used to iterate over the node's channels under -// the same transaction used to fetch the node. -// -// NOTE: This is a part of the NodeRTx interface. -func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo, - *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error { - - return c.db.ForEachNodeChannelTx(c.tx, c.node.PubKeyBytes, - func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1, - policy2 *models.ChannelEdgePolicy) error { + if c.graphCache != nil { + c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID) + } - return f(info, policy1, policy2) - }, - ) + return nil } -// MakeTestGraph creates a new instance of the ChannelGraph for testing -// purposes. -func MakeTestGraph(t testing.TB, modifiers ...OptionModifier) (*ChannelGraph, - error) { +// UpdateEdgePolicy updates the edge routing policy for a single directed edge +// within the database for the referenced channel. The `flags` attribute within +// the ChannelEdgePolicy determines which of the directed edges are being +// updated. If the flag is 1, then the first node's information is being +// updated, otherwise it's the second node's information. The node ordering is +// determined by the lexicographical ordering of the identity public keys of the +// nodes on either side of the channel. 
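// filterForQuery is a hypothetical gossip-syncer-style sketch of how a caller
// supplies the isZombieChan callback to FilterKnownChanIDs: the callback
// decides, from the two advertised update timestamps, whether a previously
// marked zombie should stay dead. pruneExpiry is an illustrative value, not a
// constant defined by this package.
func filterForQuery(graph *ChannelGraph,
	chansInfo []ChannelUpdateInfo) ([]uint64, error) {

	const pruneExpiry = 14 * 24 * time.Hour

	isStillZombie := func(t1, t2 time.Time) bool {
		latest := t1
		if t2.After(t1) {
			latest = t2
		}

		// Keep the channel dead only if neither side has produced a
		// sufficiently recent update.
		return time.Since(latest) > pruneExpiry
	}

	// Anything returned here is unknown to us (and not a still-live
	// zombie), so it is worth querying the remote peer for.
	return graph.FilterKnownChanIDs(chansInfo, isStillZombie)
}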
+func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy, + op ...batch.SchedulerOption) error { - opts := DefaultOptions() - for _, modifier := range modifiers { - modifier(opts) - } + c.cacheMu.Lock() + defer c.cacheMu.Unlock() - // Next, create channelgraph for the first time. - backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr") + from, to, err := c.KVStore.UpdateEdgePolicy(edge, op...) if err != nil { - backendCleanup() - return nil, err + return err } - graph, err := NewChannelGraph(backend) - if err != nil { - backendCleanup() - return nil, err + if c.graphCache != nil { + var isUpdate1 bool + if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { + isUpdate1 = true + } + + c.graphCache.UpdatePolicy(edge, from, to, isUpdate1) } - t.Cleanup(func() { - _ = backend.Close() - backendCleanup() - }) + select { + case c.topologyUpdate <- edge: + case <-c.quit: + return ErrChanGraphShuttingDown + } - return graph, nil + return nil } diff --git a/graph/db/graph_test.go b/graph/db/graph_test.go index 62b9cd4e14..754da5eef3 100644 --- a/graph/db/graph_test.go +++ b/graph/db/graph_test.go @@ -155,62 +155,70 @@ func TestNodeInsertionAndDeletion(t *testing.T) { } // TestPartialNode checks that we can add and retrieve a LightningNode where -// where only the pubkey is known to the database. +// only the pubkey is known to the database. func TestPartialNode(t *testing.T) { t.Parallel() graph, err := MakeTestGraph(t) require.NoError(t, err, "unable to make test database") - // We want to be able to insert nodes into the graph that only has the - // PubKey set. - node := &models.LightningNode{ - HaveNodeAnnouncement: false, - PubKeyBytes: testPub, - } + // To insert a partial node, we need to add a channel edge that has + // node keys for nodes we are not yet aware + var node1, node2 models.LightningNode + copy(node1.PubKeyBytes[:], pubKey1Bytes) + copy(node2.PubKeyBytes[:], pubKey2Bytes) - if err := graph.AddLightningNode(node); err != nil { - t.Fatalf("unable to add node: %v", err) - } - assertNodeInCache(t, graph, node, nil) + // Create an edge attached to these nodes and add it to the graph. + edgeInfo, _ := createEdge(140, 0, 0, 0, &node1, &node2) + require.NoError(t, graph.AddChannelEdge(&edgeInfo)) - // Next, fetch the node from the database to ensure everything was + // Both of the nodes should now be in both the graph (as partial/shell) + // nodes _and_ the cache should also have an awareness of both nodes. + assertNodeInCache(t, graph, &node1, nil) + assertNodeInCache(t, graph, &node2, nil) + + // Next, fetch the node2 from the database to ensure everything was // serialized properly. - dbNode, err := graph.FetchLightningNode(testPub) - require.NoError(t, err, "unable to locate node") + dbNode1, err := graph.FetchLightningNode(pubKey1) + require.NoError(t, err) + dbNode2, err := graph.FetchLightningNode(pubKey2) + require.NoError(t, err) - _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes) - if err != nil { - t.Fatalf("unable to query for node: %v", err) - } else if !exists { - t.Fatalf("node should be found but wasn't") - } + _, exists, err := graph.HasLightningNode(dbNode1.PubKeyBytes) + require.NoError(t, err) + require.True(t, exists) // The two nodes should match exactly! 
(with default values for // LastUpdate and db set to satisfy compareNodes()) - node = &models.LightningNode{ + expectedNode1 := &models.LightningNode{ HaveNodeAnnouncement: false, LastUpdate: time.Unix(0, 0), - PubKeyBytes: testPub, + PubKeyBytes: pubKey1, } + require.NoError(t, compareNodes(dbNode1, expectedNode1)) - if err := compareNodes(node, dbNode); err != nil { - t.Fatalf("nodes don't match: %v", err) + _, exists, err = graph.HasLightningNode(dbNode2.PubKeyBytes) + require.NoError(t, err) + require.True(t, exists) + + // The two nodes should match exactly! (with default values for + // LastUpdate and db set to satisfy compareNodes()) + expectedNode2 := &models.LightningNode{ + HaveNodeAnnouncement: false, + LastUpdate: time.Unix(0, 0), + PubKeyBytes: pubKey2, } + require.NoError(t, compareNodes(dbNode2, expectedNode2)) // Next, delete the node from the graph, this should purge all data // related to the node. - if err := graph.DeleteLightningNode(testPub); err != nil { - t.Fatalf("unable to delete node: %v", err) - } + require.NoError(t, graph.DeleteLightningNode(pubKey1)) assertNodeNotInCache(t, graph, testPub) // Finally, attempt to fetch the node again. This should fail as the // node should have been deleted from the database. _, err = graph.FetchLightningNode(testPub) - if err != ErrGraphNodeNotFound { - t.Fatalf("fetch after delete should fail!") - } + require.ErrorIs(t, err, ErrGraphNodeNotFound) } func TestAliasLookup(t *testing.T) { @@ -964,6 +972,23 @@ func randEdgePolicy(chanID uint64, db kvdb.Backend) *models.ChannelEdgePolicy { return newEdgePolicy(chanID, db, update) } +func copyEdgePolicy(p *models.ChannelEdgePolicy) *models.ChannelEdgePolicy { + return &models.ChannelEdgePolicy{ + SigBytes: p.SigBytes, + ChannelID: p.ChannelID, + LastUpdate: p.LastUpdate, + MessageFlags: p.MessageFlags, + ChannelFlags: p.ChannelFlags, + TimeLockDelta: p.TimeLockDelta, + MinHTLC: p.MinHTLC, + MaxHTLC: p.MaxHTLC, + FeeBaseMSat: p.FeeBaseMSat, + FeeProportionalMillionths: p.FeeProportionalMillionths, + ToNode: p.ToNode, + ExtraOpaqueData: p.ExtraOpaqueData, + } +} + func newEdgePolicy(chanID uint64, db kvdb.Backend, updateTime int64) *models.ChannelEdgePolicy { @@ -1919,6 +1944,76 @@ func TestNodeUpdatesInHorizon(t *testing.T) { } } +// TestFilterKnownChanIDsZombieRevival tests that if a ChannelUpdateInfo is +// passed to FilterKnownChanIDs that contains a channel that we have marked as +// a zombie, then we will mark it as live again if the new ChannelUpdate has +// timestamps that would make the channel be considered live again. +// +// NOTE: this tests focuses on zombie revival. The main logic of +// FilterKnownChanIDs is tested in TestFilterKnownChanIDs. +func TestFilterKnownChanIDsZombieRevival(t *testing.T) { + t.Parallel() + + graph, err := MakeTestGraph(t) + require.NoError(t, err) + + var ( + scid1 = lnwire.ShortChannelID{BlockHeight: 1} + scid2 = lnwire.ShortChannelID{BlockHeight: 2} + scid3 = lnwire.ShortChannelID{BlockHeight: 3} + ) + + isZombie := func(scid lnwire.ShortChannelID) bool { + zombie, _, _ := graph.IsZombieEdge(scid.ToUint64()) + return zombie + } + + // Mark channel 1 and 2 as zombies. 
+ err = graph.MarkEdgeZombie(scid1.ToUint64(), [33]byte{}, [33]byte{}) + require.NoError(t, err) + err = graph.MarkEdgeZombie(scid2.ToUint64(), [33]byte{}, [33]byte{}) + require.NoError(t, err) + + require.True(t, isZombie(scid1)) + require.True(t, isZombie(scid2)) + require.False(t, isZombie(scid3)) + + // Call FilterKnownChanIDs with an isStillZombie call-back that would + // result in the current zombies still be considered as zombies. + _, err = graph.FilterKnownChanIDs([]ChannelUpdateInfo{ + {ShortChannelID: scid1}, + {ShortChannelID: scid2}, + {ShortChannelID: scid3}, + }, func(_ time.Time, _ time.Time) bool { + return true + }) + require.NoError(t, err) + + require.True(t, isZombie(scid1)) + require.True(t, isZombie(scid2)) + require.False(t, isZombie(scid3)) + + // Now call it again but this time with a isStillZombie call-back that + // would result in channel with SCID 2 no longer being considered a + // zombie. + _, err = graph.FilterKnownChanIDs([]ChannelUpdateInfo{ + {ShortChannelID: scid1}, + { + ShortChannelID: scid2, + Node1UpdateTimestamp: time.Unix(1000, 0), + }, + {ShortChannelID: scid3}, + }, func(t1 time.Time, _ time.Time) bool { + return !t1.Equal(time.Unix(1000, 0)) + }) + require.NoError(t, err) + + // Show that SCID 2 has been marked as live. + require.True(t, isZombie(scid1)) + require.False(t, isZombie(scid2)) + require.False(t, isZombie(scid3)) +} + // TestFilterKnownChanIDs tests that we're able to properly perform the set // differences of an incoming set of channel ID's, and those that we already // know of on disk. @@ -2859,6 +2954,7 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { if err := graph.UpdateEdgePolicy(edge1); err != nil { t.Fatalf("unable to update edge: %v", err) } + edge1 = copyEdgePolicy(edge1) // Avoid read/write race conditions. edge2 := randEdgePolicy(chanID.ToUint64(), graph.db) edge2.ChannelFlags = 1 @@ -2867,6 +2963,7 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { if err := graph.UpdateEdgePolicy(edge2); err != nil { t.Fatalf("unable to update edge: %v", err) } + edge2 = copyEdgePolicy(edge2) // Avoid read/write race conditions. // checkIndexTimestamps is a helper function that checks the edge update // index only includes the given timestamps. @@ -3952,7 +4049,7 @@ func TestGraphCacheForEachNodeChannel(t *testing.T) { getSingleChannel := func() *DirectedChannel { var ch *DirectedChannel - err = graph.forEachNodeDirectedChannel(nil, node1.PubKeyBytes, + err = graph.ForEachNodeDirectedChannel(node1.PubKeyBytes, func(c *DirectedChannel) error { require.Nil(t, ch) ch = c @@ -3974,6 +4071,7 @@ func TestGraphCacheForEachNodeChannel(t *testing.T) { 253, 217, 3, 8, 0, 0, 0, 10, 0, 0, 0, 20, } require.NoError(t, graph.UpdateEdgePolicy(edge1)) + edge1 = copyEdgePolicy(edge1) // Avoid read/write race conditions. directedChan := getSingleChannel() require.NotNil(t, directedChan) @@ -4005,8 +4103,12 @@ func TestGraphLoading(t *testing.T) { defer backend.Close() defer backendCleanup() - graph, err := NewChannelGraph(backend) + graph, err := NewChannelGraph(&Config{KVDB: backend}) require.NoError(t, err) + require.NoError(t, graph.Start()) + t.Cleanup(func() { + require.NoError(t, graph.Stop()) + }) // Populate the graph with test data. const numNodes = 100 @@ -4015,8 +4117,12 @@ func TestGraphLoading(t *testing.T) { // Recreate the graph. This should cause the graph cache to be // populated. 
- graphReloaded, err := NewChannelGraph(backend) + graphReloaded, err := NewChannelGraph(&Config{KVDB: backend}) require.NoError(t, err) + require.NoError(t, graphReloaded.Start()) + t.Cleanup(func() { + require.NoError(t, graphReloaded.Stop()) + }) // Assert that the cache content is identical. require.Equal( diff --git a/graph/db/kv_store.go b/graph/db/kv_store.go new file mode 100644 index 0000000000..aa120a39a7 --- /dev/null +++ b/graph/db/kv_store.go @@ -0,0 +1,4735 @@ +package graphdb + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "net" + "sort" + "sync" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcwallet/walletdb" + "github.com/lightningnetwork/lnd/aliasmgr" + "github.com/lightningnetwork/lnd/batch" + "github.com/lightningnetwork/lnd/graph/db/models" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/kvdb" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/stretchr/testify/require" +) + +var ( + // nodeBucket is a bucket which houses all the vertices or nodes within + // the channel graph. This bucket has a single-sub bucket which adds an + // additional index from pubkey -> alias. Within the top-level of this + // bucket, the key space maps a node's compressed public key to the + // serialized information for that node. Additionally, there's a + // special key "source" which stores the pubkey of the source node. The + // source node is used as the starting point for all graph/queries and + // traversals. The graph is formed as a star-graph with the source node + // at the center. + // + // maps: pubKey -> nodeInfo + // maps: source -> selfPubKey + nodeBucket = []byte("graph-node") + + // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket + // will be used to quickly look up the "freshness" of a node's last + // update to the network. The bucket only contains keys, and no values, + // it's mapping: + // + // maps: updateTime || nodeID -> nil + nodeUpdateIndexBucket = []byte("graph-node-update-index") + + // sourceKey is a special key that resides within the nodeBucket. The + // sourceKey maps a key to the public key of the "self node". + sourceKey = []byte("source") + + // aliasIndexBucket is a sub-bucket that's nested within the main + // nodeBucket. This bucket maps the public key of a node to its + // current alias. This bucket is provided as it can be used within a + // future UI layer to add an additional degree of confirmation. + aliasIndexBucket = []byte("alias") + + // edgeBucket is a bucket which houses all of the edge or channel + // information within the channel graph. This bucket essentially acts + // as an adjacency list, which in conjunction with a range scan, can be + // used to iterate over all the incoming and outgoing edges for a + // particular node. Key in the bucket use a prefix scheme which leads + // with the node's public key and sends with the compact edge ID. + // For each chanID, there will be two entries within the bucket, as the + // graph is directed: nodes may have different policies w.r.t to fees + // for their respective directions. + // + // maps: pubKey || chanID -> channel edge policy for node + edgeBucket = []byte("graph-edge") + + // unknownPolicy is represented as an empty slice. 
It is
+	// used as the value in edgeBucket for unknown channel edge policies.
+	// Unknown policies are still stored in the database to enable efficient
+	// lookup of incoming channel edges.
+	unknownPolicy = []byte{}
+
+	// chanStart is an array of all zero bytes which is used to perform
+	// range scans within the edgeBucket to obtain all of the outgoing
+	// edges for a particular node.
+	chanStart [8]byte
+
+	// edgeIndexBucket is an index which can be used to iterate all edges
+	// in the bucket, grouping them according to their in/out nodes.
+	// Additionally, the items in this bucket also contain the complete
+	// edge information for a channel. The edge information includes the
+	// capacity of the channel, the nodes that made the channel, etc. This
+	// bucket resides within the edgeBucket above. Creation of an edge
+	// proceeds in two phases: first the edge is added to the edge index,
+	// afterwards the edgeBucket can be updated with the latest details of
+	// the edge as they are announced on the network.
+	//
+	// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
+	edgeIndexBucket = []byte("edge-index")
+
+	// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
+	// bucket contains an index which allows us to gauge the "freshness" of
+	// a channel's last updates.
+	//
+	// maps: updateTime || chanID -> nil
+	edgeUpdateIndexBucket = []byte("edge-update-index")
+
+	// channelPointBucket maps a channel's full outpoint (txid:index) to
+	// its short 8-byte channel ID. This bucket resides within the
+	// edgeBucket above, and can be used to quickly remove an edge due to
+	// the outpoint being spent, or to query for existence of a channel.
+	//
+	// maps: outPoint -> chanID
+	channelPointBucket = []byte("chan-index")
+
+	// zombieBucket is a sub-bucket of the main edgeBucket bucket
+	// responsible for maintaining an index of zombie channels. Each entry
+	// exists within the bucket as follows:
+	//
+	// maps: chanID -> pubKey1 || pubKey2
+	//
+	// The chanID represents the channel ID of the edge that is marked as a
+	// zombie and is used as the key, which maps to the public keys of the
+	// edge's participants.
+	zombieBucket = []byte("zombie-index")
+
+	// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
+	// bucket responsible for maintaining an index of disabled edge
+	// policies. Each entry exists within the bucket as follows:
+	//
+	// maps: chanID || direction -> []byte{}
+	//
+	// The chanID represents the channel ID of the edge and the direction
+	// is one byte representing the direction of the edge. The main purpose
+	// of this index is to allow pruning disabled channels in a fast way
+	// without the need to iterate all over the graph.
+	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
+
+	// graphMetaBucket is a top-level bucket which stores various metadata
+	// related to the on-disk channel graph. Data stored in this bucket
+	// includes the block to which the graph has been synced, the total
+	// number of channels, etc.
+	graphMetaBucket = []byte("graph-meta")
+
+	// pruneLogBucket is a bucket within the graphMetaBucket that stores
+	// a mapping from the block height to the hash for the blocks used to
+	// prune the graph.
+	// Once a new block is discovered, any channels that have been closed
+	// (by spending the outpoint) can safely be removed from the graph, and
+	// the block is added to the prune log.
We need to keep such a log for + // the case where a reorg happens, and we must "rewind" the state of the + // graph by removing channels that were previously confirmed. In such a + // case we'll remove all entries from the prune log with a block height + // that no longer exists. + pruneLogBucket = []byte("prune-log") + + // closedScidBucket is a top-level bucket that stores scids for + // channels that we know to be closed. This is used so that we don't + // need to perform expensive validation checks if we receive a channel + // announcement for the channel again. + // + // maps: scid -> []byte{} + closedScidBucket = []byte("closed-scid") +) + +const ( + // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that + // we'll permit to be written to disk. We limit this as otherwise, it + // would be possible for a node to create a ton of updates and slowly + // fill our disk, and also waste bandwidth due to relaying. + MaxAllowedExtraOpaqueBytes = 10000 +) + +// KVStore is a persistent, on-disk graph representation of the Lightning +// Network. This struct can be used to implement path finding algorithms on top +// of, and also to update a node's view based on information received from the +// p2p network. Internally, the graph is stored using a modified adjacency list +// representation with some added object interaction possible with each +// serialized edge/node. The graph is stored is directed, meaning that are two +// edges stored for each channel: an inbound/outbound edge for each node pair. +// Nodes, edges, and edge information can all be added to the graph +// independently. Edge removal results in the deletion of all edge information +// for that edge. +type KVStore struct { + db kvdb.Backend + + // cacheMu guards all caches (rejectCache and chanCache). If + // this mutex will be acquired at the same time as the DB mutex then + // the cacheMu MUST be acquired first to prevent deadlock. + cacheMu sync.RWMutex + rejectCache *rejectCache + chanCache *channelCache + + chanScheduler batch.Scheduler + nodeScheduler batch.Scheduler +} + +// NewKVStore allocates a new KVStore backed by a DB instance. The +// returned instance has its own unique reject cache and channel cache. +func NewKVStore(db kvdb.Backend, options ...KVStoreOptionModifier) (*KVStore, + error) { + + opts := DefaultOptions() + for _, o := range options { + o(opts) + } + + if !opts.NoMigration { + if err := initKVStore(db); err != nil { + return nil, err + } + } + + g := &KVStore{ + db: db, + rejectCache: newRejectCache(opts.RejectCacheSize), + chanCache: newChannelCache(opts.ChannelCacheSize), + } + g.chanScheduler = batch.NewTimeScheduler( + db, &g.cacheMu, opts.BatchCommitInterval, + ) + g.nodeScheduler = batch.NewTimeScheduler( + db, nil, opts.BatchCommitInterval, + ) + + return g, nil +} + +// channelMapKey is the key structure used for storing channel edge policies. +type channelMapKey struct { + nodeKey route.Vertex + chanID [8]byte +} + +// getChannelMap loads all channel edge policies from the database and stores +// them in a map. +func (c *KVStore) getChannelMap(edges kvdb.RBucket) ( + map[channelMapKey]*models.ChannelEdgePolicy, error) { + + // Create a map to store all channel edge policies. + channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy) + + err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error { + // Skip embedded buckets. 
+		if bytes.Equal(k, edgeIndexBucket) ||
+			bytes.Equal(k, edgeUpdateIndexBucket) ||
+			bytes.Equal(k, zombieBucket) ||
+			bytes.Equal(k, disabledEdgePolicyBucket) ||
+			bytes.Equal(k, channelPointBucket) {
+
+			return nil
+		}
+
+		// Validate key length.
+		if len(k) != 33+8 {
+			return fmt.Errorf("invalid edge key %x encountered", k)
+		}
+
+		var key channelMapKey
+		copy(key.nodeKey[:], k[:33])
+		copy(key.chanID[:], k[33:])
+
+		// No need to deserialize unknown policy.
+		if bytes.Equal(edgeBytes, unknownPolicy) {
+			return nil
+		}
+
+		edgeReader := bytes.NewReader(edgeBytes)
+		edge, err := deserializeChanEdgePolicyRaw(
+			edgeReader,
+		)
+
+		switch {
+		// If the db policy was missing an expected optional field, we
+		// return nil as if the policy was unknown.
+		case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
+			return nil
+
+		case err != nil:
+			return err
+		}
+
+		channelMap[key] = edge
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return channelMap, nil
+}
+
+var graphTopLevelBuckets = [][]byte{
+	nodeBucket,
+	edgeBucket,
+	graphMetaBucket,
+	closedScidBucket,
+}
+
+// Wipe completely deletes all saved state within all used buckets within the
+// database. The deletion is done in a single transaction, therefore this
+// operation is fully atomic.
+func (c *KVStore) Wipe() error {
+	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
+		for _, tlb := range graphTopLevelBuckets {
+			err := tx.DeleteTopLevelBucket(tlb)
+			if err != nil &&
+				!errors.Is(err, kvdb.ErrBucketNotFound) {
+
+				return err
+			}
+		}
+
+		return nil
+	}, func() {})
+	if err != nil {
+		return err
+	}
+
+	return initKVStore(c.db)
+}
+
+// initKVStore creates and initializes a fresh version of the channel graph
+// store. In the case that the target path has not yet been created or doesn't
+// yet exist, then the path is created. Additionally, all required top-level
+// buckets used within the database are created.
+func initKVStore(db kvdb.Backend) error {
+	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
+		for _, tlb := range graphTopLevelBuckets {
+			if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
+				return err
+			}
+		}
+
+		nodes := tx.ReadWriteBucket(nodeBucket)
+		_, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
+		if err != nil {
+			return err
+		}
+		_, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
+		if err != nil {
+			return err
+		}
+
+		edges := tx.ReadWriteBucket(edgeBucket)
+		_, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
+		if err != nil {
+			return err
+		}
+		_, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
+		if err != nil {
+			return err
+		}
+		_, err = edges.CreateBucketIfNotExists(channelPointBucket)
+		if err != nil {
+			return err
+		}
+		_, err = edges.CreateBucketIfNotExists(zombieBucket)
+		if err != nil {
+			return err
+		}
+
+		graphMeta := tx.ReadWriteBucket(graphMetaBucket)
+		_, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
+
+		return err
+	}, func() {})
+	if err != nil {
+		return fmt.Errorf("unable to create new channel graph: %w", err)
+	}
+
+	return nil
+}
+
+// AddrsForNode returns all known addresses for the target node public key that
+// the graph DB is aware of. The returned boolean indicates whether the given
+// node is known to the graph DB.
+//
+// NOTE: this is part of the channeldb.AddrSource interface.
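For callers that only need the CRUD layer defined in this file, construction goes through NewKVStore and the KVStoreOptionModifier options referenced in config_builder.go. A small sketch under those assumptions; the newTunedKVStore helper, the graphsketch package, and the concrete cache sizes are illustrative only:

```go
package graphsketch

import (
	"time"

	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/kvdb"
)

// newTunedKVStore builds just the KVStore CRUD layer, sizing its reject and
// channel caches and the batch commit interval via the option modifiers.
func newTunedKVStore(db kvdb.Backend) (*graphdb.KVStore, error) {
	return graphdb.NewKVStore(
		db,
		graphdb.WithRejectCacheSize(50_000),
		graphdb.WithChannelCacheSize(20_000),
		graphdb.WithBatchCommitInterval(500*time.Millisecond),
	)
}
```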
+func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr, + error) { + + pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed()) + if err != nil { + return false, nil, err + } + + node, err := c.FetchLightningNode(pubKey) + // We don't consider it an error if the graph is unaware of the node. + switch { + case err != nil && !errors.Is(err, ErrGraphNodeNotFound): + return false, nil, err + + case errors.Is(err, ErrGraphNodeNotFound): + return false, nil, nil + } + + return true, node.Addresses, nil +} + +// ForEachChannel iterates through all the channel edges stored within the +// graph and invokes the passed callback for each edge. The callback takes two +// edges as since this is a directed graph, both the in/out edges are visited. +// If the callback returns an error, then the transaction is aborted and the +// iteration stops early. +// +// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer +// for that particular channel edge routing policy will be passed into the +// callback. +func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo, + *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error { + + return c.db.View(func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + + // First, load all edges in memory indexed by node and channel + // id. + channelMap, err := c.getChannelMap(edges) + if err != nil { + return err + } + + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + // Load edge index, recombine each channel with the policies + // loaded above and invoke the callback. + return kvdb.ForAll( + edgeIndex, func(k, edgeInfoBytes []byte) error { + var chanID [8]byte + copy(chanID[:], k) + + edgeInfoReader := bytes.NewReader(edgeInfoBytes) + info, err := deserializeChanEdgeInfo( + edgeInfoReader, + ) + if err != nil { + return err + } + + policy1 := channelMap[channelMapKey{ + nodeKey: info.NodeKey1Bytes, + chanID: chanID, + }] + + policy2 := channelMap[channelMapKey{ + nodeKey: info.NodeKey2Bytes, + chanID: chanID, + }] + + return cb(&info, policy1, policy2) + }, + ) + }, func() {}) +} + +// forEachNodeDirectedChannel iterates through all channels of a given node, +// executing the passed callback on the directed edge representing the channel +// and its incoming policy. If the callback returns an error, then the iteration +// is halted with the error propagated back up to the caller. An optional read +// transaction may be provided. If none is provided, a new one will be created. +// +// Unknown policies are passed into the callback as nil values. +func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx, + node route.Vertex, cb func(channel *DirectedChannel) error) error { + + // Fallback that uses the database. + toNodeCallback := func() route.Vertex { + return node + } + toNodeFeatures, err := c.fetchNodeFeatures(tx, node) + if err != nil { + return err + } + + dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1, + p2 *models.ChannelEdgePolicy) error { + + var cachedInPolicy *models.CachedEdgePolicy + if p2 != nil { + cachedInPolicy = models.NewCachedPolicy(p2) + cachedInPolicy.ToNodePubKey = toNodeCallback + cachedInPolicy.ToNodeFeatures = toNodeFeatures + } + + var inboundFee lnwire.Fee + if p1 != nil { + // Extract inbound fee. If there is a decoding error, + // skip this edge. 
+			_, err := p1.ExtraOpaqueData.ExtractRecords(&inboundFee)
+			if err != nil {
+				return nil
+			}
+		}
+
+		directedChannel := &DirectedChannel{
+			ChannelID:    e.ChannelID,
+			IsNode1:      node == e.NodeKey1Bytes,
+			OtherNode:    e.NodeKey2Bytes,
+			Capacity:     e.Capacity,
+			OutPolicySet: p1 != nil,
+			InPolicy:     cachedInPolicy,
+			InboundFee:   inboundFee,
+		}
+
+		if node == e.NodeKey2Bytes {
+			directedChannel.OtherNode = e.NodeKey1Bytes
+		}
+
+		return cb(directedChannel)
+	}
+
+	return nodeTraversal(tx, node[:], c.db, dbCallback)
+}
+
+// fetchNodeFeatures returns the features of a given node. If no features are
+// known for the node, an empty feature vector is returned. An optional read
+// transaction may be provided. If none is provided, a new one will be created.
+func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
+	node route.Vertex) (*lnwire.FeatureVector, error) {
+
+	// Fallback that uses the database.
+	targetNode, err := c.FetchLightningNodeTx(tx, node)
+	switch {
+	// If the node exists and has features, return them directly.
+	case err == nil:
+		return targetNode.Features, nil
+
+	// If we couldn't find a node announcement, populate a blank feature
+	// vector.
+	case errors.Is(err, ErrGraphNodeNotFound):
+		return lnwire.EmptyFeatureVector(), nil
+
+	// Otherwise, bubble the error up.
+	default:
+		return nil, err
+	}
+}
+
+// ForEachNodeDirectedChannel iterates through all channels of a given node,
+// executing the passed callback on the directed edge representing the channel
+// and its incoming policy. If the callback returns an error, then the iteration
+// is halted with the error propagated back up to the caller.
+//
+// Unknown policies are passed into the callback as nil values.
+//
+// NOTE: this is part of the graphdb.NodeTraverser interface.
+func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
+	cb func(channel *DirectedChannel) error) error {
+
+	return c.forEachNodeDirectedChannel(nil, nodePub, cb)
+}
+
+// FetchNodeFeatures returns the features of the given node. If no features are
+// known for the node, an empty feature vector is returned.
+//
+// NOTE: this is part of the graphdb.NodeTraverser interface.
+func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
+	*lnwire.FeatureVector, error) {
+
+	return c.fetchNodeFeatures(nil, nodePub)
+}
+
+// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
+// data to the call-back.
+//
+// NOTE: The callback contents MUST not be modified.
+func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
+	chans map[uint64]*DirectedChannel) error) error {
+
+	// Call back to a version that uses the database directly. We'll
+	// iterate over each node, then the set of channels for each node, and
+	// construct a similar callback function signature as the main function
+	// expects.
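A brief usage sketch of the NodeTraverser methods above: walking one node's channels and collecting its peers. Only ForEachNodeDirectedChannel and the DirectedChannel fields shown in this file are relied on; channelPeers and the graphsketch package are hypothetical, and skipping entries with a nil InPolicy is just one possible choice:

```go
package graphsketch

import (
	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/routing/route"
)

// channelPeers walks every channel of the given node via the NodeTraverser
// interface and collects the peer on the other end of each channel.
func channelPeers(g graphdb.NodeTraverser, node route.Vertex) ([]route.Vertex,
	error) {

	var peers []route.Vertex
	err := g.ForEachNodeDirectedChannel(node,
		func(c *graphdb.DirectedChannel) error {
			// A nil InPolicy means the remote policy has not been
			// seen yet; we skip such channels in this example.
			if c.InPolicy == nil {
				return nil
			}

			peers = append(peers, c.OtherNode)

			return nil
		},
	)
	if err != nil {
		return nil, err
	}

	return peers, nil
}
```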
+ return c.forEachNode(func(tx kvdb.RTx, + node *models.LightningNode) error { + + channels := make(map[uint64]*DirectedChannel) + + err := c.ForEachNodeChannelTx(tx, node.PubKeyBytes, + func(tx kvdb.RTx, e *models.ChannelEdgeInfo, + p1 *models.ChannelEdgePolicy, + p2 *models.ChannelEdgePolicy) error { + + toNodeCallback := func() route.Vertex { + return node.PubKeyBytes + } + toNodeFeatures, err := c.fetchNodeFeatures( + tx, node.PubKeyBytes, + ) + if err != nil { + return err + } + + var cachedInPolicy *models.CachedEdgePolicy + if p2 != nil { + cachedInPolicy = + models.NewCachedPolicy(p2) + cachedInPolicy.ToNodePubKey = + toNodeCallback + cachedInPolicy.ToNodeFeatures = + toNodeFeatures + } + + directedChannel := &DirectedChannel{ + ChannelID: e.ChannelID, + IsNode1: node.PubKeyBytes == + e.NodeKey1Bytes, + OtherNode: e.NodeKey2Bytes, + Capacity: e.Capacity, + OutPolicySet: p1 != nil, + InPolicy: cachedInPolicy, + } + + if node.PubKeyBytes == e.NodeKey2Bytes { + directedChannel.OtherNode = + e.NodeKey1Bytes + } + + channels[e.ChannelID] = directedChannel + + return nil + }) + if err != nil { + return err + } + + return cb(node.PubKeyBytes, channels) + }) +} + +// DisabledChannelIDs returns the channel ids of disabled channels. +// A channel is disabled when two of the associated ChanelEdgePolicies +// have their disabled bit on. +func (c *KVStore) DisabledChannelIDs() ([]uint64, error) { + var disabledChanIDs []uint64 + var chanEdgeFound map[uint64]struct{} + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + + disabledEdgePolicyIndex := edges.NestedReadBucket( + disabledEdgePolicyBucket, + ) + if disabledEdgePolicyIndex == nil { + return nil + } + + // We iterate over all disabled policies and we add each channel + // that has more than one disabled policy to disabledChanIDs + // array. + return disabledEdgePolicyIndex.ForEach( + func(k, v []byte) error { + chanID := byteOrder.Uint64(k[:8]) + _, edgeFound := chanEdgeFound[chanID] + if edgeFound { + delete(chanEdgeFound, chanID) + disabledChanIDs = append( + disabledChanIDs, chanID, + ) + + return nil + } + + chanEdgeFound[chanID] = struct{}{} + + return nil + }, + ) + }, func() { + disabledChanIDs = nil + chanEdgeFound = make(map[uint64]struct{}) + }) + if err != nil { + return nil, err + } + + return disabledChanIDs, nil +} + +// ForEachNode iterates through all the stored vertices/nodes in the graph, +// executing the passed callback with each node encountered. If the callback +// returns an error, then the transaction is aborted and the iteration stops +// early. Any operations performed on the NodeTx passed to the call-back are +// executed under the same read transaction and so, methods on the NodeTx object +// _MUST_ only be called from within the call-back. +func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error { + return c.forEachNode(func(tx kvdb.RTx, + node *models.LightningNode) error { + + return cb(newChanGraphNodeTx(tx, c, node)) + }) +} + +// forEachNode iterates through all the stored vertices/nodes in the graph, +// executing the passed callback with each node encountered. If the callback +// returns an error, then the transaction is aborted and the iteration stops +// early. +// +// TODO(roasbeef): add iterator interface to allow for memory efficient graph +// traversal when graph gets mega. 
+func (c *KVStore) forEachNode( + cb func(kvdb.RTx, *models.LightningNode) error) error { + + traversal := func(tx kvdb.RTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + return nodes.ForEach(func(pubKey, nodeBytes []byte) error { + // If this is the source key, then we skip this + // iteration as the value for this key is a pubKey + // rather than raw node information. + if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { + return nil + } + + nodeReader := bytes.NewReader(nodeBytes) + node, err := deserializeLightningNode(nodeReader) + if err != nil { + return err + } + + // Execute the callback, the transaction will abort if + // this returns an error. + return cb(tx, &node) + }) + } + + return kvdb.View(c.db, traversal, func() {}) +} + +// ForEachNodeCacheable iterates through all the stored vertices/nodes in the +// graph, executing the passed callback with each node encountered. If the +// callback returns an error, then the transaction is aborted and the iteration +// stops early. +func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex, + *lnwire.FeatureVector) error) error { + + traversal := func(tx kvdb.RTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + return nodes.ForEach(func(pubKey, nodeBytes []byte) error { + // If this is the source key, then we skip this + // iteration as the value for this key is a pubKey + // rather than raw node information. + if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { + return nil + } + + nodeReader := bytes.NewReader(nodeBytes) + node, features, err := deserializeLightningNodeCacheable( //nolint:ll + nodeReader, + ) + if err != nil { + return err + } + + // Execute the callback, the transaction will abort if + // this returns an error. + return cb(node, features) + }) + } + + return kvdb.View(c.db, traversal, func() {}) +} + +// SourceNode returns the source node of the graph. The source node is treated +// as the center node within a star-graph. This method may be used to kick off +// a path finding algorithm in order to explore the reachability of another +// node based off the source node. +func (c *KVStore) SourceNode() (*models.LightningNode, error) { + var source *models.LightningNode + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + node, err := c.sourceNode(nodes) + if err != nil { + return err + } + source = node + + return nil + }, func() { + source = nil + }) + if err != nil { + return nil, err + } + + return source, nil +} + +// sourceNode uses an existing database transaction and returns the source node +// of the graph. The source node is treated as the center node within a +// star-graph. This method may be used to kick off a path finding algorithm in +// order to explore the reachability of another node based off the source node. +func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode, + error) { + + selfPub := nodes.Get(sourceKey) + if selfPub == nil { + return nil, ErrSourceNodeNotSet + } + + // With the pubKey of the source node retrieved, we're able to + // fetch the full node information. 
+ node, err := fetchLightningNode(nodes, selfPub) + if err != nil { + return nil, err + } + + return &node, nil +} + +// SetSourceNode sets the source node within the graph database. The source +// node is to be used as the center of a star-graph within path finding +// algorithms. +func (c *KVStore) SetSourceNode(node *models.LightningNode) error { + nodePubBytes := node.PubKeyBytes[:] + + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return err + } + + // Next we create the mapping from source to the targeted + // public key. + if err := nodes.Put(sourceKey, nodePubBytes); err != nil { + return err + } + + // Finally, we commit the information of the lightning node + // itself. + return addLightningNode(tx, node) + }, func() {}) +} + +// AddLightningNode adds a vertex/node to the graph database. If the node is not +// in the database from before, this will add a new, unconnected one to the +// graph. If it is present from before, this will update that node's +// information. Note that this method is expected to only be called to update an +// already present node from a node announcement, or to insert a node found in a +// channel update. +// +// TODO(roasbeef): also need sig of announcement. +func (c *KVStore) AddLightningNode(node *models.LightningNode, + op ...batch.SchedulerOption) error { + + r := &batch.Request{ + Update: func(tx kvdb.RwTx) error { + return addLightningNode(tx, node) + }, + } + + for _, f := range op { + f(r) + } + + return c.nodeScheduler.Execute(r) +} + +func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error { + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return err + } + + aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket) + if err != nil { + return err + } + + updateIndex, err := nodes.CreateBucketIfNotExists( + nodeUpdateIndexBucket, + ) + if err != nil { + return err + } + + return putLightningNode(nodes, aliases, updateIndex, node) +} + +// LookupAlias attempts to return the alias as advertised by the target node. +// TODO(roasbeef): currently assumes that aliases are unique... +func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) { + var alias string + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodesNotFound + } + + aliases := nodes.NestedReadBucket(aliasIndexBucket) + if aliases == nil { + return ErrGraphNodesNotFound + } + + nodePub := pub.SerializeCompressed() + a := aliases.Get(nodePub) + if a == nil { + return ErrNodeAliasNotFound + } + + // TODO(roasbeef): should actually be using the utf-8 + // package... + alias = string(a) + + return nil + }, func() { + alias = "" + }) + if err != nil { + return "", err + } + + return alias, nil +} + +// DeleteLightningNode starts a new database transaction to remove a vertex/node +// from the database according to the node's public key. +func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error { + // TODO(roasbeef): ensure dangling edges are removed... 
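The deletion path below has to rebuild the nodeUpdateIndexBucket key (updateTime || nodeID) in order to clear the node's entry from the update index. A standalone sketch of that key layout, assuming byteOrder is big-endian as the ordered range scans in this file require; nodeUpdateIndexKey and the graphsketch package are hypothetical:

```go
package graphsketch

import (
	"encoding/binary"
	"time"
)

// nodeUpdateIndexKey lays out the node update index key: an 8-byte big-endian
// unix timestamp followed by the 33-byte compressed node public key.
// Big-endian ordering keeps the bucket sorted by update time for cursor scans.
func nodeUpdateIndexKey(lastUpdate time.Time, pubKey [33]byte) [8 + 33]byte {
	var key [8 + 33]byte
	binary.BigEndian.PutUint64(key[:8], uint64(lastUpdate.Unix()))
	copy(key[8:], pubKey[:])

	return key
}
```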
+ return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + nodes := tx.ReadWriteBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodeNotFound + } + + return c.deleteLightningNode(nodes, nodePub[:]) + }, func() {}) +} + +// deleteLightningNode uses an existing database transaction to remove a +// vertex/node from the database according to the node's public key. +func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket, + compressedPubKey []byte) error { + + aliases := nodes.NestedReadWriteBucket(aliasIndexBucket) + if aliases == nil { + return ErrGraphNodesNotFound + } + + if err := aliases.Delete(compressedPubKey); err != nil { + return err + } + + // Before we delete the node, we'll fetch its current state so we can + // determine when its last update was to clear out the node update + // index. + node, err := fetchLightningNode(nodes, compressedPubKey) + if err != nil { + return err + } + + if err := nodes.Delete(compressedPubKey); err != nil { + return err + } + + // Finally, we'll delete the index entry for the node within the + // nodeUpdateIndexBucket as this node is no longer active, so we don't + // need to track its last update. + nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket) + if nodeUpdateIndex == nil { + return ErrGraphNodesNotFound + } + + // In order to delete the entry, we'll need to reconstruct the key for + // its last update. + updateUnix := uint64(node.LastUpdate.Unix()) + var indexKey [8 + 33]byte + byteOrder.PutUint64(indexKey[:8], updateUnix) + copy(indexKey[8:], compressedPubKey) + + return nodeUpdateIndex.Delete(indexKey[:]) +} + +// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An +// undirected edge from the two target nodes are created. The information stored +// denotes the static attributes of the channel, such as the channelID, the keys +// involved in creation of the channel, and the set of features that the channel +// supports. The chanPoint and chanID are used to uniquely identify the edge +// globally within the database. +func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo, + op ...batch.SchedulerOption) error { + + var alreadyExists bool + r := &batch.Request{ + Reset: func() { + alreadyExists = false + }, + Update: func(tx kvdb.RwTx) error { + err := c.addChannelEdge(tx, edge) + + // Silence ErrEdgeAlreadyExist so that the batch can + // succeed, but propagate the error via local state. + if errors.Is(err, ErrEdgeAlreadyExist) { + alreadyExists = true + return nil + } + + return err + }, + OnCommit: func(err error) error { + switch { + case err != nil: + return err + case alreadyExists: + return ErrEdgeAlreadyExist + default: + c.rejectCache.remove(edge.ChannelID) + c.chanCache.remove(edge.ChannelID) + return nil + } + }, + } + + for _, f := range op { + if f == nil { + return fmt.Errorf("nil scheduler option was used") + } + + f(r) + } + + return c.chanScheduler.Execute(r) +} + +// addChannelEdge is the private form of AddChannelEdge that allows callers to +// utilize an existing db transaction. +func (c *KVStore) addChannelEdge(tx kvdb.RwTx, + edge *models.ChannelEdgeInfo) error { + + // Construct the channel's primary key which is the 8-byte channel ID. 
+ var chanKey [8]byte + binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID) + + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return err + } + edges, err := tx.CreateTopLevelBucket(edgeBucket) + if err != nil { + return err + } + edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) + if err != nil { + return err + } + chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket) + if err != nil { + return err + } + + // First, attempt to check if this edge has already been created. If + // so, then we can exit early as this method is meant to be idempotent. + if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil { + return ErrEdgeAlreadyExist + } + + // Before we insert the channel into the database, we'll ensure that + // both nodes already exist in the channel graph. If either node + // doesn't, then we'll insert a "shell" node that just includes its + // public key, so subsequent validation and queries can work properly. + _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:]) + switch { + case errors.Is(node1Err, ErrGraphNodeNotFound): + node1Shell := models.LightningNode{ + PubKeyBytes: edge.NodeKey1Bytes, + HaveNodeAnnouncement: false, + } + err := addLightningNode(tx, &node1Shell) + if err != nil { + return fmt.Errorf("unable to create shell node "+ + "for: %x: %w", edge.NodeKey1Bytes, err) + } + case node1Err != nil: + return node1Err + } + + _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:]) + switch { + case errors.Is(node2Err, ErrGraphNodeNotFound): + node2Shell := models.LightningNode{ + PubKeyBytes: edge.NodeKey2Bytes, + HaveNodeAnnouncement: false, + } + err := addLightningNode(tx, &node2Shell) + if err != nil { + return fmt.Errorf("unable to create shell node "+ + "for: %x: %w", edge.NodeKey2Bytes, err) + } + case node2Err != nil: + return node2Err + } + + // If the edge hasn't been created yet, then we'll first add it to the + // edge index in order to associate the edge between two nodes and also + // store the static components of the channel. + if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil { + return err + } + + // Mark edge policies for both sides as unknown. This is to enable + // efficient incoming channel lookup for a node. + keys := []*[33]byte{ + &edge.NodeKey1Bytes, + &edge.NodeKey2Bytes, + } + for _, key := range keys { + err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:]) + if err != nil { + return err + } + } + + // Finally we add it to the channel index which maps channel points + // (outpoints) to the shorter channel ID's. + var b bytes.Buffer + if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil { + return err + } + + return chanIndex.Put(b.Bytes(), chanKey[:]) +} + +// HasChannelEdge returns true if the database knows of a channel edge with the +// passed channel ID, and false otherwise. If an edge with that ID is found +// within the graph, then two time stamps representing the last time the edge +// was updated for both directed edges are returned along with the boolean. If +// it is not found, then the zombie index is checked and its result is returned +// as the second boolean. +func (c *KVStore) HasChannelEdge( + chanID uint64) (time.Time, time.Time, bool, bool, error) { + + var ( + upd1Time time.Time + upd2Time time.Time + exists bool + isZombie bool + ) + + // We'll query the cache with the shared lock held to allow multiple + // readers to access values in the cache concurrently if they exist. 
+ c.cacheMu.RLock() + if entry, ok := c.rejectCache.get(chanID); ok { + c.cacheMu.RUnlock() + upd1Time = time.Unix(entry.upd1Time, 0) + upd2Time = time.Unix(entry.upd2Time, 0) + exists, isZombie = entry.flags.unpack() + + return upd1Time, upd2Time, exists, isZombie, nil + } + c.cacheMu.RUnlock() + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + // The item was not found with the shared lock, so we'll acquire the + // exclusive lock and check the cache again in case another method added + // the entry to the cache while no lock was held. + if entry, ok := c.rejectCache.get(chanID); ok { + upd1Time = time.Unix(entry.upd1Time, 0) + upd2Time = time.Unix(entry.upd2Time, 0) + exists, isZombie = entry.flags.unpack() + + return upd1Time, upd2Time, exists, isZombie, nil + } + + if err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + var channelID [8]byte + byteOrder.PutUint64(channelID[:], chanID) + + // If the edge doesn't exist, then we'll also check our zombie + // index. + if edgeIndex.Get(channelID[:]) == nil { + exists = false + zombieIndex := edges.NestedReadBucket(zombieBucket) + if zombieIndex != nil { + isZombie, _, _ = isZombieEdge( + zombieIndex, chanID, + ) + } + + return nil + } + + exists = true + isZombie = false + + // If the channel has been found in the graph, then retrieve + // the edges itself so we can return the last updated + // timestamps. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodeNotFound + } + + e1, e2, err := fetchChanEdgePolicies( + edgeIndex, edges, channelID[:], + ) + if err != nil { + return err + } + + // As we may have only one of the edges populated, only set the + // update time if the edge was found in the database. + if e1 != nil { + upd1Time = e1.LastUpdate + } + if e2 != nil { + upd2Time = e2.LastUpdate + } + + return nil + }, func() {}); err != nil { + return time.Time{}, time.Time{}, exists, isZombie, err + } + + c.rejectCache.insert(chanID, rejectCacheEntry{ + upd1Time: upd1Time.Unix(), + upd2Time: upd2Time.Unix(), + flags: packRejectFlags(exists, isZombie), + }) + + return upd1Time, upd2Time, exists, isZombie, nil +} + +// AddEdgeProof sets the proof of an existing edge in the graph database. +func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID, + proof *models.ChannelAuthProof) error { + + // Construct the channel's primary key which is the 8-byte channel ID. + var chanKey [8]byte + binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64()) + + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) + if edges == nil { + return ErrEdgeNotFound + } + + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrEdgeNotFound + } + + edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:]) + if err != nil { + return err + } + + edge.AuthProof = proof + + return putChanEdgeInfo(edgeIndex, &edge, chanKey) + }, func() {}) +} + +const ( + // pruneTipBytes is the total size of the value which stores a prune + // entry of the graph in the prune log. The "prune tip" is the last + // entry in the prune log, and indicates if the channel graph is in + // sync with the current UTXO state. The structure of the value + // is: blockHash, taking 32 bytes total. 
+ pruneTipBytes = 32 +) + +// PruneGraph prunes newly closed channels from the channel graph in response +// to a new block being solved on the network. Any transactions which spend the +// funding output of any known channels within he graph will be deleted. +// Additionally, the "prune tip", or the last block which has been used to +// prune the graph is stored so callers can ensure the graph is fully in sync +// with the current UTXO state. A slice of channels that have been closed by +// the target block along with any pruned nodes are returned if the function +// succeeds without error. +func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint, + blockHash *chainhash.Hash, blockHeight uint32) ( + []*models.ChannelEdgeInfo, []route.Vertex, error) { + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + var ( + chansClosed []*models.ChannelEdgeInfo + prunedNodes []route.Vertex + ) + + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + // First grab the edges bucket which houses the information + // we'd like to delete + edges, err := tx.CreateTopLevelBucket(edgeBucket) + if err != nil { + return err + } + + // Next grab the two edge indexes which will also need to be + // updated. + edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) + if err != nil { + return err + } + chanIndex, err := edges.CreateBucketIfNotExists( + channelPointBucket, + ) + if err != nil { + return err + } + nodes := tx.ReadWriteBucket(nodeBucket) + if nodes == nil { + return ErrSourceNodeNotSet + } + zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) + if err != nil { + return err + } + + // For each of the outpoints that have been spent within the + // block, we attempt to delete them from the graph as if that + // outpoint was a channel, then it has now been closed. + for _, chanPoint := range spentOutputs { + // TODO(roasbeef): load channel bloom filter, continue + // if NOT if filter + + var opBytes bytes.Buffer + err := WriteOutpoint(&opBytes, chanPoint) + if err != nil { + return err + } + + // First attempt to see if the channel exists within + // the database, if not, then we can exit early. + chanID := chanIndex.Get(opBytes.Bytes()) + if chanID == nil { + continue + } + + // Attempt to delete the channel, an ErrEdgeNotFound + // will be returned if that outpoint isn't known to be + // a channel. If no error is returned, then a channel + // was successfully pruned. + edgeInfo, err := c.delChannelEdgeUnsafe( + edges, edgeIndex, chanIndex, zombieIndex, + chanID, false, false, + ) + if err != nil && !errors.Is(err, ErrEdgeNotFound) { + return err + } + + chansClosed = append(chansClosed, edgeInfo) + } + + metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) + if err != nil { + return err + } + + pruneBucket, err := metaBucket.CreateBucketIfNotExists( + pruneLogBucket, + ) + if err != nil { + return err + } + + // With the graph pruned, add a new entry to the prune log, + // which can be used to check if the graph is fully synced with + // the current UTXO state. + var blockHeightBytes [4]byte + byteOrder.PutUint32(blockHeightBytes[:], blockHeight) + + var newTip [pruneTipBytes]byte + copy(newTip[:], blockHash[:]) + + err = pruneBucket.Put(blockHeightBytes[:], newTip[:]) + if err != nil { + return err + } + + // Now that the graph has been pruned, we'll also attempt to + // prune any nodes that have had a channel closed within the + // latest block. 
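The prune-log entry written above is simple enough to sketch in isolation: a 4-byte big-endian height key mapping to the 32-byte block hash that PruneTip later reads back. pruneLogEntry and the graphsketch package are hypothetical:

```go
package graphsketch

import (
	"encoding/binary"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

// pruneLogEntry shows the prune-log layout: the key is the 4-byte big-endian
// block height, the value is the 32-byte hash of the block used to prune the
// graph.
func pruneLogEntry(height uint32, blockHash chainhash.Hash) (key [4]byte,
	value [32]byte) {

	binary.BigEndian.PutUint32(key[:], height)
	copy(value[:], blockHash[:])

	return key, value
}
```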
+ prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex) + + return err + }, func() { + chansClosed = nil + prunedNodes = nil + }) + if err != nil { + return nil, nil, err + } + + for _, channel := range chansClosed { + c.rejectCache.remove(channel.ChannelID) + c.chanCache.remove(channel.ChannelID) + } + + return chansClosed, prunedNodes, nil +} + +// PruneGraphNodes is a garbage collection method which attempts to prune out +// any nodes from the channel graph that are currently unconnected. This ensure +// that we only maintain a graph of reachable nodes. In the event that a pruned +// node gains more channels, it will be re-added back to the graph. +func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) { + var prunedNodes []route.Vertex + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + nodes := tx.ReadWriteBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodesNotFound + } + edges := tx.ReadWriteBucket(edgeBucket) + if edges == nil { + return ErrGraphNotFound + } + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + var err error + prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex) + if err != nil { + return err + } + + return nil + }, func() { + prunedNodes = nil + }) + + return prunedNodes, err +} + +// pruneGraphNodes attempts to remove any nodes from the graph who have had a +// channel closed within the current block. If the node still has existing +// channels in the graph, this will act as a no-op. +func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket, + edgeIndex kvdb.RwBucket) ([]route.Vertex, error) { + + log.Trace("Pruning nodes from graph with no open channels") + + // We'll retrieve the graph's source node to ensure we don't remove it + // even if it no longer has any open channels. + sourceNode, err := c.sourceNode(nodes) + if err != nil { + return nil, err + } + + // We'll use this map to keep count the number of references to a node + // in the graph. A node should only be removed once it has no more + // references in the graph. + nodeRefCounts := make(map[[33]byte]int) + err = nodes.ForEach(func(pubKey, nodeBytes []byte) error { + // If this is the source key, then we skip this + // iteration as the value for this key is a pubKey + // rather than raw node information. + if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { + return nil + } + + var nodePub [33]byte + copy(nodePub[:], pubKey) + nodeRefCounts[nodePub] = 0 + + return nil + }) + if err != nil { + return nil, err + } + + // To ensure we never delete the source node, we'll start off by + // bumping its ref count to 1. + nodeRefCounts[sourceNode.PubKeyBytes] = 1 + + // Next, we'll run through the edgeIndex which maps a channel ID to the + // edge info. We'll use this scan to populate our reference count map + // above. + err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error { + // The first 66 bytes of the edge info contain the pubkeys of + // the nodes that this edge attaches. We'll extract them, and + // add them to the ref count map. + var node1, node2 [33]byte + copy(node1[:], edgeInfoBytes[:33]) + copy(node2[:], edgeInfoBytes[33:]) + + // With the nodes extracted, we'll increase the ref count of + // each of the nodes. + nodeRefCounts[node1]++ + nodeRefCounts[node2]++ + + return nil + }) + if err != nil { + return nil, err + } + + // Finally, we'll make a second pass over the set of nodes, and delete + // any nodes that have a ref count of zero. 
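The reference-counting pass above reduces to a small amount of bookkeeping, sketched here in memory rather than against the buckets; pruneCandidates and the graphsketch package are hypothetical stand-ins for the transaction-backed logic:

```go
package graphsketch

// pruneCandidates mirrors the pruning idea: every known node starts at zero
// references, each channel bumps both of its endpoints, the source node is
// pinned so it can never be pruned, and any node left at zero is a candidate
// for deletion.
func pruneCandidates(nodes [][33]byte, channels [][2][33]byte,
	source [33]byte) [][33]byte {

	refCounts := make(map[[33]byte]int, len(nodes))
	for _, node := range nodes {
		refCounts[node] = 0
	}

	// Pin the source node so it survives even with no open channels.
	refCounts[source] = 1

	// Every channel references both of its endpoints.
	for _, ch := range channels {
		refCounts[ch[0]]++
		refCounts[ch[1]]++
	}

	var candidates [][33]byte
	for node, refs := range refCounts {
		if refs == 0 {
			candidates = append(candidates, node)
		}
	}

	return candidates
}
```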
+ var pruned []route.Vertex + for nodePubKey, refCount := range nodeRefCounts { + // If the ref count of the node isn't zero, then we can safely + // skip it as it still has edges to or from it within the + // graph. + if refCount != 0 { + continue + } + + // If we reach this point, then there are no longer any edges + // that connect this node, so we can delete it. + err := c.deleteLightningNode(nodes, nodePubKey[:]) + if err != nil { + if errors.Is(err, ErrGraphNodeNotFound) || + errors.Is(err, ErrGraphNodesNotFound) { + + log.Warnf("Unable to prune node %x from the "+ + "graph: %v", nodePubKey, err) + continue + } + + return nil, err + } + + log.Infof("Pruned unconnected node %x from channel graph", + nodePubKey[:]) + + pruned = append(pruned, nodePubKey) + } + + if len(pruned) > 0 { + log.Infof("Pruned %v unconnected nodes from the channel graph", + len(pruned)) + } + + return pruned, err +} + +// DisconnectBlockAtHeight is used to indicate that the block specified +// by the passed height has been disconnected from the main chain. This +// will "rewind" the graph back to the height below, deleting channels +// that are no longer confirmed from the graph. The prune log will be +// set to the last prune height valid for the remaining chain. +// Channels that were removed from the graph resulting from the +// disconnected block are returned. +func (c *KVStore) DisconnectBlockAtHeight(height uint32) ( + []*models.ChannelEdgeInfo, error) { + + // Every channel having a ShortChannelID starting at 'height' + // will no longer be confirmed. + startShortChanID := lnwire.ShortChannelID{ + BlockHeight: height, + } + + // Delete everything after this height from the db up until the + // SCID alias range. + endShortChanID := aliasmgr.StartingAlias + + // The block height will be the 3 first bytes of the channel IDs. + var chanIDStart [8]byte + byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64()) + var chanIDEnd [8]byte + byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64()) + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + // Keep track of the channels that are removed from the graph. + var removedChans []*models.ChannelEdgeInfo + + if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + edges, err := tx.CreateTopLevelBucket(edgeBucket) + if err != nil { + return err + } + edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) + if err != nil { + return err + } + chanIndex, err := edges.CreateBucketIfNotExists( + channelPointBucket, + ) + if err != nil { + return err + } + zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) + if err != nil { + return err + } + + // Scan from chanIDStart to chanIDEnd, deleting every + // found edge. + // NOTE: we must delete the edges after the cursor loop, since + // modifying the bucket while traversing is not safe. + // NOTE: We use a < comparison in bytes.Compare instead of <= + // so that the StartingAlias itself isn't deleted. + var keys [][]byte + cursor := edgeIndex.ReadWriteCursor() + + //nolint:ll + for k, _ := cursor.Seek(chanIDStart[:]); k != nil && + bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() { + keys = append(keys, k) + } + + for _, k := range keys { + edgeInfo, err := c.delChannelEdgeUnsafe( + edges, edgeIndex, chanIndex, zombieIndex, + k, false, false, + ) + if err != nil && !errors.Is(err, ErrEdgeNotFound) { + return err + } + + removedChans = append(removedChans, edgeInfo) + } + + // Delete all the entries in the prune log having a height + // greater or equal to the block disconnected. 
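The cursor scan above works because the block height occupies the most significant bytes of the 8-byte SCID, so every channel confirmed at or after a given height sorts into one contiguous key range that ends just before the SCID alias space. A sketch of those bounds, reusing aliasmgr.StartingAlias as the code does; scidScanBounds and the graphsketch package are hypothetical:

```go
package graphsketch

import (
	"encoding/binary"

	"github.com/lightningnetwork/lnd/aliasmgr"
	"github.com/lightningnetwork/lnd/lnwire"
)

// scidScanBounds derives the [start, end) key range for rewinding the graph
// past a disconnected block: from the first SCID at the given height up to,
// but not including, the start of the alias SCID range.
func scidScanBounds(height uint32) (start, end [8]byte) {
	startSCID := lnwire.ShortChannelID{BlockHeight: height}

	binary.BigEndian.PutUint64(start[:], startSCID.ToUint64())
	binary.BigEndian.PutUint64(end[:], aliasmgr.StartingAlias.ToUint64())

	return start, end
}
```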
+ metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) + if err != nil { + return err + } + + pruneBucket, err := metaBucket.CreateBucketIfNotExists( + pruneLogBucket, + ) + if err != nil { + return err + } + + var pruneKeyStart [4]byte + byteOrder.PutUint32(pruneKeyStart[:], height) + + var pruneKeyEnd [4]byte + byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32) + + // To avoid modifying the bucket while traversing, we delete + // the keys in a second loop. + var pruneKeys [][]byte + pruneCursor := pruneBucket.ReadWriteCursor() + //nolint:ll + for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil && + bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() { + pruneKeys = append(pruneKeys, k) + } + + for _, k := range pruneKeys { + if err := pruneBucket.Delete(k); err != nil { + return err + } + } + + return nil + }, func() { + removedChans = nil + }); err != nil { + return nil, err + } + + for _, channel := range removedChans { + c.rejectCache.remove(channel.ChannelID) + c.chanCache.remove(channel.ChannelID) + } + + return removedChans, nil +} + +// PruneTip returns the block height and hash of the latest block that has been +// used to prune channels in the graph. Knowing the "prune tip" allows callers +// to tell if the graph is currently in sync with the current best known UTXO +// state. +func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) { + var ( + tipHash chainhash.Hash + tipHeight uint32 + ) + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + graphMeta := tx.ReadBucket(graphMetaBucket) + if graphMeta == nil { + return ErrGraphNotFound + } + pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket) + if pruneBucket == nil { + return ErrGraphNeverPruned + } + + pruneCursor := pruneBucket.ReadCursor() + + // The prune key with the largest block height will be our + // prune tip. + k, v := pruneCursor.Last() + if k == nil { + return ErrGraphNeverPruned + } + + // Once we have the prune tip, the value will be the block hash, + // and the key the block height. + copy(tipHash[:], v) + tipHeight = byteOrder.Uint32(k) + + return nil + }, func() {}) + if err != nil { + return nil, 0, err + } + + return &tipHash, tipHeight, nil +} + +// DeleteChannelEdges removes edges with the given channel IDs from the +// database and marks them as zombies. This ensures that we're unable to re-add +// it to our database once again. If an edge does not exist within the +// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is +// true, then when we mark these edges as zombies, we'll set up the keys such +// that we require the node that failed to send the fresh update to be the one +// that resurrects the channel from its zombie state. The markZombie bool +// denotes whether or not to mark the channel as a zombie. +func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool, + chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) { + + // TODO(roasbeef): possibly delete from node bucket if node has no more + // channels + // TODO(roasbeef): don't delete both edges? 
+
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+
+	var infos []*models.ChannelEdgeInfo
+	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
+		edges := tx.ReadWriteBucket(edgeBucket)
+		if edges == nil {
+			return ErrEdgeNotFound
+		}
+		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
+		if edgeIndex == nil {
+			return ErrEdgeNotFound
+		}
+		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
+		if chanIndex == nil {
+			return ErrEdgeNotFound
+		}
+		nodes := tx.ReadWriteBucket(nodeBucket)
+		if nodes == nil {
+			return ErrGraphNodeNotFound
+		}
+		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
+		if err != nil {
+			return err
+		}
+
+		var rawChanID [8]byte
+		for _, chanID := range chanIDs {
+			byteOrder.PutUint64(rawChanID[:], chanID)
+			edgeInfo, err := c.delChannelEdgeUnsafe(
+				edges, edgeIndex, chanIndex, zombieIndex,
+				rawChanID[:], markZombie, strictZombiePruning,
+			)
+			if err != nil {
+				return err
+			}
+
+			infos = append(infos, edgeInfo)
+		}
+
+		return nil
+	}, func() {
+		infos = nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	for _, chanID := range chanIDs {
+		c.rejectCache.remove(chanID)
+		c.chanCache.remove(chanID)
+	}
+
+	return infos, nil
+}
+
+// ChannelID attempts to look up the 8-byte compact channel ID which maps to
+// the passed channel point (outpoint). If the passed channel doesn't exist
+// within the database, then ErrEdgeNotFound is returned.
+func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
+	var chanID uint64
+	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
+		var err error
+		chanID, err = getChanID(tx, chanPoint)
+		return err
+	}, func() {
+		chanID = 0
+	}); err != nil {
+		return 0, err
+	}
+
+	return chanID, nil
+}
+
+// getChanID returns the assigned channel ID for a given channel point.
+func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
+	var b bytes.Buffer
+	if err := WriteOutpoint(&b, chanPoint); err != nil {
+		return 0, err
+	}
+
+	edges := tx.ReadBucket(edgeBucket)
+	if edges == nil {
+		return 0, ErrGraphNoEdgesFound
+	}
+	chanIndex := edges.NestedReadBucket(channelPointBucket)
+	if chanIndex == nil {
+		return 0, ErrGraphNoEdgesFound
+	}
+
+	chanIDBytes := chanIndex.Get(b.Bytes())
+	if chanIDBytes == nil {
+		return 0, ErrEdgeNotFound
+	}
+
+	chanID := byteOrder.Uint64(chanIDBytes)
+
+	return chanID, nil
+}
+
+// TODO(roasbeef): allow updates to use Batch?
+
+// HighestChanID returns the "highest" known channel ID in the channel graph.
+// This represents the "newest" channel from the PoV of the chain. This method
+// can be used by peers to quickly determine if their graphs are in sync.
+func (c *KVStore) HighestChanID() (uint64, error) {
+	var cid uint64
+
+	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
+		edges := tx.ReadBucket(edgeBucket)
+		if edges == nil {
+			return ErrGraphNoEdgesFound
+		}
+		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
+		if edgeIndex == nil {
+			return ErrGraphNoEdgesFound
+		}
+
+		// In order to find the highest chan ID, we'll fetch a cursor
+		// and use that to seek to the "end" of our known range.
+		cidCursor := edgeIndex.ReadCursor()
+
+		lastChanID, _ := cidCursor.Last()
+
+		// If there's no key, then this means that we don't actually
+		// know of any channels, so we'll return a predictable error.
+		if lastChanID == nil {
+			return ErrGraphNoEdgesFound
+		}
+
+		// Otherwise, we'll deserialize the channel ID and return it
+		// to the caller.
+ cid = byteOrder.Uint64(lastChanID) + + return nil + }, func() { + cid = 0 + }) + if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) { + return 0, err + } + + return cid, nil +} + +// ChannelEdge represents the complete set of information for a channel edge in +// the known channel graph. This struct couples the core information of the +// edge as well as each of the known advertised edge policies. +type ChannelEdge struct { + // Info contains all the static information describing the channel. + Info *models.ChannelEdgeInfo + + // Policy1 points to the "first" edge policy of the channel containing + // the dynamic information required to properly route through the edge. + Policy1 *models.ChannelEdgePolicy + + // Policy2 points to the "second" edge policy of the channel containing + // the dynamic information required to properly route through the edge. + Policy2 *models.ChannelEdgePolicy + + // Node1 is "node 1" in the channel. This is the node that would have + // produced Policy1 if it exists. + Node1 *models.LightningNode + + // Node2 is "node 2" in the channel. This is the node that would have + // produced Policy2 if it exists. + Node2 *models.LightningNode +} + +// ChanUpdatesInHorizon returns all the known channel edges which have at least +// one edge that has an update timestamp within the specified horizon. +func (c *KVStore) ChanUpdatesInHorizon(startTime, + endTime time.Time) ([]ChannelEdge, error) { + + // To ensure we don't return duplicate ChannelEdges, we'll use an + // additional map to keep track of the edges already seen to prevent + // re-adding it. + var edgesSeen map[uint64]struct{} + var edgesToCache map[uint64]ChannelEdge + var edgesInHorizon []ChannelEdge + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + var hits int + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket) + if edgeUpdateIndex == nil { + return ErrGraphNoEdgesFound + } + + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodesNotFound + } + + // We'll now obtain a cursor to perform a range query within + // the index to find all channels within the horizon. + updateCursor := edgeUpdateIndex.ReadCursor() + + var startTimeBytes, endTimeBytes [8 + 8]byte + byteOrder.PutUint64( + startTimeBytes[:8], uint64(startTime.Unix()), + ) + byteOrder.PutUint64( + endTimeBytes[:8], uint64(endTime.Unix()), + ) + + // With our start and end times constructed, we'll step through + // the index collecting the info and policy of each update of + // each channel that has a last update within the time range. + // + //nolint:ll + for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil && + bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() { + // We have a new eligible entry, so we'll slice of the + // chan ID so we can query it in the DB. + chanID := indexKey[8:] + + // If we've already retrieved the info and policies for + // this edge, then we can skip it as we don't need to do + // so again. 
+ chanIDInt := byteOrder.Uint64(chanID) + if _, ok := edgesSeen[chanIDInt]; ok { + continue + } + + if channel, ok := c.chanCache.get(chanIDInt); ok { + hits++ + edgesSeen[chanIDInt] = struct{}{} + edgesInHorizon = append(edgesInHorizon, channel) + + continue + } + + // First, we'll fetch the static edge information. + edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) + if err != nil { + chanID := byteOrder.Uint64(chanID) + return fmt.Errorf("unable to fetch info for "+ + "edge with chan_id=%v: %v", chanID, err) + } + + // With the static information obtained, we'll now + // fetch the dynamic policy info. + edge1, edge2, err := fetchChanEdgePolicies( + edgeIndex, edges, chanID, + ) + if err != nil { + chanID := byteOrder.Uint64(chanID) + return fmt.Errorf("unable to fetch policies "+ + "for edge with chan_id=%v: %v", chanID, + err) + } + + node1, err := fetchLightningNode( + nodes, edgeInfo.NodeKey1Bytes[:], + ) + if err != nil { + return err + } + + node2, err := fetchLightningNode( + nodes, edgeInfo.NodeKey2Bytes[:], + ) + if err != nil { + return err + } + + // Finally, we'll collate this edge with the rest of + // edges to be returned. + edgesSeen[chanIDInt] = struct{}{} + channel := ChannelEdge{ + Info: &edgeInfo, + Policy1: edge1, + Policy2: edge2, + Node1: &node1, + Node2: &node2, + } + edgesInHorizon = append(edgesInHorizon, channel) + edgesToCache[chanIDInt] = channel + } + + return nil + }, func() { + edgesSeen = make(map[uint64]struct{}) + edgesToCache = make(map[uint64]ChannelEdge) + edgesInHorizon = nil + }) + switch { + case errors.Is(err, ErrGraphNoEdgesFound): + fallthrough + case errors.Is(err, ErrGraphNodesNotFound): + break + + case err != nil: + return nil, err + } + + // Insert any edges loaded from disk into the cache. + for chanid, channel := range edgesToCache { + c.chanCache.insert(chanid, channel) + } + + log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)", + float64(hits)/float64(len(edgesInHorizon)), hits, + len(edgesInHorizon)) + + return edgesInHorizon, nil +} + +// NodeUpdatesInHorizon returns all the known lightning node which have an +// update timestamp within the passed range. This method can be used by two +// nodes to quickly determine if they have the same set of up to date node +// announcements. +func (c *KVStore) NodeUpdatesInHorizon(startTime, + endTime time.Time) ([]models.LightningNode, error) { + + var nodesInHorizon []models.LightningNode + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodesNotFound + } + + nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket) + if nodeUpdateIndex == nil { + return ErrGraphNodesNotFound + } + + // We'll now obtain a cursor to perform a range query within + // the index to find all node announcements within the horizon. + updateCursor := nodeUpdateIndex.ReadCursor() + + var startTimeBytes, endTimeBytes [8 + 33]byte + byteOrder.PutUint64( + startTimeBytes[:8], uint64(startTime.Unix()), + ) + byteOrder.PutUint64( + endTimeBytes[:8], uint64(endTime.Unix()), + ) + + // With our start and end times constructed, we'll step through + // the index collecting info for each node within the time + // range. 
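+ // Each index key is laid out as updateTime (8 bytes) ||
+ // pubKey (33 bytes), so the node's public key can be sliced
+ // straight out of the key.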
+ // + //nolint:ll + for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil && + bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() { + nodePub := indexKey[8:] + node, err := fetchLightningNode(nodes, nodePub) + if err != nil { + return err + } + + nodesInHorizon = append(nodesInHorizon, node) + } + + return nil + }, func() { + nodesInHorizon = nil + }) + switch { + case errors.Is(err, ErrGraphNoEdgesFound): + fallthrough + case errors.Is(err, ErrGraphNodesNotFound): + break + + case err != nil: + return nil, err + } + + return nodesInHorizon, nil +} + +// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan +// ID's that we don't know and are not known zombies of the passed set. In other +// words, we perform a set difference of our set of chan ID's and the ones +// passed in. This method can be used by callers to determine the set of +// channels another peer knows of that we don't. The ChannelUpdateInfos for the +// known zombies is also returned. +func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64, + []ChannelUpdateInfo, error) { + + var ( + newChanIDs []uint64 + knownZombies []ChannelUpdateInfo + ) + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + // Fetch the zombie index, it may not exist if no edges have + // ever been marked as zombies. If the index has been + // initialized, we will use it later to skip known zombie edges. + zombieIndex := edges.NestedReadBucket(zombieBucket) + + // We'll run through the set of chanIDs and collate only the + // set of channel that are unable to be found within our db. + var cidBytes [8]byte + for _, info := range chansInfo { + scid := info.ShortChannelID.ToUint64() + byteOrder.PutUint64(cidBytes[:], scid) + + // If the edge is already known, skip it. + if v := edgeIndex.Get(cidBytes[:]); v != nil { + continue + } + + // If the edge is a known zombie, skip it. + if zombieIndex != nil { + isZombie, _, _ := isZombieEdge( + zombieIndex, scid, + ) + + if isZombie { + knownZombies = append( + knownZombies, info, + ) + + continue + } + } + + newChanIDs = append(newChanIDs, scid) + } + + return nil + }, func() { + newChanIDs = nil + knownZombies = nil + }) + switch { + // If we don't know of any edges yet, then we'll return the entire set + // of chan IDs specified. + case errors.Is(err, ErrGraphNoEdgesFound): + ogChanIDs := make([]uint64, len(chansInfo)) + for i, info := range chansInfo { + ogChanIDs[i] = info.ShortChannelID.ToUint64() + } + + return ogChanIDs, nil, nil + + case err != nil: + return nil, nil, err + } + + return newChanIDs, knownZombies, nil +} + +// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the +// latest received channel updates for the channel. +type ChannelUpdateInfo struct { + // ShortChannelID is the SCID identifier of the channel. + ShortChannelID lnwire.ShortChannelID + + // Node1UpdateTimestamp is the timestamp of the latest received update + // from the node 1 channel peer. This will be set to zero time if no + // update has yet been received from this node. + Node1UpdateTimestamp time.Time + + // Node2UpdateTimestamp is the timestamp of the latest received update + // from the node 2 channel peer. 
This will be set to zero time if no + // update has yet been received from this node. + Node2UpdateTimestamp time.Time +} + +// NewChannelUpdateInfo is a constructor which makes sure we initialize the +// timestamps with zero seconds unix timestamp which equals +// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`. +func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp, + node2Timestamp time.Time) ChannelUpdateInfo { + + chanInfo := ChannelUpdateInfo{ + ShortChannelID: scid, + Node1UpdateTimestamp: node1Timestamp, + Node2UpdateTimestamp: node2Timestamp, + } + + if node1Timestamp.IsZero() { + chanInfo.Node1UpdateTimestamp = time.Unix(0, 0) + } + + if node2Timestamp.IsZero() { + chanInfo.Node2UpdateTimestamp = time.Unix(0, 0) + } + + return chanInfo +} + +// BlockChannelRange represents a range of channels for a given block height. +type BlockChannelRange struct { + // Height is the height of the block all of the channels below were + // included in. + Height uint32 + + // Channels is the list of channels identified by their short ID + // representation known to us that were included in the block height + // above. The list may include channel update timestamp information if + // requested. + Channels []ChannelUpdateInfo +} + +// FilterChannelRange returns the channel ID's of all known channels which were +// mined in a block height within the passed range. The channel IDs are grouped +// by their common block height. This method can be used to quickly share with a +// peer the set of channels we know of within a particular range to catch them +// up after a period of time offline. If withTimestamps is true then the +// timestamp info of the latest received channel update messages of the channel +// will be included in the response. +func (c *KVStore) FilterChannelRange(startHeight, + endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) { + + startChanID := &lnwire.ShortChannelID{ + BlockHeight: startHeight, + } + + endChanID := lnwire.ShortChannelID{ + BlockHeight: endHeight, + TxIndex: math.MaxUint32 & 0x00ffffff, + TxPosition: math.MaxUint16, + } + + // As we need to perform a range scan, we'll convert the starting and + // ending height to their corresponding values when encoded using short + // channel ID's. + var chanIDStart, chanIDEnd [8]byte + byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64()) + byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64()) + + var channelsPerBlock map[uint32][]ChannelUpdateInfo + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + cursor := edgeIndex.ReadCursor() + + // We'll now iterate through the database, and find each + // channel ID that resides within the specified range. + // + //nolint:ll + for k, v := cursor.Seek(chanIDStart[:]); k != nil && + bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() { + // Don't send alias SCIDs during gossip sync. + edgeReader := bytes.NewReader(v) + edgeInfo, err := deserializeChanEdgeInfo(edgeReader) + if err != nil { + return err + } + + if edgeInfo.AuthProof == nil { + continue + } + + // This channel ID rests within the target range, so + // we'll add it to our returned set. 
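+ // Decoding the 8-byte key yields the short channel ID, whose
+ // block height is used to group the results below.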
+
+ rawCid := byteOrder.Uint64(k)
+ cid := lnwire.NewShortChanIDFromInt(rawCid)
+
+ chanInfo := NewChannelUpdateInfo(
+ cid, time.Time{}, time.Time{},
+ )
+
+ if !withTimestamps {
+ channelsPerBlock[cid.BlockHeight] = append(
+ channelsPerBlock[cid.BlockHeight],
+ chanInfo,
+ )
+
+ continue
+ }
+
+ node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
+
+ rawPolicy := edges.Get(node1Key)
+ if len(rawPolicy) != 0 {
+ r := bytes.NewReader(rawPolicy)
+
+ edge, err := deserializeChanEdgePolicyRaw(r)
+ if err != nil && !errors.Is(
+ err, ErrEdgePolicyOptionalFieldNotFound,
+ ) {
+
+ return err
+ }
+
+ chanInfo.Node1UpdateTimestamp = edge.LastUpdate
+ }
+
+ rawPolicy = edges.Get(node2Key)
+ if len(rawPolicy) != 0 {
+ r := bytes.NewReader(rawPolicy)
+
+ edge, err := deserializeChanEdgePolicyRaw(r)
+ if err != nil && !errors.Is(
+ err, ErrEdgePolicyOptionalFieldNotFound,
+ ) {
+
+ return err
+ }
+
+ chanInfo.Node2UpdateTimestamp = edge.LastUpdate
+ }
+
+ channelsPerBlock[cid.BlockHeight] = append(
+ channelsPerBlock[cid.BlockHeight], chanInfo,
+ )
+ }
+
+ return nil
+ }, func() {
+ channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
+ })
+
+ switch {
+ // If we don't know of any channels yet, then there's nothing to
+ // filter, so we'll return an empty slice.
+ case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
+ return nil, nil
+
+ case err != nil:
+ return nil, err
+ }
+
+ // Return the channel ranges in ascending block height order.
+ blocks := make([]uint32, 0, len(channelsPerBlock))
+ for block := range channelsPerBlock {
+ blocks = append(blocks, block)
+ }
+ sort.Slice(blocks, func(i, j int) bool {
+ return blocks[i] < blocks[j]
+ })
+
+ channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
+ for _, block := range blocks {
+ channelRanges = append(channelRanges, BlockChannelRange{
+ Height: block,
+ Channels: channelsPerBlock[block],
+ })
+ }
+
+ return channelRanges, nil
+}
+
+// FetchChanInfos returns the set of channel edges that correspond to the passed
+// channel ID's. If an edge in the query is unknown to the database, it will be
+// skipped and the result will contain only those edges that exist at the time
+// of the query. This can be used to respond to peer queries that are seeking to
+// fill in gaps in their view of the channel graph.
+func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
+ return c.fetchChanInfos(nil, chanIDs)
+}
+
+// fetchChanInfos returns the set of channel edges that correspond to the passed
+// channel ID's. If an edge in the query is unknown to the database, it will be
+// skipped and the result will contain only those edges that exist at the time
+// of the query. This can be used to respond to peer queries that are seeking to
+// fill in gaps in their view of the channel graph.
+//
+// NOTE: An optional transaction may be provided. If none is provided, then a
+// new one will be created.
+func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
+ []ChannelEdge, error) {
+ // TODO(roasbeef): sort cids?
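+ // If the caller passed a nil tx, the lookup below runs in a
+ // fresh read transaction; otherwise the provided transaction
+ // is reused.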
+ + var ( + chanEdges []ChannelEdge + cidBytes [8]byte + ) + + fetchChanInfos := func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + for _, cid := range chanIDs { + byteOrder.PutUint64(cidBytes[:], cid) + + // First, we'll fetch the static edge information. If + // the edge is unknown, we will skip the edge and + // continue gathering all known edges. + edgeInfo, err := fetchChanEdgeInfo( + edgeIndex, cidBytes[:], + ) + switch { + case errors.Is(err, ErrEdgeNotFound): + continue + case err != nil: + return err + } + + // With the static information obtained, we'll now + // fetch the dynamic policy info. + edge1, edge2, err := fetchChanEdgePolicies( + edgeIndex, edges, cidBytes[:], + ) + if err != nil { + return err + } + + node1, err := fetchLightningNode( + nodes, edgeInfo.NodeKey1Bytes[:], + ) + if err != nil { + return err + } + + node2, err := fetchLightningNode( + nodes, edgeInfo.NodeKey2Bytes[:], + ) + if err != nil { + return err + } + + chanEdges = append(chanEdges, ChannelEdge{ + Info: &edgeInfo, + Policy1: edge1, + Policy2: edge2, + Node1: &node1, + Node2: &node2, + }) + } + + return nil + } + + if tx == nil { + err := kvdb.View(c.db, fetchChanInfos, func() { + chanEdges = nil + }) + if err != nil { + return nil, err + } + + return chanEdges, nil + } + + err := fetchChanInfos(tx) + if err != nil { + return nil, err + } + + return chanEdges, nil +} + +func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64, + edge1, edge2 *models.ChannelEdgePolicy) error { + + // First, we'll fetch the edge update index bucket which currently + // stores an entry for the channel we're about to delete. + updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket) + if updateIndex == nil { + // No edges in bucket, return early. + return nil + } + + // Now that we have the bucket, we'll attempt to construct a template + // for the index key: updateTime || chanid. + var indexKey [8 + 8]byte + byteOrder.PutUint64(indexKey[8:], chanID) + + // With the template constructed, we'll attempt to delete an entry that + // would have been created by both edges: we'll alternate the update + // times, as one may had overridden the other. + if edge1 != nil { + byteOrder.PutUint64( + indexKey[:8], uint64(edge1.LastUpdate.Unix()), + ) + if err := updateIndex.Delete(indexKey[:]); err != nil { + return err + } + } + + // We'll also attempt to delete the entry that may have been created by + // the second edge. + if edge2 != nil { + byteOrder.PutUint64( + indexKey[:8], uint64(edge2.LastUpdate.Unix()), + ) + if err := updateIndex.Delete(indexKey[:]); err != nil { + return err + } + } + + return nil +} + +// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph +// cache. It then goes on to delete any policy info and edge info for this +// channel from the DB and finally, if isZombie is true, it will add an entry +// for this channel in the zombie index. +// +// NOTE: this method MUST only be called if the cacheMu has already been +// acquired. 
+func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex, + zombieIndex kvdb.RwBucket, chanID []byte, isZombie, + strictZombie bool) (*models.ChannelEdgeInfo, error) { + + edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) + if err != nil { + return nil, err + } + + // We'll also remove the entry in the edge update index bucket before + // we delete the edges themselves so we can access their last update + // times. + cid := byteOrder.Uint64(chanID) + edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID) + if err != nil { + return nil, err + } + err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2) + if err != nil { + return nil, err + } + + // The edge key is of the format pubKey || chanID. First we construct + // the latter half, populating the channel ID. + var edgeKey [33 + 8]byte + copy(edgeKey[33:], chanID) + + // With the latter half constructed, copy over the first public key to + // delete the edge in this direction, then the second to delete the + // edge in the opposite direction. + copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:]) + if edges.Get(edgeKey[:]) != nil { + if err := edges.Delete(edgeKey[:]); err != nil { + return nil, err + } + } + copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:]) + if edges.Get(edgeKey[:]) != nil { + if err := edges.Delete(edgeKey[:]); err != nil { + return nil, err + } + } + + // As part of deleting the edge we also remove all disabled entries + // from the edgePolicyDisabledIndex bucket. We do that for both + // directions. + err = updateEdgePolicyDisabledIndex(edges, cid, false, false) + if err != nil { + return nil, err + } + err = updateEdgePolicyDisabledIndex(edges, cid, true, false) + if err != nil { + return nil, err + } + + // With the edge data deleted, we can purge the information from the two + // edge indexes. + if err := edgeIndex.Delete(chanID); err != nil { + return nil, err + } + var b bytes.Buffer + if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil { + return nil, err + } + if err := chanIndex.Delete(b.Bytes()); err != nil { + return nil, err + } + + // Finally, we'll mark the edge as a zombie within our index if it's + // being removed due to the channel becoming a zombie. We do this to + // ensure we don't store unnecessary data for spent channels. + if !isZombie { + return &edgeInfo, nil + } + + nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes + if strictZombie { + nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2) + } + + return &edgeInfo, markEdgeZombie( + zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2, + ) +} + +// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a +// particular pair of channel policies. The return values are one of: +// 1. (pubkey1, pubkey2) +// 2. (pubkey1, blank) +// 3. (blank, pubkey2) +// +// A blank pubkey means that corresponding node will be unable to resurrect a +// channel on its own. For example, node1 may continue to publish recent +// updates, but node2 has fallen way behind. After marking an edge as a zombie, +// we don't want another fresh update from node1 to resurrect, as the edge can +// only become live once node2 finally sends something recent. +// +// In the case where we have neither update, we allow either party to resurrect +// the channel. If the channel were to be marked zombie again, it would be +// marked with the correct lagging channel since we received an update from only +// one side. 
+func makeZombiePubkeys(info *models.ChannelEdgeInfo,
+ e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
+
+ switch {
+ // If we don't have either edge policy, we'll return both pubkeys so
+ // that the channel can be resurrected by either party.
+ case e1 == nil && e2 == nil:
+ return info.NodeKey1Bytes, info.NodeKey2Bytes
+
+ // If we're missing edge1, or if both edges are present but edge1 is
+ // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
+ // means that only an update from edge1 will be able to resurrect the
+ // channel.
+ case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
+ return info.NodeKey1Bytes, [33]byte{}
+
+ // Otherwise, we're missing edge2 or edge2 is the older side, so we
+ // return a blank pubkey for edge1. In this case, only an update from
+ // edge2 can resurrect the channel.
+ default:
+ return [33]byte{}, info.NodeKey2Bytes
+ }
+}
+
+// UpdateEdgePolicy updates the edge routing policy for a single directed edge
+// within the database for the referenced channel. The `flags` attribute within
+// the ChannelEdgePolicy determines which of the directed edges are being
+// updated. If the flag is 1, then the second node's information is being
+// updated, otherwise it's the first node's information. The node ordering is
+// determined by the lexicographical ordering of the identity public keys of the
+// nodes on either side of the channel.
+func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
+ op ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
+
+ var (
+ isUpdate1 bool
+ edgeNotFound bool
+ from, to route.Vertex
+ )
+
+ r := &batch.Request{
+ Reset: func() {
+ isUpdate1 = false
+ edgeNotFound = false
+ },
+ Update: func(tx kvdb.RwTx) error {
+ var err error
+ from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
+ if err != nil {
+ log.Errorf("UpdateEdgePolicy failed: %v", err)
+ }
+
+ // Silence ErrEdgeNotFound so that the batch can
+ // succeed, but propagate the error via local state.
+ if errors.Is(err, ErrEdgeNotFound) {
+ edgeNotFound = true
+ return nil
+ }
+
+ return err
+ },
+ OnCommit: func(err error) error {
+ switch {
+ case err != nil:
+ return err
+ case edgeNotFound:
+ return ErrEdgeNotFound
+ default:
+ c.updateEdgeCache(edge, isUpdate1)
+ return nil
+ }
+ },
+ }
+
+ for _, f := range op {
+ f(r)
+ }
+
+ err := c.chanScheduler.Execute(r)
+
+ return from, to, err
+}
+
+func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
+ isUpdate1 bool) {
+
+ // If an entry for this channel is found in reject cache, we'll modify
+ // the entry with the updated timestamp for the direction that was just
+ // written. If the edge doesn't exist, we'll load the cache entry lazily
+ // during the next query for this edge.
+ if entry, ok := c.rejectCache.get(e.ChannelID); ok {
+ if isUpdate1 {
+ entry.upd1Time = e.LastUpdate.Unix()
+ } else {
+ entry.upd2Time = e.LastUpdate.Unix()
+ }
+ c.rejectCache.insert(e.ChannelID, entry)
+ }
+
+ // If an entry for this channel is found in channel cache, we'll modify
+ // the entry with the updated policy for the direction that was just
+ // written. If the edge doesn't exist, we'll defer loading the info and
+ // policies and lazily read from disk during the next query.
+ if channel, ok := c.chanCache.get(e.ChannelID); ok { + if isUpdate1 { + channel.Policy1 = e + } else { + channel.Policy2 = e + } + c.chanCache.insert(e.ChannelID, channel) + } +} + +// updateEdgePolicy attempts to update an edge's policy within the relevant +// buckets using an existing database transaction. The returned boolean will be +// true if the updated policy belongs to node1, and false if the policy belonged +// to node2. +func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) ( + route.Vertex, route.Vertex, bool, error) { + + var noVertex route.Vertex + + edges := tx.ReadWriteBucket(edgeBucket) + if edges == nil { + return noVertex, noVertex, false, ErrEdgeNotFound + } + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) + if edgeIndex == nil { + return noVertex, noVertex, false, ErrEdgeNotFound + } + + // Create the channelID key be converting the channel ID + // integer into a byte slice. + var chanID [8]byte + byteOrder.PutUint64(chanID[:], edge.ChannelID) + + // With the channel ID, we then fetch the value storing the two + // nodes which connect this channel edge. + nodeInfo := edgeIndex.Get(chanID[:]) + if nodeInfo == nil { + return noVertex, noVertex, false, ErrEdgeNotFound + } + + // Depending on the flags value passed above, either the first + // or second edge policy is being updated. + var fromNode, toNode []byte + var isUpdate1 bool + if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { + fromNode = nodeInfo[:33] + toNode = nodeInfo[33:66] + isUpdate1 = true + } else { + fromNode = nodeInfo[33:66] + toNode = nodeInfo[:33] + isUpdate1 = false + } + + // Finally, with the direction of the edge being updated + // identified, we update the on-disk edge representation. + err := putChanEdgePolicy(edges, edge, fromNode, toNode) + if err != nil { + return noVertex, noVertex, false, err + } + + var ( + fromNodePubKey route.Vertex + toNodePubKey route.Vertex + ) + copy(fromNodePubKey[:], fromNode) + copy(toNodePubKey[:], toNode) + + return fromNodePubKey, toNodePubKey, isUpdate1, nil +} + +// isPublic determines whether the node is seen as public within the graph from +// the source node's point of view. An existing database transaction can also be +// specified. +func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex, + sourcePubKey []byte) (bool, error) { + + // In order to determine whether this node is publicly advertised within + // the graph, we'll need to look at all of its edges and check whether + // they extend to any other node than the source node. errDone will be + // used to terminate the check early. + nodeIsPublic := false + errDone := errors.New("done") + err := c.ForEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx, + info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy, + _ *models.ChannelEdgePolicy) error { + + // If this edge doesn't extend to the source node, we'll + // terminate our search as we can now conclude that the node is + // publicly advertised within the graph due to the local node + // knowing of the current edge. + if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) && + !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) { + + nodeIsPublic = true + return errDone + } + + // Since the edge _does_ extend to the source node, we'll also + // need to ensure that this is a public edge. + if info.AuthProof != nil { + nodeIsPublic = true + return errDone + } + + // Otherwise, we'll continue our search. 
+ return nil + }) + if err != nil && !errors.Is(err, errDone) { + return false, err + } + + return nodeIsPublic, nil +} + +// FetchLightningNodeTx attempts to look up a target node by its identity +// public key. If the node isn't found in the database, then +// ErrGraphNodeNotFound is returned. An optional transaction may be provided. +// If none is provided, then a new one will be created. +func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) ( + *models.LightningNode, error) { + + return c.fetchLightningNode(tx, nodePub) +} + +// FetchLightningNode attempts to look up a target node by its identity public +// key. If the node isn't found in the database, then ErrGraphNodeNotFound is +// returned. +func (c *KVStore) FetchLightningNode(nodePub route.Vertex) ( + *models.LightningNode, error) { + + return c.fetchLightningNode(nil, nodePub) +} + +// fetchLightningNode attempts to look up a target node by its identity public +// key. If the node isn't found in the database, then ErrGraphNodeNotFound is +// returned. An optional transaction may be provided. If none is provided, then +// a new one will be created. +func (c *KVStore) fetchLightningNode(tx kvdb.RTx, + nodePub route.Vertex) (*models.LightningNode, error) { + + var node *models.LightningNode + fetch := func(tx kvdb.RTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + // If a key for this serialized public key isn't found, then + // the target node doesn't exist within the database. + nodeBytes := nodes.Get(nodePub[:]) + if nodeBytes == nil { + return ErrGraphNodeNotFound + } + + // If the node is found, then we can de deserialize the node + // information to return to the user. + nodeReader := bytes.NewReader(nodeBytes) + n, err := deserializeLightningNode(nodeReader) + if err != nil { + return err + } + + node = &n + + return nil + } + + if tx == nil { + err := kvdb.View( + c.db, fetch, func() { + node = nil + }, + ) + if err != nil { + return nil, err + } + + return node, nil + } + + err := fetch(tx) + if err != nil { + return nil, err + } + + return node, nil +} + +// HasLightningNode determines if the graph has a vertex identified by the +// target node identity public key. If the node exists in the database, a +// timestamp of when the data for the node was lasted updated is returned along +// with a true boolean. Otherwise, an empty time.Time is returned with a false +// boolean. +func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool, + error) { + + var ( + updateTime time.Time + exists bool + ) + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + // If a key for this serialized public key isn't found, we can + // exit early. + nodeBytes := nodes.Get(nodePub[:]) + if nodeBytes == nil { + exists = false + return nil + } + + // Otherwise we continue on to obtain the time stamp + // representing the last time the data for this node was + // updated. 
+ nodeReader := bytes.NewReader(nodeBytes) + node, err := deserializeLightningNode(nodeReader) + if err != nil { + return err + } + + exists = true + updateTime = node.LastUpdate + + return nil + }, func() { + updateTime = time.Time{} + exists = false + }) + if err != nil { + return time.Time{}, exists, err + } + + return updateTime, exists, nil +} + +// nodeTraversal is used to traverse all channels of a node given by its +// public key and passes channel information into the specified callback. +func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend, + cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, + *models.ChannelEdgePolicy) error) error { + + traversal := func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNotFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + // In order to reach all the edges for this node, we take + // advantage of the construction of the key-space within the + // edge bucket. The keys are stored in the form: pubKey || + // chanID. Therefore, starting from a chanID of zero, we can + // scan forward in the bucket, grabbing all the edges for the + // node. Once the prefix no longer matches, then we know we're + // done. + var nodeStart [33 + 8]byte + copy(nodeStart[:], nodePub) + copy(nodeStart[33:], chanStart[:]) + + // Starting from the key pubKey || 0, we seek forward in the + // bucket until the retrieved key no longer has the public key + // as its prefix. This indicates that we've stepped over into + // another node's edges, so we can terminate our scan. + edgeCursor := edges.ReadCursor() + for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll + // If the prefix still matches, the channel id is + // returned in nodeEdge. Channel id is used to lookup + // the node at the other end of the channel and both + // edge policies. + chanID := nodeEdge[33:] + edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) + if err != nil { + return err + } + + outgoingPolicy, err := fetchChanEdgePolicy( + edges, chanID, nodePub, + ) + if err != nil { + return err + } + + otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub) + if err != nil { + return err + } + + incomingPolicy, err := fetchChanEdgePolicy( + edges, chanID, otherNode[:], + ) + if err != nil { + return err + } + + // Finally, we execute the callback. + err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy) + if err != nil { + return err + } + } + + return nil + } + + // If no transaction was provided, then we'll create a new transaction + // to execute the transaction within. + if tx == nil { + return kvdb.View(db, traversal, func() {}) + } + + // Otherwise, we re-use the existing transaction to execute the graph + // traversal. + return traversal(tx) +} + +// ForEachNodeChannel iterates through all channels of the given node, +// executing the passed callback with an edge info structure and the policies +// of each end of the channel. The first edge policy is the outgoing edge *to* +// the connecting node, while the second is the incoming edge *from* the +// connecting node. If the callback returns an error, then the iteration is +// halted with the error propagated back up to the caller. +// +// Unknown policies are passed into the callback as nil values. 
+func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex, + cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, + *models.ChannelEdgePolicy) error) error { + + return nodeTraversal(nil, nodePub[:], c.db, cb) +} + +// ForEachNodeChannelTx iterates through all channels of the given node, +// executing the passed callback with an edge info structure and the policies +// of each end of the channel. The first edge policy is the outgoing edge *to* +// the connecting node, while the second is the incoming edge *from* the +// connecting node. If the callback returns an error, then the iteration is +// halted with the error propagated back up to the caller. +// +// Unknown policies are passed into the callback as nil values. +// +// If the caller wishes to re-use an existing boltdb transaction, then it +// should be passed as the first argument. Otherwise, the first argument should +// be nil and a fresh transaction will be created to execute the graph +// traversal. +func (c *KVStore) ForEachNodeChannelTx(tx kvdb.RTx, + nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo, + *models.ChannelEdgePolicy, + *models.ChannelEdgePolicy) error) error { + + return nodeTraversal(tx, nodePub[:], c.db, cb) +} + +// FetchOtherNode attempts to fetch the full LightningNode that's opposite of +// the target node in the channel. This is useful when one knows the pubkey of +// one of the nodes, and wishes to obtain the full LightningNode for the other +// end of the channel. +func (c *KVStore) FetchOtherNode(tx kvdb.RTx, + channel *models.ChannelEdgeInfo, thisNodeKey []byte) ( + *models.LightningNode, error) { + + // Ensure that the node passed in is actually a member of the channel. + var targetNodeBytes [33]byte + switch { + case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey): + targetNodeBytes = channel.NodeKey2Bytes + case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey): + targetNodeBytes = channel.NodeKey1Bytes + default: + return nil, fmt.Errorf("node not participating in this channel") + } + + var targetNode *models.LightningNode + fetchNodeFunc := func(tx kvdb.RTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + node, err := fetchLightningNode(nodes, targetNodeBytes[:]) + if err != nil { + return err + } + + targetNode = &node + + return nil + } + + // If the transaction is nil, then we'll need to create a new one, + // otherwise we can use the existing db transaction. + var err error + if tx == nil { + err = kvdb.View(c.db, fetchNodeFunc, func() { + targetNode = nil + }) + } else { + err = fetchNodeFunc(tx) + } + + return targetNode, err +} + +// computeEdgePolicyKeys is a helper function that can be used to compute the +// keys used to index the channel edge policy info for the two nodes of the +// edge. The keys for node 1 and node 2 are returned respectively. +func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) { + var ( + node1Key [33 + 8]byte + node2Key [33 + 8]byte + ) + + copy(node1Key[:], info.NodeKey1Bytes[:]) + copy(node2Key[:], info.NodeKey2Bytes[:]) + + byteOrder.PutUint64(node1Key[33:], info.ChannelID) + byteOrder.PutUint64(node2Key[33:], info.ChannelID) + + return node1Key[:], node2Key[:] +} + +// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for +// the channel identified by the funding outpoint. If the channel can't be +// found, then ErrEdgeNotFound is returned. 
A struct which houses the general +// information for the channel itself is returned as well as two structs that +// contain the routing policies for the channel in either direction. +func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) ( + *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, + *models.ChannelEdgePolicy, error) { + + var ( + edgeInfo *models.ChannelEdgeInfo + policy1 *models.ChannelEdgePolicy + policy2 *models.ChannelEdgePolicy + ) + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + // First, grab the node bucket. This will be used to populate + // the Node pointers in each edge read from disk. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + // Next, grab the edge bucket which stores the edges, and also + // the index itself so we can group the directed edges together + // logically. + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + // If the channel's outpoint doesn't exist within the outpoint + // index, then the edge does not exist. + chanIndex := edges.NestedReadBucket(channelPointBucket) + if chanIndex == nil { + return ErrGraphNoEdgesFound + } + var b bytes.Buffer + if err := WriteOutpoint(&b, op); err != nil { + return err + } + chanID := chanIndex.Get(b.Bytes()) + if chanID == nil { + return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op) + } + + // If the channel is found to exists, then we'll first retrieve + // the general information for the channel. + edge, err := fetchChanEdgeInfo(edgeIndex, chanID) + if err != nil { + return fmt.Errorf("%w: chanID=%x", err, chanID) + } + edgeInfo = &edge + + // Once we have the information about the channels' parameters, + // we'll fetch the routing policies for each for the directed + // edges. + e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID) + if err != nil { + return fmt.Errorf("failed to find policy: %w", err) + } + + policy1 = e1 + policy2 = e2 + + return nil + }, func() { + edgeInfo = nil + policy1 = nil + policy2 = nil + }) + if err != nil { + return nil, nil, nil, err + } + + return edgeInfo, policy1, policy2, nil +} + +// FetchChannelEdgesByID attempts to lookup the two directed edges for the +// channel identified by the channel ID. If the channel can't be found, then +// ErrEdgeNotFound is returned. A struct which houses the general information +// for the channel itself is returned as well as two structs that contain the +// routing policies for the channel in either direction. +// +// ErrZombieEdge an be returned if the edge is currently marked as a zombie +// within the database. In this case, the ChannelEdgePolicy's will be nil, and +// the ChannelEdgeInfo will only include the public keys of each node. +func (c *KVStore) FetchChannelEdgesByID(chanID uint64) ( + *models.ChannelEdgeInfo, *models.ChannelEdgePolicy, + *models.ChannelEdgePolicy, error) { + + var ( + edgeInfo *models.ChannelEdgeInfo + policy1 *models.ChannelEdgePolicy + policy2 *models.ChannelEdgePolicy + channelID [8]byte + ) + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + // First, grab the node bucket. This will be used to populate + // the Node pointers in each edge read from disk. + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNotFound + } + + // Next, grab the edge bucket which stores the edges, and also + // the index itself so we can group the directed edges together + // logically. 
+ edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + byteOrder.PutUint64(channelID[:], chanID) + + // Now, attempt to fetch edge. + edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:]) + + // If it doesn't exist, we'll quickly check our zombie index to + // see if we've previously marked it as so. + if errors.Is(err, ErrEdgeNotFound) { + // If the zombie index doesn't exist, or the edge is not + // marked as a zombie within it, then we'll return the + // original ErrEdgeNotFound error. + zombieIndex := edges.NestedReadBucket(zombieBucket) + if zombieIndex == nil { + return ErrEdgeNotFound + } + + isZombie, pubKey1, pubKey2 := isZombieEdge( + zombieIndex, chanID, + ) + if !isZombie { + return ErrEdgeNotFound + } + + // Otherwise, the edge is marked as a zombie, so we'll + // populate the edge info with the public keys of each + // party as this is the only information we have about + // it and return an error signaling so. + edgeInfo = &models.ChannelEdgeInfo{ + NodeKey1Bytes: pubKey1, + NodeKey2Bytes: pubKey2, + } + + return ErrZombieEdge + } + + // Otherwise, we'll just return the error if any. + if err != nil { + return err + } + + edgeInfo = &edge + + // Then we'll attempt to fetch the accompanying policies of this + // edge. + e1, e2, err := fetchChanEdgePolicies( + edgeIndex, edges, channelID[:], + ) + if err != nil { + return err + } + + policy1 = e1 + policy2 = e2 + + return nil + }, func() { + edgeInfo = nil + policy1 = nil + policy2 = nil + }) + if errors.Is(err, ErrZombieEdge) { + return edgeInfo, nil, nil, err + } + if err != nil { + return nil, nil, nil, err + } + + return edgeInfo, policy1, policy2, nil +} + +// IsPublicNode is a helper method that determines whether the node with the +// given public key is seen as a public node in the graph from the graph's +// source node's point of view. +func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) { + var nodeIsPublic bool + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + nodes := tx.ReadBucket(nodeBucket) + if nodes == nil { + return ErrGraphNodesNotFound + } + ourPubKey := nodes.Get(sourceKey) + if ourPubKey == nil { + return ErrSourceNodeNotSet + } + node, err := fetchLightningNode(nodes, pubKey[:]) + if err != nil { + return err + } + + nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey) + + return err + }, func() { + nodeIsPublic = false + }) + if err != nil { + return false, err + } + + return nodeIsPublic, nil +} + +// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys. +func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) { + witnessScript, err := input.GenMultiSigScript(aPub, bPub) + if err != nil { + return nil, err + } + + // With the witness script generated, we'll now turn it into a p2wsh + // script: + // * OP_0 + bldr := txscript.NewScriptBuilder( + txscript.WithScriptAllocSize(input.P2WSHSize), + ) + bldr.AddOp(txscript.OP_0) + scriptHash := sha256.Sum256(witnessScript) + bldr.AddData(scriptHash[:]) + + return bldr.Script() +} + +// EdgePoint couples the outpoint of a channel with the funding script that it +// creates. The FilteredChainView will use this to watch for spends of this +// edge point on chain. We require both of these values as depending on the +// concrete implementation, either the pkScript, or the out point will be used. 
+type EdgePoint struct { + // FundingPkScript is the p2wsh multi-sig script of the target channel. + FundingPkScript []byte + + // OutPoint is the outpoint of the target channel. + OutPoint wire.OutPoint +} + +// String returns a human readable version of the target EdgePoint. We return +// the outpoint directly as it is enough to uniquely identify the edge point. +func (e *EdgePoint) String() string { + return e.OutPoint.String() +} + +// ChannelView returns the verifiable edge information for each active channel +// within the known channel graph. The set of UTXO's (along with their scripts) +// returned are the ones that need to be watched on chain to detect channel +// closes on the resident blockchain. +func (c *KVStore) ChannelView() ([]EdgePoint, error) { + var edgePoints []EdgePoint + if err := kvdb.View(c.db, func(tx kvdb.RTx) error { + // We're going to iterate over the entire channel index, so + // we'll need to fetch the edgeBucket to get to the index as + // it's a sub-bucket. + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + chanIndex := edges.NestedReadBucket(channelPointBucket) + if chanIndex == nil { + return ErrGraphNoEdgesFound + } + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) + if edgeIndex == nil { + return ErrGraphNoEdgesFound + } + + // Once we have the proper bucket, we'll range over each key + // (which is the channel point for the channel) and decode it, + // accumulating each entry. + return chanIndex.ForEach( + func(chanPointBytes, chanID []byte) error { + chanPointReader := bytes.NewReader( + chanPointBytes, + ) + + var chanPoint wire.OutPoint + err := ReadOutpoint(chanPointReader, &chanPoint) + if err != nil { + return err + } + + edgeInfo, err := fetchChanEdgeInfo( + edgeIndex, chanID, + ) + if err != nil { + return err + } + + pkScript, err := genMultiSigP2WSH( + edgeInfo.BitcoinKey1Bytes[:], + edgeInfo.BitcoinKey2Bytes[:], + ) + if err != nil { + return err + } + + edgePoints = append(edgePoints, EdgePoint{ + FundingPkScript: pkScript, + OutPoint: chanPoint, + }) + + return nil + }, + ) + }, func() { + edgePoints = nil + }); err != nil { + return nil, err + } + + return edgePoints, nil +} + +// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a +// zombie. This method is used on an ad-hoc basis, when channels need to be +// marked as zombies outside the normal pruning cycle. +func (c *KVStore) MarkEdgeZombie(chanID uint64, + pubKey1, pubKey2 [33]byte) error { + + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) + if err != nil { + return fmt.Errorf("unable to create zombie "+ + "bucket: %w", err) + } + + return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2) + }) + if err != nil { + return err + } + + c.rejectCache.remove(chanID) + c.chanCache.remove(chanID) + + return nil +} + +// markEdgeZombie marks an edge as a zombie within our zombie index. The public +// keys should represent the node public keys of the two parties involved in the +// edge. 
+func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1, + pubKey2 [33]byte) error { + + var k [8]byte + byteOrder.PutUint64(k[:], chanID) + + var v [66]byte + copy(v[:33], pubKey1[:]) + copy(v[33:], pubKey2[:]) + + return zombieIndex.Put(k[:], v[:]) +} + +// MarkEdgeLive clears an edge from our zombie index, deeming it as live. +func (c *KVStore) MarkEdgeLive(chanID uint64) error { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + + return c.markEdgeLiveUnsafe(nil, chanID) +} + +// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be +// called with an existing kvdb.RwTx or the argument can be set to nil in which +// case a new transaction will be created. +// +// NOTE: this method MUST only be called if the cacheMu has already been +// acquired. +func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error { + dbFn := func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + zombieIndex := edges.NestedReadWriteBucket(zombieBucket) + if zombieIndex == nil { + return nil + } + + var k [8]byte + byteOrder.PutUint64(k[:], chanID) + + if len(zombieIndex.Get(k[:])) == 0 { + return ErrZombieEdgeNotFound + } + + return zombieIndex.Delete(k[:]) + } + + // If the transaction is nil, we'll create a new one. Otherwise, we use + // the existing transaction + var err error + if tx == nil { + err = kvdb.Update(c.db, dbFn, func() {}) + } else { + err = dbFn(tx) + } + if err != nil { + return err + } + + c.rejectCache.remove(chanID) + c.chanCache.remove(chanID) + + return nil +} + +// IsZombieEdge returns whether the edge is considered zombie. If it is a +// zombie, then the two node public keys corresponding to this edge are also +// returned. +func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) { + var ( + isZombie bool + pubKey1, pubKey2 [33]byte + ) + + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return ErrGraphNoEdgesFound + } + zombieIndex := edges.NestedReadBucket(zombieBucket) + if zombieIndex == nil { + return nil + } + + isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID) + + return nil + }, func() { + isZombie = false + pubKey1 = [33]byte{} + pubKey2 = [33]byte{} + }) + if err != nil { + return false, [33]byte{}, [33]byte{} + } + + return isZombie, pubKey1, pubKey2 +} + +// isZombieEdge returns whether an entry exists for the given channel in the +// zombie index. If an entry exists, then the two node public keys corresponding +// to this edge are also returned. +func isZombieEdge(zombieIndex kvdb.RBucket, + chanID uint64) (bool, [33]byte, [33]byte) { + + var k [8]byte + byteOrder.PutUint64(k[:], chanID) + + v := zombieIndex.Get(k[:]) + if v == nil { + return false, [33]byte{}, [33]byte{} + } + + var pubKey1, pubKey2 [33]byte + copy(pubKey1[:], v[:33]) + copy(pubKey2[:], v[33:]) + + return true, pubKey1, pubKey2 +} + +// NumZombies returns the current number of zombie channels in the graph. 
+func (c *KVStore) NumZombies() (uint64, error) { + var numZombies uint64 + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + edges := tx.ReadBucket(edgeBucket) + if edges == nil { + return nil + } + zombieIndex := edges.NestedReadBucket(zombieBucket) + if zombieIndex == nil { + return nil + } + + return zombieIndex.ForEach(func(_, _ []byte) error { + numZombies++ + return nil + }) + }, func() { + numZombies = 0 + }) + if err != nil { + return 0, err + } + + return numZombies, nil +} + +// PutClosedScid stores a SCID for a closed channel in the database. This is so +// that we can ignore channel announcements that we know to be closed without +// having to validate them and fetch a block. +func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error { + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + closedScids, err := tx.CreateTopLevelBucket(closedScidBucket) + if err != nil { + return err + } + + var k [8]byte + byteOrder.PutUint64(k[:], scid.ToUint64()) + + return closedScids.Put(k[:], []byte{}) + }, func() {}) +} + +// IsClosedScid checks whether a channel identified by the passed in scid is +// closed. This helps avoid having to perform expensive validation checks. +// TODO: Add an LRU cache to cut down on disc reads. +func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) { + var isClosed bool + err := kvdb.View(c.db, func(tx kvdb.RTx) error { + closedScids := tx.ReadBucket(closedScidBucket) + if closedScids == nil { + return ErrClosedScidsNotFound + } + + var k [8]byte + byteOrder.PutUint64(k[:], scid.ToUint64()) + + if closedScids.Get(k[:]) != nil { + isClosed = true + return nil + } + + return nil + }, func() { + isClosed = false + }) + if err != nil { + return false, err + } + + return isClosed, nil +} + +// GraphSession will provide the call-back with access to a NodeTraverser +// instance which can be used to perform queries against the channel graph. +func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error { + return c.db.View(func(tx walletdb.ReadTx) error { + return cb(&nodeTraverserSession{ + db: c, + tx: tx, + }) + }, func() {}) +} + +// nodeTraverserSession implements the NodeTraverser interface but with a +// backing read only transaction for a consistent view of the graph. +type nodeTraverserSession struct { + tx kvdb.RTx + db *KVStore +} + +// ForEachNodeDirectedChannel calls the callback for every channel of the given +// node. +// +// NOTE: Part of the NodeTraverser interface. +func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex, + cb func(channel *DirectedChannel) error) error { + + return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb) +} + +// FetchNodeFeatures returns the features of the given node. If the node is +// unknown, assume no additional features are supported. +// +// NOTE: Part of the NodeTraverser interface. +func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) ( + *lnwire.FeatureVector, error) { + + return c.db.fetchNodeFeatures(c.tx, nodePub) +} + +func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket, + node *models.LightningNode) error { + + var ( + scratch [16]byte + b bytes.Buffer + ) + + pub, err := node.PubKey() + if err != nil { + return err + } + nodePub := pub.SerializeCompressed() + + // If the node has the update time set, write it, else write 0. 
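+ // The serialized record starts with the update time (8 bytes),
+ // followed by the compressed public key (33 bytes) and a 2-byte
+ // flag indicating whether a node announcement is present.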
+ updateUnix := uint64(0) + if node.LastUpdate.Unix() > 0 { + updateUnix = uint64(node.LastUpdate.Unix()) + } + + byteOrder.PutUint64(scratch[:8], updateUnix) + if _, err := b.Write(scratch[:8]); err != nil { + return err + } + + if _, err := b.Write(nodePub); err != nil { + return err + } + + // If we got a node announcement for this node, we will have the rest + // of the data available. If not we don't have more data to write. + if !node.HaveNodeAnnouncement { + // Write HaveNodeAnnouncement=0. + byteOrder.PutUint16(scratch[:2], 0) + if _, err := b.Write(scratch[:2]); err != nil { + return err + } + + return nodeBucket.Put(nodePub, b.Bytes()) + } + + // Write HaveNodeAnnouncement=1. + byteOrder.PutUint16(scratch[:2], 1) + if _, err := b.Write(scratch[:2]); err != nil { + return err + } + + if err := binary.Write(&b, byteOrder, node.Color.R); err != nil { + return err + } + if err := binary.Write(&b, byteOrder, node.Color.G); err != nil { + return err + } + if err := binary.Write(&b, byteOrder, node.Color.B); err != nil { + return err + } + + if err := wire.WriteVarString(&b, 0, node.Alias); err != nil { + return err + } + + if err := node.Features.Encode(&b); err != nil { + return err + } + + numAddresses := uint16(len(node.Addresses)) + byteOrder.PutUint16(scratch[:2], numAddresses) + if _, err := b.Write(scratch[:2]); err != nil { + return err + } + + for _, address := range node.Addresses { + if err := SerializeAddr(&b, address); err != nil { + return err + } + } + + sigLen := len(node.AuthSigBytes) + if sigLen > 80 { + return fmt.Errorf("max sig len allowed is 80, had %v", + sigLen) + } + + err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes) + if err != nil { + return err + } + + if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { + return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData)) + } + err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData) + if err != nil { + return err + } + + if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil { + return err + } + + // With the alias bucket updated, we'll now update the index that + // tracks the time series of node updates. + var indexKey [8 + 33]byte + byteOrder.PutUint64(indexKey[:8], updateUnix) + copy(indexKey[8:], nodePub) + + // If there was already an old index entry for this node, then we'll + // delete the old one before we write the new entry. + if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil { + // Extract out the old update time to we can reconstruct the + // prior index key to delete it from the index. 
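+ // The first 8 bytes of the stored record hold the previous
+ // update time, which is all that's needed to rebuild the old
+ // index key.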
+ oldUpdateTime := nodeBytes[:8] + + var oldIndexKey [8 + 33]byte + copy(oldIndexKey[:8], oldUpdateTime) + copy(oldIndexKey[8:], nodePub) + + if err := updateIndex.Delete(oldIndexKey[:]); err != nil { + return err + } + } + + if err := updateIndex.Put(indexKey[:], nil); err != nil { + return err + } + + return nodeBucket.Put(nodePub, b.Bytes()) +} + +func fetchLightningNode(nodeBucket kvdb.RBucket, + nodePub []byte) (models.LightningNode, error) { + + nodeBytes := nodeBucket.Get(nodePub) + if nodeBytes == nil { + return models.LightningNode{}, ErrGraphNodeNotFound + } + + nodeReader := bytes.NewReader(nodeBytes) + + return deserializeLightningNode(nodeReader) +} + +func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex, + *lnwire.FeatureVector, error) { + + var ( + pubKey route.Vertex + features = lnwire.EmptyFeatureVector() + nodeScratch [8]byte + ) + + // Skip ahead: + // - LastUpdate (8 bytes) + if _, err := r.Read(nodeScratch[:]); err != nil { + return pubKey, nil, err + } + + if _, err := io.ReadFull(r, pubKey[:]); err != nil { + return pubKey, nil, err + } + + // Read the node announcement flag. + if _, err := r.Read(nodeScratch[:2]); err != nil { + return pubKey, nil, err + } + hasNodeAnn := byteOrder.Uint16(nodeScratch[:2]) + + // The rest of the data is optional, and will only be there if we got a + // node announcement for this node. + if hasNodeAnn == 0 { + return pubKey, features, nil + } + + // We did get a node announcement for this node, so we'll have the rest + // of the data available. + var rgb uint8 + if err := binary.Read(r, byteOrder, &rgb); err != nil { + return pubKey, nil, err + } + if err := binary.Read(r, byteOrder, &rgb); err != nil { + return pubKey, nil, err + } + if err := binary.Read(r, byteOrder, &rgb); err != nil { + return pubKey, nil, err + } + + if _, err := wire.ReadVarString(r, 0); err != nil { + return pubKey, nil, err + } + + if err := features.Decode(r); err != nil { + return pubKey, nil, err + } + + return pubKey, features, nil +} + +func deserializeLightningNode(r io.Reader) (models.LightningNode, error) { + var ( + node models.LightningNode + scratch [8]byte + err error + ) + + // Always populate a feature vector, even if we don't have a node + // announcement and short circuit below. + node.Features = lnwire.EmptyFeatureVector() + + if _, err := r.Read(scratch[:]); err != nil { + return models.LightningNode{}, err + } + + unix := int64(byteOrder.Uint64(scratch[:])) + node.LastUpdate = time.Unix(unix, 0) + + if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil { + return models.LightningNode{}, err + } + + if _, err := r.Read(scratch[:2]); err != nil { + return models.LightningNode{}, err + } + + hasNodeAnn := byteOrder.Uint16(scratch[:2]) + if hasNodeAnn == 1 { + node.HaveNodeAnnouncement = true + } else { + node.HaveNodeAnnouncement = false + } + + // The rest of the data is optional, and will only be there if we got a + // node announcement for this node. + if !node.HaveNodeAnnouncement { + return node, nil + } + + // We did get a node announcement for this node, so we'll have the rest + // of the data available. 
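+ // The remaining fields mirror the write order in
+ // putLightningNode: RGB color, alias, feature vector, address
+ // list, signature, and any extra opaque data.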
+ if err := binary.Read(r, byteOrder, &node.Color.R); err != nil { + return models.LightningNode{}, err + } + if err := binary.Read(r, byteOrder, &node.Color.G); err != nil { + return models.LightningNode{}, err + } + if err := binary.Read(r, byteOrder, &node.Color.B); err != nil { + return models.LightningNode{}, err + } + + node.Alias, err = wire.ReadVarString(r, 0) + if err != nil { + return models.LightningNode{}, err + } + + err = node.Features.Decode(r) + if err != nil { + return models.LightningNode{}, err + } + + if _, err := r.Read(scratch[:2]); err != nil { + return models.LightningNode{}, err + } + numAddresses := int(byteOrder.Uint16(scratch[:2])) + + var addresses []net.Addr + for i := 0; i < numAddresses; i++ { + address, err := DeserializeAddr(r) + if err != nil { + return models.LightningNode{}, err + } + addresses = append(addresses, address) + } + node.Addresses = addresses + + node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") + if err != nil { + return models.LightningNode{}, err + } + + // We'll try and see if there are any opaque bytes left, if not, then + // we'll ignore the EOF error and return the node as is. + node.ExtraOpaqueData, err = wire.ReadVarBytes( + r, 0, MaxAllowedExtraOpaqueBytes, "blob", + ) + switch { + case errors.Is(err, io.ErrUnexpectedEOF): + case errors.Is(err, io.EOF): + case err != nil: + return models.LightningNode{}, err + } + + return node, nil +} + +func putChanEdgeInfo(edgeIndex kvdb.RwBucket, + edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error { + + var b bytes.Buffer + + if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil { + return err + } + if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil { + return err + } + if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil { + return err + } + if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil { + return err + } + + if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil { + return err + } + + authProof := edgeInfo.AuthProof + var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte + if authProof != nil { + nodeSig1 = authProof.NodeSig1Bytes + nodeSig2 = authProof.NodeSig2Bytes + bitcoinSig1 = authProof.BitcoinSig1Bytes + bitcoinSig2 = authProof.BitcoinSig2Bytes + } + + if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil { + return err + } + if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil { + return err + } + if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil { + return err + } + if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil { + return err + } + + if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil { + return err + } + err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity)) + if err != nil { + return err + } + if _, err := b.Write(chanID[:]); err != nil { + return err + } + if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil { + return err + } + + if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { + return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData)) + } + err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData) + if err != nil { + return err + } + + return edgeIndex.Put(chanID[:], b.Bytes()) +} + +func fetchChanEdgeInfo(edgeIndex kvdb.RBucket, + chanID []byte) (models.ChannelEdgeInfo, error) { + + edgeInfoBytes := edgeIndex.Get(chanID) + if edgeInfoBytes == nil { + return models.ChannelEdgeInfo{}, ErrEdgeNotFound + } + + edgeInfoReader := bytes.NewReader(edgeInfoBytes) + + return deserializeChanEdgeInfo(edgeInfoReader) +} + +func 
deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) { + var ( + err error + edgeInfo models.ChannelEdgeInfo + ) + + if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil { + return models.ChannelEdgeInfo{}, err + } + if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil { + return models.ChannelEdgeInfo{}, err + } + if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil { + return models.ChannelEdgeInfo{}, err + } + if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil { + return models.ChannelEdgeInfo{}, err + } + + edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features") + if err != nil { + return models.ChannelEdgeInfo{}, err + } + + proof := &models.ChannelAuthProof{} + + proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return models.ChannelEdgeInfo{}, err + } + proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return models.ChannelEdgeInfo{}, err + } + proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return models.ChannelEdgeInfo{}, err + } + proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return models.ChannelEdgeInfo{}, err + } + + if !proof.IsEmpty() { + edgeInfo.AuthProof = proof + } + + edgeInfo.ChannelPoint = wire.OutPoint{} + if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil { + return models.ChannelEdgeInfo{}, err + } + if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil { + return models.ChannelEdgeInfo{}, err + } + if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil { + return models.ChannelEdgeInfo{}, err + } + + if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil { + return models.ChannelEdgeInfo{}, err + } + + // We'll try and see if there are any opaque bytes left, if not, then + // we'll ignore the EOF error and return the edge as is. + edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes( + r, 0, MaxAllowedExtraOpaqueBytes, "blob", + ) + switch { + case errors.Is(err, io.ErrUnexpectedEOF): + case errors.Is(err, io.EOF): + case err != nil: + return models.ChannelEdgeInfo{}, err + } + + return edgeInfo, nil +} + +func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy, + from, to []byte) error { + + var edgeKey [33 + 8]byte + copy(edgeKey[:], from) + byteOrder.PutUint64(edgeKey[33:], edge.ChannelID) + + var b bytes.Buffer + if err := serializeChanEdgePolicy(&b, edge, to); err != nil { + return err + } + + // Before we write out the new edge, we'll create a new entry in the + // update index in order to keep it fresh. + updateUnix := uint64(edge.LastUpdate.Unix()) + var indexKey [8 + 8]byte + byteOrder.PutUint64(indexKey[:8], updateUnix) + byteOrder.PutUint64(indexKey[8:], edge.ChannelID) + + updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) + if err != nil { + return err + } + + // If there was already an entry for this edge, then we'll need to + // delete the old one to ensure we don't leave around any after-images. + // An unknown policy value does not have a update time recorded, so + // it also does not need to be removed. + if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil && + !bytes.Equal(edgeBytes, unknownPolicy) { + + // In order to delete the old entry, we'll need to obtain the + // *prior* update time in order to delete it. 
To do this, we'll + // need to deserialize the existing policy within the database + // (now outdated by the new one), and delete its corresponding + // entry within the update index. We'll ignore any + // ErrEdgePolicyOptionalFieldNotFound error, as we only need + // the channel ID and update time to delete the entry. + // TODO(halseth): get rid of these invalid policies in a + // migration. + oldEdgePolicy, err := deserializeChanEdgePolicy( + bytes.NewReader(edgeBytes), + ) + if err != nil && + !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) { + + return err + } + + oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix()) + + var oldIndexKey [8 + 8]byte + byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime) + byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID) + + if err := updateIndex.Delete(oldIndexKey[:]); err != nil { + return err + } + } + + if err := updateIndex.Put(indexKey[:], nil); err != nil { + return err + } + + err = updateEdgePolicyDisabledIndex( + edges, edge.ChannelID, + edge.ChannelFlags&lnwire.ChanUpdateDirection > 0, + edge.IsDisabled(), + ) + if err != nil { + return err + } + + return edges.Put(edgeKey[:], b.Bytes()) +} + +// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex +// bucket by either add a new disabled ChannelEdgePolicy or remove an existing +// one. +// The direction represents the direction of the edge and disabled is used for +// deciding whether to remove or add an entry to the bucket. +// In general a channel is disabled if two entries for the same chanID exist +// in this bucket. +// Maintaining the bucket this way allows a fast retrieval of disabled +// channels, for example when prune is needed. +func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64, + direction bool, disabled bool) error { + + var disabledEdgeKey [8 + 1]byte + byteOrder.PutUint64(disabledEdgeKey[0:], chanID) + if direction { + disabledEdgeKey[8] = 1 + } + + disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists( + disabledEdgePolicyBucket, + ) + if err != nil { + return err + } + + if disabled { + return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{}) + } + + return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:]) +} + +// putChanEdgePolicyUnknown marks the edge policy as unknown +// in the edges bucket. +func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64, + from []byte) error { + + var edgeKey [33 + 8]byte + copy(edgeKey[:], from) + byteOrder.PutUint64(edgeKey[33:], channelID) + + if edges.Get(edgeKey[:]) != nil { + return fmt.Errorf("cannot write unknown policy for channel %v "+ + " when there is already a policy present", channelID) + } + + return edges.Put(edgeKey[:], unknownPolicy) +} + +func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte, + nodePub []byte) (*models.ChannelEdgePolicy, error) { + + var edgeKey [33 + 8]byte + copy(edgeKey[:], nodePub) + copy(edgeKey[33:], chanID) + + edgeBytes := edges.Get(edgeKey[:]) + if edgeBytes == nil { + return nil, ErrEdgeNotFound + } + + // No need to deserialize unknown policy. + if bytes.Equal(edgeBytes, unknownPolicy) { + return nil, nil + } + + edgeReader := bytes.NewReader(edgeBytes) + + ep, err := deserializeChanEdgePolicy(edgeReader) + switch { + // If the db policy was missing an expected optional field, we return + // nil as if the policy was unknown. 
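
// Illustrative, standalone sketch (stdlib only, not part of this patch) of
// the two fixed-size keys used above: the 33+8 byte edge-policy key
// (advertising node pub key followed by the channel ID) and the 8+1 byte
// disabled-policy index key (channel ID followed by a direction byte).
// Big-endian encoding is assumed for the integer parts.
package main

import (
	"encoding/binary"
	"fmt"
)

// edgePolicyKey mirrors the key layout built in putChanEdgePolicy and
// fetchChanEdgePolicy.
func edgePolicyKey(fromNode [33]byte, chanID uint64) [33 + 8]byte {
	var key [33 + 8]byte
	copy(key[:33], fromNode[:])
	binary.BigEndian.PutUint64(key[33:], chanID)
	return key
}

// disabledPolicyKey mirrors the key layout built in
// updateEdgePolicyDisabledIndex. A channel counts as disabled once the keys
// for both directions are present in the index.
func disabledPolicyKey(chanID uint64, direction bool) [8 + 1]byte {
	var key [8 + 1]byte
	binary.BigEndian.PutUint64(key[:8], chanID)
	if direction {
		key[8] = 1
	}
	return key
}

func main() {
	var node [33]byte
	node[0] = 0x02

	fmt.Printf("edge policy key:     %x\n", edgePolicyKey(node, 123456789))
	fmt.Printf("disabled key (dir0): %x\n", disabledPolicyKey(123456789, false))
	fmt.Printf("disabled key (dir1): %x\n", disabledPolicyKey(123456789, true))
}
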
+ case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound): + return nil, nil + + case err != nil: + return nil, err + } + + return ep, nil +} + +func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket, + chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy, + error) { + + edgeInfo := edgeIndex.Get(chanID) + if edgeInfo == nil { + return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound, + chanID) + } + + // The first node is contained within the first half of the edge + // information. We only propagate the error here and below if it's + // something other than edge non-existence. + node1Pub := edgeInfo[:33] + edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub) + if err != nil { + return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound, + node1Pub) + } + + // Similarly, the second node is contained within the latter + // half of the edge information. + node2Pub := edgeInfo[33:66] + edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub) + if err != nil { + return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound, + node2Pub) + } + + return edge1, edge2, nil +} + +func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy, + to []byte) error { + + err := wire.WriteVarBytes(w, 0, edge.SigBytes) + if err != nil { + return err + } + + if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil { + return err + } + + var scratch [8]byte + updateUnix := uint64(edge.LastUpdate.Unix()) + byteOrder.PutUint64(scratch[:], updateUnix) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil { + return err + } + if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil { + return err + } + if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil { + return err + } + if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil { + return err + } + err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat)) + if err != nil { + return err + } + err = binary.Write( + w, byteOrder, uint64(edge.FeeProportionalMillionths), + ) + if err != nil { + return err + } + + if _, err := w.Write(to); err != nil { + return err + } + + // If the max_htlc field is present, we write it. To be compatible with + // older versions that wasn't aware of this field, we write it as part + // of the opaque data. + // TODO(halseth): clean up when moving to TLV. + var opaqueBuf bytes.Buffer + if edge.MessageFlags.HasMaxHtlc() { + err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC)) + if err != nil { + return err + } + } + + if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { + return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData)) + } + if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil { + return err + } + + if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil { + return err + } + + return nil +} + +func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) { + // Deserialize the policy. Note that in case an optional field is not + // found, both an error and a populated policy object are returned. 
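
// Illustrative, standalone sketch (stdlib only, not part of this patch) of
// how the optional max_htlc value is carried for backwards compatibility:
// when the corresponding message flag is set, the 8-byte big-endian value is
// written at the front of the opaque trailing bytes and stripped back out on
// read. The flag constant here is a stand-in, not the real lnwire bit.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const flagMaxHtlc = 0x01 // Stand-in for the max_htlc message flag.

var errOptionalFieldNotFound = errors.New("optional max_htlc field not found")

func packOpaque(msgFlags byte, maxHtlcMsat uint64, extra []byte) []byte {
	var opq []byte
	if msgFlags&flagMaxHtlc != 0 {
		var scratch [8]byte
		binary.BigEndian.PutUint64(scratch[:], maxHtlcMsat)
		opq = append(opq, scratch[:]...)
	}
	return append(opq, extra...)
}

func unpackOpaque(msgFlags byte, opq []byte) (uint64, []byte, error) {
	if msgFlags&flagMaxHtlc == 0 {
		return 0, opq, nil
	}
	if len(opq) < 8 {
		// Mirrors the ErrEdgePolicyOptionalFieldNotFound case above:
		// old data written before the field was validated.
		return 0, nil, errOptionalFieldNotFound
	}
	return binary.BigEndian.Uint64(opq[:8]), opq[8:], nil
}

func main() {
	opq := packOpaque(flagMaxHtlc, 990_000_000, []byte{0xde, 0xad})
	maxHtlc, extra, err := unpackOpaque(flagMaxHtlc, opq)
	fmt.Println(maxHtlc, extra, err)
}
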
+ edge, deserializeErr := deserializeChanEdgePolicyRaw(r) + if deserializeErr != nil && + !errors.Is(deserializeErr, ErrEdgePolicyOptionalFieldNotFound) { + + return nil, deserializeErr + } + + return edge, deserializeErr +} + +func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy, + error) { + + edge := &models.ChannelEdgePolicy{} + + var err error + edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") + if err != nil { + return nil, err + } + + if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil { + return nil, err + } + + var scratch [8]byte + if _, err := r.Read(scratch[:]); err != nil { + return nil, err + } + unix := int64(byteOrder.Uint64(scratch[:])) + edge.LastUpdate = time.Unix(unix, 0) + + if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil { + return nil, err + } + if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil { + return nil, err + } + if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil { + return nil, err + } + + var n uint64 + if err := binary.Read(r, byteOrder, &n); err != nil { + return nil, err + } + edge.MinHTLC = lnwire.MilliSatoshi(n) + + if err := binary.Read(r, byteOrder, &n); err != nil { + return nil, err + } + edge.FeeBaseMSat = lnwire.MilliSatoshi(n) + + if err := binary.Read(r, byteOrder, &n); err != nil { + return nil, err + } + edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n) + + if _, err := r.Read(edge.ToNode[:]); err != nil { + return nil, err + } + + // We'll try and see if there are any opaque bytes left, if not, then + // we'll ignore the EOF error and return the edge as is. + edge.ExtraOpaqueData, err = wire.ReadVarBytes( + r, 0, MaxAllowedExtraOpaqueBytes, "blob", + ) + switch { + case errors.Is(err, io.ErrUnexpectedEOF): + case errors.Is(err, io.EOF): + case err != nil: + return nil, err + } + + // See if optional fields are present. + if edge.MessageFlags.HasMaxHtlc() { + // The max_htlc field should be at the beginning of the opaque + // bytes. + opq := edge.ExtraOpaqueData + + // If the max_htlc field is not present, it might be old data + // stored before this field was validated. We'll return the + // edge along with an error. + if len(opq) < 8 { + return edge, ErrEdgePolicyOptionalFieldNotFound + } + + maxHtlc := byteOrder.Uint64(opq[:8]) + edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc) + + // Exclude the parsed field from the rest of the opaque data. + edge.ExtraOpaqueData = opq[8:] + } + + return edge, nil +} + +// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the +// KVStore and a kvdb.RTx. +type chanGraphNodeTx struct { + tx kvdb.RTx + db *KVStore + node *models.LightningNode +} + +// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx +// interface. +var _ NodeRTx = (*chanGraphNodeTx)(nil) + +func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore, + node *models.LightningNode) *chanGraphNodeTx { + + return &chanGraphNodeTx{ + tx: tx, + db: db, + node: node, + } +} + +// Node returns the raw information of the node. +// +// NOTE: This is a part of the NodeRTx interface. +func (c *chanGraphNodeTx) Node() *models.LightningNode { + return c.node +} + +// FetchNode fetches the node with the given pub key under the same transaction +// used to fetch the current node. The returned node is also a NodeRTx and any +// operations on that NodeRTx will also be done under the same transaction. +// +// NOTE: This is a part of the NodeRTx interface. 
+func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) { + node, err := c.db.FetchLightningNodeTx(c.tx, nodePub) + if err != nil { + return nil, err + } + + return newChanGraphNodeTx(c.tx, c.db, node), nil +} + +// ForEachChannel can be used to iterate over the node's channels under +// the same transaction used to fetch the node. +// +// NOTE: This is a part of the NodeRTx interface. +func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo, + *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error { + + return c.db.ForEachNodeChannelTx(c.tx, c.node.PubKeyBytes, + func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1, + policy2 *models.ChannelEdgePolicy) error { + + return f(info, policy1, policy2) + }, + ) +} + +// MakeTestGraph creates a new instance of the KVStore for testing +// purposes. +func MakeTestGraph(t testing.TB, modifiers ...KVStoreOptionModifier) ( + *ChannelGraph, error) { + + opts := DefaultOptions() + for _, modifier := range modifiers { + modifier(opts) + } + + // Next, create KVStore for the first time. + backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr") + if err != nil { + backendCleanup() + + return nil, err + } + + graph, err := NewChannelGraph(&Config{ + KVDB: backend, + KVStoreOpts: modifiers, + }) + if err != nil { + backendCleanup() + + return nil, err + } + require.NoError(t, graph.Start()) + + t.Cleanup(func() { + _ = backend.Close() + backendCleanup() + require.NoError(t, graph.Stop()) + }) + + return graph, nil +} diff --git a/graph/notifications.go b/graph/db/notifications.go similarity index 83% rename from graph/notifications.go rename to graph/db/notifications.go index 76eabdb02f..2ed2be16f3 100644 --- a/graph/notifications.go +++ b/graph/db/notifications.go @@ -1,19 +1,54 @@ -package graph +package graphdb import ( "fmt" "image/color" "net" "sync" + "sync/atomic" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/wire" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/graph/db/models" + "github.com/lightningnetwork/lnd/lnutils" "github.com/lightningnetwork/lnd/lnwire" ) +// topologyManager holds all the fields required to manage the network topology +// subscriptions and notifications. +type topologyManager struct { + // ntfnClientCounter is an atomic counter that's used to assign unique + // notification client IDs to new clients. + ntfnClientCounter atomic.Uint64 + + // topologyUpdate is a channel that carries new topology updates + // messages from outside the ChannelGraph to be processed by the + // networkHandler. + topologyUpdate chan any + + // topologyClients maps a client's unique notification ID to a + // topologyClient client that contains its notification dispatch + // channel. + topologyClients *lnutils.SyncMap[uint64, *topologyClient] + + // ntfnClientUpdates is a channel that's used to send new updates to + // topology notification clients to the ChannelGraph. Updates either + // add a new notification client, or cancel notifications for an + // existing client. + ntfnClientUpdates chan *topologyClientUpdate +} + +// newTopologyManager creates a new instance of the topologyManager. 
+func newTopologyManager() *topologyManager { + return &topologyManager{ + topologyUpdate: make(chan any), + topologyClients: &lnutils.SyncMap[uint64, *topologyClient]{}, + ntfnClientUpdates: make(chan *topologyClientUpdate), + } +} + // TopologyClient represents an intent to receive notifications from the // channel router regarding changes to the topology of the channel graph. The // TopologyChanges channel will be sent upon with new updates to the channel @@ -54,16 +89,16 @@ type topologyClientUpdate struct { // topology occurs. Changes that will be sent at notifications include: new // nodes appearing, node updating their attributes, new channels, channels // closing, and updates in the routing policies of a channel's directed edges. -func (b *Builder) SubscribeTopology() (*TopologyClient, error) { +func (c *ChannelGraph) SubscribeTopology() (*TopologyClient, error) { // If the router is not yet started, return an error to avoid a // deadlock waiting for it to handle the subscription request. - if !b.started.Load() { + if !c.started.Load() { return nil, fmt.Errorf("router not started") } // We'll first atomically obtain the next ID for this client from the // incrementing client ID counter. - clientID := b.ntfnClientCounter.Add(1) + clientID := c.ntfnClientCounter.Add(1) log.Debugf("New graph topology client subscription, client %v", clientID) @@ -71,12 +106,12 @@ func (b *Builder) SubscribeTopology() (*TopologyClient, error) { ntfnChan := make(chan *TopologyChange, 10) select { - case b.ntfnClientUpdates <- &topologyClientUpdate{ + case c.ntfnClientUpdates <- &topologyClientUpdate{ cancel: false, clientID: clientID, ntfnChan: ntfnChan, }: - case <-b.quit: + case <-c.quit: return nil, errors.New("ChannelRouter shutting down") } @@ -84,11 +119,11 @@ func (b *Builder) SubscribeTopology() (*TopologyClient, error) { TopologyChanges: ntfnChan, Cancel: func() { select { - case b.ntfnClientUpdates <- &topologyClientUpdate{ + case c.ntfnClientUpdates <- &topologyClientUpdate{ cancel: true, clientID: clientID, }: - case <-b.quit: + case <-c.quit: return } }, @@ -114,7 +149,7 @@ type topologyClient struct { // notifyTopologyChange notifies all registered clients of a new change in // graph topology in a non-blocking. -func (b *Builder) notifyTopologyChange(topologyDiff *TopologyChange) { +func (c *ChannelGraph) notifyTopologyChange(topologyDiff *TopologyChange) { // notifyClient is a helper closure that will send topology updates to // the given client. notifyClient := func(clientID uint64, client *topologyClient) bool { @@ -127,23 +162,22 @@ func (b *Builder) notifyTopologyChange(topologyDiff *TopologyChange) { len(topologyDiff.ChannelEdgeUpdates), len(topologyDiff.ClosedChannels)) - go func(c *topologyClient) { - defer c.wg.Done() + go func(t *topologyClient) { + defer t.wg.Done() select { // In this case we'll try to send the notification // directly to the upstream client consumer. - case c.ntfnChan <- topologyDiff: + case t.ntfnChan <- topologyDiff: // If the client cancels the notifications, then we'll // exit early. - case <-c.exit: + case <-t.exit: // Similarly, if the ChannelRouter itself exists early, // then we'll also exit ourselves. - case <-b.quit: - + case <-c.quit: } }(client) @@ -154,7 +188,29 @@ func (b *Builder) notifyTopologyChange(topologyDiff *TopologyChange) { // Range over the set of active clients, and attempt to send the // topology updates. 
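
// A minimal sketch (not part of this patch) of how a caller consumes the
// topology subscription now that it lives on the ChannelGraph. It assumes
// the graph has already been Start()ed; the quit channel and package name
// are the caller's own, and only fields visible above are used.
package graphsubexample

import (
	"fmt"

	graphdb "github.com/lightningnetwork/lnd/graph/db"
)

func consumeTopology(graph *graphdb.ChannelGraph, quit <-chan struct{}) error {
	client, err := graph.SubscribeTopology()
	if err != nil {
		return err
	}
	// Cancelling the subscription releases the notification client.
	defer client.Cancel()

	for {
		select {
		case update, ok := <-client.TopologyChanges:
			if !ok {
				return nil
			}
			fmt.Printf("%d channel edge updates, %d closed "+
				"channels\n", len(update.ChannelEdgeUpdates),
				len(update.ClosedChannels))

		case <-quit:
			return nil
		}
	}
}
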
- b.topologyClients.Range(notifyClient) + c.topologyClients.Range(notifyClient) +} + +// handleTopologyUpdate is responsible for sending any topology changes +// notifications to registered clients. +// +// NOTE: must be run inside goroutine. +func (c *ChannelGraph) handleTopologyUpdate(update any) { + defer c.wg.Done() + + topChange := &TopologyChange{} + err := c.addToTopologyChange(topChange, update) + if err != nil { + log.Errorf("unable to update topology change notification: %v", + err) + return + } + + if topChange.isEmpty() { + return + } + + c.notifyTopologyChange(topChange) } // TopologyChange represents a new set of modifications to the channel graph. @@ -310,8 +366,8 @@ type ChannelEdgeUpdate struct { // constitutes. This function will also fetch any required auxiliary // information required to create the topology change update from the graph // database. -func addToTopologyChange(graph DB, update *TopologyChange, - msg interface{}) error { +func (c *ChannelGraph) addToTopologyChange(update *TopologyChange, + msg any) error { switch m := msg.(type) { @@ -345,7 +401,7 @@ func addToTopologyChange(graph DB, update *TopologyChange, // We'll need to fetch the edge's information from the database // in order to get the information concerning which nodes are // being connected. - edgeInfo, _, _, err := graph.FetchChannelEdgesByID(m.ChannelID) + edgeInfo, _, _, err := c.FetchChannelEdgesByID(m.ChannelID) if err != nil { return errors.Errorf("unable fetch channel edge: %v", err) diff --git a/graph/db/options.go b/graph/db/options.go index a512ec4bce..7bff8637ab 100644 --- a/graph/db/options.go +++ b/graph/db/options.go @@ -20,8 +20,49 @@ const ( DefaultPreAllocCacheNumNodes = 15000 ) -// Options holds parameters for tuning and customizing a graph.DB. -type Options struct { +// chanGraphOptions holds parameters for tuning and customizing the +// ChannelGraph. +type chanGraphOptions struct { + // useGraphCache denotes whether the in-memory graph cache should be + // used or a fallback version that uses the underlying database for + // path finding. + useGraphCache bool + + // preAllocCacheNumNodes is the number of nodes we expect to be in the + // graph cache, so we can pre-allocate the map accordingly. + preAllocCacheNumNodes int +} + +// defaultChanGraphOptions returns a new chanGraphOptions instance populated +// with default values. +func defaultChanGraphOptions() *chanGraphOptions { + return &chanGraphOptions{ + useGraphCache: true, + preAllocCacheNumNodes: DefaultPreAllocCacheNumNodes, + } +} + +// ChanGraphOption describes the signature of a functional option that can be +// used to customize a ChannelGraph instance. +type ChanGraphOption func(*chanGraphOptions) + +// WithUseGraphCache sets whether the in-memory graph cache should be used. +func WithUseGraphCache(use bool) ChanGraphOption { + return func(o *chanGraphOptions) { + o.useGraphCache = use + } +} + +// WithPreAllocCacheNumNodes sets the number of nodes we expect to be in the +// graph cache, so we can pre-allocate the map accordingly. +func WithPreAllocCacheNumNodes(n int) ChanGraphOption { + return func(o *chanGraphOptions) { + o.preAllocCacheNumNodes = n + } +} + +// KVStoreOptions holds parameters for tuning and customizing a graph.DB. +type KVStoreOptions struct { // RejectCacheSize is the maximum number of rejectCacheEntries to hold // in the rejection cache. RejectCacheSize int @@ -34,67 +75,43 @@ type Options struct { // wait before attempting to commit a pending set of updates. 
BatchCommitInterval time.Duration - // PreAllocCacheNumNodes is the number of nodes we expect to be in the - // graph cache, so we can pre-allocate the map accordingly. - PreAllocCacheNumNodes int - - // UseGraphCache denotes whether the in-memory graph cache should be - // used or a fallback version that uses the underlying database for - // path finding. - UseGraphCache bool - // NoMigration specifies that underlying backend was opened in read-only // mode and migrations shouldn't be performed. This can be useful for // applications that use the channeldb package as a library. NoMigration bool } -// DefaultOptions returns an Options populated with default values. -func DefaultOptions() *Options { - return &Options{ - RejectCacheSize: DefaultRejectCacheSize, - ChannelCacheSize: DefaultChannelCacheSize, - PreAllocCacheNumNodes: DefaultPreAllocCacheNumNodes, - UseGraphCache: true, - NoMigration: false, +// DefaultOptions returns a KVStoreOptions populated with default values. +func DefaultOptions() *KVStoreOptions { + return &KVStoreOptions{ + RejectCacheSize: DefaultRejectCacheSize, + ChannelCacheSize: DefaultChannelCacheSize, + NoMigration: false, } } -// OptionModifier is a function signature for modifying the default Options. -type OptionModifier func(*Options) +// KVStoreOptionModifier is a function signature for modifying the default +// KVStoreOptions. +type KVStoreOptionModifier func(*KVStoreOptions) // WithRejectCacheSize sets the RejectCacheSize to n. -func WithRejectCacheSize(n int) OptionModifier { - return func(o *Options) { +func WithRejectCacheSize(n int) KVStoreOptionModifier { + return func(o *KVStoreOptions) { o.RejectCacheSize = n } } // WithChannelCacheSize sets the ChannelCacheSize to n. -func WithChannelCacheSize(n int) OptionModifier { - return func(o *Options) { +func WithChannelCacheSize(n int) KVStoreOptionModifier { + return func(o *KVStoreOptions) { o.ChannelCacheSize = n } } -// WithPreAllocCacheNumNodes sets the PreAllocCacheNumNodes to n. -func WithPreAllocCacheNumNodes(n int) OptionModifier { - return func(o *Options) { - o.PreAllocCacheNumNodes = n - } -} - // WithBatchCommitInterval sets the batch commit interval for the interval batch // schedulers. -func WithBatchCommitInterval(interval time.Duration) OptionModifier { - return func(o *Options) { +func WithBatchCommitInterval(interval time.Duration) KVStoreOptionModifier { + return func(o *KVStoreOptions) { o.BatchCommitInterval = interval } } - -// WithUseGraphCache sets the UseGraphCache option to the given value. -func WithUseGraphCache(use bool) OptionModifier { - return func(o *Options) { - o.UseGraphCache = use - } -} diff --git a/graph/notifications_test.go b/graph/notifications_test.go index 4049c9f81b..0e2ec7afba 100644 --- a/graph/notifications_test.go +++ b/graph/notifications_test.go @@ -469,7 +469,7 @@ func TestEdgeUpdateNotification(t *testing.T) { // With the channel edge now in place, we'll subscribe for topology // notifications. 
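
// A test-helper sketch (not part of this patch) showing how the two option
// families fit together after the split: KVStoreOptionModifier values tune
// the KV layer through Config.KVStoreOpts, while ChanGraphOption values are
// passed variadically to NewChannelGraph, which must now also be started and
// stopped. The option values and package name are illustrative.
package graphoptexample

import (
	"testing"
	"time"

	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/stretchr/testify/require"
)

func openTestGraph(t *testing.T) *graphdb.ChannelGraph {
	backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
	require.NoError(t, err)
	t.Cleanup(backendCleanup)

	graph, err := graphdb.NewChannelGraph(
		&graphdb.Config{
			KVDB: backend,
			KVStoreOpts: []graphdb.KVStoreOptionModifier{
				graphdb.WithRejectCacheSize(
					graphdb.DefaultRejectCacheSize,
				),
				graphdb.WithBatchCommitInterval(
					500 * time.Millisecond,
				),
			},
		},
		graphdb.WithUseGraphCache(true),
		graphdb.WithPreAllocCacheNumNodes(1_000),
	)
	require.NoError(t, err)

	// The ChannelGraph is now a managed subsystem: start it before use
	// and stop it on test teardown.
	require.NoError(t, graph.Start())
	t.Cleanup(func() {
		require.NoError(t, graph.Stop())
	})

	return graph
}
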
- ntfnClient, err := ctx.builder.SubscribeTopology() + ntfnClient, err := ctx.graph.SubscribeTopology() require.NoError(t, err, "unable to subscribe for channel notifications") // Create random policy edges that are stemmed to the channel id @@ -489,7 +489,8 @@ func TestEdgeUpdateNotification(t *testing.T) { t.Fatalf("unable to add edge update: %v", err) } - assertEdgeCorrect := func(t *testing.T, edgeUpdate *ChannelEdgeUpdate, + assertEdgeCorrect := func(t *testing.T, + edgeUpdate *graphdb.ChannelEdgeUpdate, edgeAnn *models.ChannelEdgePolicy) { if edgeUpdate.ChanID != edgeAnn.ChannelID { @@ -659,7 +660,7 @@ func TestNodeUpdateNotification(t *testing.T) { } // Create a new client to receive notifications. - ntfnClient, err := ctx.builder.SubscribeTopology() + ntfnClient, err := ctx.graph.SubscribeTopology() require.NoError(t, err, "unable to subscribe for channel notifications") // Change network topology by adding the updated info for the two nodes @@ -672,7 +673,7 @@ func TestNodeUpdateNotification(t *testing.T) { } assertNodeNtfnCorrect := func(t *testing.T, ann *models.LightningNode, - nodeUpdate *NetworkNodeUpdate) { + nodeUpdate *graphdb.NetworkNodeUpdate) { nodeKey, _ := ann.PubKey() @@ -699,9 +700,10 @@ func TestNodeUpdateNotification(t *testing.T) { t.Fatalf("node alias doesn't match: expected %v, got %v", ann.Alias, nodeUpdate.Alias) } - if nodeUpdate.Color != EncodeHexColor(ann.Color) { - t.Fatalf("node color doesn't match: expected %v, got %v", - EncodeHexColor(ann.Color), nodeUpdate.Color) + if nodeUpdate.Color != graphdb.EncodeHexColor(ann.Color) { + t.Fatalf("node color doesn't match: expected %v, "+ + "got %v", graphdb.EncodeHexColor(ann.Color), + nodeUpdate.Color) } } @@ -793,7 +795,7 @@ func TestNotificationCancellation(t *testing.T) { ctx := createTestCtxSingleNode(t, startingBlockHeight) // Create a new client to receive notifications. - ntfnClient, err := ctx.builder.SubscribeTopology() + ntfnClient, err := ctx.graph.SubscribeTopology() require.NoError(t, err, "unable to subscribe for channel notifications") // We'll create the utxo for a new channel. @@ -919,7 +921,7 @@ func TestChannelCloseNotification(t *testing.T) { // With the channel edge now in place, we'll subscribe for topology // notifications. 
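
// Illustrative, standalone sketch (stdlib only, not part of this patch) of
// the "#rrggbb" encoding that EncodeHexColor is assumed to produce, which is
// the format the colour assertions in the tests above compare against.
package main

import (
	"fmt"
	"image/color"
)

func encodeHexColor(c color.RGBA) string {
	// Two lowercase hex digits per channel, alpha ignored.
	return fmt.Sprintf("#%02x%02x%02x", c.R, c.G, c.B)
}

func main() {
	fmt.Println(encodeHexColor(color.RGBA{R: 0x30, G: 0x0b, B: 0xcf}))
	// Prints: #300bcf
}
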
- ntfnClient, err := ctx.builder.SubscribeTopology() + ntfnClient, err := ctx.graph.SubscribeTopology() require.NoError(t, err, "unable to subscribe for channel notifications") // Next, we'll simulate the closure of our channel by generating a new @@ -1002,7 +1004,9 @@ func TestEncodeHexColor(t *testing.T) { } for _, tc := range colorTestCases { - encoded := EncodeHexColor(color.RGBA{tc.R, tc.G, tc.B, 0}) + encoded := graphdb.EncodeHexColor( + color.RGBA{tc.R, tc.G, tc.B, 0}, + ) if (encoded == tc.encoded) != tc.isValid { t.Fatalf("incorrect color encoding, "+ "want: %v, got: %v", tc.encoded, encoded) @@ -1094,11 +1098,16 @@ func makeTestGraph(t *testing.T, useCache bool) (*graphdb.ChannelGraph, t.Cleanup(backendCleanup) graph, err := graphdb.NewChannelGraph( - backend, graphdb.WithUseGraphCache(useCache), + &graphdb.Config{KVDB: backend}, + graphdb.WithUseGraphCache(useCache), ) if err != nil { return nil, nil, err } + require.NoError(t, graph.Start()) + t.Cleanup(func() { + require.NoError(t, graph.Stop()) + }) return graph, backend, nil } diff --git a/itest/lnd_channel_policy_test.go b/itest/lnd_channel_policy_test.go index 18c2328e97..7a333f0730 100644 --- a/itest/lnd_channel_policy_test.go +++ b/itest/lnd_channel_policy_test.go @@ -341,10 +341,10 @@ func testUpdateChannelPolicy(ht *lntest.HarnessTest) { // but not the second, as she only allows two updates per day and a day // has yet to elapse from the previous update. - // assertAliceAndBob is a helper closure which updates Alice's policy - // and asserts that both Alice and Bob have heard and updated the + // updateAndAssertAliceAndBob is a helper closure which updates Alice's + // policy and asserts that both Alice and Bob have heard and updated the // policy in their graph. - assertAliceAndBob := func(req *lnrpc.PolicyUpdateRequest, + updateAndAssertAliceAndBob := func(req *lnrpc.PolicyUpdateRequest, expectedPolicy *lnrpc.RoutingPolicy) { alice.RPC.UpdateChannelPolicy(req) @@ -384,7 +384,7 @@ func testUpdateChannelPolicy(ht *lntest.HarnessTest) { expectedPolicy.FeeBaseMsat = baseFee1 req.BaseFeeMsat = baseFee1 req.InboundFee = nil - assertAliceAndBob(req, expectedPolicy) + updateAndAssertAliceAndBob(req, expectedPolicy) // Check that Carol has both heard the policy and updated it in her // graph. @@ -407,7 +407,7 @@ func testUpdateChannelPolicy(ht *lntest.HarnessTest) { baseFee2 := baseFee1 * 2 expectedPolicy.FeeBaseMsat = baseFee2 req.BaseFeeMsat = baseFee2 - assertAliceAndBob(req, expectedPolicy) + updateAndAssertAliceAndBob(req, expectedPolicy) // Since Carol didn't receive the last update, she still has Alice's // old policy. 
We validate this by checking the base fee is the older diff --git a/peer/test_utils.go b/peer/test_utils.go index 34c42e2f7c..0fe88e2674 100644 --- a/peer/test_utils.go +++ b/peer/test_utils.go @@ -615,8 +615,14 @@ func createTestPeer(t *testing.T) *peerTestCtx { }) require.NoError(t, err) - dbAliceGraph, err := graphdb.NewChannelGraph(graphBackend) + dbAliceGraph, err := graphdb.NewChannelGraph(&graphdb.Config{ + KVDB: graphBackend, + }) require.NoError(t, err) + require.NoError(t, dbAliceGraph.Start()) + t.Cleanup(func() { + require.NoError(t, dbAliceGraph.Stop()) + }) dbAliceChannel := channeldb.OpenForTesting(t, dbPath) diff --git a/pilot.go b/pilot.go index 11333a0722..8cbf23cc65 100644 --- a/pilot.go +++ b/pilot.go @@ -295,6 +295,6 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot, }, nil }, SubscribeTransactions: svr.cc.Wallet.SubscribeTransactions, - SubscribeTopology: svr.graphBuilder.SubscribeTopology, + SubscribeTopology: svr.graphDB.SubscribeTopology, }, nil } diff --git a/routing/pathfind_test.go b/routing/pathfind_test.go index fd92ed9343..8a0280686b 100644 --- a/routing/pathfind_test.go +++ b/routing/pathfind_test.go @@ -167,11 +167,16 @@ func makeTestGraph(t *testing.T, useCache bool) (*graphdb.ChannelGraph, t.Cleanup(backendCleanup) graph, err := graphdb.NewChannelGraph( - backend, graphdb.WithUseGraphCache(useCache), + &graphdb.Config{KVDB: backend}, + graphdb.WithUseGraphCache(useCache), ) if err != nil { return nil, nil, err } + require.NoError(t, graph.Start()) + t.Cleanup(func() { + require.NoError(t, graph.Stop()) + }) return graph, backend, nil } diff --git a/rpcserver.go b/rpcserver.go index 7236a5dad5..692ba74d2b 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -48,7 +48,6 @@ import ( "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/funding" - "github.com/lightningnetwork/lnd/graph" graphdb "github.com/lightningnetwork/lnd/graph/db" "github.com/lightningnetwork/lnd/graph/db/models" "github.com/lightningnetwork/lnd/htlcswitch" @@ -3294,7 +3293,7 @@ func (r *rpcServer) GetInfo(_ context.Context, // TODO(roasbeef): add synced height n stuff isTestNet := chainreg.IsTestnet(&r.cfg.ActiveNetParams) - nodeColor := graph.EncodeHexColor(nodeAnn.RGBColor) + nodeColor := graphdb.EncodeHexColor(nodeAnn.RGBColor) version := build.Version() + " commit=" + build.Commit return &lnrpc.GetInfoResponse{ @@ -6886,7 +6885,7 @@ func marshalNode(node *models.LightningNode) *lnrpc.LightningNode { PubKey: hex.EncodeToString(node.PubKeyBytes[:]), Addresses: nodeAddrs, Alias: node.Alias, - Color: graph.EncodeHexColor(node.Color), + Color: graphdb.EncodeHexColor(node.Color), Features: features, CustomRecords: customRecords, } @@ -7084,7 +7083,7 @@ func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription, // First, we start by subscribing to a new intent to receive // notifications from the channel router. - client, err := r.server.graphBuilder.SubscribeTopology() + client, err := r.server.graphDB.SubscribeTopology() if err != nil { return err } @@ -7137,7 +7136,7 @@ func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription, // returned by the router to the form of notifications expected by the current // gRPC service. 
 func marshallTopologyChange(
-	topChange *graph.TopologyChange) *lnrpc.GraphTopologyUpdate {
+	topChange *graphdb.TopologyChange) *lnrpc.GraphTopologyUpdate {
 
 	// encodeKey is a simple helper function that converts a live public
 	// key into a hex-encoded version of the compressed serialization for
diff --git a/server.go b/server.go
index ecb3eceae3..deae1f5915 100644
--- a/server.go
+++ b/server.go
@@ -368,7 +368,7 @@ type server struct {
 // updatePersistentPeerAddrs subscribes to topology changes and stores
 // advertised addresses for any NodeAnnouncements from our persisted peers.
 func (s *server) updatePersistentPeerAddrs() error {
-	graphSub, err := s.graphBuilder.SubscribeTopology()
+	graphSub, err := s.graphDB.SubscribeTopology()
 	if err != nil {
 		return err
 	}
@@ -2287,6 +2287,12 @@ func (s *server) Start() error {
 			return
 		}
 
+		cleanup = cleanup.add(s.graphDB.Stop)
+		if err := s.graphDB.Start(); err != nil {
+			startErr = err
+			return
+		}
+
 		cleanup = cleanup.add(s.graphBuilder.Stop)
 		if err := s.graphBuilder.Start(); err != nil {
 			startErr = err
@@ -2588,6 +2594,9 @@ func (s *server) Stop() error {
 		if err := s.graphBuilder.Stop(); err != nil {
 			srvrLog.Warnf("failed to stop graphBuilder %v", err)
 		}
+		if err := s.graphDB.Stop(); err != nil {
+			srvrLog.Warnf("failed to stop graphDB %v", err)
+		}
 		if err := s.chainArb.Stop(); err != nil {
 			srvrLog.Warnf("failed to stop chainArb: %v", err)
 		}
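
// Minimal, standalone sketch (stdlib only, not part of this patch) of the
// start/stop discipline the server change relies on: the graph store is
// started before the graph builder that depends on it, and the two are
// stopped in the reverse order so the builder never runs against a stopped
// store. Subsystem names are illustrative.
package main

import "fmt"

type subsystem struct {
	name        string
	start, stop func() error
}

// startAll starts the subsystems in order and, on failure, unwinds the ones
// already started in reverse order, echoing the server's cleanup list.
func startAll(subs []subsystem) error {
	for i, s := range subs {
		if err := s.start(); err != nil {
			for j := i - 1; j >= 0; j-- {
				if stopErr := subs[j].stop(); stopErr != nil {
					fmt.Printf("failed to stop %s: %v\n",
						subs[j].name, stopErr)
				}
			}
			return fmt.Errorf("unable to start %s: %w", s.name, err)
		}
	}
	return nil
}

func main() {
	mkSub := func(name string) subsystem {
		return subsystem{
			name:  name,
			start: func() error { fmt.Println("started", name); return nil },
			stop:  func() error { fmt.Println("stopped", name); return nil },
		}
	}

	// graphDB first, then the graphBuilder, mirroring server.Start above.
	subs := []subsystem{mkSub("graphDB"), mkSub("graphBuilder")}
	if err := startAll(subs); err != nil {
		fmt.Println(err)
	}
}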