graph_test.go

package channeldb

import (
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "errors"
    "fmt"
    "image/color"
    "math"
    prand "math/rand"
    "net"
    "reflect"
    "runtime"
    "sync"
    "testing"
    "time"

    "github.com/btcsuite/btcd/btcec/v2"
    "github.com/btcsuite/btcd/btcec/v2/ecdsa"
    "github.com/btcsuite/btcd/btcutil"
    "github.com/btcsuite/btcd/chaincfg/chainhash"
    "github.com/btcsuite/btcd/wire"
    "github.com/davecgh/go-spew/spew"
    "github.com/lightningnetwork/lnd/channeldb/models"
    "github.com/lightningnetwork/lnd/kvdb"
    "github.com/lightningnetwork/lnd/lnwire"
    "github.com/lightningnetwork/lnd/routing/route"
    "github.com/stretchr/testify/require"
    "golang.org/x/exp/rand"
)

var (
    testAddr = &net.TCPAddr{
        IP:   (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
        Port: 9000,
    }
    anotherAddr, _ = net.ResolveTCPAddr("tcp",
        "[2001:db8:85a3:0:0:8a2e:370:7334]:80")
    testAddrs = []net.Addr{testAddr, anotherAddr}

    testRBytes, _ = hex.DecodeString("8ce2bc69281ce27da07e6683571319d18e949ddfa2965fb6caa1bf0314f882d7")
    testSBytes, _ = hex.DecodeString("299105481d63e0f4bc2a88121167221b6700d72a0ead154c03be696a292d24ae")
    testRScalar   = new(btcec.ModNScalar)
    testSScalar   = new(btcec.ModNScalar)
    _             = testRScalar.SetByteSlice(testRBytes)
    _             = testSScalar.SetByteSlice(testSBytes)
    testSig       = ecdsa.NewSignature(testRScalar, testSScalar)

    testFeatures = lnwire.NewFeatureVector(
        lnwire.NewRawFeatureVector(lnwire.GossipQueriesRequired),
        lnwire.Features,
    )

    testPub = route.Vertex{2, 202, 4}
)

// MakeTestGraph creates a new instance of the ChannelGraph for testing
// purposes.
func MakeTestGraph(t testing.TB, modifiers ...OptionModifier) (*ChannelGraph,
    error) {

    opts := DefaultOptions()
    for _, modifier := range modifiers {
        modifier(&opts)
    }

    // Next, create channelgraph for the first time.
    backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
    if err != nil {
        backendCleanup()
        return nil, err
    }

    graph, err := NewChannelGraph(
        backend, opts.RejectCacheSize, opts.ChannelCacheSize,
        opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
        true, false,
    )
    if err != nil {
        backendCleanup()
        return nil, err
    }

    t.Cleanup(func() {
        _ = backend.Close()
        backendCleanup()
    })

    return graph, nil
}

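// A minimal usage sketch, mirroring the tests below: a test obtains a graph
// with MakeTestGraph(t), adds a vertex built by createTestVertex, and relies
// on t.Cleanup for teardown.
//
//	graph, err := MakeTestGraph(t)
//	require.NoError(t, err)
//	node, err := createTestVertex(graph.db)
//	require.NoError(t, err)
//	require.NoError(t, graph.AddLightningNode(node))
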
func createLightningNode(db kvdb.Backend, priv *btcec.PrivateKey) (*LightningNode, error) {
    updateTime := prand.Int63()

    pub := priv.PubKey().SerializeCompressed()
    n := &LightningNode{
        HaveNodeAnnouncement: true,
        AuthSigBytes:         testSig.Serialize(),
        LastUpdate:           time.Unix(updateTime, 0),
        Color:                color.RGBA{1, 2, 3, 0},
        Alias:                "kek" + string(pub[:]),
        Features:             testFeatures,
        Addresses:            testAddrs,
    }
    copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed())

    return n, nil
}

func createTestVertex(db kvdb.Backend) (*LightningNode, error) {
    priv, err := btcec.NewPrivateKey()
    if err != nil {
        return nil, err
    }

    return createLightningNode(db, priv)
}

func TestNodeInsertionAndDeletion(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test basic insertion/deletion for vertexes from the
    // graph, so we'll create a test vertex to start with.
    node := &LightningNode{
        HaveNodeAnnouncement: true,
        AuthSigBytes:         testSig.Serialize(),
        LastUpdate:           time.Unix(1232342, 0),
        Color:                color.RGBA{1, 2, 3, 0},
        Alias:                "kek",
        Features:             testFeatures,
        Addresses:            testAddrs,
        ExtraOpaqueData:      []byte("extra new data"),
        PubKeyBytes:          testPub,
    }

    // First, insert the node into the graph DB. This should succeed
    // without any errors.
    if err := graph.AddLightningNode(node); err != nil {
        t.Fatalf("unable to add node: %v", err)
    }
    assertNodeInCache(t, graph, node, testFeatures)

    // Next, fetch the node from the database to ensure everything was
    // serialized properly.
    dbNode, err := graph.FetchLightningNode(nil, testPub)
    require.NoError(t, err, "unable to locate node")

    if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil {
        t.Fatalf("unable to query for node: %v", err)
    } else if !exists {
        t.Fatalf("node should be found but wasn't")
    }

    // The two nodes should match exactly!
    if err := compareNodes(node, dbNode); err != nil {
        t.Fatalf("nodes don't match: %v", err)
    }

    // Next, delete the node from the graph, this should purge all data
    // related to the node.
    if err := graph.DeleteLightningNode(testPub); err != nil {
        t.Fatalf("unable to delete node: %v", err)
    }
    assertNodeNotInCache(t, graph, testPub)

    // Finally, attempt to fetch the node again. This should fail as the
    // node should have been deleted from the database.
    _, err = graph.FetchLightningNode(nil, testPub)
    if err != ErrGraphNodeNotFound {
        t.Fatalf("fetch after delete should fail!")
    }
}

// TestPartialNode checks that we can add and retrieve a LightningNode where
// only the pubkey is known to the database.
func TestPartialNode(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We want to be able to insert nodes into the graph that only have the
    // PubKey set.
    node := &LightningNode{
        HaveNodeAnnouncement: false,
        PubKeyBytes:          testPub,
    }

    if err := graph.AddLightningNode(node); err != nil {
        t.Fatalf("unable to add node: %v", err)
    }
    assertNodeInCache(t, graph, node, nil)

    // Next, fetch the node from the database to ensure everything was
    // serialized properly.
    dbNode, err := graph.FetchLightningNode(nil, testPub)
    require.NoError(t, err, "unable to locate node")

    if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil {
        t.Fatalf("unable to query for node: %v", err)
    } else if !exists {
        t.Fatalf("node should be found but wasn't")
    }

    // The two nodes should match exactly! (with default values for
    // LastUpdate and db set to satisfy compareNodes())
    node = &LightningNode{
        HaveNodeAnnouncement: false,
        LastUpdate:           time.Unix(0, 0),
        PubKeyBytes:          testPub,
    }

    if err := compareNodes(node, dbNode); err != nil {
        t.Fatalf("nodes don't match: %v", err)
    }

    // Next, delete the node from the graph, this should purge all data
    // related to the node.
    if err := graph.DeleteLightningNode(testPub); err != nil {
        t.Fatalf("unable to delete node: %v", err)
    }
    assertNodeNotInCache(t, graph, testPub)

    // Finally, attempt to fetch the node again. This should fail as the
    // node should have been deleted from the database.
    _, err = graph.FetchLightningNode(nil, testPub)
    if err != ErrGraphNodeNotFound {
        t.Fatalf("fetch after delete should fail!")
    }
}

func TestAliasLookup(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test the alias index within the database, so first
    // create a new test node.
    testNode, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")

    // Add the node to the graph's database, this should also insert an
    // entry into the alias index for this node.
    if err := graph.AddLightningNode(testNode); err != nil {
        t.Fatalf("unable to add node: %v", err)
    }

    // Next, attempt to lookup the alias. The alias should exactly match
    // the one which the test node was assigned.
    nodePub, err := testNode.PubKey()
    require.NoError(t, err, "unable to generate pubkey")
    dbAlias, err := graph.LookupAlias(nodePub)
    require.NoError(t, err, "unable to find alias")
    if dbAlias != testNode.Alias {
        t.Fatalf("aliases don't match, expected %v got %v",
            testNode.Alias, dbAlias)
    }

    // Ensure that looking up a non-existent alias results in an error.
    node, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")
    nodePub, err = node.PubKey()
    require.NoError(t, err, "unable to generate pubkey")
    _, err = graph.LookupAlias(nodePub)
    if err != ErrNodeAliasNotFound {
        t.Fatalf("alias lookup should fail for non-existent pubkey")
    }
}

func TestSourceNode(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test the setting/getting of the source node, so we
    // first create a fake node to use within the test.
    testNode, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")

    // Attempt to fetch the source node, this should return an error as the
    // source node hasn't yet been set.
    if _, err := graph.SourceNode(); err != ErrSourceNodeNotSet {
        t.Fatalf("source node shouldn't be set in new graph")
    }

    // Set the source node, this should insert the node into the database
    // in a special way indicating it's the source node.
    if err := graph.SetSourceNode(testNode); err != nil {
        t.Fatalf("unable to set source node: %v", err)
    }

    // Retrieve the source node from the database, it should exactly match
    // the one we set above.
    sourceNode, err := graph.SourceNode()
    require.NoError(t, err, "unable to fetch source node")
    if err := compareNodes(testNode, sourceNode); err != nil {
        t.Fatalf("nodes don't match: %v", err)
    }
}

func TestEdgeInsertionDeletion(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test the insertion/deletion of edges, so we create two
    // vertexes to connect.
    node1, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")
    node2, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")

    // In addition to the fake vertexes we create some fake channel
    // identifiers.
    chanID := uint64(prand.Int63())
    outpoint := wire.OutPoint{
        Hash:  rev,
        Index: 9,
    }

    // Add the new edge to the database, this should proceed without any
    // errors.
    node1Pub, err := node1.PubKey()
    require.NoError(t, err, "unable to generate node key")
    node2Pub, err := node2.PubKey()
    require.NoError(t, err, "unable to generate node key")
    edgeInfo := models.ChannelEdgeInfo{
        ChannelID: chanID,
        ChainHash: key,
        AuthProof: &models.ChannelAuthProof{
            NodeSig1Bytes:    testSig.Serialize(),
            NodeSig2Bytes:    testSig.Serialize(),
            BitcoinSig1Bytes: testSig.Serialize(),
            BitcoinSig2Bytes: testSig.Serialize(),
        },
        ChannelPoint: outpoint,
        Capacity:     9000,
    }
    copy(edgeInfo.NodeKey1Bytes[:], node1Pub.SerializeCompressed())
    copy(edgeInfo.NodeKey2Bytes[:], node2Pub.SerializeCompressed())
    copy(edgeInfo.BitcoinKey1Bytes[:], node1Pub.SerializeCompressed())
    copy(edgeInfo.BitcoinKey2Bytes[:], node2Pub.SerializeCompressed())

    if err := graph.AddChannelEdge(&edgeInfo); err != nil {
        t.Fatalf("unable to create channel edge: %v", err)
    }
    assertEdgeWithNoPoliciesInCache(t, graph, &edgeInfo)

    // Ensure that both policies are returned as unknown (nil).
    _, e1, e2, err := graph.FetchChannelEdgesByID(chanID)
    if err != nil {
        t.Fatalf("unable to fetch channel edge")
    }
    if e1 != nil || e2 != nil {
        t.Fatalf("channel edges not unknown")
    }

    // Next, attempt to delete the edge from the database, again this
    // should proceed without any issues.
    if err := graph.DeleteChannelEdges(false, true, chanID); err != nil {
        t.Fatalf("unable to delete edge: %v", err)
    }
    assertNoEdge(t, graph, chanID)

    // Ensure that any attempt to look up the now deleted channel edge
    // fails as expected.
    if _, _, _, err := graph.FetchChannelEdgesByOutpoint(&outpoint); err == nil {
        t.Fatalf("channel edge not deleted")
    }
    if _, _, _, err := graph.FetchChannelEdgesByID(chanID); err == nil {
        t.Fatalf("channel edge not deleted")
    }
    isZombie, _, _ := graph.IsZombieEdge(chanID)
    if !isZombie {
        t.Fatal("channel edge not marked as zombie")
    }

    // Finally, attempt to delete a (now) non-existent edge within the
    // database, this should result in an error.
    err = graph.DeleteChannelEdges(false, true, chanID)
    if err != ErrEdgeNotFound {
        t.Fatalf("deleting a non-existent edge should fail!")
    }
}

func createEdge(height, txIndex uint32, txPosition uint16, outPointIndex uint32,
    node1, node2 *LightningNode) (models.ChannelEdgeInfo,
    lnwire.ShortChannelID) {

    shortChanID := lnwire.ShortChannelID{
        BlockHeight: height,
        TxIndex:     txIndex,
        TxPosition:  txPosition,
    }
    outpoint := wire.OutPoint{
        Hash:  rev,
        Index: outPointIndex,
    }

    node1Pub, _ := node1.PubKey()
    node2Pub, _ := node2.PubKey()

    edgeInfo := models.ChannelEdgeInfo{
        ChannelID: shortChanID.ToUint64(),
        ChainHash: key,
        AuthProof: &models.ChannelAuthProof{
            NodeSig1Bytes:    testSig.Serialize(),
            NodeSig2Bytes:    testSig.Serialize(),
            BitcoinSig1Bytes: testSig.Serialize(),
            BitcoinSig2Bytes: testSig.Serialize(),
        },
        ChannelPoint: outpoint,
        Capacity:     9000,
    }
    copy(edgeInfo.NodeKey1Bytes[:], node1Pub.SerializeCompressed())
    copy(edgeInfo.NodeKey2Bytes[:], node2Pub.SerializeCompressed())
    copy(edgeInfo.BitcoinKey1Bytes[:], node1Pub.SerializeCompressed())
    copy(edgeInfo.BitcoinKey2Bytes[:], node2Pub.SerializeCompressed())

    return edgeInfo, shortChanID
}

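// A short note on the channel ID used above: lnwire.ShortChannelID.ToUint64
// packs the three fields as (BlockHeight << 40) | (TxIndex << 16) |
// TxPosition, so channel IDs sort by funding height first. That is what lets
// TestDisconnectBlockAtHeight below range-scan every channel confirmed at or
// above a given height, e.g.:
//
//	scid := lnwire.ShortChannelID{BlockHeight: 156}
//	_ = scid.ToUint64() // 156 << 40
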
// TestDisconnectBlockAtHeight checks that the pruned state of the channel
// database is what we expect after calling DisconnectBlockAtHeight.
func TestDisconnectBlockAtHeight(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    sourceNode, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create source node")
    if err := graph.SetSourceNode(sourceNode); err != nil {
        t.Fatalf("unable to set source node: %v", err)
    }

    // We'd like to test the insertion/deletion of edges, so we create two
    // vertexes to connect.
    node1, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")
    node2, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")

    // In addition to the fake vertexes we create some fake channel
    // identifiers.
    var spendOutputs []*wire.OutPoint
    var blockHash chainhash.Hash
    copy(blockHash[:], bytes.Repeat([]byte{1}, 32))

    // Prune the graph a few times to make sure we have entries in the
    // prune log.
    _, err = graph.PruneGraph(spendOutputs, &blockHash, 155)
    require.NoError(t, err, "unable to prune graph")

    var blockHash2 chainhash.Hash
    copy(blockHash2[:], bytes.Repeat([]byte{2}, 32))
    _, err = graph.PruneGraph(spendOutputs, &blockHash2, 156)
    require.NoError(t, err, "unable to prune graph")

    // We'll create 3 almost identical edges, so first create a helper
    // method containing all logic for doing so.

    // Create an edge which has its block height at 156.
    height := uint32(156)
    edgeInfo, _ := createEdge(height, 0, 0, 0, node1, node2)

    // Create an edge with block height 157. We give it maximum values for
    // tx index and position, to make sure our database range scan gets
    // edges from the entire range.
    edgeInfo2, _ := createEdge(
        height+1, math.MaxUint32&0x00ffffff, math.MaxUint16, 1,
        node1, node2,
    )

    // Create a third edge, this with a block height of 155.
    edgeInfo3, _ := createEdge(height-1, 0, 0, 2, node1, node2)

    // Now add all these new edges to the database.
    if err := graph.AddChannelEdge(&edgeInfo); err != nil {
        t.Fatalf("unable to create channel edge: %v", err)
    }
    if err := graph.AddChannelEdge(&edgeInfo2); err != nil {
        t.Fatalf("unable to create channel edge: %v", err)
    }
    if err := graph.AddChannelEdge(&edgeInfo3); err != nil {
        t.Fatalf("unable to create channel edge: %v", err)
    }
    assertEdgeWithNoPoliciesInCache(t, graph, &edgeInfo)
    assertEdgeWithNoPoliciesInCache(t, graph, &edgeInfo2)
    assertEdgeWithNoPoliciesInCache(t, graph, &edgeInfo3)

    // Call DisconnectBlockAtHeight, which should prune every channel
    // that has a funding height of 'height' or greater.
    removed, err := graph.DisconnectBlockAtHeight(uint32(height))
    if err != nil {
        t.Fatalf("unable to prune %v", err)
    }
    assertNoEdge(t, graph, edgeInfo.ChannelID)
    assertNoEdge(t, graph, edgeInfo2.ChannelID)
    assertEdgeWithNoPoliciesInCache(t, graph, &edgeInfo3)

    // The two edges should have been removed.
    if len(removed) != 2 {
        t.Fatalf("expected two edges to be removed from graph, "+
            "only %d were", len(removed))
    }
    if removed[0].ChannelID != edgeInfo.ChannelID {
        t.Fatalf("expected edge to be removed from graph")
    }
    if removed[1].ChannelID != edgeInfo2.ChannelID {
        t.Fatalf("expected edge to be removed from graph")
    }

    // The two first edges should be removed from the db.
    _, _, has, isZombie, err := graph.HasChannelEdge(edgeInfo.ChannelID)
    require.NoError(t, err, "unable to query for edge")
    if has {
        t.Fatalf("edge1 was not pruned from the graph")
    }
    if isZombie {
        t.Fatal("reorged edge1 should not be marked as zombie")
    }
    _, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo2.ChannelID)
    require.NoError(t, err, "unable to query for edge")
    if has {
        t.Fatalf("edge2 was not pruned from the graph")
    }
    if isZombie {
        t.Fatal("reorged edge2 should not be marked as zombie")
    }

    // Edge 3 should not be removed.
    _, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo3.ChannelID)
    require.NoError(t, err, "unable to query for edge")
    if !has {
        t.Fatalf("edge3 was pruned from the graph")
    }
    if isZombie {
        t.Fatal("edge3 was marked as zombie")
    }

    // PruneTip should be set to the blockHash we specified for the block
    // at height 155.
    hash, h, err := graph.PruneTip()
    require.NoError(t, err, "unable to get prune tip")
    if !blockHash.IsEqual(hash) {
        t.Fatalf("expected best block to be %x, was %x", blockHash, hash)
    }
    if h != height-1 {
        t.Fatalf("expected best block height to be %d, was %d", height-1, h)
    }
}

func assertEdgeInfoEqual(t *testing.T, e1 *models.ChannelEdgeInfo,
    e2 *models.ChannelEdgeInfo) {

    if e1.ChannelID != e2.ChannelID {
        t.Fatalf("chan id's don't match: %v vs %v", e1.ChannelID,
            e2.ChannelID)
    }
    if e1.ChainHash != e2.ChainHash {
        t.Fatalf("chain hashes don't match: %v vs %v", e1.ChainHash,
            e2.ChainHash)
    }
    if !bytes.Equal(e1.NodeKey1Bytes[:], e2.NodeKey1Bytes[:]) {
        t.Fatalf("nodekey1 doesn't match")
    }
    if !bytes.Equal(e1.NodeKey2Bytes[:], e2.NodeKey2Bytes[:]) {
        t.Fatalf("nodekey2 doesn't match")
    }
    if !bytes.Equal(e1.BitcoinKey1Bytes[:], e2.BitcoinKey1Bytes[:]) {
        t.Fatalf("bitcoinkey1 doesn't match")
    }
    if !bytes.Equal(e1.BitcoinKey2Bytes[:], e2.BitcoinKey2Bytes[:]) {
        t.Fatalf("bitcoinkey2 doesn't match")
    }
    if !bytes.Equal(e1.Features, e2.Features) {
        t.Fatalf("features don't match: %x vs %x", e1.Features,
            e2.Features)
    }
    if !bytes.Equal(e1.AuthProof.NodeSig1Bytes, e2.AuthProof.NodeSig1Bytes) {
        t.Fatalf("nodesig1 doesn't match: %v vs %v",
            spew.Sdump(e1.AuthProof.NodeSig1Bytes),
            spew.Sdump(e2.AuthProof.NodeSig1Bytes))
    }
    if !bytes.Equal(e1.AuthProof.NodeSig2Bytes, e2.AuthProof.NodeSig2Bytes) {
        t.Fatalf("nodesig2 doesn't match")
    }
    if !bytes.Equal(e1.AuthProof.BitcoinSig1Bytes, e2.AuthProof.BitcoinSig1Bytes) {
        t.Fatalf("bitcoinsig1 doesn't match")
    }
    if !bytes.Equal(e1.AuthProof.BitcoinSig2Bytes, e2.AuthProof.BitcoinSig2Bytes) {
        t.Fatalf("bitcoinsig2 doesn't match")
    }
    if e1.ChannelPoint != e2.ChannelPoint {
        t.Fatalf("channel points don't match: %v vs %v", e1.ChannelPoint,
            e2.ChannelPoint)
    }
    if e1.Capacity != e2.Capacity {
        t.Fatalf("capacity doesn't match: %v vs %v", e1.Capacity,
            e2.Capacity)
    }
    if !bytes.Equal(e1.ExtraOpaqueData, e2.ExtraOpaqueData) {
        t.Fatalf("extra data doesn't match: %v vs %v",
            e1.ExtraOpaqueData, e2.ExtraOpaqueData)
    }
}

func createChannelEdge(db kvdb.Backend, node1, node2 *LightningNode) (
    *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
    *models.ChannelEdgePolicy) {

    var (
        firstNode  [33]byte
        secondNode [33]byte
    )
    if bytes.Compare(node1.PubKeyBytes[:], node2.PubKeyBytes[:]) == -1 {
        firstNode = node1.PubKeyBytes
        secondNode = node2.PubKeyBytes
    } else {
        firstNode = node2.PubKeyBytes
        secondNode = node1.PubKeyBytes
    }

    // In addition to the fake vertexes we create some fake channel
    // identifiers.
    chanID := uint64(prand.Int63())
    outpoint := wire.OutPoint{
        Hash:  rev,
        Index: 9,
    }

    // Add the new edge to the database, this should proceed without any
    // errors.
    edgeInfo := &models.ChannelEdgeInfo{
        ChannelID: chanID,
        ChainHash: key,
        AuthProof: &models.ChannelAuthProof{
            NodeSig1Bytes:    testSig.Serialize(),
            NodeSig2Bytes:    testSig.Serialize(),
            BitcoinSig1Bytes: testSig.Serialize(),
            BitcoinSig2Bytes: testSig.Serialize(),
        },
        ChannelPoint:    outpoint,
        Capacity:        1000,
        ExtraOpaqueData: []byte("new unknown feature"),
    }
    copy(edgeInfo.NodeKey1Bytes[:], firstNode[:])
    copy(edgeInfo.NodeKey2Bytes[:], secondNode[:])
    copy(edgeInfo.BitcoinKey1Bytes[:], firstNode[:])
    copy(edgeInfo.BitcoinKey2Bytes[:], secondNode[:])

    edge1 := &models.ChannelEdgePolicy{
        SigBytes:                  testSig.Serialize(),
        ChannelID:                 chanID,
        LastUpdate:                time.Unix(433453, 0),
        MessageFlags:              1,
        ChannelFlags:              0,
        TimeLockDelta:             99,
        MinHTLC:                   2342135,
        MaxHTLC:                   13928598,
        FeeBaseMSat:               4352345,
        FeeProportionalMillionths: 3452352,
        ToNode:                    secondNode,
        ExtraOpaqueData:           []byte{1, 0},
    }
    edge2 := &models.ChannelEdgePolicy{
        SigBytes:                  testSig.Serialize(),
        ChannelID:                 chanID,
        LastUpdate:                time.Unix(124234, 0),
        MessageFlags:              1,
        ChannelFlags:              1,
        TimeLockDelta:             99,
        MinHTLC:                   2342135,
        MaxHTLC:                   13928598,
        FeeBaseMSat:               4352345,
        FeeProportionalMillionths: 90392423,
        ToNode:                    firstNode,
        ExtraOpaqueData:           []byte{1, 0},
    }

    return edgeInfo, edge1, edge2
}

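// createChannelEdge sorts the two public keys with bytes.Compare so that
// NodeKey1Bytes always holds the lexicographically smaller key, mirroring the
// gossip convention that node_id_1 is the lesser of the two. A quick,
// hypothetical sanity check of that invariant could read:
//
//	edgeInfo, _, _ := createChannelEdge(graph.db, node1, node2)
//	require.Equal(t, -1, bytes.Compare(
//		edgeInfo.NodeKey1Bytes[:], edgeInfo.NodeKey2Bytes[:],
//	))
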
func TestEdgeInfoUpdates(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test the update of edges inserted into the database, so
    // we create two vertexes to connect.
    node1, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")
    if err := graph.AddLightningNode(node1); err != nil {
        t.Fatalf("unable to add node: %v", err)
    }
    assertNodeInCache(t, graph, node1, testFeatures)
    node2, err := createTestVertex(graph.db)
    require.NoError(t, err, "unable to create test node")
    if err := graph.AddLightningNode(node2); err != nil {
        t.Fatalf("unable to add node: %v", err)
    }
    assertNodeInCache(t, graph, node2, testFeatures)

    // Create an edge and add it to the db.
    edgeInfo, edge1, edge2 := createChannelEdge(graph.db, node1, node2)

    // Make sure inserting the policy at this point, before the edge info
    // is added, will fail.
    if err := graph.UpdateEdgePolicy(edge1); err != ErrEdgeNotFound {
        t.Fatalf("expected ErrEdgeNotFound, got: %v", err)
    }
    require.Len(t, graph.graphCache.nodeChannels, 0)

    // Add the edge info.
    if err := graph.AddChannelEdge(edgeInfo); err != nil {
        t.Fatalf("unable to create channel edge: %v", err)
    }
    assertEdgeWithNoPoliciesInCache(t, graph, edgeInfo)

    chanID := edgeInfo.ChannelID
    outpoint := edgeInfo.ChannelPoint

    // Next, insert both edge policies into the database, they should both
    // be inserted without any issues.
    if err := graph.UpdateEdgePolicy(edge1); err != nil {
        t.Fatalf("unable to update edge: %v", err)
    }
    assertEdgeWithPolicyInCache(t, graph, edgeInfo, edge1, true)
    if err := graph.UpdateEdgePolicy(edge2); err != nil {
        t.Fatalf("unable to update edge: %v", err)
    }
    assertEdgeWithPolicyInCache(t, graph, edgeInfo, edge2, false)

    // Check for existence of the edge within the database, it should be
    // found.
    _, _, found, isZombie, err := graph.HasChannelEdge(chanID)
    require.NoError(t, err, "unable to query for edge")
    if !found {
        t.Fatalf("graph should have the inserted edge")
    }
    if isZombie {
        t.Fatal("live edge should not be marked as zombie")
    }

    // We should also be able to retrieve the channelID only knowing the
    // channel point of the channel.
    dbChanID, err := graph.ChannelID(&outpoint)
    require.NoError(t, err, "unable to retrieve channel ID")
    if dbChanID != chanID {
        t.Fatalf("chan ID's mismatch, expected %v got %v", chanID,
            dbChanID)
    }

    // With the edges inserted, perform some queries to ensure that they've
    // been inserted properly.
    dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
    require.NoError(t, err, "unable to fetch channel by ID")
    if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
        t.Fatalf("edge doesn't match: %v", err)
    }
    if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
        t.Fatalf("edge doesn't match: %v", err)
    }
    assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)

    // Next, attempt to query the channel edges according to the outpoint
    // of the channel.
    dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByOutpoint(&outpoint)
    require.NoError(t, err, "unable to fetch channel by outpoint")
    if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
        t.Fatalf("edge doesn't match: %v", err)
    }
    if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
        t.Fatalf("edge doesn't match: %v", err)
    }
    assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
}

func assertNodeInCache(t *testing.T, g *ChannelGraph, n *LightningNode,
    expectedFeatures *lnwire.FeatureVector) {

    // Let's check the internal view first.
    require.Equal(
        t, expectedFeatures, g.graphCache.nodeFeatures[n.PubKeyBytes],
    )

    // The external view should reflect this as well, except that when we
    // expect the features to be nil internally, the public interface
    // returns an empty feature vector instead.
    if expectedFeatures == nil {
        expectedFeatures = lnwire.EmptyFeatureVector()
    }
    features := g.graphCache.GetFeatures(n.PubKeyBytes)
    require.Equal(t, expectedFeatures, features)
}

func assertNodeNotInCache(t *testing.T, g *ChannelGraph, n route.Vertex) {
    _, ok := g.graphCache.nodeFeatures[n]
    require.False(t, ok)

    _, ok = g.graphCache.nodeChannels[n]
    require.False(t, ok)

    // We should get the default features for this node.
    features := g.graphCache.GetFeatures(n)
    require.Equal(t, lnwire.EmptyFeatureVector(), features)
}

func assertEdgeWithNoPoliciesInCache(t *testing.T, g *ChannelGraph,
    e *models.ChannelEdgeInfo) {

    // Let's check the internal view first.
    require.NotEmpty(t, g.graphCache.nodeChannels[e.NodeKey1Bytes])
    require.NotEmpty(t, g.graphCache.nodeChannels[e.NodeKey2Bytes])

    expectedNode1Channel := &DirectedChannel{
        ChannelID:    e.ChannelID,
        IsNode1:      true,
        OtherNode:    e.NodeKey2Bytes,
        Capacity:     e.Capacity,
        OutPolicySet: false,
        InPolicy:     nil,
    }
    require.Contains(
        t, g.graphCache.nodeChannels[e.NodeKey1Bytes], e.ChannelID,
    )
    require.Equal(
        t, expectedNode1Channel,
        g.graphCache.nodeChannels[e.NodeKey1Bytes][e.ChannelID],
    )

    expectedNode2Channel := &DirectedChannel{
        ChannelID:    e.ChannelID,
        IsNode1:      false,
        OtherNode:    e.NodeKey1Bytes,
        Capacity:     e.Capacity,
        OutPolicySet: false,
        InPolicy:     nil,
    }
    require.Contains(
        t, g.graphCache.nodeChannels[e.NodeKey2Bytes], e.ChannelID,
    )
    require.Equal(
        t, expectedNode2Channel,
        g.graphCache.nodeChannels[e.NodeKey2Bytes][e.ChannelID],
    )

    // The external view should reflect this as well.
    var foundChannel *DirectedChannel
    err := g.graphCache.ForEachChannel(
        e.NodeKey1Bytes, func(c *DirectedChannel) error {
            if c.ChannelID == e.ChannelID {
                foundChannel = c
            }
            return nil
        },
    )
    require.NoError(t, err)
    require.NotNil(t, foundChannel)
    require.Equal(t, expectedNode1Channel, foundChannel)

    err = g.graphCache.ForEachChannel(
        e.NodeKey2Bytes, func(c *DirectedChannel) error {
            if c.ChannelID == e.ChannelID {
                foundChannel = c
            }
            return nil
        },
    )
    require.NoError(t, err)
    require.NotNil(t, foundChannel)
    require.Equal(t, expectedNode2Channel, foundChannel)
}

func assertNoEdge(t *testing.T, g *ChannelGraph, chanID uint64) {
    // Make sure no channel in the cache has the given channel ID. If there
    // are no channels at all, that is fine as well.
    for _, channels := range g.graphCache.nodeChannels {
        for _, channel := range channels {
            require.NotEqual(t, channel.ChannelID, chanID)
        }
    }
}

func assertEdgeWithPolicyInCache(t *testing.T, g *ChannelGraph,
    e *models.ChannelEdgeInfo, p *models.ChannelEdgePolicy, policy1 bool) {

    // Check the internal state first.
    c1, ok := g.graphCache.nodeChannels[e.NodeKey1Bytes][e.ChannelID]
    require.True(t, ok)

    if policy1 {
        require.True(t, c1.OutPolicySet)
    } else {
        require.NotNil(t, c1.InPolicy)
        require.Equal(
            t, p.FeeProportionalMillionths,
            c1.InPolicy.FeeProportionalMillionths,
        )
    }

    c2, ok := g.graphCache.nodeChannels[e.NodeKey2Bytes][e.ChannelID]
    require.True(t, ok)

    if policy1 {
        require.NotNil(t, c2.InPolicy)
        require.Equal(
            t, p.FeeProportionalMillionths,
            c2.InPolicy.FeeProportionalMillionths,
        )
    } else {
        require.True(t, c2.OutPolicySet)
    }

    // Now for both nodes make sure that the external view is also correct.
    var (
        c1Ext *DirectedChannel
        c2Ext *DirectedChannel
    )
    require.NoError(t, g.graphCache.ForEachChannel(
        e.NodeKey1Bytes, func(c *DirectedChannel) error {
            c1Ext = c
            return nil
        },
    ))
    require.NoError(t, g.graphCache.ForEachChannel(
        e.NodeKey2Bytes, func(c *DirectedChannel) error {
            c2Ext = c
            return nil
        },
    ))

    // Only compare the fields that are actually copied, then compare the
    // values of the functions separately.
    require.Equal(t, c1, c1Ext.DeepCopy())
    require.Equal(t, c2, c2Ext.DeepCopy())
    if policy1 {
        require.Equal(
            t, p.FeeProportionalMillionths,
            c2Ext.InPolicy.FeeProportionalMillionths,
        )
        require.Equal(
            t, route.Vertex(e.NodeKey2Bytes),
            c2Ext.InPolicy.ToNodePubKey(),
        )
        require.Equal(t, testFeatures, c2Ext.InPolicy.ToNodeFeatures)
    } else {
        require.Equal(
            t, p.FeeProportionalMillionths,
            c1Ext.InPolicy.FeeProportionalMillionths,
        )
        require.Equal(
            t, route.Vertex(e.NodeKey1Bytes),
            c1Ext.InPolicy.ToNodePubKey(),
        )
        require.Equal(t, testFeatures, c1Ext.InPolicy.ToNodeFeatures)
    }
}

func randEdgePolicy(chanID uint64, db kvdb.Backend) *models.ChannelEdgePolicy {
    update := prand.Int63()

    return newEdgePolicy(chanID, db, update)
}

func newEdgePolicy(chanID uint64, db kvdb.Backend,
    updateTime int64) *models.ChannelEdgePolicy {

    return &models.ChannelEdgePolicy{
        ChannelID:                 chanID,
        LastUpdate:                time.Unix(updateTime, 0),
        MessageFlags:              1,
        ChannelFlags:              0,
        TimeLockDelta:             uint16(prand.Int63()),
        MinHTLC:                   lnwire.MilliSatoshi(prand.Int63()),
        MaxHTLC:                   lnwire.MilliSatoshi(prand.Int63()),
        FeeBaseMSat:               lnwire.MilliSatoshi(prand.Int63()),
        FeeProportionalMillionths: lnwire.MilliSatoshi(prand.Int63()),
    }
}

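// Throughout this file the least significant bit of ChannelFlags encodes the
// direction of a policy: 0 means the update was advertised by node1, 1 by
// node2. Callers of randEdgePolicy therefore set the flag and ToNode
// explicitly, for example:
//
//	edge := randEdgePolicy(chanID, graph.db)
//	edge.ChannelFlags = 1 // advertised by node2
//	edge.ToNode = node1.PubKeyBytes
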
func TestGraphTraversal(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test some of the graph traversal capabilities within
    // the DB, so we'll create a series of fake nodes to insert into the
    // graph. And we'll create 5 channels between each node pair.
    const numNodes = 20
    const numChannels = 5
    chanIndex, nodeList := fillTestGraph(t, graph, numNodes, numChannels)

    // Make an index of the node list for easy look up below.
    nodeIndex := make(map[route.Vertex]struct{})
    for _, node := range nodeList {
        nodeIndex[node.PubKeyBytes] = struct{}{}
    }

    // If we turn the channel graph cache _off_, then iterate through the
    // set of channels (to force the fall back), we should find all the
    // channels as well as the nodes included.
    graph.graphCache = nil
    err = graph.ForEachNodeCached(func(node route.Vertex,
        chans map[uint64]*DirectedChannel) error {

        if _, ok := nodeIndex[node]; !ok {
            return fmt.Errorf("node %x not found in graph", node)
        }

        for chanID := range chans {
            if _, ok := chanIndex[chanID]; !ok {
                return fmt.Errorf("chan %v not found in "+
                    "graph", chanID)
            }
        }

        return nil
    })
    require.NoError(t, err)

    // Iterate through all the known channels within the graph DB, once
    // again if the map is empty that indicates that all edges have
    // properly been reached.
    err = graph.ForEachChannel(func(ei *models.ChannelEdgeInfo,
        _ *models.ChannelEdgePolicy,
        _ *models.ChannelEdgePolicy) error {

        delete(chanIndex, ei.ChannelID)
        return nil
    })
    require.NoError(t, err)
    require.Len(t, chanIndex, 0)

    // Finally, we want to test the ability to iterate over all the
    // outgoing channels for a particular node.
    numNodeChans := 0
    firstNode, secondNode := nodeList[0], nodeList[1]
    err = graph.ForEachNodeChannel(nil, firstNode.PubKeyBytes,
        func(_ kvdb.RTx, _ *models.ChannelEdgeInfo, outEdge,
            inEdge *models.ChannelEdgePolicy) error {

            // All channels between first and second node should
            // have fully (both sides) specified policies.
            if inEdge == nil || outEdge == nil {
                return fmt.Errorf("channel policy not present")
            }

            // Each should indicate that it's outgoing (pointed
            // towards the second node).
            if !bytes.Equal(
                outEdge.ToNode[:], secondNode.PubKeyBytes[:],
            ) {
                return fmt.Errorf("wrong outgoing edge")
            }

            // The incoming edge should also indicate that it's
            // pointing to the origin node.
            if !bytes.Equal(
                inEdge.ToNode[:], firstNode.PubKeyBytes[:],
            ) {
                return fmt.Errorf("wrong incoming edge")
            }

            numNodeChans++

            return nil
        })
    require.NoError(t, err)
    require.Equal(t, numChannels, numNodeChans)
}

// TestGraphTraversalCacheable tests that the memory optimized node traversal is
// working correctly.
func TestGraphTraversalCacheable(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err, "unable to make test database")

    // We'd like to test some of the graph traversal capabilities within
    // the DB, so we'll create a series of fake nodes to insert into the
    // graph. And we'll create 5 channels between each consecutive node
    // pair.
    const numNodes = 20
    const numChannels = 5
    chanIndex, _ := fillTestGraph(t, graph, numNodes, numChannels)

    // Create a map of all nodes with the iteration we know works (because
    // it is tested in another test).
    nodeMap := make(map[route.Vertex]struct{})
    err = graph.ForEachNode(func(tx kvdb.RTx, n *LightningNode) error {
        nodeMap[n.PubKeyBytes] = struct{}{}
        return nil
    })
    require.NoError(t, err)
    require.Len(t, nodeMap, numNodes)

    // Iterate through all the known channels within the graph DB by
    // iterating over each node, once again if the map is empty that
    // indicates that all edges have properly been reached.
    var nodes []GraphCacheNode
    err = graph.ForEachNodeCacheable(
        func(tx kvdb.RTx, node GraphCacheNode) error {
            delete(nodeMap, node.PubKey())
            nodes = append(nodes, node)
            return nil
        },
    )
    require.NoError(t, err)
    require.Len(t, nodeMap, 0)

    err = graph.db.View(func(tx kvdb.RTx) error {
        for _, node := range nodes {
            err := node.ForEachChannel(
                tx, func(tx kvdb.RTx,
                    info *models.ChannelEdgeInfo,
                    policy *models.ChannelEdgePolicy,
                    policy2 *models.ChannelEdgePolicy) error { //nolint:lll

                    delete(chanIndex, info.ChannelID)
                    return nil
                },
            )
            if err != nil {
                return err
            }
        }

        return nil
    }, func() {})
    require.NoError(t, err)
    require.Len(t, chanIndex, 0)
}

func TestGraphCacheTraversal(t *testing.T) {
    t.Parallel()

    graph, err := MakeTestGraph(t)
    require.NoError(t, err)

    // We'd like to test some of the graph traversal capabilities within
    // the DB, so we'll create a series of fake nodes to insert into the
    // graph. And we'll create 5 channels between each node pair.
    const numNodes = 20
    const numChannels = 5
    chanIndex, nodeList := fillTestGraph(t, graph, numNodes, numChannels)

    // Iterate through all the known channels within the graph DB, once
    // again if the map is empty that indicates that all edges have
    // properly been reached.
    numNodeChans := 0
    for _, node := range nodeList {
        node := node

        err = graph.graphCache.ForEachChannel(
            node.PubKeyBytes, func(d *DirectedChannel) error {
                delete(chanIndex, d.ChannelID)

                if !d.OutPolicySet || d.InPolicy == nil {
                    return fmt.Errorf("channel policy not " +
                        "present")
                }

                // The incoming edge should also indicate that
                // it's pointing to the origin node.
                inPolicyNodeKey := d.InPolicy.ToNodePubKey()
                if !bytes.Equal(
                    inPolicyNodeKey[:], node.PubKeyBytes[:],
                ) {
                    return fmt.Errorf("wrong incoming edge")
                }

                numNodeChans++

                return nil
            },
        )
        require.NoError(t, err)
    }
    require.Len(t, chanIndex, 0)

    // We count the channels from both endpoints, so each of the
    // numChannels*(numNodes-1) channels created by fillTestGraph() is
    // visited exactly twice.
    require.Equal(t, numChannels*2*(numNodes-1), numNodeChans)
  1035. }
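// fillTestGraph inserts numNodes test nodes into the given graph and creates
// numChannels channels, with policies in both directions, between each pair
// of adjacent nodes. It returns an index of all created channel IDs along
// with the list of created nodes.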
func fillTestGraph(t require.TestingT, graph *ChannelGraph, numNodes,
	numChannels int) (map[uint64]struct{}, []*LightningNode) {

	nodes := make([]*LightningNode, numNodes)
	nodeIndex := map[string]struct{}{}
	for i := 0; i < numNodes; i++ {
		node, err := createTestVertex(graph.db)
		require.NoError(t, err)

		nodes[i] = node
		nodeIndex[node.Alias] = struct{}{}
	}

	// Add each of the nodes into the graph; they should be inserted
	// without error.
	for _, node := range nodes {
		require.NoError(t, graph.AddLightningNode(node))
	}

	// Iterate over each node as returned by the graph. If all nodes are
	// reached, then the map created above should be empty.
	err := graph.ForEachNode(func(_ kvdb.RTx, node *LightningNode) error {
		delete(nodeIndex, node.Alias)
		return nil
	})
	require.NoError(t, err)
	require.Len(t, nodeIndex, 0)

	// Create a number of channels between each of the node pairs generated
	// above. This will result in numChannels*(numNodes-1) channels.
	chanIndex := map[uint64]struct{}{}
	for n := 0; n < numNodes-1; n++ {
		node1 := nodes[n]
		node2 := nodes[n+1]
		if bytes.Compare(
			node1.PubKeyBytes[:], node2.PubKeyBytes[:],
		) == -1 {
			node1, node2 = node2, node1
		}

		for i := 0; i < numChannels; i++ {
			txHash := sha256.Sum256([]byte{byte(i)})
			chanID := uint64((n << 8) + i + 1)
			op := wire.OutPoint{
				Hash:  txHash,
				Index: 0,
			}

			edgeInfo := models.ChannelEdgeInfo{
				ChannelID: chanID,
				ChainHash: key,
				AuthProof: &models.ChannelAuthProof{
					NodeSig1Bytes:    testSig.Serialize(),
					NodeSig2Bytes:    testSig.Serialize(),
					BitcoinSig1Bytes: testSig.Serialize(),
					BitcoinSig2Bytes: testSig.Serialize(),
				},
				ChannelPoint: op,
				Capacity:     1000,
			}
			copy(edgeInfo.NodeKey1Bytes[:], node1.PubKeyBytes[:])
			copy(edgeInfo.NodeKey2Bytes[:], node2.PubKeyBytes[:])
			copy(edgeInfo.BitcoinKey1Bytes[:], node1.PubKeyBytes[:])
			copy(edgeInfo.BitcoinKey2Bytes[:], node2.PubKeyBytes[:])
			err := graph.AddChannelEdge(&edgeInfo)
			require.NoError(t, err)

			// Create and add an edge with random data that points
			// from node1 -> node2.
			edge := randEdgePolicy(chanID, graph.db)
			edge.ChannelFlags = 0
			edge.ToNode = node2.PubKeyBytes
			edge.SigBytes = testSig.Serialize()
			require.NoError(t, graph.UpdateEdgePolicy(edge))

			// Create another random edge that points from
			// node2 -> node1 this time.
			edge = randEdgePolicy(chanID, graph.db)
			edge.ChannelFlags = 1
			edge.ToNode = node1.PubKeyBytes
			edge.SigBytes = testSig.Serialize()
			require.NoError(t, graph.UpdateEdgePolicy(edge))

			chanIndex[chanID] = struct{}{}
		}
	}

	return chanIndex, nodes
}
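// assertPruneTip fails the test if the graph's prune tip doesn't match the
// given block hash and height.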
func assertPruneTip(t *testing.T, graph *ChannelGraph,
	blockHash *chainhash.Hash, blockHeight uint32) {

	pruneHash, pruneHeight, err := graph.PruneTip()
	if err != nil {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: unable to fetch prune tip: %v", line, err)
	}
	if !bytes.Equal(blockHash[:], pruneHash[:]) {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line: %v, prune tips don't match, expected %x got %x",
			line, blockHash, pruneHash)
	}
	if pruneHeight != blockHeight {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: prune heights don't match, expected %v "+
			"got %v", line, blockHeight, pruneHeight)
	}
}
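// assertNumChans fails the test if the graph doesn't contain exactly n
// channels.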
func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
	numChans := 0
	if err := graph.ForEachChannel(func(*models.ChannelEdgeInfo,
		*models.ChannelEdgePolicy,
		*models.ChannelEdgePolicy) error {

		numChans++
		return nil
	}); err != nil {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: unable to scan channels: %v", line, err)
	}
	if numChans != n {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: expected %v chans instead have %v", line,
			n, numChans)
	}
}
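// assertNumNodes fails the test if the graph doesn't contain exactly n nodes.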
func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) {
	numNodes := 0
	err := graph.ForEachNode(func(_ kvdb.RTx, _ *LightningNode) error {
		numNodes++
		return nil
	})
	if err != nil {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: unable to scan nodes: %v", line, err)
	}
	if numNodes != n {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: expected %v nodes, got %v", line, n, numNodes)
	}
}
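// assertChanViewEqual fails the test if the two channel views don't contain
// the same set of channel outpoints.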
func assertChanViewEqual(t *testing.T, a []EdgePoint, b []EdgePoint) {
	if len(a) != len(b) {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: chan views don't match", line)
	}

	chanViewSet := make(map[wire.OutPoint]struct{})
	for _, op := range a {
		chanViewSet[op.OutPoint] = struct{}{}
	}

	for _, op := range b {
		if _, ok := chanViewSet[op.OutPoint]; !ok {
			_, _, line, _ := runtime.Caller(1)
			t.Fatalf("line %v: chanPoint(%v) not found in first "+
				"view", line, op)
		}
	}
}
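// assertChanViewEqualChanPoints fails the test if the channel view doesn't
// contain exactly the given set of channel outpoints.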
func assertChanViewEqualChanPoints(t *testing.T, a []EdgePoint,
	b []*wire.OutPoint) {

	if len(a) != len(b) {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: chan views don't match", line)
	}

	chanViewSet := make(map[wire.OutPoint]struct{})
	for _, op := range a {
		chanViewSet[op.OutPoint] = struct{}{}
	}

	for _, op := range b {
		if _, ok := chanViewSet[*op]; !ok {
			_, _, line, _ := runtime.Caller(1)
			t.Fatalf("line %v: chanPoint(%v) not found in first "+
				"view", line, op)
		}
	}
}
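// TestGraphPruning tests that channels are pruned from the graph once their
// funding outputs are spent within a block, that the prune tip is updated
// accordingly, and that nodes are removed once all of their channels are
// gone.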
func TestGraphPruning(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	sourceNode, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create source node")
	if err := graph.SetSourceNode(sourceNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	// As initial set up for the test, we'll create a graph with 5 vertexes
	// and enough edges to create a connected graph. The graph will be
	// rather simple, representing a straight line.
	const numNodes = 5
	graphNodes := make([]*LightningNode, numNodes)
	for i := 0; i < numNodes; i++ {
		node, err := createTestVertex(graph.db)
		if err != nil {
			t.Fatalf("unable to create node: %v", err)
		}

		if err := graph.AddLightningNode(node); err != nil {
			t.Fatalf("unable to add node: %v", err)
		}

		graphNodes[i] = node
	}

	// With the vertexes created, we'll next create a series of channels
	// between them.
	channelPoints := make([]*wire.OutPoint, 0, numNodes-1)
	edgePoints := make([]EdgePoint, 0, numNodes-1)
	for i := 0; i < numNodes-1; i++ {
		txHash := sha256.Sum256([]byte{byte(i)})
		chanID := uint64(i + 1)
		op := wire.OutPoint{
			Hash:  txHash,
			Index: 0,
		}

		channelPoints = append(channelPoints, &op)

		edgeInfo := models.ChannelEdgeInfo{
			ChannelID: chanID,
			ChainHash: key,
			AuthProof: &models.ChannelAuthProof{
				NodeSig1Bytes:    testSig.Serialize(),
				NodeSig2Bytes:    testSig.Serialize(),
				BitcoinSig1Bytes: testSig.Serialize(),
				BitcoinSig2Bytes: testSig.Serialize(),
			},
			ChannelPoint: op,
			Capacity:     1000,
		}
		copy(edgeInfo.NodeKey1Bytes[:], graphNodes[i].PubKeyBytes[:])
		copy(edgeInfo.NodeKey2Bytes[:], graphNodes[i+1].PubKeyBytes[:])
		copy(edgeInfo.BitcoinKey1Bytes[:], graphNodes[i].PubKeyBytes[:])
		copy(edgeInfo.BitcoinKey2Bytes[:], graphNodes[i+1].PubKeyBytes[:])
		if err := graph.AddChannelEdge(&edgeInfo); err != nil {
			t.Fatalf("unable to add edge: %v", err)
		}

		pkScript, err := genMultiSigP2WSH(
			edgeInfo.BitcoinKey1Bytes[:], edgeInfo.BitcoinKey2Bytes[:],
		)
		if err != nil {
			t.Fatalf("unable to gen multi-sig p2wsh: %v", err)
		}
		edgePoints = append(edgePoints, EdgePoint{
			FundingPkScript: pkScript,
			OutPoint:        op,
		})

		// Create and add an edge with random data that points from
		// node_i -> node_i+1.
		edge := randEdgePolicy(chanID, graph.db)
		edge.ChannelFlags = 0
		edge.ToNode = graphNodes[i+1].PubKeyBytes
		edge.SigBytes = testSig.Serialize()
		if err := graph.UpdateEdgePolicy(edge); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		// Create another random edge that points from node_i+1 ->
		// node_i this time.
		edge = randEdgePolicy(chanID, graph.db)
		edge.ChannelFlags = 1
		edge.ToNode = graphNodes[i].PubKeyBytes
		edge.SigBytes = testSig.Serialize()
		if err := graph.UpdateEdgePolicy(edge); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}
	}

	// With all the channel points added, we'll consult the graph to ensure
	// it has the same channel view as the one we just constructed.
	channelView, err := graph.ChannelView()
	require.NoError(t, err, "unable to get graph channel view")
	assertChanViewEqual(t, channelView, edgePoints)

	// Now with our test graph created, we can test the pruning
	// capabilities of the channel graph.

	// First we create a mock block that ends up closing the first two
	// channels.
	var blockHash chainhash.Hash
	copy(blockHash[:], bytes.Repeat([]byte{1}, 32))
	blockHeight := uint32(1)
	block := channelPoints[:2]
	prunedChans, err := graph.PruneGraph(block, &blockHash, blockHeight)
	require.NoError(t, err, "unable to prune graph")
	if len(prunedChans) != 2 {
		t.Fatalf("incorrect number of channels pruned: "+
			"expected %v, got %v", 2, len(prunedChans))
	}

	// Now ensure that the prune tip has been updated.
	assertPruneTip(t, graph, &blockHash, blockHeight)

	// Count up the number of channels known within the graph, only 2
	// should be remaining.
	assertNumChans(t, graph, 2)

	// Those channels should also be missing from the channel view.
	channelView, err = graph.ChannelView()
	require.NoError(t, err, "unable to get graph channel view")
	assertChanViewEqualChanPoints(t, channelView, channelPoints[2:])

	// Next we'll create a block that doesn't close any channels within the
	// graph to test the negative error case.
	fakeHash := sha256.Sum256([]byte("test prune"))
	nonChannel := &wire.OutPoint{
		Hash:  fakeHash,
		Index: 9,
	}
	blockHash = sha256.Sum256(blockHash[:])
	blockHeight = 2
	prunedChans, err = graph.PruneGraph(
		[]*wire.OutPoint{nonChannel}, &blockHash, blockHeight,
	)
	require.NoError(t, err, "unable to prune graph")

	// No channels should have been detected as pruned.
	if len(prunedChans) != 0 {
		t.Fatalf("channels were pruned but shouldn't have been")
	}

	// Once again, the prune tip should have been updated. We should still
	// see both channels and their participants, along with the source
	// node.
	assertPruneTip(t, graph, &blockHash, blockHeight)
	assertNumChans(t, graph, 2)
	assertNumNodes(t, graph, 4)

	// Finally, create a block that prunes the remainder of the channels
	// from the graph.
	blockHash = sha256.Sum256(blockHash[:])
	blockHeight = 3
	prunedChans, err = graph.PruneGraph(
		channelPoints[2:], &blockHash, blockHeight,
	)
	require.NoError(t, err, "unable to prune graph")

	// The remainder of the channels should have been pruned from the
	// graph.
	if len(prunedChans) != 2 {
		t.Fatalf("incorrect number of channels pruned: "+
			"expected %v, got %v", 2, len(prunedChans))
	}

	// The prune tip should be updated, no channels should be found, and
	// only the source node should remain within the current graph.
	assertPruneTip(t, graph, &blockHash, blockHeight)
	assertNumChans(t, graph, 0)
	assertNumNodes(t, graph, 1)

	// Finally, the channel view at this point in the graph should now be
	// completely empty.
	channelView, err = graph.ChannelView()
	require.NoError(t, err, "unable to get graph channel view")
	if len(channelView) != 0 {
		t.Fatalf("channel view should be empty, instead have: %v",
			channelView)
	}
}
// TestHighestChanID tests that we're able to properly retrieve the highest
// known channel ID in the database.
func TestHighestChanID(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// If we don't yet have any channels in the database, then we should
	// get a channel ID of zero if we ask for the highest channel ID.
	bestID, err := graph.HighestChanID()
	require.NoError(t, err, "unable to get highest ID")
	if bestID != 0 {
		t.Fatalf("best ID w/ no chan should be zero, is instead: %v",
			bestID)
	}

	// Next, we'll insert two channels into the database, with each channel
	// connecting the same two nodes.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")

	// The first channel will be at height 10, while the other will be at
	// height 100.
	edge1, _ := createEdge(10, 0, 0, 0, node1, node2)
	edge2, chanID2 := createEdge(100, 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&edge1); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}
	if err := graph.AddChannelEdge(&edge2); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	// Now that the edges have been inserted, we'll query for the highest
	// known channel ID in the database.
	bestID, err = graph.HighestChanID()
	require.NoError(t, err, "unable to get highest ID")
	if bestID != chanID2.ToUint64() {
		t.Fatalf("expected %v got %v for best chan ID",
			chanID2.ToUint64(), bestID)
	}

	// If we add another edge, then the current best chan ID should be
	// updated as well.
	edge3, chanID3 := createEdge(1000, 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&edge3); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}
	bestID, err = graph.HighestChanID()
	require.NoError(t, err, "unable to get highest ID")
	if bestID != chanID3.ToUint64() {
		t.Fatalf("expected %v got %v for best chan ID",
			chanID3.ToUint64(), bestID)
	}
}
// TestChanUpdatesInHorizon tests that we're able to properly retrieve all
// known channel updates within a specific time horizon. It also tests that
// upon insertion of a new edge, the edge update index is updated properly.
func TestChanUpdatesInHorizon(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// If we issue an arbitrary query before any channel updates are
	// inserted in the database, we should get zero results.
	chanUpdates, err := graph.ChanUpdatesInHorizon(
		time.Unix(999, 0), time.Unix(9999, 0),
	)
	require.NoError(t, err, "unable to query for updates")
	if len(chanUpdates) != 0 {
		t.Fatalf("expected 0 chan updates, instead got %v",
			len(chanUpdates))
	}

	// We'll start by creating two nodes which will seed our test graph.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// We'll now create 10 channels between the two nodes, with update
	// times 10 seconds after each other.
	const numChans = 10
	startTime := time.Unix(1234, 0)
	endTime := startTime
	edges := make([]ChannelEdge, 0, numChans)
	for i := 0; i < numChans; i++ {
		channel, chanID := createEdge(
			uint32(i*10), 0, 0, 0, node1, node2,
		)
		if err := graph.AddChannelEdge(&channel); err != nil {
			t.Fatalf("unable to create channel edge: %v", err)
		}

		edge1UpdateTime := endTime
		edge2UpdateTime := edge1UpdateTime.Add(time.Second)
		endTime = endTime.Add(time.Second * 10)

		edge1 := newEdgePolicy(
			chanID.ToUint64(), graph.db, edge1UpdateTime.Unix(),
		)
		edge1.ChannelFlags = 0
		edge1.ToNode = node2.PubKeyBytes
		edge1.SigBytes = testSig.Serialize()
		if err := graph.UpdateEdgePolicy(edge1); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		edge2 := newEdgePolicy(
			chanID.ToUint64(), graph.db, edge2UpdateTime.Unix(),
		)
		edge2.ChannelFlags = 1
		edge2.ToNode = node1.PubKeyBytes
		edge2.SigBytes = testSig.Serialize()
		if err := graph.UpdateEdgePolicy(edge2); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		edges = append(edges, ChannelEdge{
			Info:    &channel,
			Policy1: edge1,
			Policy2: edge2,
		})
	}

	// With our channels loaded, we'll now start our series of queries.
	queryCases := []struct {
		start time.Time
		end   time.Time

		resp []ChannelEdge
	}{
		// If we query for a time range that's strictly below our set
		// of updates, then we'll get an empty result back.
		{
			start: time.Unix(100, 0),
			end:   time.Unix(200, 0),
		},

		// If we query for a time range that's well beyond our set of
		// updates, we should get an empty set of results back.
		{
			start: time.Unix(99999, 0),
			end:   time.Unix(999999, 0),
		},

		// If we query for the start time, and 10 seconds directly
		// after it, we should only get a single update, that first
		// one.
		{
			start: time.Unix(1234, 0),
			end:   startTime.Add(time.Second * 10),
			resp:  []ChannelEdge{edges[0]},
		},

		// If we add 10 seconds past the first update, and then
		// subtract 10 from the last update, then we should only get
		// the 8 edges in the middle.
		{
			start: startTime.Add(time.Second * 10),
			end:   endTime.Add(-time.Second * 10),
			resp:  edges[1:9],
		},

		// If we use the start and end time as is, we should get the
		// entire range.
		{
			start: startTime,
			end:   endTime,
			resp:  edges,
		},
	}

	for _, queryCase := range queryCases {
		resp, err := graph.ChanUpdatesInHorizon(
			queryCase.start, queryCase.end,
		)
		if err != nil {
			t.Fatalf("unable to query for updates: %v", err)
		}

		if len(resp) != len(queryCase.resp) {
			t.Fatalf("expected %v chans, got %v chans",
				len(queryCase.resp), len(resp))
		}

		for i := 0; i < len(resp); i++ {
			chanExp := queryCase.resp[i]
			chanRet := resp[i]

			assertEdgeInfoEqual(t, chanExp.Info, chanRet.Info)

			err := compareEdgePolicies(
				chanExp.Policy1, chanRet.Policy1,
			)
			if err != nil {
				t.Fatal(err)
			}

			err = compareEdgePolicies(
				chanExp.Policy2, chanRet.Policy2,
			)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestNodeUpdatesInHorizon tests that we're able to properly scan and retrieve
// the most recent node updates within a particular time horizon.
func TestNodeUpdatesInHorizon(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	startTime := time.Unix(1234, 0)
	endTime := startTime

	// If we issue an arbitrary query before we insert any nodes into the
	// database, then we shouldn't get any results back.
	nodeUpdates, err := graph.NodeUpdatesInHorizon(
		time.Unix(999, 0), time.Unix(9999, 0),
	)
	require.NoError(t, err, "unable to query for node updates")
	if len(nodeUpdates) != 0 {
		t.Fatalf("expected 0 node updates, instead got %v",
			len(nodeUpdates))
	}

	// We'll create 10 node announcements, each with an update timestamp 10
	// seconds after the other.
	const numNodes = 10
	nodeAnns := make([]LightningNode, 0, numNodes)
	for i := 0; i < numNodes; i++ {
		nodeAnn, err := createTestVertex(graph.db)
		if err != nil {
			t.Fatalf("unable to create test vertex: %v", err)
		}

		// The node ann will use the current end time as its last
		// update time, then we'll add 10 seconds in order to create
		// the proper update time for the next node announcement.
		updateTime := endTime
		endTime = updateTime.Add(time.Second * 10)

		nodeAnn.LastUpdate = updateTime

		nodeAnns = append(nodeAnns, *nodeAnn)

		if err := graph.AddLightningNode(nodeAnn); err != nil {
			t.Fatalf("unable to add lightning node: %v", err)
		}
	}

	queryCases := []struct {
		start time.Time
		end   time.Time

		resp []LightningNode
	}{
		// If we query for a time range that's strictly below our set
		// of updates, then we'll get an empty result back.
		{
			start: time.Unix(100, 0),
			end:   time.Unix(200, 0),
		},

		// If we query for a time range that's well beyond our set of
		// updates, we should get an empty set of results back.
		{
			start: time.Unix(99999, 0),
			end:   time.Unix(999999, 0),
		},

		// If we skip the first time epoch with our start time, then we
		// should get back every node but the first.
		{
			start: startTime.Add(time.Second * 10),
			end:   endTime,
			resp:  nodeAnns[1:],
		},

		// If we query for the range as is, we should get all 10
		// announcements back.
		{
			start: startTime,
			end:   endTime,
			resp:  nodeAnns,
		},

		// If we reduce the ending time by 10 seconds, then we should
		// get all but the last node we inserted.
		{
			start: startTime,
			end:   endTime.Add(-time.Second * 10),
			resp:  nodeAnns[:9],
		},
	}

	for _, queryCase := range queryCases {
		resp, err := graph.NodeUpdatesInHorizon(
			queryCase.start, queryCase.end,
		)
		if err != nil {
			t.Fatalf("unable to query for nodes: %v", err)
		}

		if len(resp) != len(queryCase.resp) {
			t.Fatalf("expected %v nodes, got %v nodes",
				len(queryCase.resp), len(resp))
		}

		for i := 0; i < len(resp); i++ {
			err := compareNodes(&queryCase.resp[i], &resp[i])
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestFilterKnownChanIDs tests that we're able to properly perform the set
// difference of an incoming set of channel IDs against those that we already
// know of on disk.
func TestFilterKnownChanIDs(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	isZombieUpdate := func(updateTime1 time.Time,
		updateTime2 time.Time) bool {

		return true
	}

	var (
		scid1 = lnwire.ShortChannelID{BlockHeight: 1}
		scid2 = lnwire.ShortChannelID{BlockHeight: 2}
		scid3 = lnwire.ShortChannelID{BlockHeight: 3}
	)

	// If we try to filter out a set of channel IDs before we even know of
	// any channels, then we should get the entire set back.
	preChanIDs := []ChannelUpdateInfo{
		{ShortChannelID: scid1},
		{ShortChannelID: scid2},
		{ShortChannelID: scid3},
	}
	filteredIDs, err := graph.FilterKnownChanIDs(preChanIDs, isZombieUpdate)
	require.NoError(t, err, "unable to filter chan IDs")
	require.EqualValues(t, []uint64{
		scid1.ToUint64(),
		scid2.ToUint64(),
		scid3.ToUint64(),
	}, filteredIDs)

	// We'll start by creating two nodes which will seed our test graph.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// Next, we'll add 5 channel IDs to the graph, each of them having a
	// block height 10 blocks after the previous one.
	const numChans = 5
	chanIDs := make([]ChannelUpdateInfo, 0, numChans)
	for i := 0; i < numChans; i++ {
		channel, chanID := createEdge(
			uint32(i*10), 0, 0, 0, node1, node2,
		)
		if err := graph.AddChannelEdge(&channel); err != nil {
			t.Fatalf("unable to create channel edge: %v", err)
		}

		chanIDs = append(chanIDs, ChannelUpdateInfo{
			ShortChannelID: chanID,
		})
	}

	const numZombies = 5
	zombieIDs := make([]ChannelUpdateInfo, 0, numZombies)
	for i := 0; i < numZombies; i++ {
		channel, chanID := createEdge(
			uint32(i*10+1), 0, 0, 0, node1, node2,
		)
		if err := graph.AddChannelEdge(&channel); err != nil {
			t.Fatalf("unable to create channel edge: %v", err)
		}
		err := graph.DeleteChannelEdges(false, true, channel.ChannelID)
		if err != nil {
			t.Fatalf("unable to mark edge zombie: %v", err)
		}

		zombieIDs = append(
			zombieIDs, ChannelUpdateInfo{ShortChannelID: chanID},
		)
	}

	queryCases := []struct {
		queryIDs []ChannelUpdateInfo

		resp []ChannelUpdateInfo
	}{
		// If we attempt to filter out all chanIDs we know of, the
		// response should be the empty set.
		{
			queryIDs: chanIDs,
		},

		// If we attempt to filter out all zombies that we know of, the
		// response should be the empty set.
		{
			queryIDs: zombieIDs,
		},

		// If we query for a set of IDs that we didn't insert, we
		// should get the same set back.
		{
			queryIDs: []ChannelUpdateInfo{
				{ShortChannelID: lnwire.ShortChannelID{
					BlockHeight: 99,
				}},
				{ShortChannelID: lnwire.ShortChannelID{
					BlockHeight: 100,
				}},
			},
			resp: []ChannelUpdateInfo{
				{ShortChannelID: lnwire.ShortChannelID{
					BlockHeight: 99,
				}},
				{ShortChannelID: lnwire.ShortChannelID{
					BlockHeight: 100,
				}},
			},
		},

		// If we query for a super-set of the chan IDs we inserted, we
		// should only get those new chanIDs back.
		{
			queryIDs: append(chanIDs, []ChannelUpdateInfo{
				{
					ShortChannelID: lnwire.ShortChannelID{
						BlockHeight: 99,
					},
				},
				{
					ShortChannelID: lnwire.ShortChannelID{
						BlockHeight: 101,
					},
				},
			}...),
			resp: []ChannelUpdateInfo{
				{
					ShortChannelID: lnwire.ShortChannelID{
						BlockHeight: 99,
					},
				},
				{
					ShortChannelID: lnwire.ShortChannelID{
						BlockHeight: 101,
					},
				},
			},
		},
	}

	for _, queryCase := range queryCases {
		resp, err := graph.FilterKnownChanIDs(
			queryCase.queryIDs, isZombieUpdate,
		)
		require.NoError(t, err)

		expectedSCIDs := make([]uint64, len(queryCase.resp))
		for i, info := range queryCase.resp {
			expectedSCIDs[i] = info.ShortChannelID.ToUint64()
		}

		if len(expectedSCIDs) == 0 {
			expectedSCIDs = nil
		}

		require.EqualValues(t, expectedSCIDs, resp)
	}
}
// TestStressTestChannelGraphAPI is a stress test that concurrently calls some
// of the ChannelGraph methods in various orders in order to ensure that no
// deadlock can occur. This test currently focuses on stress testing all the
// methods that acquire the cache mutex along with the DB mutex.
func TestStressTestChannelGraphAPI(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err)

	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	require.NoError(t, graph.AddLightningNode(node1))

	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	require.NoError(t, graph.AddLightningNode(node2))

	err = graph.SetSourceNode(node1)
	require.NoError(t, err)

	type chanInfo struct {
		info models.ChannelEdgeInfo
		id   lnwire.ShortChannelID
	}

	var (
		chans []*chanInfo
		mu    sync.RWMutex
	)

	// newBlockHeight returns a random block height between 0 and 100.
	newBlockHeight := func() uint32 {
		return uint32(rand.Int31n(100))
	}

	// addNewChan creates a new random channel, adds it to the set of
	// channels and returns it.
	addNewChan := func() *chanInfo {
		mu.Lock()
		defer mu.Unlock()

		channel, chanID := createEdge(
			newBlockHeight(), rand.Uint32(), uint16(rand.Int()),
			rand.Uint32(), node1, node2,
		)

		newChan := &chanInfo{
			info: channel,
			id:   chanID,
		}
		chans = append(chans, newChan)

		return newChan
	}

	// getRandChan picks a random channel from the set and returns it.
	getRandChan := func() *chanInfo {
		mu.RLock()
		defer mu.RUnlock()

		if len(chans) == 0 {
			return nil
		}

		return chans[rand.Intn(len(chans))]
	}

	// getRandChanSet returns a random set of channels.
	getRandChanSet := func() []*chanInfo {
		mu.RLock()
		defer mu.RUnlock()

		if len(chans) == 0 {
			return nil
		}

		start := rand.Intn(len(chans))
		end := rand.Intn(len(chans))

		if end < start {
			start, end = end, start
		}

		var infoCopy []*chanInfo
		for i := start; i < end; i++ {
			infoCopy = append(infoCopy, &chanInfo{
				info: chans[i].info,
				id:   chans[i].id,
			})
		}

		return infoCopy
	}

	// delChan deletes the channel with the given ID from the set if it
	// exists.
	delChan := func(id lnwire.ShortChannelID) {
		mu.Lock()
		defer mu.Unlock()

		index := -1
		for i, c := range chans {
			if c.id == id {
				index = i
				break
			}
		}

		if index == -1 {
			return
		}

		chans = append(chans[:index], chans[index+1:]...)
	}

	var blockHash chainhash.Hash
	copy(blockHash[:], bytes.Repeat([]byte{2}, 32))

	var methodsMu sync.Mutex
	methods := []struct {
		name string
		fn   func() error
	}{
		{
			name: "MarkEdgeZombie",
			fn: func() error {
				channel := getRandChan()
				if channel == nil {
					return nil
				}

				return graph.MarkEdgeZombie(
					channel.id.ToUint64(),
					node1.PubKeyBytes,
					node2.PubKeyBytes,
				)
			},
		},
		{
			name: "FilterKnownChanIDs",
			fn: func() error {
				chanSet := getRandChanSet()
				var chanIDs []ChannelUpdateInfo

				for _, c := range chanSet {
					chanIDs = append(
						chanIDs,
						ChannelUpdateInfo{
							ShortChannelID: c.id,
						},
					)
				}

				_, err := graph.FilterKnownChanIDs(
					chanIDs,
					func(t time.Time, t2 time.Time) bool {
						return rand.Intn(2) == 0
					},
				)

				return err
			},
		},
		{
			name: "HasChannelEdge",
			fn: func() error {
				channel := getRandChan()
				if channel == nil {
					return nil
				}

				_, _, _, _, err := graph.HasChannelEdge(
					channel.id.ToUint64(),
				)

				return err
			},
		},
		{
			name: "PruneGraph",
			fn: func() error {
				chanSet := getRandChanSet()
				var spentOutpoints []*wire.OutPoint

				for _, c := range chanSet {
					spentOutpoints = append(
						spentOutpoints,
						&c.info.ChannelPoint,
					)
				}

				_, err := graph.PruneGraph(
					spentOutpoints, &blockHash, 100,
				)

				return err
			},
		},
		{
			name: "ChanUpdateInHorizon",
			fn: func() error {
				_, err := graph.ChanUpdatesInHorizon(
					time.Now().Add(-time.Hour), time.Now(),
				)

				return err
			},
		},
		{
			name: "DeleteChannelEdges",
			fn: func() error {
				var (
					strictPruning = rand.Intn(2) == 0
					markZombie    = rand.Intn(2) == 0
					channels      = getRandChanSet()
					chanIDs       []uint64
				)

				for _, c := range channels {
					chanIDs = append(
						chanIDs, c.id.ToUint64(),
					)
					delChan(c.id)
				}

				err := graph.DeleteChannelEdges(
					strictPruning, markZombie, chanIDs...,
				)
				if err != nil &&
					!errors.Is(err, ErrEdgeNotFound) {

					return err
				}

				return nil
			},
		},
		{
			name: "DisconnectBlockAtHeight",
			fn: func() error {
				_, err := graph.DisconnectBlockAtHeight(
					newBlockHeight(),
				)

				return err
			},
		},
		{
			name: "AddChannelEdge",
			fn: func() error {
				channel := addNewChan()

				return graph.AddChannelEdge(&channel.info)
			},
		},
	}

	const (
		// concurrencyLevel is the number of concurrent goroutines that
		// will be run simultaneously.
		concurrencyLevel = 10

		// executionCount is the number of methods that will be called
		// per goroutine.
		executionCount = 100
	)

	for i := 0; i < concurrencyLevel; i++ {
		i := i

		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			t.Parallel()

			for j := 0; j < executionCount; j++ {
				// Randomly select a method to execute.
				methodIndex := rand.Intn(len(methods))

				methodsMu.Lock()
				fn := methods[methodIndex].fn
				name := methods[methodIndex].name
				methodsMu.Unlock()

				err := fn()
				require.NoErrorf(t, err, "%s", name)
			}
		})
	}
}
// TestFilterChannelRange tests that we're able to properly retrieve the full
// set of short channel IDs for a given block range.
func TestFilterChannelRange(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err)

	// We'll first populate our graph with two nodes. All channels created
	// below will be made between these two nodes.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err)
	require.NoError(t, graph.AddLightningNode(node1))

	node2, err := createTestVertex(graph.db)
	require.NoError(t, err)
	require.NoError(t, graph.AddLightningNode(node2))

	// If we try to filter a channel range before we have any channels
	// inserted, we should get an empty slice of results.
	resp, err := graph.FilterChannelRange(10, 100, false)
	require.NoError(t, err)
	require.Empty(t, resp)

	// To start, we'll create a set of channels, with two channels mined
	// into each block, and each block 10 blocks after the prior one.
	startHeight := uint32(100)
	endHeight := startHeight
	const numChans = 10

	var (
		channelRanges = make(
			[]BlockChannelRange, 0, numChans/2,
		)
		channelRangesWithTimestamps = make(
			[]BlockChannelRange, 0, numChans/2,
		)
	)

	updateTimeSeed := int64(1)
	maybeAddPolicy := func(chanID uint64, node *LightningNode,
		node2 bool) time.Time {

		var chanFlags lnwire.ChanUpdateChanFlags
		if node2 {
			chanFlags = lnwire.ChanUpdateDirection
		}

		var updateTime time.Time
		if rand.Int31n(2) == 0 {
			updateTime = time.Unix(updateTimeSeed, 0)
			err = graph.UpdateEdgePolicy(&models.ChannelEdgePolicy{
				ToNode:       node.PubKeyBytes,
				ChannelFlags: chanFlags,
				ChannelID:    chanID,
				LastUpdate:   updateTime,
			})
			require.NoError(t, err)
		}
		updateTimeSeed++

		return updateTime
	}

	for i := 0; i < numChans/2; i++ {
		chanHeight := endHeight
		channel1, chanID1 := createEdge(
			chanHeight, uint32(i+1), 0, 0, node1, node2,
		)
		require.NoError(t, graph.AddChannelEdge(&channel1))

		channel2, chanID2 := createEdge(
			chanHeight, uint32(i+2), 0, 0, node1, node2,
		)
		require.NoError(t, graph.AddChannelEdge(&channel2))

		channelRanges = append(channelRanges, BlockChannelRange{
			Height: chanHeight,
			Channels: []ChannelUpdateInfo{
				{ShortChannelID: chanID1},
				{ShortChannelID: chanID2},
			},
		})

		var (
			time1 = maybeAddPolicy(channel1.ChannelID, node1, false)
			time2 = maybeAddPolicy(channel1.ChannelID, node2, true)
			time3 = maybeAddPolicy(channel2.ChannelID, node1, false)
			time4 = maybeAddPolicy(channel2.ChannelID, node2, true)
		)

		channelRangesWithTimestamps = append(
			channelRangesWithTimestamps, BlockChannelRange{
				Height: chanHeight,
				Channels: []ChannelUpdateInfo{
					{
						ShortChannelID:       chanID1,
						Node1UpdateTimestamp: time1,
						Node2UpdateTimestamp: time2,
					},
					{
						ShortChannelID:       chanID2,
						Node1UpdateTimestamp: time3,
						Node2UpdateTimestamp: time4,
					},
				},
			},
		)

		endHeight += 10
	}

	// With our channels inserted, we'll construct a series of queries that
	// we'll execute below in order to exercise the features of the
	// FilterChannelRange method.
	tests := []struct {
		name string

		startHeight uint32
		endHeight   uint32

		resp          []BlockChannelRange
		expStartIndex int
		expEndIndex   int
	}{
		// If we query for the entire range, then we should get the
		// same set of short channel IDs back.
		{
			name:          "entire range",
			startHeight:   startHeight,
			endHeight:     endHeight,
			resp:          channelRanges,
			expStartIndex: 0,
			expEndIndex:   len(channelRanges),
		},

		// If we query for a range of channels right before our range,
		// we shouldn't get any results back.
		{
			name:        "range before",
			startHeight: 0,
			endHeight:   10,
		},

		// If we only query for the last height (range wise), we should
		// only get that last channel.
		{
			name:          "last height",
			startHeight:   endHeight - 10,
			endHeight:     endHeight - 10,
			resp:          channelRanges[4:],
			expStartIndex: 4,
			expEndIndex:   len(channelRanges),
		},

		// If we query for just the first height, we should only get a
		// single channel back (the first one).
		{
			name:          "first height",
			startHeight:   startHeight,
			endHeight:     startHeight,
			resp:          channelRanges[:1],
			expStartIndex: 0,
			expEndIndex:   1,
		},

		{
			name:          "subset",
			startHeight:   startHeight + 10,
			endHeight:     endHeight - 10,
			resp:          channelRanges[1:5],
			expStartIndex: 1,
			expEndIndex:   5,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			// First, do the query without requesting timestamps.
			resp, err := graph.FilterChannelRange(
				test.startHeight, test.endHeight, false,
			)
			require.NoError(t, err)

			expRes := channelRanges[test.expStartIndex:test.expEndIndex] //nolint:lll
			if len(expRes) == 0 {
				require.Nil(t, resp)
			} else {
				require.Equal(t, expRes, resp)
			}

			// Now, query the timestamps as well.
			resp, err = graph.FilterChannelRange(
				test.startHeight, test.endHeight, true,
			)
			require.NoError(t, err)

			expRes = channelRangesWithTimestamps[test.expStartIndex:test.expEndIndex] //nolint:lll
			if len(expRes) == 0 {
				require.Nil(t, resp)
			} else {
				require.Equal(t, expRes, resp)
			}
		})
	}
}
// TestFetchChanInfos tests that we're able to properly retrieve the full set
// of ChannelEdge structs for a given set of short channel IDs.
func TestFetchChanInfos(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// We'll first populate our graph with two nodes. All channels created
	// below will be made between these two nodes.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// We'll make 5 test channels, ensuring we keep track of which channel
	// ID corresponds to a particular ChannelEdge.
	const numChans = 5
	startTime := time.Unix(1234, 0)
	endTime := startTime
	edges := make([]ChannelEdge, 0, numChans)
	edgeQuery := make([]uint64, 0, numChans)
	for i := 0; i < numChans; i++ {
		channel, chanID := createEdge(
			uint32(i*10), 0, 0, 0, node1, node2,
		)
		if err := graph.AddChannelEdge(&channel); err != nil {
			t.Fatalf("unable to create channel edge: %v", err)
		}

		updateTime := endTime
		endTime = updateTime.Add(time.Second * 10)

		edge1 := newEdgePolicy(
			chanID.ToUint64(), graph.db, updateTime.Unix(),
		)
		edge1.ChannelFlags = 0
		edge1.ToNode = node2.PubKeyBytes
		edge1.SigBytes = testSig.Serialize()
		if err := graph.UpdateEdgePolicy(edge1); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		edge2 := newEdgePolicy(
			chanID.ToUint64(), graph.db, updateTime.Unix(),
		)
		edge2.ChannelFlags = 1
		edge2.ToNode = node1.PubKeyBytes
		edge2.SigBytes = testSig.Serialize()
		if err := graph.UpdateEdgePolicy(edge2); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		edges = append(edges, ChannelEdge{
			Info:    &channel,
			Policy1: edge1,
			Policy2: edge2,
		})

		edgeQuery = append(edgeQuery, chanID.ToUint64())
	}

	// Add an additional edge that does not exist. The query should skip
	// this channel and return only infos for the edges that exist.
	edgeQuery = append(edgeQuery, 500)

	// Add another edge to the query that has been marked as a zombie
	// edge. The query should also skip this channel.
	zombieChan, zombieChanID := createEdge(666, 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&zombieChan); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}
	err = graph.DeleteChannelEdges(false, true, zombieChan.ChannelID)
	require.NoError(t, err, "unable to delete and mark edge zombie")
	edgeQuery = append(edgeQuery, zombieChanID.ToUint64())

	// We'll now attempt to query for the range of channel IDs we just
	// inserted into the database. We should get the exact same set of
	// edges back.
	resp, err := graph.FetchChanInfos(nil, edgeQuery)
	require.NoError(t, err, "unable to fetch chan edges")
	if len(resp) != len(edges) {
		t.Fatalf("expected %v edges, instead got %v", len(edges),
			len(resp))
	}

	for i := 0; i < len(resp); i++ {
		err := compareEdgePolicies(resp[i].Policy1, edges[i].Policy1)
		if err != nil {
			t.Fatalf("edge doesn't match: %v", err)
		}
		err = compareEdgePolicies(resp[i].Policy2, edges[i].Policy2)
		if err != nil {
			t.Fatalf("edge doesn't match: %v", err)
		}
		assertEdgeInfoEqual(t, resp[i].Info, edges[i].Info)
	}
}
// TestIncompleteChannelPolicies tests that a channel that only has a policy
// specified on one end is properly returned in ForEachChannel calls from
// both sides.
func TestIncompleteChannelPolicies(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// Create two nodes.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	channel, chanID := createEdge(uint32(0), 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&channel); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	// Ensure that channel is reported with unknown policies.
	checkPolicies := func(node *LightningNode, expectedIn,
		expectedOut bool) {

		calls := 0
		err := graph.ForEachNodeChannel(nil, node.PubKeyBytes,
			func(_ kvdb.RTx, _ *models.ChannelEdgeInfo, outEdge,
				inEdge *models.ChannelEdgePolicy) error {

				if !expectedOut && outEdge != nil {
					t.Fatalf("Expected no outgoing policy")
				}

				if expectedOut && outEdge == nil {
					t.Fatalf("Expected an outgoing policy")
				}

				if !expectedIn && inEdge != nil {
					t.Fatalf("Expected no incoming policy")
				}

				if expectedIn && inEdge == nil {
					t.Fatalf("Expected an incoming policy")
				}

				calls++

				return nil
			})
		if err != nil {
			t.Fatalf("unable to scan channels: %v", err)
		}

		if calls != 1 {
			t.Fatalf("Expected only one callback call")
		}
	}

	checkPolicies(node2, false, false)

	// Only create an edge policy for node1 and leave the policy for node2
	// unknown.
	updateTime := time.Unix(1234, 0)

	edgePolicy := newEdgePolicy(
		chanID.ToUint64(), graph.db, updateTime.Unix(),
	)
	edgePolicy.ChannelFlags = 0
	edgePolicy.ToNode = node2.PubKeyBytes
	edgePolicy.SigBytes = testSig.Serialize()
	if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	checkPolicies(node1, false, true)
	checkPolicies(node2, true, false)

	// Create second policy and assert that both policies are reported
	// as present.
	edgePolicy = newEdgePolicy(
		chanID.ToUint64(), graph.db, updateTime.Unix(),
	)
	edgePolicy.ChannelFlags = 1
	edgePolicy.ToNode = node1.PubKeyBytes
	edgePolicy.SigBytes = testSig.Serialize()
	if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	checkPolicies(node1, true, true)
	checkPolicies(node2, true, true)
}
// TestChannelEdgePruningUpdateIndexDeletion tests that once edges are deleted
// from the graph, then their entries within the update index are also cleaned
// up.
func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	sourceNode, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create source node")
	if err := graph.SetSourceNode(sourceNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	// We'll first populate our graph with two nodes. All channels created
	// below will be made between these two nodes.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// With the two nodes created, we'll now create a random channel, as
	// well as two edges in the database with distinct update times.
	edgeInfo, chanID := createEdge(100, 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&edgeInfo); err != nil {
		t.Fatalf("unable to add edge: %v", err)
	}

	edge1 := randEdgePolicy(chanID.ToUint64(), graph.db)
	edge1.ChannelFlags = 0
	edge1.ToNode = node1.PubKeyBytes
	edge1.SigBytes = testSig.Serialize()
	if err := graph.UpdateEdgePolicy(edge1); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	edge2 := randEdgePolicy(chanID.ToUint64(), graph.db)
	edge2.ChannelFlags = 1
	edge2.ToNode = node2.PubKeyBytes
	edge2.SigBytes = testSig.Serialize()
	if err := graph.UpdateEdgePolicy(edge2); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	// checkIndexTimestamps is a helper function that checks the edge
	// update index only includes the given timestamps.
	checkIndexTimestamps := func(timestamps ...uint64) {
		timestampSet := make(map[uint64]struct{})
		for _, t := range timestamps {
			timestampSet[t] = struct{}{}
		}

		err := kvdb.View(graph.db, func(tx kvdb.RTx) error {
			edges := tx.ReadBucket(edgeBucket)
			if edges == nil {
				return ErrGraphNoEdgesFound
			}
			edgeUpdateIndex := edges.NestedReadBucket(
				edgeUpdateIndexBucket,
			)
			if edgeUpdateIndex == nil {
				return ErrGraphNoEdgesFound
			}

			var numEntries int
			err := edgeUpdateIndex.ForEach(func(k, v []byte) error {
				numEntries++
				return nil
			})
			if err != nil {
				return err
			}

			expectedEntries := len(timestampSet)
			if numEntries != expectedEntries {
				return fmt.Errorf("expected %v entries in the "+
					"update index, got %v", expectedEntries,
					numEntries)
			}

			return edgeUpdateIndex.ForEach(func(k, _ []byte) error {
				t := byteOrder.Uint64(k[:8])
				if _, ok := timestampSet[t]; !ok {
					return fmt.Errorf("found unexpected "+
						"timestamp %d", t)
				}

				return nil
			})
		}, func() {})
		if err != nil {
			t.Fatal(err)
		}
	}

	// With both edge policies added, we'll make sure to check they exist
	// within the edge update index.
	checkIndexTimestamps(
		uint64(edge1.LastUpdate.Unix()),
		uint64(edge2.LastUpdate.Unix()),
	)

	// Now, we'll update the edge policies to ensure the old timestamps are
	// removed from the update index.
	edge1.ChannelFlags = 2
	edge1.LastUpdate = time.Now()
	if err := graph.UpdateEdgePolicy(edge1); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}
	edge2.ChannelFlags = 3
	edge2.LastUpdate = edge1.LastUpdate.Add(time.Hour)
	if err := graph.UpdateEdgePolicy(edge2); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	// With the policies updated, we should now be able to find their
	// updated entries within the update index.
	checkIndexTimestamps(
		uint64(edge1.LastUpdate.Unix()),
		uint64(edge2.LastUpdate.Unix()),
	)

	// Now we'll prune the graph, removing the edges, and also the update
	// index entries from the database all together.
	var blockHash chainhash.Hash
	copy(blockHash[:], bytes.Repeat([]byte{2}, 32))
	_, err = graph.PruneGraph(
		[]*wire.OutPoint{&edgeInfo.ChannelPoint}, &blockHash, 101,
	)
	require.NoError(t, err, "unable to prune graph")

	// Finally, we'll check the database state one last time to conclude
	// that we should no longer be able to locate _any_ entries within the
	// edge update index.
	checkIndexTimestamps()
}
// TestPruneGraphNodes tests that unconnected vertexes are pruned via the
// PruneGraphNodes method.
func TestPruneGraphNodes(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// We'll start off by inserting our source node, to ensure that it's
	// the only node left after we prune the graph.
	sourceNode, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create source node")
	if err := graph.SetSourceNode(sourceNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	// With the source node inserted, we'll now add three nodes to the
	// channel graph. At the end of the scenario, only two of these nodes
	// should still be in the graph.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node3, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node3); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// We'll now add a new edge to the graph, but only actually advertise
	// the edge of *one* of the nodes.
	edgeInfo, chanID := createEdge(100, 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&edgeInfo); err != nil {
		t.Fatalf("unable to add edge: %v", err)
	}

	// We'll now insert an advertised edge, but it'll only be the edge that
	// points from the first to the second node.
	edge1 := randEdgePolicy(chanID.ToUint64(), graph.db)
	edge1.ChannelFlags = 0
	edge1.ToNode = node1.PubKeyBytes
	edge1.SigBytes = testSig.Serialize()
	if err := graph.UpdateEdgePolicy(edge1); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	// We'll now initiate a round of graph pruning.
	if err := graph.PruneGraphNodes(); err != nil {
		t.Fatalf("unable to prune graph nodes: %v", err)
	}

	// At this point, there should be 3 nodes left in the graph still: the
	// source node (which can't be pruned), and nodes 1 and 2. Nodes 1 and
	// 2 should still be left in the graph as there's half of an advertised
	// edge between them.
	assertNumNodes(t, graph, 3)

	// Finally, we'll ensure that node3, the only fully unconnected node,
	// was properly deleted from the graph and that no other node was
	// removed in its place.
	_, err = graph.FetchLightningNode(nil, node3.PubKeyBytes)
	if err == nil {
		t.Fatalf("node 3 should have been deleted!")
	}
}
// TestAddChannelEdgeShellNodes tests that when we attempt to add a ChannelEdge
// to the graph and one or both of the nodes the edge involves aren't found in
// the database, then a shell node is created for each missing node.
func TestAddChannelEdgeShellNodes(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// To start, we'll create two nodes, and only add one of them to the
	// channel graph.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")

	// We'll now create an edge between the two nodes. As a result, node2
	// should be inserted into the database as a shell node.
	edgeInfo, _ := createEdge(100, 0, 0, 0, node1, node2)
	if err := graph.AddChannelEdge(&edgeInfo); err != nil {
		t.Fatalf("unable to add edge: %v", err)
	}

	// Ensure that node1 was inserted as a full node, while node2 only has
	// a shell node present.
	node1, err = graph.FetchLightningNode(nil, node1.PubKeyBytes)
	require.NoError(t, err, "unable to fetch node1")
	if !node1.HaveNodeAnnouncement {
		t.Fatalf("node1 should have a node announcement, but only a "+
			"shell node is present")
	}
	node2, err = graph.FetchLightningNode(nil, node2.PubKeyBytes)
	require.NoError(t, err, "unable to fetch node2")
	if node2.HaveNodeAnnouncement {
		t.Fatalf("node2 should be a shell node, but a full node "+
			"announcement is present")
	}
}

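// assertShellNode is an illustrative helper (not called by the tests above)
// that bundles the shell-node check used in TestAddChannelEdgeShellNodes: a
// shell node is present in the database but carries no node announcement. It
// only relies on APIs already exercised in this file.
func assertShellNode(t *testing.T, graph *ChannelGraph, node *LightningNode) {
	t.Helper()

	// Fetch the node from the database and confirm that only a shell
	// entry, without an announcement, is stored for it.
	dbNode, err := graph.FetchLightningNode(nil, node.PubKeyBytes)
	require.NoError(t, err, "unable to fetch node")
	if dbNode.HaveNodeAnnouncement {
		t.Fatalf("expected shell node, but a full announcement is "+
			"present")
	}
}
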
// TestNodePruningUpdateIndexDeletion tests that once a node has been removed
// from the channel graph, we also remove the entry from the update index as
// well.
func TestNodePruningUpdateIndexDeletion(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// We'll first populate our graph with a single node that will be
	// removed shortly.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// We'll confirm that we can retrieve the node using
	// NodeUpdatesInHorizon, using a horizon that extends slightly beyond
	// the last update time of our test node.
	startTime := time.Unix(9, 0)
	endTime := node1.LastUpdate.Add(time.Minute)
	nodesInHorizon, err := graph.NodeUpdatesInHorizon(startTime, endTime)
	require.NoError(t, err, "unable to fetch nodes in horizon")

	// We should only have a single node, and that node should exactly
	// match the node we just inserted.
	if len(nodesInHorizon) != 1 {
		t.Fatalf("should have 1 node, instead have: %v",
			len(nodesInHorizon))
	}
	if err := compareNodes(node1, &nodesInHorizon[0]); err != nil {
		t.Fatalf("nodes don't match: %v", err)
	}

	// We'll now delete the node from the graph. This should result in it
	// being removed from the update index as well.
	if err := graph.DeleteLightningNode(node1.PubKeyBytes); err != nil {
		t.Fatalf("unable to delete node: %v", err)
	}

	// Now that the node has been deleted, we'll again query the nodes in
	// the horizon. This time we should have no nodes at all.
	nodesInHorizon, err = graph.NodeUpdatesInHorizon(startTime, endTime)
	require.NoError(t, err, "unable to fetch nodes in horizon")
	if len(nodesInHorizon) != 0 {
		t.Fatalf("should have zero nodes, instead have: %v",
			len(nodesInHorizon))
	}
}

// TestNodeIsPublic ensures that we properly detect nodes that are seen as
// public within the network graph.
func TestNodeIsPublic(t *testing.T) {
	t.Parallel()

	// We'll start off the test by creating a small network of 3
	// participants with the following graph:
	//
	//	Alice <-> Bob <-> Carol
	//
	// We'll need to create a separate database and channel graph for each
	// participant to replicate real-world scenarios (private edges being
	// in some graphs but not others, etc.).
	aliceGraph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")
	aliceNode, err := createTestVertex(aliceGraph.db)
	require.NoError(t, err, "unable to create test node")
	if err := aliceGraph.SetSourceNode(aliceNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	bobGraph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")
	bobNode, err := createTestVertex(bobGraph.db)
	require.NoError(t, err, "unable to create test node")
	if err := bobGraph.SetSourceNode(bobNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	carolGraph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")
	carolNode, err := createTestVertex(carolGraph.db)
	require.NoError(t, err, "unable to create test node")
	if err := carolGraph.SetSourceNode(carolNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	aliceBobEdge, _ := createEdge(10, 0, 0, 0, aliceNode, bobNode)
	bobCarolEdge, _ := createEdge(10, 1, 0, 1, bobNode, carolNode)

	// After creating all of our nodes and edges, we'll add them to each
	// participant's graph.
	nodes := []*LightningNode{aliceNode, bobNode, carolNode}
	edges := []*models.ChannelEdgeInfo{&aliceBobEdge, &bobCarolEdge}
	graphs := []*ChannelGraph{aliceGraph, bobGraph, carolGraph}
	for _, graph := range graphs {
		for _, node := range nodes {
			if err := graph.AddLightningNode(node); err != nil {
				t.Fatalf("unable to add node: %v", err)
			}
		}
		for _, edge := range edges {
			if err := graph.AddChannelEdge(edge); err != nil {
				t.Fatalf("unable to add edge: %v", err)
			}
		}
	}

	// checkNodes is a helper closure that will be used to assert that the
	// given nodes are seen as public/private within the given graphs.
	checkNodes := func(nodes []*LightningNode, graphs []*ChannelGraph,
		public bool) {

		t.Helper()

		for _, node := range nodes {
			for _, graph := range graphs {
				isPublic, err := graph.IsPublicNode(node.PubKeyBytes)
				if err != nil {
					t.Fatalf("unable to determine if node "+
						"is public: %v", err)
				}

				switch {
				case isPublic && !public:
					t.Fatalf("expected %x to be private",
						node.PubKeyBytes)
				case !isPublic && public:
					t.Fatalf("expected %x to be public",
						node.PubKeyBytes)
				}
			}
		}
	}

	// Due to the way the edges were set up above, we'll make sure each
	// node can correctly determine that every other node is public.
	checkNodes(nodes, graphs, true)

	// Now, we'll remove the edge between Alice and Bob from everyone's
	// graph. This will cause Alice to be seen as a private node as she no
	// longer has any advertised edges.
	for _, graph := range graphs {
		err := graph.DeleteChannelEdges(
			false, true, aliceBobEdge.ChannelID,
		)
		if err != nil {
			t.Fatalf("unable to remove edge: %v", err)
		}
	}
	checkNodes(
		[]*LightningNode{aliceNode},
		[]*ChannelGraph{bobGraph, carolGraph},
		false,
	)

	// We'll also make the edge between Bob and Carol private. Within Bob's
	// and Carol's graph, the edge will exist, but it will not have a proof
	// that allows it to be advertised. Within Alice's graph, we'll
	// completely remove the edge as it is not possible for her to know of
	// it without it being advertised.
	for _, graph := range graphs {
		err := graph.DeleteChannelEdges(
			false, true, bobCarolEdge.ChannelID,
		)
		if err != nil {
			t.Fatalf("unable to remove edge: %v", err)
		}

		if graph == aliceGraph {
			continue
		}

		bobCarolEdge.AuthProof = nil
		if err := graph.AddChannelEdge(&bobCarolEdge); err != nil {
			t.Fatalf("unable to add edge: %v", err)
		}
	}

	// With the modifications above, Bob should now be seen as a private
	// node from both Alice's and Carol's perspective.
	checkNodes(
		[]*LightningNode{bobNode},
		[]*ChannelGraph{aliceGraph, carolGraph},
		false,
	)
}

// TestDisabledChannelIDs ensures that the disabled channels within the
// disabledEdgePolicyBucket are managed properly and the list returned from
// DisabledChannelIDs is correct.
func TestDisabledChannelIDs(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// Create first node and add it to the graph.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// Create second node and add it to the graph.
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// Adding a new channel edge to the graph.
	edgeInfo, edge1, edge2 := createChannelEdge(graph.db, node1, node2)
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	if err := graph.AddChannelEdge(edgeInfo); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	// Ensure no disabled channels exist in the bucket on start.
	disabledChanIds, err := graph.DisabledChannelIDs()
	require.NoError(t, err, "unable to get disabled channel ids")
	if len(disabledChanIds) > 0 {
		t.Fatalf("expected empty disabled channels, got %v disabled "+
			"channels", len(disabledChanIds))
	}

	// Add one disabled policy and ensure the channel is still not in the
	// disabled list.
	edge1.ChannelFlags |= lnwire.ChanUpdateDisabled
	if err := graph.UpdateEdgePolicy(edge1); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}
	disabledChanIds, err = graph.DisabledChannelIDs()
	require.NoError(t, err, "unable to get disabled channel ids")
	if len(disabledChanIds) > 0 {
		t.Fatalf("expected empty disabled channels, got %v disabled "+
			"channels", len(disabledChanIds))
	}

	// Add second disabled policy and ensure the channel is now in the
	// disabled list.
	edge2.ChannelFlags |= lnwire.ChanUpdateDisabled
	if err := graph.UpdateEdgePolicy(edge2); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}
	disabledChanIds, err = graph.DisabledChannelIDs()
	require.NoError(t, err, "unable to get disabled channel ids")
	if len(disabledChanIds) != 1 ||
		disabledChanIds[0] != edgeInfo.ChannelID {

		t.Fatalf("expected disabled channel with id %v, got %v",
			edgeInfo.ChannelID, disabledChanIds)
	}

	// Delete the channel edge and ensure it is removed from the disabled
	// list.
	if err = graph.DeleteChannelEdges(
		false, true, edgeInfo.ChannelID,
	); err != nil {
		t.Fatalf("unable to delete channel edge: %v", err)
	}
	disabledChanIds, err = graph.DisabledChannelIDs()
	require.NoError(t, err, "unable to get disabled channel ids")
	if len(disabledChanIds) > 0 {
		t.Fatalf("expected empty disabled channels, got %v disabled "+
			"channels", len(disabledChanIds))
	}
}

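// assertNumDisabled is an illustrative helper sketch (not called by the tests
// above) mirroring the checks performed in TestDisabledChannelIDs: it queries
// DisabledChannelIDs and asserts the expected number of entries. It only uses
// APIs already exercised in this file.
func assertNumDisabled(t *testing.T, graph *ChannelGraph, expDisabled int) {
	t.Helper()

	disabledChanIds, err := graph.DisabledChannelIDs()
	require.NoError(t, err, "unable to get disabled channel ids")
	if len(disabledChanIds) != expDisabled {
		t.Fatalf("expected %d disabled channels, found %d",
			expDisabled, len(disabledChanIds))
	}
}
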
// TestEdgePolicyMissingMaxHtcl tests that if we find a ChannelEdgePolicy in
// the DB that indicates that it should support the htlc_maximum_value_msat
// field, but the field is not part of the opaque data, then we'll handle it
// as if the policy is unknown. It also checks that we are correctly able to
// overwrite it when we receive the proper update.
func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")

	// We'd like to test the update of edges inserted into the database, so
	// we create two vertexes to connect.
	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test node")

	edgeInfo, edge1, edge2 := createChannelEdge(graph.db, node1, node2)
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	if err := graph.AddChannelEdge(edgeInfo); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	chanID := edgeInfo.ChannelID
	from := edge2.ToNode[:]
	to := edge1.ToNode[:]

	// We'll remove the max_htlc field from the first edge policy, and all
	// other opaque data, and serialize it.
	edge1.MessageFlags = 0
	edge1.ExtraOpaqueData = nil

	var b bytes.Buffer
	err = serializeChanEdgePolicy(&b, edge1, to)
	if err != nil {
		t.Fatalf("unable to serialize policy")
	}

	// Set the max_htlc field. The extra bytes added to the serialization
	// will be the opaque data containing the serialized field.
	edge1.MessageFlags = lnwire.ChanUpdateRequiredMaxHtlc
	edge1.MaxHTLC = 13928598

	var b2 bytes.Buffer
	err = serializeChanEdgePolicy(&b2, edge1, to)
	if err != nil {
		t.Fatalf("unable to serialize policy")
	}

	withMaxHtlc := b2.Bytes()

	// Remove the opaque data from the serialization.
	stripped := withMaxHtlc[:len(b.Bytes())]

	// Attempting to deserialize these bytes should return an error.
	r := bytes.NewReader(stripped)
	err = kvdb.View(graph.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		_, err = deserializeChanEdgePolicy(r)
		if err != ErrEdgePolicyOptionalFieldNotFound {
			t.Fatalf("expected "+
				"ErrEdgePolicyOptionalFieldNotFound, got %v",
				err)
		}

		return nil
	}, func() {})
	require.NoError(t, err, "error reading db")

	// Put the stripped bytes in the DB.
	err = kvdb.Update(graph.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}

		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}

		var edgeKey [33 + 8]byte
		copy(edgeKey[:], from)
		byteOrder.PutUint64(edgeKey[33:], edge1.ChannelID)

		var scratch [8]byte
		var indexKey [8 + 8]byte
		copy(indexKey[:], scratch[:])
		byteOrder.PutUint64(indexKey[8:], edge1.ChannelID)

		updateIndex, err := edges.CreateBucketIfNotExists(
			edgeUpdateIndexBucket,
		)
		if err != nil {
			return err
		}

		if err := updateIndex.Put(indexKey[:], nil); err != nil {
			return err
		}

		return edges.Put(edgeKey[:], stripped)
	}, func() {})
	require.NoError(t, err, "error writing db")

	// And add the second, unmodified edge.
	if err := graph.UpdateEdgePolicy(edge2); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	// Attempt to fetch the edge and policies from the DB. Since the policy
	// we added is invalid according to the new format, it should be
	// treated as if we are not aware of the policy (indicated by the
	// returned policy being nil).
	dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
	require.NoError(t, err, "unable to fetch channel by ID")

	// The first edge should have a nil policy returned.
	if dbEdge1 != nil {
		t.Fatalf("expected db edge to be nil")
	}
	if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)

	// Now add the original, unmodified edge policy, and make sure the edge
	// policies then become fully populated.
	if err := graph.UpdateEdgePolicy(edge1); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}
	dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByID(chanID)
	require.NoError(t, err, "unable to fetch channel by ID")
	if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
}

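// The raw edge bucket key written above is assumed to consist of the 33-byte
// compressed public key of the policy's "from" node followed by the 8-byte
// big-endian channel ID. The helper below is a sketch restating that layout;
// it is not used by the tests, and the authoritative format lives in the
// package's serialization code.
func sketchEdgePolicyKey(fromNode [33]byte, chanID uint64) [41]byte {
	var key [41]byte

	// First 33 bytes: the compressed public key of the announcing node.
	copy(key[:33], fromNode[:])

	// Last 8 bytes: the channel's short channel ID.
	byteOrder.PutUint64(key[33:], chanID)

	return key
}
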
// assertNumZombies queries the provided ChannelGraph for NumZombies, and
// asserts that the returned number is equal to expZombies.
func assertNumZombies(t *testing.T, graph *ChannelGraph, expZombies uint64) {
	t.Helper()

	numZombies, err := graph.NumZombies()
	require.NoError(t, err, "unable to query number of zombies")

	if numZombies != expZombies {
		t.Fatalf("expected %d zombies, found %d",
			expZombies, numZombies)
	}
}

// TestGraphZombieIndex ensures that we can mark edges correctly as
// zombie/live.
func TestGraphZombieIndex(t *testing.T) {
	t.Parallel()

	// We'll start by creating our test graph along with a test edge.
	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to create test database")

	node1, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test vertex")
	node2, err := createTestVertex(graph.db)
	require.NoError(t, err, "unable to create test vertex")

	// Swap the nodes if the second's pubkey is smaller than the first.
	// Without this, the comparisons at the end will fail probabilistically.
	if bytes.Compare(node2.PubKeyBytes[:], node1.PubKeyBytes[:]) < 0 {
		node1, node2 = node2, node1
	}

	edge, _, _ := createChannelEdge(graph.db, node1, node2)
	require.NoError(t, graph.AddChannelEdge(edge))

	// Since the edge is known to the graph and it isn't a zombie,
	// IsZombieEdge should not report the channel as a zombie.
	isZombie, _, _ := graph.IsZombieEdge(edge.ChannelID)
	require.False(t, isZombie)
	assertNumZombies(t, graph, 0)

	// If we delete the edge and mark it as a zombie, then we should expect
	// to see it within the index.
	err = graph.DeleteChannelEdges(false, true, edge.ChannelID)
	require.NoError(t, err, "unable to mark edge as zombie")
	isZombie, pubKey1, pubKey2 := graph.IsZombieEdge(edge.ChannelID)
	require.True(t, isZombie)
	require.Equal(t, node1.PubKeyBytes, pubKey1)
	require.Equal(t, node2.PubKeyBytes, pubKey2)
	assertNumZombies(t, graph, 1)

	// Similarly, if we mark the same edge as live, we should no longer see
	// it within the index.
	require.NoError(t, graph.MarkEdgeLive(edge.ChannelID))

	// Attempting to mark the edge as live again now that it is no longer
	// in the zombie index should fail.
	require.ErrorIs(
		t, graph.MarkEdgeLive(edge.ChannelID), ErrZombieEdgeNotFound,
	)

	isZombie, _, _ = graph.IsZombieEdge(edge.ChannelID)
	require.False(t, isZombie)
	assertNumZombies(t, graph, 0)

	// If we mark the edge as a zombie manually, then it should show up as
	// being a zombie once again.
	err = graph.MarkEdgeZombie(
		edge.ChannelID, node1.PubKeyBytes, node2.PubKeyBytes,
	)
	require.NoError(t, err, "unable to mark edge as zombie")
	isZombie, _, _ = graph.IsZombieEdge(edge.ChannelID)
	require.True(t, isZombie)
	assertNumZombies(t, graph, 1)
}

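// assertZombieEdge is an illustrative helper (not called by the tests above)
// that bundles the zombie-index checks used in TestGraphZombieIndex: the edge
// must be reported as a zombie and the stored public keys must match the
// channel's endpoints. It only uses APIs already exercised in this file.
func assertZombieEdge(t *testing.T, graph *ChannelGraph, chanID uint64,
	node1, node2 *LightningNode) {

	t.Helper()

	isZombie, pubKey1, pubKey2 := graph.IsZombieEdge(chanID)
	require.True(t, isZombie)
	require.Equal(t, node1.PubKeyBytes, pubKey1)
	require.Equal(t, node2.PubKeyBytes, pubKey2)
}
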
// compareNodes is used to compare two LightningNodes while excluding the
// Features struct, which cannot be compared as the semantics for reserializing
// the featuresMap have not been defined.
func compareNodes(a, b *LightningNode) error {
	if a.LastUpdate != b.LastUpdate {
		return fmt.Errorf("node LastUpdate doesn't match: expected "+
			"%v, got %v", a.LastUpdate, b.LastUpdate)
	}
	if !reflect.DeepEqual(a.Addresses, b.Addresses) {
		return fmt.Errorf("Addresses doesn't match: expected %#v, "+
			"got %#v", a.Addresses, b.Addresses)
	}
	if !reflect.DeepEqual(a.PubKeyBytes, b.PubKeyBytes) {
		return fmt.Errorf("PubKey doesn't match: expected %#v, "+
			"got %#v", a.PubKeyBytes, b.PubKeyBytes)
	}
	if !reflect.DeepEqual(a.Color, b.Color) {
		return fmt.Errorf("Color doesn't match: expected %#v, "+
			"got %#v", a.Color, b.Color)
	}
	if !reflect.DeepEqual(a.Alias, b.Alias) {
		return fmt.Errorf("Alias doesn't match: expected %#v, "+
			"got %#v", a.Alias, b.Alias)
	}
	if !reflect.DeepEqual(a.HaveNodeAnnouncement, b.HaveNodeAnnouncement) {
		return fmt.Errorf("HaveNodeAnnouncement doesn't match: "+
			"expected %#v, got %#v", a.HaveNodeAnnouncement,
			b.HaveNodeAnnouncement)
	}
	if !bytes.Equal(a.ExtraOpaqueData, b.ExtraOpaqueData) {
		return fmt.Errorf("extra data doesn't match: %v vs %v",
			a.ExtraOpaqueData, b.ExtraOpaqueData)
	}

	return nil
}

// compareEdgePolicies is used to compare two ChannelEdgePolicies field by
// field, excluding the nodes' Features struct, as the semantics for
// reserializing the featuresMap have not been defined.
func compareEdgePolicies(a, b *models.ChannelEdgePolicy) error {
	if a.ChannelID != b.ChannelID {
		return fmt.Errorf("ChannelID doesn't match: expected %v, "+
			"got %v", a.ChannelID, b.ChannelID)
	}
	if !reflect.DeepEqual(a.LastUpdate, b.LastUpdate) {
		return fmt.Errorf("edge LastUpdate doesn't match: expected "+
			"%#v, got %#v", a.LastUpdate, b.LastUpdate)
	}
	if a.MessageFlags != b.MessageFlags {
		return fmt.Errorf("MessageFlags doesn't match: expected %v, "+
			"got %v", a.MessageFlags, b.MessageFlags)
	}
	if a.ChannelFlags != b.ChannelFlags {
		return fmt.Errorf("ChannelFlags doesn't match: expected %v, "+
			"got %v", a.ChannelFlags, b.ChannelFlags)
	}
	if a.TimeLockDelta != b.TimeLockDelta {
		return fmt.Errorf("TimeLockDelta doesn't match: expected %v, "+
			"got %v", a.TimeLockDelta, b.TimeLockDelta)
	}
	if a.MinHTLC != b.MinHTLC {
		return fmt.Errorf("MinHTLC doesn't match: expected %v, "+
			"got %v", a.MinHTLC, b.MinHTLC)
	}
	if a.MaxHTLC != b.MaxHTLC {
		return fmt.Errorf("MaxHTLC doesn't match: expected %v, "+
			"got %v", a.MaxHTLC, b.MaxHTLC)
	}
	if a.FeeBaseMSat != b.FeeBaseMSat {
		return fmt.Errorf("FeeBaseMSat doesn't match: expected %v, "+
			"got %v", a.FeeBaseMSat, b.FeeBaseMSat)
	}
	if a.FeeProportionalMillionths != b.FeeProportionalMillionths {
		return fmt.Errorf("FeeProportionalMillionths doesn't match: "+
			"expected %v, got %v", a.FeeProportionalMillionths,
			b.FeeProportionalMillionths)
	}
	if !bytes.Equal(a.ExtraOpaqueData, b.ExtraOpaqueData) {
		return fmt.Errorf("extra data doesn't match: %v vs %v",
			a.ExtraOpaqueData, b.ExtraOpaqueData)
	}
	if !bytes.Equal(a.ToNode[:], b.ToNode[:]) {
		return fmt.Errorf("ToNode doesn't match: expected %x, got %x",
			a.ToNode, b.ToNode)
	}

	return nil
}

// TestLightningNodeSigVerification checks that we can use the LightningNode's
// pubkey to verify signatures.
func TestLightningNodeSigVerification(t *testing.T) {
	t.Parallel()

	// Create some dummy data to sign.
	var data [32]byte
	if _, err := prand.Read(data[:]); err != nil {
		t.Fatalf("unable to read prand: %v", err)
	}

	// Create private key and sign the data with it.
	priv, err := btcec.NewPrivateKey()
	require.NoError(t, err, "unable to create priv key")
	sign := ecdsa.Sign(priv, data[:])

	// Sanity check that the signature checks out.
	if !sign.Verify(data[:], priv.PubKey()) {
		t.Fatalf("signature doesn't check out")
	}

	// Create a LightningNode from the same private key.
	graph, err := MakeTestGraph(t)
	require.NoError(t, err, "unable to make test database")
	node, err := createLightningNode(graph.db, priv)
	require.NoError(t, err, "unable to create node")

	// And finally check that we can verify the same signature from the
	// pubkey returned from the lightning node.
	nodePub, err := node.PubKey()
	require.NoError(t, err, "unable to get pubkey")
	if !sign.Verify(data[:], nodePub) {
		t.Fatalf("unable to verify sig")
	}
}

// TestComputeFee tests fee calculation based on both in- and outgoing amt.
func TestComputeFee(t *testing.T) {
	var (
		policy = models.ChannelEdgePolicy{
			FeeBaseMSat:               10000,
			FeeProportionalMillionths: 30000,
		}
		outgoingAmt = lnwire.MilliSatoshi(1000000)
		expectedFee = lnwire.MilliSatoshi(40000)
	)

	fee := policy.ComputeFee(outgoingAmt)
	if fee != expectedFee {
		t.Fatalf("expected fee %v, got %v", expectedFee, fee)
	}

	fwdFee := policy.ComputeFeeFromIncoming(outgoingAmt + fee)
	if fwdFee != expectedFee {
		t.Fatalf("expected fee %v, but got %v", fee, fwdFee)
	}
}

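// The expected fee above follows from the usual forwarding fee formula, which
// is assumed here to be fee = base + amt * rate / 1,000,000: with a base of
// 10,000 msat and a rate of 30,000 ppm on a 1,000,000 msat payment this gives
// 10,000 + 30,000 = 40,000 msat, matching expectedFee. The sketch below only
// restates that arithmetic; ComputeFee remains the authoritative
// implementation.
func sketchForwardingFee(base, rate,
	amt lnwire.MilliSatoshi) lnwire.MilliSatoshi {

	// Proportional fees are expressed in parts per million of the
	// forwarded amount.
	return base + amt*rate/1000000
}
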
// TestBatchedAddChannelEdge asserts that BatchedAddChannelEdge properly
// executes multiple AddChannelEdge requests in a single txn.
func TestBatchedAddChannelEdge(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.Nil(t, err)

	sourceNode, err := createTestVertex(graph.db)
	require.Nil(t, err)
	err = graph.SetSourceNode(sourceNode)
	require.Nil(t, err)

	// We'd like to test the insertion/deletion of edges, so we create two
	// vertexes to connect.
	node1, err := createTestVertex(graph.db)
	require.Nil(t, err)
	node2, err := createTestVertex(graph.db)
	require.Nil(t, err)

	// In addition to the fake vertexes we create some fake channel
	// identifiers.
	var spendOutputs []*wire.OutPoint
	var blockHash chainhash.Hash
	copy(blockHash[:], bytes.Repeat([]byte{1}, 32))

	// Prune the graph a few times to make sure we have entries in the
	// prune log.
	_, err = graph.PruneGraph(spendOutputs, &blockHash, 155)
	require.Nil(t, err)
	var blockHash2 chainhash.Hash
	copy(blockHash2[:], bytes.Repeat([]byte{2}, 32))
	_, err = graph.PruneGraph(spendOutputs, &blockHash2, 156)
	require.Nil(t, err)

	// We'll now create three almost identical edges, each anchored at a
	// different block height.
	//
	// Create an edge which has its block height at 156.
	height := uint32(156)
	edgeInfo, _ := createEdge(height, 0, 0, 0, node1, node2)

	// Create an edge with block height 157. We give it maximum values for
	// tx index and position, to make sure our database range scan gets
	// edges from the entire range.
	edgeInfo2, _ := createEdge(
		height+1, math.MaxUint32&0x00ffffff, math.MaxUint16, 1,
		node1, node2,
	)

	// Create a third edge, this one with a block height of 155.
	edgeInfo3, _ := createEdge(height-1, 0, 0, 2, node1, node2)

	edges := []models.ChannelEdgeInfo{edgeInfo, edgeInfo2, edgeInfo3}
	errChan := make(chan error, len(edges))
	errTimeout := errors.New("timeout adding batched channel")

	// Now add all these new edges to the database.
	var wg sync.WaitGroup
	for _, edge := range edges {
		wg.Add(1)
		go func(edge models.ChannelEdgeInfo) {
			defer wg.Done()

			select {
			case errChan <- graph.AddChannelEdge(&edge):
			case <-time.After(2 * time.Second):
				errChan <- errTimeout
			}
		}(edge)
	}
	wg.Wait()

	for i := 0; i < len(edges); i++ {
		err := <-errChan
		require.Nil(t, err)
	}
}

// TestBatchedUpdateEdgePolicy asserts that BatchedUpdateEdgePolicy properly
// executes multiple UpdateEdgePolicy requests in a single txn.
func TestBatchedUpdateEdgePolicy(t *testing.T) {
	t.Parallel()

	graph, err := MakeTestGraph(t)
	require.Nil(t, err)

	// We'd like to test the update of edges inserted into the database, so
	// we create two vertexes to connect.
	node1, err := createTestVertex(graph.db)
	require.Nil(t, err)
	err = graph.AddLightningNode(node1)
	require.Nil(t, err)
	node2, err := createTestVertex(graph.db)
	require.Nil(t, err)
	err = graph.AddLightningNode(node2)
	require.Nil(t, err)

	// Create an edge and add it to the db.
	edgeInfo, edge1, edge2 := createChannelEdge(graph.db, node1, node2)

	// Make sure inserting the policy at this point, before the edge info
	// is added, will fail.
	err = graph.UpdateEdgePolicy(edge1)
	require.ErrorIs(t, err, ErrEdgeNotFound)

	// Add the edge info.
	err = graph.AddChannelEdge(edgeInfo)
	require.Nil(t, err)

	errTimeout := errors.New("timeout adding batched channel")

	updates := []*models.ChannelEdgePolicy{edge1, edge2}
	errChan := make(chan error, len(updates))

	// Now add all these new edges to the database.
	var wg sync.WaitGroup
	for _, update := range updates {
		wg.Add(1)
		go func(update *models.ChannelEdgePolicy) {
			defer wg.Done()

			select {
			case errChan <- graph.UpdateEdgePolicy(update):
			case <-time.After(2 * time.Second):
				errChan <- errTimeout
			}
		}(update)
	}
	wg.Wait()

	for i := 0; i < len(updates); i++ {
		err := <-errChan
		require.Nil(t, err)
	}
}

// BenchmarkForEachChannel is a benchmark test that measures the number of
// allocations and the total memory consumed by the full graph traversal.
func BenchmarkForEachChannel(b *testing.B) {
	graph, err := MakeTestGraph(b)
	require.Nil(b, err)

	const numNodes = 100
	const numChannels = 4
	_, _ = fillTestGraph(b, graph, numNodes, numChannels)

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var (
			totalCapacity btcutil.Amount
			maxHTLCs      lnwire.MilliSatoshi
		)

		var nodes []GraphCacheNode
		err = graph.ForEachNodeCacheable(
			func(tx kvdb.RTx, node GraphCacheNode) error {
				nodes = append(nodes, node)

				return nil
			},
		)
		require.NoError(b, err)

		err = graph.db.View(func(tx kvdb.RTx) error {
			for _, n := range nodes {
				cb := func(tx kvdb.RTx,
					info *models.ChannelEdgeInfo,
					policy *models.ChannelEdgePolicy,
					policy2 *models.ChannelEdgePolicy) error {

					// We need to do something with the data
					// here, otherwise the compiler is going
					// to optimize this away, and we get
					// bogus results.
					totalCapacity += info.Capacity
					maxHTLCs += policy.MaxHTLC
					maxHTLCs += policy2.MaxHTLC

					return nil
				}

				err := n.ForEachChannel(tx, cb)
				if err != nil {
					return err
				}
			}

			return nil
		}, func() {})
		require.NoError(b, err)
	}
}

// TestGraphCacheForEachNodeChannel tests that the ForEachNodeDirectedChannel
// method works as expected, and is able to handle nil self edges.
func TestGraphCacheForEachNodeChannel(t *testing.T) {
	graph, err := MakeTestGraph(t)
	require.NoError(t, err)

	// Unset the channel graph cache to simulate the user running with the
	// option turned off.
	graph.graphCache = nil

	node1, err := createTestVertex(graph.db)
	require.Nil(t, err)
	err = graph.AddLightningNode(node1)
	require.Nil(t, err)
	node2, err := createTestVertex(graph.db)
	require.Nil(t, err)
	err = graph.AddLightningNode(node2)
	require.Nil(t, err)

	// Create an edge and add it to the db.
	edgeInfo, e1, e2 := createChannelEdge(graph.db, node1, node2)

	// Because of lexicographical sorting and the usage of random node keys
	// in this test, we need to determine which edge belongs to node 1 at
	// runtime.
	var edge1 *models.ChannelEdgePolicy
	if e1.ToNode == node2.PubKeyBytes {
		edge1 = e1
	} else {
		edge1 = e2
	}

	// Add the channel, but only insert a single edge into the graph.
	require.NoError(t, graph.AddChannelEdge(edgeInfo))

	getSingleChannel := func() *DirectedChannel {
		var ch *DirectedChannel
		err = graph.ForEachNodeDirectedChannel(nil, node1.PubKeyBytes,
			func(c *DirectedChannel) error {
				require.Nil(t, ch)
				ch = c

				return nil
			},
		)
		require.NoError(t, err)

		return ch
	}

	// We should be able to retrieve the single channel added, even though
	// we have a nil edge policy here.
	require.NotNil(t, getSingleChannel())

	// Set an inbound fee and check that it is properly returned.
	edge1.ExtraOpaqueData = []byte{
		253, 217, 3, 8, 0, 0, 0, 10, 0, 0, 0, 20,
	}
	require.NoError(t, graph.UpdateEdgePolicy(edge1))

	directedChan := getSingleChannel()
	require.NotNil(t, directedChan)
	require.Equal(t, directedChan.InboundFee, lnwire.Fee{
		BaseFee: 10,
		FeeRate: 20,
	})

	// Set an invalid inbound fee and check that the edge is no longer
	// returned.
	edge1.ExtraOpaqueData = []byte{
		253, 217, 3, 8, 0,
	}
	require.NoError(t, graph.UpdateEdgePolicy(edge1))
	require.Nil(t, getSingleChannel())
}

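// The ExtraOpaqueData bytes used above are assumed to encode a single TLV
// record for the inbound fee: a BigSize-encoded record type (0xfd 0xd9 0x03,
// i.e. 55555), a one-byte length of 8, and two big-endian 32-bit values for
// the base fee (10) and the fee rate (20). The truncated 5-byte variant is
// what makes the record invalid. The sketch below rebuilds those twelve
// bytes; the canonical encoding lives in lnwire, not here.
func sketchInboundFeeRecord(baseFee, feeRate int32) []byte {
	record := []byte{
		253, 217, 3, // BigSize TLV type (assumed to be 55555).
		8,                      // Value length.
		0, 0, 0, 0, 0, 0, 0, 0, // Placeholder for the two values.
	}
	byteOrder.PutUint32(record[4:8], uint32(baseFee))
	byteOrder.PutUint32(record[8:12], uint32(feeRate))

	return record
}
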
// TestGraphLoading asserts that the cache is properly reconstructed after a
// restart.
func TestGraphLoading(t *testing.T) {
	// First, create a temporary directory to be used for the duration of
	// this test.
	tempDirName := t.TempDir()

	// Next, create the graph for the first time.
	backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cgr")
	require.NoError(t, err)
	defer backend.Close()
	defer backendCleanup()

	opts := DefaultOptions()
	graph, err := NewChannelGraph(
		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
		true, false,
	)
	require.NoError(t, err)

	// Populate the graph with test data.
	const numNodes = 100
	const numChannels = 4
	_, _ = fillTestGraph(t, graph, numNodes, numChannels)

	// Recreate the graph. This should cause the graph cache to be
	// populated.
	graphReloaded, err := NewChannelGraph(
		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
		true, false,
	)
	require.NoError(t, err)

	// Assert that the cache content is identical.
	require.Equal(
		t, graph.graphCache.nodeChannels,
		graphReloaded.graphCache.nodeChannels,
	)
	require.Equal(
		t, graph.graphCache.nodeFeatures,
		graphReloaded.graphCache.nodeFeatures,
	)
}