lnd_channel_backup_test.go

package itest

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/node"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)
type (
	// nodeRestorer is a function closure that allows each test case to
	// control exactly *how* the prior node is restored. This might be
	// using a backup obtained over RPC, or the file system, etc.
	nodeRestorer func() *node.HarnessNode

	// restoreMethodType takes an old node, then returns a function
	// closure that'll return the same node, but with its state restored
	// via a custom method. We use this to abstract away _how_ a node is
	// restored from our assertions once the node has been fully restored
	// itself.
	restoreMethodType func(ht *lntest.HarnessTest,
		oldNode *node.HarnessNode, backupFilePath string,
		password []byte, mnemonic []string) nodeRestorer
)
// revocationWindow is the revocation window we specify when restoring a
// node.
const revocationWindow = 100

// chanRestoreScenario represents a test case used when testing the channel
// restore methods.
type chanRestoreScenario struct {
	carol    *node.HarnessNode
	dave     *node.HarnessNode
	password []byte
	mnemonic []string
	params   lntest.OpenChannelParams
}
// newChanRestoreScenario creates a new scenario that has two nodes, Carol
// and Dave, connected and funded.
func newChanRestoreScenario(ht *lntest.HarnessTest, ct lnrpc.CommitmentType,
	zeroConf bool) *chanRestoreScenario {

	const (
		chanAmt = btcutil.Amount(10000000)
		pushAmt = btcutil.Amount(5000000)
	)

	password := []byte("El Psy Kongroo")
	nodeArgs := []string{
		"--minbackoff=50ms",
		"--maxbackoff=1s",
	}

	if ct != lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE {
		args := lntest.NodeArgsForCommitType(ct)
		nodeArgs = append(nodeArgs, args...)
	}

	if zeroConf {
		nodeArgs = append(
			nodeArgs, "--protocol.option-scid-alias",
			"--protocol.zero-conf",
		)
	}

	// First, we'll create a brand new node we'll use within the test. If
	// we have a custom backup file specified, then we'll also create that
	// for use.
	dave, mnemonic, _ := ht.NewNodeWithSeed(
		"dave", nodeArgs, password, false,
	)
	carol := ht.NewNode("carol", nodeArgs)

	// Now that our new nodes are created, we'll give them some coins for
	// channel opening and anchor sweeping.
	ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol)
	ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, dave)

	// Mine a block to confirm the funds.
	ht.MineBlocks(1)

	// For the anchor output case we need two UTXOs for Carol so she can
	// sweep both the local and remote anchor.
	if lntest.CommitTypeHasAnchors(ct) {
		ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
	}

	// Next, we'll connect Dave to Carol, and open a new channel to her
	// with a portion pushed.
	ht.ConnectNodes(dave, carol)

	// If the commitment type is taproot, then the channel must also be
	// private.
	var privateChan bool
	if ct == lnrpc.CommitmentType_SIMPLE_TAPROOT {
		privateChan = true
	}

	return &chanRestoreScenario{
		carol:    carol,
		dave:     dave,
		mnemonic: mnemonic,
		password: password,
		params: lntest.OpenChannelParams{
			Amt:            chanAmt,
			PushAmt:        pushAmt,
			ZeroConf:       zeroConf,
			CommitmentType: ct,
			Private:        privateChan,
		},
	}
}
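
// A minimal sketch of how the scenario helper above is wired into the test
// cases below (taken from runChanRestoreScenarioBasic further down; the
// concrete restore method varies per test case):
//
//	crs := newChanRestoreScenario(
//		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
//	)
//	ht.OpenChannel(crs.dave, crs.carol, crs.params)
//	restoredNodeFunc := restoreMethod(...)
//	crs.testScenario(ht, restoredNodeFunc)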
// restoreDave calls the `nodeRestorer` and asserts that Dave is restored by
// checking that his confirmed wallet balance is positive.
func (c *chanRestoreScenario) restoreDave(ht *lntest.HarnessTest,
	restoredNodeFunc nodeRestorer) *node.HarnessNode {

	// Next, we'll make a new Dave and start the bulk of our recovery
	// workflow.
	dave := restoredNodeFunc()

	// First ensure that the on-chain balance is restored.
	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBal := daveBalResp.ConfirmedBalance
		if daveBal <= 0 {
			return fmt.Errorf("expected positive balance, had %v",
				daveBal)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "on-chain balance not restored")

	return dave
}
// testScenario runs a test case with a given setup and asserts that the DLP
// is executed as expected. In detail, it will:
//  1. shut down Dave.
//  2. suspend Carol.
//  3. restore Dave.
//  4. validate the pending channel state and check we cannot force close it.
//  5. validate Carol's UTXOs.
//  6. assert the DLP is executed.
func (c *chanRestoreScenario) testScenario(ht *lntest.HarnessTest,
	restoredNodeFunc nodeRestorer) {

	carol, dave := c.carol, c.dave

	// Before we start the recovery, we'll record the balances of both
	// Carol and Dave to ensure they both sweep their coins at the end.
	carolBalResp := carol.RPC.WalletBalance()
	carolStartingBalance := carolBalResp.ConfirmedBalance

	daveBalance := dave.RPC.WalletBalance()
	daveStartingBalance := daveBalance.ConfirmedBalance

	// Now that we have our restore function ready, we'll shut down the
	// old Dave node, as we'll be restoring a new one shortly below.
	ht.Shutdown(dave)

	// To make sure the channel state is advanced correctly if the channel
	// peer is not online at first, we also shut down Carol.
	restartCarol := ht.SuspendNode(carol)

	// We now restore Dave.
	dave = c.restoreDave(ht, restoredNodeFunc)

	// We now check that the restored channel is in the proper state. It
	// should not yet be force closing as no connection with the remote
	// peer was established yet. We should also not be able to close the
	// channel.
	channel := ht.AssertNumWaitingClose(dave, 1)[0]
	chanPointStr := channel.Channel.ChannelPoint

	// We also want to make sure we cannot force close in this state. That
	// would get the state machine in a weird state.
	chanPointParts := strings.Split(chanPointStr, ":")
	chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32)

	// We don't get an error directly but only when reading the first
	// message of the stream.
	err := ht.CloseChannelAssertErr(
		dave, &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
				FundingTxidStr: chanPointParts[0],
			},
			OutputIndex: uint32(chanPointIndex),
		}, true,
	)
	require.Contains(ht, err.Error(), "cannot close channel with state: ")
	require.Contains(ht, err.Error(), "ChanStatusRestored")

	// Increase the fee estimate so that the following force close tx will
	// be CPFP'ed in case of anchor commitments.
	ht.SetFeeEstimate(30000)

	// Now that we have ensured that the channels restored by the backup
	// are in the correct state even without the remote peer telling us
	// so, let's start up Carol again.
	require.NoError(ht, restartCarol(), "restart carol failed")

	if lntest.CommitTypeHasAnchors(c.params.CommitmentType) {
		ht.AssertNumUTXOs(carol, 2)
	} else {
		ht.AssertNumUTXOs(carol, 1)
	}

	// Now we'll assert that both sides properly execute the DLP protocol.
	// We grab their balances now to ensure that they're made whole at the
	// end of the protocol.
	assertDLPExecuted(
		ht, carol, carolStartingBalance, dave,
		daveStartingBalance, c.params.CommitmentType,
	)
}
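
// The manual channel point parsing in testScenario above could be factored
// into a helper. A minimal sketch of such a helper (hypothetical, not part
// of the original file):
func parseChanPointStr(t *testing.T,
	chanPointStr string) *lnrpc.ChannelPoint {

	// A channel point string has the form "<funding txid>:<output index>".
	parts := strings.Split(chanPointStr, ":")
	require.Len(t, parts, 2, "malformed channel point %v", chanPointStr)

	index, err := strconv.ParseUint(parts[1], 10, 32)
	require.NoError(t, err, "invalid output index")

	return &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
			FundingTxidStr: parts[0],
		},
		OutputIndex: uint32(index),
	}
}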
// testChannelBackupRestoreBasic tests that we're able to recover from, and
// initiate the DLP protocol via: the RPC restore command, restoring on
// unlock, and restoring from initial wallet creation. We'll also alternate
// between restoring from the on-disk file, and restoring from the exported
// RPC command as well.
func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
	var testCases = []struct {
		name          string
		restoreMethod restoreMethodType
	}{
		// Restore from backups obtained via the RPC interface. Dave
		// was the initiator of the non-advertised channel.
		{
			name: "restore from RPC backup",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// For this restoration method, we'll grab the
				// current multi-channel backup from the old
				// node, and use it to restore a new node
				// within the closure.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				multi := chanBackup.MultiChanBackup.
					MultiChanBackup

				// In our nodeRestorer function, we'll restore
				// the node from seed, then manually recover
				// the channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface.
		{
			name: "restore from backup file",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := os.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup as part of node initialization with the
		// prior mnemonic and new backup seed.
		{
			name: "restore during creation",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// First, fetch the current backup state as
				// is, to obtain our latest Multi.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				backupSnapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: chanBackup.
						MultiChanBackup,
				}

				// Create a new nodeRestorer that will restore
				// the node using the Multi backup we just
				// obtained above.
				return func() *node.HarnessNode {
					return st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", revocationWindow,
						backupSnapshot,
					)
				}
			},
		},

		// Restore the backup once the node has already been
		// re-created, using the Unlock call.
		{
			name: "restore during unlock",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// First, fetch the current backup state as
				// is, to obtain our latest Multi.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				backupSnapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: chanBackup.
						MultiChanBackup,
				}

				// Create a new nodeRestorer that will restore
				// the node with its seed, but no channel
				// backup, shut down this initialized node,
				// then restart it again using Unlock.
				return func() *node.HarnessNode {
					newNode := st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", revocationWindow, nil,
					)
					st.RestartNodeWithChanBackups(
						newNode, backupSnapshot,
					)

					return newNode
				}
			},
		},

		// Restore the backup from the on-disk file a second time to
		// make sure imports can be canceled and later resumed.
		{
			name: "restore from backup file twice",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := os.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				//
				//nolint:lll
				backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
					MultiChanBackup: multi,
				}

				return func() *node.HarnessNode {
					newNode := st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", revocationWindow, nil,
					)

					req := &lnrpc.RestoreChanBackupRequest{
						Backup: backup,
					}
					newNode.RPC.RestoreChanBackups(req)

					req = &lnrpc.RestoreChanBackupRequest{
						Backup: backup,
					}
					newNode.RPC.RestoreChanBackups(req)

					return newNode
				}
			},
		},
	}

	for _, testCase := range testCases {
		tc := testCase
		success := ht.Run(tc.name, func(t *testing.T) {
			h := ht.Subtest(t)

			runChanRestoreScenarioBasic(h, tc.restoreMethod)
		})

		// Only run the remaining test cases if this one passed.
		if !success {
			break
		}
	}
}
// runChanRestoreScenarioBasic executes a given test case from end to end,
// ensuring that after Dave restores his channel state according to the
// testCase, the DLP protocol is executed properly and both nodes are made
// whole.
func runChanRestoreScenarioBasic(ht *lntest.HarnessTest,
	restoreMethod restoreMethodType) {

	// Create a new restore scenario.
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
	)
	carol, dave := crs.carol, crs.dave

	// Open a channel from Dave to Carol.
	ht.OpenChannel(dave, carol, crs.params)

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()
	restoredNodeFunc := restoreMethod(
		ht, dave, backupFilePath, crs.password, crs.mnemonic,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}
// testChannelBackupRestoreUnconfirmed tests that we're able to restore from
// the on-disk file and from the exported RPC command for an unconfirmed
// channel.
func testChannelBackupRestoreUnconfirmed(ht *lntest.HarnessTest) {
	// Use the channel backup file that contains an unconfirmed channel
	// and make sure recovery works as well.
	ht.Run("restore unconfirmed channel file", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioUnConfirmed(st, true)
	})

	// Create a backup using RPC that contains an unconfirmed channel and
	// make sure recovery works as well.
	ht.Run("restore unconfirmed channel RPC", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioUnConfirmed(st, false)
	})
}
// runChanRestoreScenarioUnConfirmed checks that Dave is able to restore from
// a backup of an unconfirmed channel.
func runChanRestoreScenarioUnConfirmed(ht *lntest.HarnessTest, useFile bool) {
	// Create a new restore scenario.
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
	)
	carol, dave := crs.carol, crs.dave

	// Open a pending channel.
	ht.OpenChannelAssertPending(dave, carol, crs.params)

	// Give the pubsub some time to update the channel backup.
	err := wait.NoError(func() error {
		fi, err := os.Stat(dave.Cfg.ChanBackupPath())
		if err != nil {
			return err
		}
		if fi.Size() <= chanbackup.NilMultiSizePacked {
			return fmt.Errorf("backup file empty")
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "channel backup not updated in time")

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	var multi []byte
	if useFile {
		backupFilePath := dave.Cfg.ChanBackupPath()

		// Read the entire Multi backup stored within this node's
		// channel.backup file.
		multi, err = os.ReadFile(backupFilePath)
		require.NoError(ht, err)
	} else {
		// For this restoration method, we'll grab the current
		// multi-channel backup from the old node. The channel should
		// be included, even if it is not confirmed yet.
		chanBackup := dave.RPC.ExportAllChanBackups()
		chanPoints := chanBackup.MultiChanBackup.ChanPoints
		require.NotEmpty(ht, chanPoints,
			"unconfirmed channel not found")
		multi = chanBackup.MultiChanBackup.MultiChanBackup
	}

	// Let's assume time passes and the channel confirms in the meantime,
	// but for some reason the backup we made while it was still
	// unconfirmed is the only backup we have. We should still be able to
	// restore it. To simulate time passing, we mine some blocks to get
	// the channel confirmed _after_ we saved the backup.
	ht.MineBlocksAndAssertNumTxes(6, 1)

	// In our nodeRestorer function, we'll restore the node from seed,
	// then manually recover the channel backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}
// testChannelBackupRestoreCommitTypes tests that we're able to recover from,
// and initiate the DLP protocol for different channel commitment types and
// zero-conf channels.
func testChannelBackupRestoreCommitTypes(ht *lntest.HarnessTest) {
	var testCases = []struct {
		name     string
		ct       lnrpc.CommitmentType
		zeroConf bool
	}{
		// Restore the backup from the on-disk file, using the RPC
		// interface, for anchor commitment channels.
		{
			name: "restore from backup file anchors",
			ct:   lnrpc.CommitmentType_ANCHORS,
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for script-enforced leased channels.
		{
			name: "restore from backup file script " +
				"enforced lease",
			ct: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for zero-conf anchor channels.
		{
			name: "restore from backup file for zero-conf " +
				"anchors channel",
			ct:       lnrpc.CommitmentType_ANCHORS,
			zeroConf: true,
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for a zero-conf script-enforced leased channel.
		{
			name: "restore from backup file zero-conf " +
				"script-enforced leased channel",
			ct:       lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
			zeroConf: true,
		},

		// Restore a channel backup of a taproot channel that was
		// confirmed.
		{
			name:     "restore from backup taproot",
			ct:       lnrpc.CommitmentType_SIMPLE_TAPROOT,
			zeroConf: false,
		},

		// Restore a channel backup of an unconfirmed taproot channel.
		{
			name:     "restore from backup taproot zero conf",
			ct:       lnrpc.CommitmentType_SIMPLE_TAPROOT,
			zeroConf: true,
		},
	}

	for _, testCase := range testCases {
		tc := testCase
		success := ht.Run(tc.name, func(t *testing.T) {
			h := ht.Subtest(t)

			runChanRestoreScenarioCommitTypes(
				h, tc.ct, tc.zeroConf,
			)
		})

		// Only run the remaining test cases if this one passed.
		if !success {
			break
		}
	}
}
// runChanRestoreScenarioCommitTypes tests that the DLP is applied for
// different channel commitment types and zero-conf channels.
func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest,
	ct lnrpc.CommitmentType, zeroConf bool) {

	// Create a new restore scenario.
	crs := newChanRestoreScenario(ht, ct, zeroConf)
	carol, dave := crs.carol, crs.dave

	// If we are testing zero-conf channels, setup a ChannelAcceptor for
	// the fundee.
	var cancelAcceptor context.CancelFunc
	if zeroConf {
		// Setup a ChannelAcceptor.
		acceptStream, cancel := carol.RPC.ChannelAcceptor()
		cancelAcceptor = cancel
		go acceptChannel(ht.T, true, acceptStream)
	}

	var fundingShim *lnrpc.FundingShim
	if ct == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		_, minerHeight := ht.Miner.GetBestBlock()
		thawHeight := uint32(minerHeight + thawHeightDelta)

		fundingShim, _ = deriveFundingShim(
			ht, dave, carol, crs.params.Amt, thawHeight, true, ct,
		)
		crs.params.FundingShim = fundingShim
	}
	ht.OpenChannel(dave, carol, crs.params)

	// Remove the ChannelAcceptor.
	if zeroConf {
		cancelAcceptor()
	}

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()

	// Read the entire Multi backup stored within this node's
	// channel.backup file.
	multi, err := os.ReadFile(backupFilePath)
	require.NoError(ht, err)

	// If this was a zero-conf taproot channel, then since it's private,
	// we'll need to mine an extra block (the framework won't mine extra
	// blocks otherwise).
	if ct == lnrpc.CommitmentType_SIMPLE_TAPROOT && zeroConf {
		ht.MineBlocksAndAssertNumTxes(1, 1)
	}

	// Now that we have Dave's backup file, we'll create a new
	// nodeRestorer that we'll restore using the on-disk channel.backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}
// testChannelBackupRestoreLegacy checks a channel with the legacy revocation
// producer format and makes sure old SCBs can still be recovered.
func testChannelBackupRestoreLegacy(ht *lntest.HarnessTest) {
	// Create a new restore scenario.
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
	)
	carol, dave := crs.carol, crs.dave

	createLegacyRevocationChannel(
		ht, crs.params.Amt, crs.params.PushAmt, dave, carol,
	)

	// For this restoration method, we'll grab the current multi-channel
	// backup from the old node, and use it to restore a new node within
	// the closure.
	chanBackup := dave.RPC.ExportAllChanBackups()
	multi := chanBackup.MultiChanBackup.MultiChanBackup

	// In our nodeRestorer function, we'll restore the node from seed,
	// then manually recover the channel backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}
// testChannelBackupRestoreForceClose checks that Dave can restore from force
// closed channels.
func testChannelBackupRestoreForceClose(ht *lntest.HarnessTest) {
	// Restore a channel that was force closed by Dave just before going
	// offline.
	success := ht.Run("from backup file anchors", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioForceClose(st, false)
	})

	// Only run the second test if the first passed.
	if !success {
		return
	}

	// Restore a zero-conf anchors channel that was force closed by Dave
	// just before going offline.
	ht.Run("from backup file anchors w/ zero-conf", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioForceClose(st, true)
	})
}
// runChanRestoreScenarioForceClose creates anchor-enabled force close
// channels and checks that Dave is able to restore from them.
func runChanRestoreScenarioForceClose(ht *lntest.HarnessTest, zeroConf bool) {
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_ANCHORS, zeroConf,
	)
	carol, dave := crs.carol, crs.dave

	// For the neutrino backend, we give Dave one more UTXO to fund the
	// anchor sweep.
	if ht.IsNeutrinoBackend() {
		ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
	}

	// If we are testing zero-conf channels, setup a ChannelAcceptor for
	// the fundee.
	var cancelAcceptor context.CancelFunc
	if zeroConf {
		// Setup a ChannelAcceptor.
		acceptStream, cancel := carol.RPC.ChannelAcceptor()
		cancelAcceptor = cancel
		go acceptChannel(ht.T, true, acceptStream)
	}

	chanPoint := ht.OpenChannel(dave, carol, crs.params)

	// Remove the ChannelAcceptor.
	if zeroConf {
		cancelAcceptor()
	}

	// If we're testing that locally force closed channels can be
	// restored, then we issue the force close now.
	ht.CloseChannelAssertPending(dave, chanPoint, true)

	// Dave should see one waiting close channel.
	ht.AssertNumWaitingClose(dave, 1)

	// Now we need to make sure that the channel is still in the backup.
	// Otherwise restoring won't work later.
	dave.RPC.ExportChanBackup(chanPoint)

	// Before we start the recovery, we'll record the balances of both
	// Carol and Dave to ensure they both sweep their coins at the end.
	carolBalResp := carol.RPC.WalletBalance()
	carolStartingBalance := carolBalResp.ConfirmedBalance

	daveBalance := dave.RPC.WalletBalance()
	daveStartingBalance := daveBalance.ConfirmedBalance

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()

	// Read the entire Multi backup stored within this node's
	// channel.backup file.
	multi, err := os.ReadFile(backupFilePath)
	require.NoError(ht, err)

	// Now that we have Dave's backup file, we'll create a new
	// nodeRestorer that will restore using the on-disk channel.backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// We now wait until Dave's closing tx has made it into the mempool.
	ht.Miner.AssertNumTxsInMempool(1)

	// Now that we have our restore function ready, we'll shut down the
	// old Dave node, as we'll be restoring a new one shortly below.
	ht.Shutdown(dave)

	// Mine a block to confirm the closing tx from Dave.
	ht.MineBlocksAndAssertNumTxes(1, 1)

	// To make sure the channel state is advanced correctly if the channel
	// peer is not online at first, we also shut down Carol.
	restartCarol := ht.SuspendNode(carol)

	dave = crs.restoreDave(ht, restoredNodeFunc)

	// For our force close scenario we don't need the channel to be closed
	// by Carol since it was already force closed before we started the
	// recovery. All we need is for Carol to send us over the commit
	// height so we can sweep the time locked output with the correct
	// commit point.
	ht.AssertNumPendingForceClose(dave, 1)
	require.NoError(ht, restartCarol(), "restart carol failed")

	// Now that we have our new node up, we expect that it'll re-connect
	// to Carol automatically based on the restored backup.
	ht.EnsureConnected(dave, carol)

	assertTimeLockSwept(
		ht, carol, dave, carolStartingBalance, daveStartingBalance,
	)
}
// testChannelBackupUpdates tests that both the streaming channel update RPC,
// and the on-disk channel.backup are updated each time a channel is
// opened/closed.
func testChannelBackupUpdates(ht *lntest.HarnessTest) {
	alice := ht.Alice

	// First, we'll make a temp directory that we'll use to store our
	// backup file, so we can check in on it during the test easily.
	backupDir := ht.T.TempDir()

	// First, we'll create a new node, Carol. We'll also create a
	// temporary file that Carol will use to store her channel backups.
	backupFilePath := filepath.Join(
		backupDir, chanbackup.DefaultBackupFileName,
	)
	carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath)
	carol := ht.NewNode("carol", []string{carolArgs})

	// Next, we'll register for streaming notifications for changes to
	// the backup file.
	backupStream := carol.RPC.SubscribeChannelBackups()

	// We'll use this goroutine to proxy any updates to a channel we can
	// easily use below.
	var wg sync.WaitGroup
	backupUpdates := make(chan *lnrpc.ChanBackupSnapshot)
	streamErr := make(chan error)
	streamQuit := make(chan struct{})

	wg.Add(1)
	go func() {
		defer wg.Done()

		for {
			snapshot, err := backupStream.Recv()
			if err != nil {
				select {
				case streamErr <- err:
				case <-streamQuit:
					return
				}
			}

			select {
			case backupUpdates <- snapshot:
			case <-streamQuit:
				return
			}
		}
	}()
	defer close(streamQuit)

	// With Carol up, we'll now connect her to Alice, and open a channel
	// between them.
	ht.ConnectNodes(carol, alice)

	// Next, we'll open two channels between Alice and Carol back to back.
	var chanPoints []*lnrpc.ChannelPoint
	numChans := 2
	chanAmt := btcutil.Amount(1000000)
	for i := 0; i < numChans; i++ {
		chanPoint := ht.OpenChannel(
			alice, carol, lntest.OpenChannelParams{Amt: chanAmt},
		)
		chanPoints = append(chanPoints, chanPoint)
	}

	// Using this helper function, we'll maintain a pointer to the latest
	// channel backup so we can compare it to the on-disk state.
	var currentBackup *lnrpc.ChanBackupSnapshot
	assertBackupNtfns := func(numNtfns int) {
		for i := 0; i < numNtfns; i++ {
			select {
			case err := <-streamErr:
				require.Failf(ht, "stream err",
					"error with backup stream: %v", err)

			case currentBackup = <-backupUpdates:

			case <-time.After(time.Second * 5):
				require.Failf(ht, "timeout", "didn't "+
					"receive channel backup "+
					"notification %v", i+1)
			}
		}
	}

	// assertBackupFileState is a helper function that we'll use to
	// compare the on-disk backup file to our currentBackup pointer above.
	assertBackupFileState := func() {
		err := wait.NoError(func() error {
			packedBackup, err := os.ReadFile(backupFilePath)
			if err != nil {
				return fmt.Errorf("unable to read backup "+
					"file: %v", err)
			}

			// As each backup file will be encrypted with a fresh
			// nonce, we can't compare them directly, so instead
			// we'll compare the length, which is a proxy for the
			// number of channels that the multi-backup contains.
			backup := currentBackup.MultiChanBackup.MultiChanBackup
			if len(backup) != len(packedBackup) {
				return fmt.Errorf("backup files don't match: "+
					"expected %x got %x", backup,
					packedBackup)
			}

			// Additionally, we'll assert that both backups
			// returned are valid.
			for _, backup := range [][]byte{backup, packedBackup} {
				snapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: &lnrpc.MultiChanBackup{
						MultiChanBackup: backup,
					},
				}
				carol.RPC.VerifyChanBackup(snapshot)
			}

			return nil
		}, defaultTimeout)
		require.NoError(ht, err, "timeout while checking "+
			"backup state: %v", err)
	}

	// As these two channels were just opened, we should've gotten both
	// the pending and the open notification for each channel backup.
	assertBackupNtfns(2 * 2)

	// The on-disk file should also exactly match the latest backup that
	// we have.
	assertBackupFileState()

	// Next, we'll close the channels one by one. After each channel
	// closure, we should get a notification, and the on-disk state should
	// match this state as well.
	for i := 0; i < numChans; i++ {
		// To ensure force closes also trigger an update, we'll force
		// close half of the channels.
		forceClose := i%2 == 0

		chanPoint := chanPoints[i]

		// If we force closed the channel, then we'll mine enough
		// blocks to ensure all outputs have been swept.
		if forceClose {
			ht.ForceCloseChannel(alice, chanPoint)

			// A local force closed channel will trigger a
			// notification once the commitment TX confirms on
			// chain. But that won't remove the channel from the
			// backup just yet, that will only happen once the
			// time locked contract was fully resolved on chain.
			assertBackupNtfns(1)

			// Now that the channel's been fully resolved, we
			// expect another notification.
			assertBackupNtfns(1)
			assertBackupFileState()
		} else {
			ht.CloseChannel(alice, chanPoint)

			// We should get a single notification after closing,
			// and the on-disk state should match this latest
			// notification.
			assertBackupNtfns(1)
			assertBackupFileState()
		}
	}
}
// testExportChannelBackup tests that we're able to properly export either a
// targeted channel's backup, or export backups of all the currently open
// channels.
func testExportChannelBackup(ht *lntest.HarnessTest) {
	// First, we'll create our primary test node: Carol. We'll use Carol
	// to open channels and also export backups that we'll examine
	// throughout the test.
	carol := ht.NewNode("carol", nil)

	// With Carol up, we'll now connect her to Alice, and open a channel
	// between them.
	alice := ht.Alice
	ht.ConnectNodes(carol, alice)

	// Next, we'll open two channels between Alice and Carol back to back.
	var chanPoints []*lnrpc.ChannelPoint
	numChans := 2
	chanAmt := btcutil.Amount(1000000)
	for i := 0; i < numChans; i++ {
		chanPoint := ht.OpenChannel(
			alice, carol, lntest.OpenChannelParams{Amt: chanAmt},
		)
		chanPoints = append(chanPoints, chanPoint)
	}

	// Now that the channels are open, we should be able to fetch the
	// backups of each of the channels.
	for _, chanPoint := range chanPoints {
		chanBackup := carol.RPC.ExportChanBackup(chanPoint)

		// The returned backup should be fully populated. Since it's
		// encrypted, we can't assert any more than that atm.
		require.NotEmptyf(ht, chanBackup.ChanBackup,
			"obtained empty backup for channel: %v", chanPoint)

		// The specified chanPoint in the response should match our
		// requested chanPoint.
		require.Equal(ht, chanBackup.ChanPoint.String(),
			chanPoint.String())
	}

	// Before we proceed, we'll make two utility methods we'll use below
	// for our primary assertions.
	assertNumSingleBackups := func(numSingles int) {
		err := wait.NoError(func() error {
			chanSnapshot := carol.RPC.ExportAllChanBackups()

			if chanSnapshot.SingleChanBackups == nil {
				return fmt.Errorf("single chan backups not " +
					"populated")
			}

			backups := chanSnapshot.SingleChanBackups.ChanBackups
			if len(backups) != numSingles {
				return fmt.Errorf("expected %v singles, "+
					"got %v", numSingles, len(backups))
			}

			return nil
		}, defaultTimeout)
		require.NoError(ht, err, "timeout checking num single backup")
	}

	assertMultiBackupFound := func() func(bool,
		map[wire.OutPoint]struct{}) {

		chanSnapshot := carol.RPC.ExportAllChanBackups()

		return func(found bool, chanPoints map[wire.OutPoint]struct{}) {
			switch {
			case found && chanSnapshot.MultiChanBackup == nil:
				require.Fail(ht, "multi-backup not present")

			// Note that we only compute the length after the nil
			// check above to avoid a nil pointer dereference.
			case !found && chanSnapshot.MultiChanBackup != nil &&
				len(chanSnapshot.MultiChanBackup.
					MultiChanBackup) !=
					chanbackup.NilMultiSizePacked:

				require.Fail(ht, "found multi-backup when "+
					"none should be found")
			}

			if !found {
				return
			}

			backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints
			require.Len(ht, backedUpChans, len(chanPoints))

			for _, chanPoint := range backedUpChans {
				wp := ht.OutPointFromChannelPoint(chanPoint)
				_, ok := chanPoints[wp]
				require.True(ht, ok, "unexpected "+
					"backup: %v", wp)
			}
		}
	}

	chans := make(map[wire.OutPoint]struct{})
	for _, chanPoint := range chanPoints {
		chans[ht.OutPointFromChannelPoint(chanPoint)] = struct{}{}
	}

	// We should have exactly two single channel backups contained, and
	// we should also have a multi-channel backup.
	assertNumSingleBackups(2)
	assertMultiBackupFound()(true, chans)

	// We'll now close each channel one by one. After we close a channel,
	// we shouldn't be able to find that channel as a backup still. We
	// should also have one less single written to disk.
	for i, chanPoint := range chanPoints {
		ht.CloseChannel(alice, chanPoint)

		assertNumSingleBackups(len(chanPoints) - i - 1)

		delete(chans, ht.OutPointFromChannelPoint(chanPoint))
		assertMultiBackupFound()(true, chans)
	}

	// At this point we shouldn't have any single or multi-chan backups
	// at all.
	assertNumSingleBackups(0)
	assertMultiBackupFound()(false, nil)
}
// testDataLossProtection tests that if one of the nodes in a channel
// relationship lost state, they will detect this during channel sync, and
// the up-to-date party will force close the channel, giving the outdated
// party the opportunity to sweep its output.
func testDataLossProtection(ht *lntest.HarnessTest) {
	const (
		chanAmt     = funding.MaxBtcFundingAmount
		paymentAmt  = 10000
		numInvoices = 6
	)

	// Carol will be the up-to-date party. We set --nolisten to ensure
	// Dave won't be able to connect to her and trigger the channel data
	// protection logic automatically. We also can't have Carol
	// automatically re-connect too early, otherwise DLP would be
	// initiated at the wrong moment.
	carol := ht.NewNode("Carol", []string{"--nolisten", "--minbackoff=1h"})

	// Dave will be the party losing his state.
	dave := ht.NewNode("Dave", nil)

	// Before we make a channel, we'll load up Carol with some coins sent
	// directly from the miner.
	ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)

	// timeTravelDave is a method that will make Carol open a channel to
	// Dave, settle a series of payments, then revert Dave back to the
	// state before the payments happened. When this method returns, Dave
	// will be unaware of the new state updates. The returned function can
	// be used to restart Dave in this state.
	timeTravelDave := func() (func() error, *lnrpc.ChannelPoint, int64) {
		// We must let the nodes communicate before they are able to
		// open a channel, so we connect them.
		ht.EnsureConnected(carol, dave)

		// We'll first open up a channel between them with a 0.5 BTC
		// value.
		chanPoint := ht.OpenChannel(
			carol, dave, lntest.OpenChannelParams{
				Amt: chanAmt,
			},
		)

		// With the channel open, we'll create a few invoices for the
		// node that Carol will pay to in order to advance the state
		// of the channel.
		// TODO(halseth): have dangling HTLCs on the commitment, able
		// to retrieve funds?
		payReqs, _, _ := ht.CreatePayReqs(dave, paymentAmt, numInvoices)

		// Send payments from Carol using 3 of the payment hashes
		// generated above.
		ht.CompletePaymentRequests(carol, payReqs[:numInvoices/2])

		// Next query for Dave's channel state. As we sent 3 payments
		// of 10k satoshis each, it should now see his balance as
		// being 30k satoshis.
		nodeChan := ht.AssertChannelLocalBalance(
			dave, chanPoint, 30_000,
		)

		// Grab the current commitment height (update number), we'll
		// later revert him to this state after additional updates to
		// revoke this state.
		stateNumPreCopy := nodeChan.NumUpdates

		// With the temporary file created, copy the current state
		// into the temporary file we created above. Later, after more
		// updates, we'll restore this state.
		ht.BackupDB(dave)

		// Reconnect the peers after the restart that was needed for
		// the db backup.
		ht.EnsureConnected(carol, dave)

		// Finally, send more payments from Carol, using the remaining
		// payment hashes.
		ht.CompletePaymentRequests(carol, payReqs[numInvoices/2:])

		// TODO(yy): remove the sleep once the following bug is fixed.
		//
		// While the payment is reported as settled, the commitment
		// dance may not be finished, which leaves several HTLCs in
		// the commitment. Later on, when Carol force closes this
		// channel, she would have HTLCs there and the test won't
		// pass.
		time.Sleep(2 * time.Second)

		// Now we shut down Dave, copying over its temporary database
		// state which has the *prior* channel state over his current
		// most up-to-date state. With this, we essentially force Dave
		// to travel back in time within the channel's history.
		ht.RestartNodeAndRestoreDB(dave)

		// Make sure the channel is still there from the PoV of Dave.
		ht.AssertNodeNumChannels(dave, 1)

		// Now query for the channel state, it should show that it's
		// at a state number in the past, not the *latest* state.
		ht.AssertChannelNumUpdates(dave, stateNumPreCopy, chanPoint)

		balResp := dave.RPC.WalletBalance()
		restart := ht.SuspendNode(dave)

		return restart, chanPoint, balResp.ConfirmedBalance
	}

	// Reset Dave to a state where he has an outdated channel state.
	restartDave, _, daveStartingBalance := timeTravelDave()

	// We make a note of the nodes' current on-chain balances, to make
	// sure they are able to retrieve the channel funds eventually.
	carolBalResp := carol.RPC.WalletBalance()
	carolStartingBalance := carolBalResp.ConfirmedBalance

	// Restart Dave to trigger a channel resync.
	require.NoError(ht, restartDave(), "unable to restart dave")

	// Assert that once Dave comes up, they reconnect, Carol force closes
	// on chain, and both of them properly carry out the DLP protocol.
	assertDLPExecuted(
		ht, carol, carolStartingBalance, dave,
		daveStartingBalance, lnrpc.CommitmentType_STATIC_REMOTE_KEY,
	)

	// As a second part of this test, we will test the scenario where a
	// channel is closed while Dave is offline, loses his state and comes
	// back online. In this case the node should attempt to resync the
	// channel, and the peer should resend a channel sync message for the
	// closed channel, such that Dave can retrieve his funds.
	//
	// We start by letting Dave time travel back to an outdated state.
	restartDave, chanPoint2, daveStartingBalance := timeTravelDave()

	carolBalResp = carol.RPC.WalletBalance()
	carolStartingBalance = carolBalResp.ConfirmedBalance

	// Now let Carol force close the channel while Dave is offline.
	ht.ForceCloseChannel(carol, chanPoint2)

	// Make sure Carol got her balance back.
	carolBalResp = carol.RPC.WalletBalance()
	carolBalance := carolBalResp.ConfirmedBalance
	require.Greater(ht, carolBalance, carolStartingBalance,
		"expected carol to have balance increased")

	ht.AssertNodeNumChannels(carol, 0)

	// When Dave comes online, he will reconnect to Carol, try to resync
	// the channel, but it will already be closed. Carol should resend the
	// information Dave needs to sweep his funds.
	require.NoError(ht, restartDave(), "unable to restart dave")

	// Dave should have a pending sweep.
	ht.AssertNumPendingSweeps(dave, 1)

	// Mine a block to trigger the sweep.
	ht.MineBlocks(1)

	// Dave should sweep his funds.
	ht.Miner.AssertNumTxsInMempool(1)

	// Mine a block to confirm the sweep, and make sure Dave got his
	// balance back.
	ht.MineBlocksAndAssertNumTxes(1, 1)
	ht.AssertNodeNumChannels(dave, 0)

	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBalance := daveBalResp.ConfirmedBalance
		if daveBalance <= daveStartingBalance {
			return fmt.Errorf("expected dave to have balance "+
				"above %d, instead had %v",
				daveStartingBalance, daveBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "timeout while checking dave's balance")
}
// createLegacyRevocationChannel creates a single channel using the legacy
// revocation producer format by using PSBT to signal a special pending
// channel ID.
func createLegacyRevocationChannel(ht *lntest.HarnessTest,
	chanAmt, pushAmt btcutil.Amount, from, to *node.HarnessNode) {

	// We'll signal to the wallet that we also want to create a channel
	// with the legacy revocation producer format that relies on deriving
	// a private key from the key ring. This is only available during
	// itests to make sure we don't hard depend on the DerivePrivKey
	// method of the key ring. We can signal the wallet by setting a
	// custom pending channel ID. To be able to do that, we need to set a
	// funding shim, which is easiest by using PSBT funding. The ID is the
	// hex representation of the string "legacy-revocation".
	itestLegacyFormatChanID := [32]byte{
		0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x2d, 0x72, 0x65, 0x76,
		0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
	}
	shim := &lnrpc.FundingShim{
		Shim: &lnrpc.FundingShim_PsbtShim{
			PsbtShim: &lnrpc.PsbtShim{
				PendingChanId: itestLegacyFormatChanID[:],
			},
		},
	}
	openChannelReq := lntest.OpenChannelParams{
		Amt:         chanAmt,
		PushAmt:     pushAmt,
		FundingShim: shim,
	}
	chanUpdates, tempPsbt := ht.OpenChannelPsbt(from, to, openChannelReq)

	// Fund the PSBT by using the source node's wallet.
	fundReq := &walletrpc.FundPsbtRequest{
		Template: &walletrpc.FundPsbtRequest_Psbt{
			Psbt: tempPsbt,
		},
		Fees: &walletrpc.FundPsbtRequest_SatPerVbyte{
			SatPerVbyte: 2,
		},
	}
	fundResp := from.RPC.FundPsbt(fundReq)

	// We have a PSBT that has no witness data yet, which is exactly what
	// we need for the next step of verifying the PSBT with the funding
	// intents.
	msg := &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
			PsbtVerify: &lnrpc.FundingPsbtVerify{
				PendingChanId: itestLegacyFormatChanID[:],
				FundedPsbt:    fundResp.FundedPsbt,
			},
		},
	}
	from.RPC.FundingStateStep(msg)

	// Now we'll ask the source node's wallet to sign the PSBT so we can
	// finish the funding flow.
	finalizeReq := &walletrpc.FinalizePsbtRequest{
		FundedPsbt: fundResp.FundedPsbt,
	}
	finalizeRes := from.RPC.FinalizePsbt(finalizeReq)

	// We've signed our PSBT now, let's pass it to the intent again.
	msg = &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
				PendingChanId: itestLegacyFormatChanID[:],
				SignedPsbt:    finalizeRes.SignedPsbt,
			},
		},
	}
	from.RPC.FundingStateStep(msg)

	// Consume the "channel pending" update. This waits until the funding
	// transaction has been fully compiled.
	updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
	upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
	require.True(ht, ok)
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: upd.ChanPending.Txid,
		},
		OutputIndex: upd.ChanPending.OutputIndex,
	}

	ht.MineBlocksAndAssertNumTxes(6, 1)
	ht.AssertTopologyChannelOpen(from, chanPoint)
	ht.AssertTopologyChannelOpen(to, chanPoint)
}
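
// For reference, the PSBT funding flow above proceeds through the following
// steps in order (summarized from the code above):
//
//	OpenChannelPsbt -> FundPsbt -> FundingStateStep(PsbtVerify) ->
//	FinalizePsbt -> FundingStateStep(PsbtFinalize) ->
//	"channel pending" update -> mine blocks -> channel open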
// chanRestoreViaRPC is a helper test method that returns a nodeRestorer
// instance which will restore the target node from a password+seed, then
// trigger an SCB restore using the RPC interface.
func chanRestoreViaRPC(ht *lntest.HarnessTest, password []byte,
	mnemonic []string, multi []byte,
	oldNode *node.HarnessNode) nodeRestorer {

	backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
		MultiChanBackup: multi,
	}

	return func() *node.HarnessNode {
		newNode := ht.RestoreNodeWithSeed(
			"dave", nil, password, mnemonic, "", revocationWindow,
			nil,
		)
		req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
		newNode.RPC.RestoreChanBackups(req)

		return newNode
	}
}
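
// Note that RestoreChanBackups may be invoked more than once with the same
// multi backup; the "restore from backup file twice" case above relies on
// this to verify that imports can be canceled and later resumed.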

// assertTimeLockSwept asserts that, once Dave's outputs mature, he claims
// them. The function advances the chain such that all the pending closing
// transactions are swept in the end.
//
// Note: this function is only used in this test file and has been made
// specifically for testChanRestoreScenario.
func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
	carolStartingBalance, daveStartingBalance int64) {

	// Carol should sweep her funds immediately, as they are not
	// timelocked.
	ht.AssertNumPendingSweeps(carol, 2)
	ht.AssertNumPendingSweeps(dave, 1)

	// We expect Carol to sweep her funds and her anchor in a single sweep
	// tx. In addition, Dave will attempt to sweep his anchor output but
	// fail due to the sweeping tx being uneconomical.
	expectedTxes := 1

	// Mine a block to trigger the sweeps.
	ht.MineBlocks(1)
	ht.Miner.AssertNumTxsInMempool(expectedTxes)

	// Carol should consider the channel pending force close (since she is
	// waiting for her sweep to confirm).
	ht.AssertNumPendingForceClose(carol, 1)

	// Dave is considering it "pending force close", as he must wait
	// before he can sweep his timelocked outputs.
	ht.AssertNumPendingForceClose(dave, 1)

	// Mine the sweep (and anchor) tx(ns).
	ht.MineBlocksAndAssertNumTxes(1, expectedTxes)

	// Now Carol should consider the channel fully closed.
	ht.AssertNumPendingForceClose(carol, 0)

	// We query Carol's balance to make sure it increased after the channel
	// closed. This checks that she was able to sweep the funds she had in
	// the channel.
	carolBalResp := carol.RPC.WalletBalance()
	carolBalance := carolBalResp.ConfirmedBalance
	require.Greater(ht, carolBalance, carolStartingBalance,
		"balance not increased")

	// After Dave's output matures, he should reclaim his funds.
	//
	// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
	// we already mined one block after the commitment was published, and
	// one block to trigger Carol's sweeps, so take that into account.
	ht.MineEmptyBlocks(1)
	ht.AssertNumPendingSweeps(dave, 2)

	// Mine a block to trigger the sweeps.
	ht.MineEmptyBlocks(1)
	daveSweep := ht.Miner.AssertNumTxsInMempool(1)[0]
	block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
	ht.Miner.AssertTxInBlock(block, daveSweep)

	// Now the channel should be fully closed also from Dave's POV.
	ht.AssertNumPendingForceClose(dave, 0)

	// Make sure Dave got his balance back.
	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBalance := daveBalResp.ConfirmedBalance
		if daveBalance <= daveStartingBalance {
			return fmt.Errorf("expected dave to have balance "+
				"above %d, instead had %v", daveStartingBalance,
				daveBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err)

	ht.AssertNodeNumChannels(dave, 0)
	ht.AssertNodeNumChannels(carol, 0)
}
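
// The polling check above is a recurring pattern in this file: wait until a
// node's confirmed balance rises above a floor. A minimal sketch of it as a
// shared helper (the name is hypothetical; it assumes the same wait/require
// helpers used throughout this file):
func assertBalanceAbove(ht *lntest.HarnessTest, hn *node.HarnessNode,
	floor int64) {

	err := wait.NoError(func() error {
		balance := hn.RPC.WalletBalance().ConfirmedBalance
		if balance <= floor {
			return fmt.Errorf("expected balance above %d, "+
				"instead had %v", floor, balance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "balance never rose above floor")
}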

// assertDLPExecuted asserts that Dave is a node that has recovered his state
// from scratch. Carol should then force close on chain, with Dave sweeping
// his funds immediately, and Carol sweeping her funds after her CSV delay is
// up.
func assertDLPExecuted(ht *lntest.HarnessTest,
	carol *node.HarnessNode, carolStartingBalance int64,
	dave *node.HarnessNode, daveStartingBalance int64,
	commitType lnrpc.CommitmentType) {

	ht.Helper()

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed.
	ht.SetFeeEstimate(30000)

	// We disabled auto-reconnect for some tests to avoid timing issues.
	// To make sure the nodes are initiating DLP now, we have to manually
	// re-connect them.
	ht.EnsureConnected(carol, dave)

	// Upon reconnection, the nodes should detect that Dave is out of
	// sync. Carol should force close the channel using her latest
	// commitment.
	ht.Miner.AssertNumTxsInMempool(1)

	// Channel should be in the state "waiting close" for Carol since she
	// broadcasted the force close tx.
	ht.AssertNumWaitingClose(carol, 1)

	// Dave should also consider the channel "waiting close", as he
	// noticed the channel was out of sync, and is now waiting for a force
	// close to hit the chain.
	ht.AssertNumWaitingClose(dave, 1)

	// Restart Dave to make sure he is able to sweep the funds after
	// shutdown.
	ht.RestartNode(dave)

	// Generate a single block, which should confirm the closing tx.
	ht.MineBlocksAndAssertNumTxes(1, 1)

	blocksMined := uint32(1)

	// Dave should consider the channel pending force close (since he is
	// waiting for his sweep to confirm).
	ht.AssertNumPendingForceClose(dave, 1)

	// Carol is considering it "pending force close", as she must wait
	// before she can sweep her outputs.
	ht.AssertNumPendingForceClose(carol, 1)

	if commitType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		// Dave should sweep his anchor only, since he still has the
		// lease CLTV constraint on his commitment output. We'd also
		// see Carol's anchor sweep here.
		//
		// Both Dave and Carol should have an anchor sweep request.
		// Note that they cannot sweep them as these anchor sweepings
		// are uneconomical.
		ht.AssertNumPendingSweeps(dave, 1)
		ht.AssertNumPendingSweeps(carol, 1)

		// After Carol's output matures, she should also reclaim her
		// funds.
		//
		// The commit sweep resolver publishes the sweep tx at
		// defaultCSV-1 and we already mined one block after the
		// commitment was published, so take that into account.
		ht.MineEmptyBlocks(int(defaultCSV - blocksMined))

		// Carol should have two sweep requests - one for her commit
		// output and the other for her anchor.
		ht.AssertNumPendingSweeps(carol, 2)

		// Mine a block to trigger the sweep.
		ht.MineEmptyBlocks(1)
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// Now the channel should be fully closed also from Carol's
		// POV.
		ht.AssertNumPendingForceClose(carol, 0)

		// We'll now mine the remaining blocks to prompt Dave to sweep
		// his CLTV-constrained output.
		resp := dave.RPC.PendingChannels()
		blocksTilMaturity :=
			resp.PendingForceClosingChannels[0].BlocksTilMaturity
		require.Positive(ht, blocksTilMaturity)

		ht.MineEmptyBlocks(int(blocksTilMaturity))

		// Dave should have two sweep requests - one for his commit
		// output and the other for his anchor.
		ht.AssertNumPendingSweeps(dave, 2)

		// Mine a block to trigger the sweep.
		ht.MineEmptyBlocks(1)
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// Now Dave should consider the channel fully closed.
		ht.AssertNumPendingForceClose(dave, 0)
	} else {
		// Dave should sweep his funds immediately, as they are not
		// timelocked. We also expect Carol and Dave to sweep their
		// anchors if it's an anchor channel.
		if lntest.CommitTypeHasAnchors(commitType) {
			ht.AssertNumPendingSweeps(carol, 1)
			ht.AssertNumPendingSweeps(dave, 2)
		} else {
			ht.AssertNumPendingSweeps(dave, 1)
		}

		// Mine one block to trigger the sweeper to sweep.
		ht.MineEmptyBlocks(1)
		blocksMined++

		// Expect one tx - the commitment sweep from Dave. For anchor
		// channels, we expect the two anchor sweeping txns to fail as
		// they are uneconomical.
		ht.MineBlocksAndAssertNumTxes(1, 1)
		blocksMined++

		// Now Dave should consider the channel fully closed.
		ht.AssertNumPendingForceClose(dave, 0)

		// After Carol's output matures, she should also reclaim her
		// funds.
		//
		// The commit sweep resolver publishes the sweep tx at
		// defaultCSV-1 and we already have blocks mined after the
		// commitment was published, so take that into account.
		ht.MineEmptyBlocks(int(defaultCSV - blocksMined))

		// Mine one block to trigger the sweeper to sweep.
		ht.MineEmptyBlocks(1)

		// Carol should have two pending sweeps:
		// 1. her commit output.
		// 2. her anchor output, if this is an anchor channel.
		if lntest.CommitTypeHasAnchors(commitType) {
			ht.AssertNumPendingSweeps(carol, 2)
		} else {
			ht.AssertNumPendingSweeps(carol, 1)
		}

		// Assert the sweeping tx is mined.
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// Now the channel should be fully closed also from Carol's
		// POV.
		ht.AssertNumPendingForceClose(carol, 0)
	}

	// We query Dave's balance to make sure it increased after the channel
	// closed. This checks that he was able to sweep the funds he had in
	// the channel.
	daveBalResp := dave.RPC.WalletBalance()
	daveBalance := daveBalResp.ConfirmedBalance
	require.Greater(ht, daveBalance, daveStartingBalance,
		"balance not increased")

	// Make sure Carol got her balance back.
	err := wait.NoError(func() error {
		carolBalResp := carol.RPC.WalletBalance()
		carolBalance := carolBalResp.ConfirmedBalance

		// With Neutrino we don't get a backend error when trying to
		// publish an orphan TX (which is what the sweep for the
		// remote anchor is since the remote commitment TX was not
		// broadcast). That's why the wallet still sees that as
		// unconfirmed and we need to count the total balance instead
		// of the confirmed.
		if ht.IsNeutrinoBackend() {
			carolBalance = carolBalResp.TotalBalance
		}

		if carolBalance <= carolStartingBalance {
			return fmt.Errorf("expected carol to have balance "+
				"above %d, instead had %v",
				carolStartingBalance, carolBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "timeout while checking carol's balance")

	ht.AssertNodeNumChannels(dave, 0)
	ht.AssertNodeNumChannels(carol, 0)
}
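
// The CLTV branch above reads BlocksTilMaturity from PendingChannels and
// mines that many empty blocks before the constrained output can be swept. A
// minimal sketch of that step as a standalone helper (the name is
// hypothetical; it reuses only calls already present in this file):
func mineUntilMaturity(ht *lntest.HarnessTest, hn *node.HarnessNode) {
	resp := hn.RPC.PendingChannels()
	blocksTilMaturity :=
		resp.PendingForceClosingChannels[0].BlocksTilMaturity
	require.Positive(ht, blocksTilMaturity)

	ht.MineEmptyBlocks(int(blocksTilMaturity))
}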