// Package mega provides an interface to the Mega
// object storage system.
package mega

/*
Open questions
* Does mega support a content hash - what exactly are the mega hashes?
* Can mega support setting modification times?

Improvements:
* Uploads could be done in parallel
* Downloads would be more efficient done in one go
* Uploads would be more efficient with bigger chunks
* Looks like mega can support server-side copy, but it isn't implemented in go-mega
* Upload can set modtime... - set as int64_t - can set ctime and mtime?
*/

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
	mega "github.com/t3rm1n4l/go-mega"
)

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	eventWaitTime = 500 * time.Millisecond
	decayConstant = 2 // bigger for slower decay, exponential
)

var (
	megaCacheMu sync.Mutex                // mutex for the below
	megaCache   = map[string]*mega.Mega{} // cache of logged-in Mega sessions, keyed by user
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mega",
		Description: "Mega",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:      "user",
			Help:      "User name.",
			Required:  true,
			Sensitive: true,
		}, {
			Name:       "pass",
			Help:       "Password.",
			Required:   true,
			IsPassword: true,
		}, {
			Name: "debug",
			Help: `Output more debug from Mega.
If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "hard_delete",
			Help: `Delete files permanently rather than putting them into the trash.
Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "use_https",
			Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeInvalidUtf8),
		}},
	})
}
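
// The options registered above correspond to a remote definition in
// rclone.conf along these lines (illustrative values only - the password
// is stored obscured by rclone, and the advanced options are written only
// if set):
//
//	[remote]
//	type = mega
//	user = you@example.com
//	pass = <obscured password>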

// Options defines the configuration for this backend
type Options struct {
	User       string               `config:"user"`
	Pass       string               `config:"pass"`
	Debug      bool                 `config:"debug"`
	HardDelete bool                 `config:"hard_delete"`
	UseHTTPS   bool                 `config:"use_https"`
	Enc        encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote mega
type Fs struct {
	name       string       // name of this remote
	root       string       // the path we are working on
	opt        Options      // parsed config options
	features   *fs.Features // optional features
	srv        *mega.Mega   // the connection to the server
	pacer      *fs.Pacer    // pacer for API calls
	rootNodeMu sync.Mutex   // mutex for _rootNode
	_rootNode  *mega.Node   // root node - call findRoot to use this
	mkdirMu    sync.Mutex   // used to serialize calls to mkdir / rmdir
}

// Object describes a mega object
//
// Will definitely have info but maybe not meta.
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
// In this case we just store a pointer to the object.
type Object struct {
	fs     *Fs        // what this object is part of
	remote string     // The remote path
	info   *mega.Node // pointer to the mega node
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("mega root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a mega 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// Let the mega library handle the low level retries
	return false, err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
	rootNode, err := f.findRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	return f.findObject(rootNode, remote)
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.Pass != "" {
		var err error
		opt.Pass, err = obscure.Reveal(opt.Pass)
		if err != nil {
			return nil, fmt.Errorf("couldn't decrypt password: %w", err)
		}
	}
	ci := fs.GetConfig(ctx)

	// cache *mega.Mega on username so we can reuse and share
	// them between remotes. They are expensive to make as they
	// contain all the objects and sharing the objects makes the
	// move code easier as we don't have to worry about mixing
	// them up between different remotes.
	megaCacheMu.Lock()
	defer megaCacheMu.Unlock()
	srv := megaCache[opt.User]
	if srv == nil {
		srv = mega.New().SetClient(fshttp.NewClient(ctx))
		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
		srv.SetHTTPS(opt.UseHTTPS)
		srv.SetLogger(func(format string, v ...interface{}) {
			fs.Infof("*go-mega*", format, v...)
		})
		if opt.Debug {
			srv.SetDebugger(func(format string, v ...interface{}) {
				fs.Debugf("*go-mega*", format, v...)
			})
		}

		err := srv.Login(opt.User, opt.Pass)
		if err != nil {
			return nil, fmt.Errorf("couldn't login: %w", err)
		}
		megaCache[opt.User] = srv
	}

	root = parsePath(root)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   srv,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	// Find the root node and check if it is a file or not
	_, err = f.findRoot(ctx, false)
	switch err {
	case nil:
		// root node found and is a directory
	case fs.ErrorDirNotFound:
		// root node not found, so can't be a file
	case fs.ErrorIsFile:
		// root node is a file so point to parent directory
		root = path.Dir(root)
		if root == "." {
			root = ""
		}
		f.root = root
		return f, err
	}
	return f, nil
}

// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend-specific encoding
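//
// For example "a/b/c" is returned as ["a", "b", "c"] (after encoding), while
// "", "." and "/" all return nil.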
func (f *Fs) splitNodePath(nodePath string) (parts []string) {
	nodePath = path.Clean(nodePath)
	if nodePath == "." || nodePath == "/" {
		return nil
	}
	nodePath = f.opt.Enc.FromStandardPath(nodePath)
	return strings.Split(nodePath, "/")
}

// findNode looks up the node for the path of the name given from the root given
//
// It returns mega.ENOENT if it wasn't found
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
	parts := f.splitNodePath(nodePath)
	if parts == nil {
		return rootNode, nil
	}
	nodes, err := f.srv.FS.PathLookup(rootNode, parts)
	if err != nil {
		return nil, err
	}
	return nodes[len(nodes)-1], nil
}

// findDir finds the directory rooted from the node passed in
func (f *Fs) findDir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
	node, err = f.findNode(rootNode, dir)
	if err == mega.ENOENT {
		return nil, fs.ErrorDirNotFound
	} else if err == nil && node.GetType() == mega.FILE {
		return nil, fs.ErrorIsFile
	}
	return node, err
}

// findObject looks up the node for the object of the name given
func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err error) {
	node, err = f.findNode(rootNode, file)
	if err == mega.ENOENT {
		return nil, fs.ErrorObjectNotFound
	} else if err == nil && node.GetType() != mega.FILE {
		return nil, fs.ErrorIsDir // all other node types are directories
	}
	return node, err
}

// lookupDir looks up the node for the directory of the name given
//
// It does not create the directory if it isn't found.
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
	rootNode, err := f.findRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	return f.findDir(rootNode, dir)
}

// lookupParentDir finds the parent node for the remote passed in
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
	parent, leaf := path.Split(remote)
	dirNode, err = f.lookupDir(ctx, parent)
	return dirNode, leaf, err
}

// mkdir makes the directory and any parent directories for the
// directory of the name given
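//
// For example, creating "a/b/c" when only "a" already exists makes the
// lookup loop below stop at "a" with i == 2, after which the create loop
// makes "b" inside "a" and then "c" inside "b".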
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	parts := f.splitNodePath(dir)
	if parts == nil {
		return rootNode, nil
	}
	var i int
	// look up until we find a directory which exists
	for i = 0; i <= len(parts); i++ {
		var nodes []*mega.Node
		nodes, err = f.srv.FS.PathLookup(rootNode, parts[:len(parts)-i])
		if err == nil {
			if len(nodes) == 0 {
				node = rootNode
			} else {
				node = nodes[len(nodes)-1]
			}
			break
		}
		if err != mega.ENOENT {
			return nil, fmt.Errorf("mkdir lookup failed: %w", err)
		}
	}
	if err != nil {
		return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
	}
	// i is number of directories to create (may be 0)
	// node is directory to create them from
	for _, name := range parts[len(parts)-i:] {
		// create directory called name in node
		err = f.pacer.Call(func() (bool, error) {
			node, err = f.srv.CreateDir(name, node)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("mkdir create node failed: %w", err)
		}
	}
	return node, nil
}

// mkdirParent creates the parent directory of remote
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
	rootNode, err := f.findRoot(ctx, true)
	if err != nil {
		return nil, "", err
	}
	parent, leaf := path.Split(remote)
	dirNode, err = f.mkdir(ctx, rootNode, parent)
	return dirNode, leaf, err
}

// findRoot looks up the root directory node and returns it.
//
// if create is true it tries to create the root directory if not found
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
	f.rootNodeMu.Lock()
	defer f.rootNodeMu.Unlock()

	// Check if we haven't found it already
	if f._rootNode != nil {
		return f._rootNode, nil
	}

	// Check for preexisting root
	absRoot := f.srv.FS.GetRoot()
	node, err := f.findDir(absRoot, f.root)
	//log.Printf("findRoot findDir %p %v", node, err)
	if err == nil {
		f._rootNode = node
		return node, nil
	}
	if !create || err != fs.ErrorDirNotFound {
		return nil, err
	}

	// not found so create the root directory
	f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
	return f._rootNode, err
}

// clearRoot unsets the root directory
func (f *Fs) clearRoot() {
	f.rootNodeMu.Lock()
	f._rootNode = nil
	f.rootNodeMu.Unlock()
	//log.Printf("cleared root directory")
}

// CleanUp deletes all files currently in trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	trash := f.srv.FS.GetTrash()
	items := []*mega.Node{}
	_, err = f.list(ctx, trash, func(item *mega.Node) bool {
		items = append(items, item)
		return false
	})
	if err != nil {
		return fmt.Errorf("CleanUp failed to list items in trash: %w", err)
	}
	fs.Infof(f, "Deleting %d items from the trash", len(items))
	errors := 0
	// similar to f.deleteNode(ctx, item) but always with hard delete
	for _, item := range items {
		fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
		deleteErr := f.pacer.Call(func() (bool, error) {
			err := f.srv.Delete(item, true)
			return shouldRetry(ctx, err)
		})
		if deleteErr != nil {
			err = deleteErr
			errors++
		}
	}
	fs.Infof(f, "Deleted %d items from the trash with %d errors", len(items), errors)
	return err
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is the user function supplied to list to process each item found
//
// It should return true to finish processing.
type listFn func(*mega.Node) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
	nodes, err := f.srv.FS.GetChildren(dir)
	if err != nil {
		return false, fmt.Errorf("list failed: %w", err)
	}
	for _, item := range nodes {
		if fn(item) {
			found = true
			break
		}
	}
	return
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	dirNode, err := f.lookupDir(ctx, dir)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
		remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName()))
		switch info.GetType() {
		case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
			d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
			entries = append(entries, d)
		case mega.FILE:
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}

// createObject creates a half finished Object from the parameters
// passed in - setMetaData must be called on it before use
//
// Returns the object, dirNode, leaf and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
	dirNode, leaf, err = f.mkdirParent(ctx, remote)
	if err != nil {
		return nil, nil, leaf, err
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, dirNode, leaf, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned.
//
// If an object already exists at the remote it is updated, otherwise
// PutUnchecked is used to create a new one.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src)
	default:
		return nil, err
	}
}

// PutUnchecked uploads the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned.
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)

	o, _, _, err := f.createObject(ctx, remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(ctx, in, src, options...)
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	rootNode, err := f.findRoot(ctx, true)
	if err != nil {
		return err
	}
	_, err = f.mkdir(ctx, rootNode, dir)
	if err != nil {
		return fmt.Errorf("Mkdir failed: %w", err)
	}
	return nil
}

// deleteNode removes a file or directory, observing the hard_delete setting
func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
	err = f.pacer.Call(func() (bool, error) {
		err = f.srv.Delete(node, f.opt.HardDelete)
		return shouldRetry(ctx, err)
	})
	return err
}

// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	rootNode, err := f.findRoot(ctx, false)
	if err != nil {
		return err
	}
	dirNode, err := f.findDir(rootNode, dir)
	if err != nil {
		return err
	}
	if check {
		children, err := f.srv.FS.GetChildren(dirNode)
		if err != nil {
			return fmt.Errorf("purgeCheck GetChildren failed: %w", err)
		}
		if len(children) > 0 {
			return fs.ErrorDirectoryNotEmpty
		}
	}

	waitEvent := f.srv.WaitEventsStart()

	err = f.deleteNode(ctx, dirNode)
	if err != nil {
		return fmt.Errorf("delete directory node failed: %w", err)
	}

	// Remove the root node if we just deleted it
	if dirNode == rootNode {
		f.clearRoot()
	}

	f.srv.WaitEvents(waitEvent, eventWaitTime)
	return nil
}

// Rmdir removes the directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updated
func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
	var (
		dstFs                  = f
		srcDirNode, dstDirNode *mega.Node
		srcParent, dstParent   string
		srcLeaf, dstLeaf       string
	)

	if dstRemote != "" {
		// lookup or create the destination parent directory
		dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
	} else {
		// find or create the parent of the root directory
		absRoot := dstFs.srv.FS.GetRoot()
		dstParent, dstLeaf = path.Split(dstFs.root)
		dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
	}
	if err != nil {
		return fmt.Errorf("server-side move failed to make dst parent dir: %w", err)
	}

	if srcRemote != "" {
		// lookup the existing parent directory
		srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
	} else {
		// lookup the existing root parent
		absRoot := srcFs.srv.FS.GetRoot()
		srcParent, srcLeaf = path.Split(srcFs.root)
		srcDirNode, err = f.findDir(absRoot, srcParent)
	}
	if err != nil {
		return fmt.Errorf("server-side move failed to lookup src parent dir: %w", err)
	}

	// move the object into its new directory if required
	if srcDirNode != dstDirNode && srcDirNode.GetHash() != dstDirNode.GetHash() {
		//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Move(info, dstDirNode)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return fmt.Errorf("server-side move failed: %w", err)
		}
	}

	waitEvent := f.srv.WaitEventsStart()

	// rename the object if required
	if srcLeaf != dstLeaf {
		//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return fmt.Errorf("server-side rename failed: %w", err)
		}
	}

	f.srv.WaitEvents(waitEvent, eventWaitTime)
	return nil
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstFs := f
	//log.Printf("Move %q -> %q", src.Remote(), remote)
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Do the move
	err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
	if err != nil {
		return nil, err
	}

	// Create a destination object
	dstObj := &Object{
		fs:     dstFs,
		remote: remote,
		info:   srcObj.info,
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	dstFs := f
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// find the source
	info, err := srcFs.lookupDir(ctx, srcRemote)
	if err != nil {
		return err
	}

	// check the destination doesn't exist
	_, err = dstFs.lookupDir(ctx, dstRemote)
	if err == nil {
		return fs.ErrorDirExists
	} else if err != fs.ErrorDirNotFound {
		return fmt.Errorf("DirMove error while checking dest directory: %w", err)
	}

	// Do the move
	err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
	if err != nil {
		return err
	}

	// Clear src if it was the root
	if srcRemote == "" {
		srcFs.clearRoot()
	}
	return nil
}

// DirCacheFlush is an optional interface to flush the internal directory cache
func (f *Fs) DirCacheFlush() {
	// f.dirCache.ResetRoot()
	// FIXME Flush the mega somehow?
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	root, err := f.findRoot(ctx, false)
	if err != nil {
		return "", fmt.Errorf("PublicLink failed to find root node: %w", err)
	}
	node, err := f.findNode(root, remote)
	if err != nil {
		return "", fmt.Errorf("PublicLink failed to find path: %w", err)
	}
	link, err = f.srv.Link(node, true)
	if err != nil {
		return "", fmt.Errorf("PublicLink failed to create link: %w", err)
	}
	return link, nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	// find dst directory
	dstDir := dirs[0]
	dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
	if dstDirNode == nil {
		return fmt.Errorf("MergeDirs failed to find node for: %v", dstDir)
	}
	for _, srcDir := range dirs[1:] {
		// find src directory
		srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
		if srcDirNode == nil {
			return fmt.Errorf("MergeDirs failed to find node for: %v", srcDir)
		}

		// list the objects
		infos := []*mega.Node{}
		_, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
			err = f.pacer.Call(func() (bool, error) {
				err = f.srv.Move(info, dstDirNode)
				return shouldRetry(ctx, err)
			})
			if err != nil {
				return fmt.Errorf("MergeDirs move failed on %q in %v: %w", f.opt.Enc.ToStandardName(info.GetName()), srcDir, err)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.deleteNode(ctx, srcDirNode)
		if err != nil {
			return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
		}
	}
	return nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var q mega.QuotaResp
	var err error
	err = f.pacer.Call(func() (bool, error) {
		q, err = f.srv.GetQuota()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
	}
	usage := &fs.Usage{
		Total: fs.NewUsageValue(int64(q.Mstrg)),           // quota of bytes that can be used
		Used:  fs.NewUsageValue(int64(q.Cstrg)),           // bytes in use
		Free:  fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the hashes of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.info.GetSize()
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *mega.Node) (err error) {
	if info.GetType() != mega.FILE {
		return fs.ErrorIsDir // all other node types are directories
	}
	o.info = info
	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.info != nil {
		return nil
	}
	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			err = fs.ErrorObjectNotFound
		}
		return err
	}
	return o.setMetaData(info)
}

// ModTime returns the modification time of the object
//
// This backend can't set modification times (see SetModTime and
// Precision), so this returns the timestamp stored on the mega node.
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.info.GetTimeStamp()
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// openObject represents a download in progress
type openObject struct {
	ctx    context.Context
	mu     sync.Mutex     // protects the fields below
	o      *Object        // object being downloaded
	d      *mega.Download // download in progress
	id     int            // next chunk to fetch
	skip   int64          // bytes to discard from the start of the download
	chunk  []byte         // unread part of the current chunk
	closed bool           // set when the download has been finished
}

// get the next chunk
func (oo *openObject) getChunk(ctx context.Context) (err error) {
	if oo.id >= oo.d.Chunks() {
		return io.EOF
	}
	var chunk []byte
	err = oo.o.fs.pacer.Call(func() (bool, error) {
		chunk, err = oo.d.DownloadChunk(oo.id)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	oo.id++
	oo.chunk = chunk
	return nil
}

// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, errors.New("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		_, size, err := oo.d.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk(oo.ctx)
		if err != nil {
			return 0, err
		}
		if oo.skip > 0 {
			oo.chunk = oo.chunk[oo.skip:]
			oo.skip = 0
		}
	}
	n = copy(p, oo.chunk)
	oo.chunk = oo.chunk[n:]
	return n, nil
}

// Close closes the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return nil
	}
	err = oo.o.fs.pacer.Call(func() (bool, error) {
		err = oo.d.Finish()
		return shouldRetry(oo.ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to finish download: %w", err)
	}
	oo.closed = true
	return nil
}

// Open an object for read
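//
// A caller-side sketch (the ctx and dst names here are assumptions, not
// part of this backend): reading the first 1 KiB of an object might look
// like
//
//	in, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//	if err != nil {
//		return err
//	}
//	defer in.Close() // Close finishes the download and reports MAC errors
//	_, err = io.Copy(dst, in) // dst is any io.Writer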
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
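
	// At this point offset is the first byte of the object we need and limit
	// is the maximum number of bytes to return (-1 means read to the end).
	// offset is honoured by skipping data in openObject.Read, and limit by
	// the LimitedReadCloser wrapped around the return value.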
	var d *mega.Download
	err = o.fs.pacer.Call(func() (bool, error) {
		d, err = o.fs.srv.NewDownload(o.info)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("open download file failed: %w", err)
	}

	oo := &openObject{
		ctx:  ctx,
		o:    o,
		d:    d,
		skip: offset,
	}

	return readers.NewLimitedReadCloser(oo, limit), nil
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If the object already exists then it is updated rather than a new one
// being created.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	if size < 0 {
		return errors.New("mega backend can't upload a file of unknown length")
	}
	//modTime := src.ModTime(ctx)
	remote := o.Remote()

	// Create the parent directory
	dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
	if err != nil {
		return fmt.Errorf("update make parent dir failed: %w", err)
	}

	var u *mega.Upload
	err = o.fs.pacer.Call(func() (bool, error) {
		u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("upload file failed to create session: %w", err)
	}

	// Upload the chunks
	// FIXME do this in parallel
	for id := 0; id < u.Chunks(); id++ {
		_, chunkSize, err := u.ChunkLocation(id)
		if err != nil {
			return fmt.Errorf("upload failed to read chunk location: %w", err)
		}
		chunk := make([]byte, chunkSize)
		_, err = io.ReadFull(in, chunk)
		if err != nil {
			return fmt.Errorf("upload failed to read data: %w", err)
		}

		err = o.fs.pacer.Call(func() (bool, error) {
			err = u.UploadChunk(id, chunk)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return fmt.Errorf("upload file failed to upload chunk: %w", err)
		}
	}

	// Finish the upload
	var info *mega.Node
	err = o.fs.pacer.Call(func() (bool, error) {
		info, err = u.Finish()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to finish upload: %w", err)
	}

	// If the upload succeeded and the original object existed, then delete it
	if o.info != nil {
		err = o.fs.deleteNode(ctx, o.info)
		if err != nil {
			return fmt.Errorf("upload failed to remove old version: %w", err)
		}
		o.info = nil
	}

	return o.setMetaData(info)
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	err := o.fs.deleteNode(ctx, o.info)
	if err != nil {
		return fmt.Errorf("Remove object failed: %w", err)
	}
	return nil
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.info.GetHash()
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)