// Package fstest provides utilities for testing the Fs
package fstest

// FIXME put name of test FS in Fs structure

import (
	"bytes"
	"compress/gzip"
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/text/unicode/norm"
)

// Globals
var (
	RemoteName      = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
	Verbose         = flag.Bool("verbose", false, "Set to enable logging")
	DumpHeaders     = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
	DumpBodies      = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
	Individual      = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
	LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
	UseListR        = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
	// SizeLimit limits the maximum test file size and signals tests to skip inappropriate runs
	SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size")
	// ListRetries is the number of times to retry a listing to overcome eventual consistency
	ListRetries = flag.Int("list-retries", 3, "Number of times to retry listing")
	// MatchTestRemote matches the remote names used for testing
	MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
)

// Initialise rclone for testing
func Initialise() {
	ctx := context.Background()
	ci := fs.GetConfig(ctx)
	// Never ask for passwords, fail instead.
	// If your local config is encrypted set environment variable
	// "RCLONE_CONFIG_PASS=hunter2" (or your password)
	ci.AskPassword = false
	// Override the config file from the environment - we don't
	// parse the flags any more so this doesn't happen
	// automatically
	if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
		_ = config.SetConfigPath(envConfig)
	}
	configfile.Install()
	accounting.Start(ctx)
	if *Verbose {
		ci.LogLevel = fs.LogLevelDebug
	}
	if *DumpHeaders {
		ci.Dump |= fs.DumpHeaders
	}
	if *DumpBodies {
		ci.Dump |= fs.DumpBodies
	}
	ci.LowLevelRetries = *LowLevelRetries
	ci.UseListR = *UseListR
}
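
// Illustrative sketch (not part of the original file): Initialise is
// typically called once from a test binary's TestMain before any tests
// that use this package run. The flag.Parse call and the TestMain shape
// below are assumptions about the caller, not requirements of Initialise.
//
//	func TestMain(m *testing.M) {
//		flag.Parse()
//		fstest.Initialise()
//		os.Exit(m.Run())
//	}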

// Item represents an item for checking
type Item struct {
	Path    string
	Hashes  map[hash.Type]string
	ModTime time.Time
	Size    int64
}

// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
	i := Item{
		Path:    Path,
		ModTime: modTime,
		Size:    int64(len(Content)),
	}
	hash := hash.NewMultiHasher()
	buf := bytes.NewBufferString(Content)
	_, err := io.Copy(hash, buf)
	if err != nil {
		log.Fatalf("Failed to create item: %v", err)
	}
	i.Hashes = hash.Sums()
	return i
}
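
// Illustrative sketch: building an Item that describes a file with known
// content and modification time, for use with the Check* helpers below.
// The file name and content here are made up for the example.
//
//	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
//	item := fstest.NewItem("hello.txt", "hello world", modTime)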

// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}
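
// Illustrative sketch: comparing two times (want and got are assumed to
// come from elsewhere in the test) with a one second precision.
//
//	dt, ok := fstest.CheckTimeEqualWithPrecision(want, got, time.Second)
//	if !ok {
//		t.Logf("times differ by %v", dt)
//	}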

// AssertTimeEqualWithPrecision checks that want is within precision
// of got, asserting that with t and logging remote
func AssertTimeEqualWithPrecision(t *testing.T, remote string, want, got time.Time, precision time.Duration) {
	dt, ok := CheckTimeEqualWithPrecision(want, got, precision)
	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (want %s vs got %s) (precision %s)", remote, dt, precision, want, got, precision))
}

// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
	AssertTimeEqualWithPrecision(t, obj.Remote(), i.ModTime, modTime, precision)
}

// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
	require.NotNil(t, obj)
	types := obj.Fs().Hashes().Array()
	for _, Hash := range types {
		// Check attributes
		sum, err := obj.Hash(context.Background(), Hash)
		require.NoError(t, err)
		assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
	}
}

// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
	i.CheckHashes(t, obj)
	assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size()))
	i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision)
}

// Normalize runs a utf8 normalization on the string if running on OS
// X. This is because OS X denormalizes file names it writes to the
// local file system.
func Normalize(name string) string {
	if runtime.GOOS == "darwin" {
		name = norm.NFC.String(name)
	}
	return name
}

// Items represents all items for checking
type Items struct {
	byName    map[string]*Item
	byNameAlt map[string]*Item
	items     []Item
}

// NewItems makes an Items
func NewItems(items []Item) *Items {
	is := &Items{
		byName:    make(map[string]*Item),
		byNameAlt: make(map[string]*Item),
		items:     items,
	}
	// Fill up byName
	for i := range items {
		is.byName[Normalize(items[i].Path)] = &items[i]
	}
	return is
}

// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
	remote := Normalize(obj.Remote())
	i, ok := is.byName[remote]
	if !ok {
		i, ok = is.byNameAlt[remote]
		assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote))
	}
	if i != nil {
		delete(is.byName, i.Path)
		i.Check(t, obj, precision)
	}
}

// Done checks all finished
func (is *Items) Done(t *testing.T) {
	if len(is.byName) != 0 {
		for name := range is.byName {
			t.Logf("Not found %q", name)
		}
	}
	assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
}
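
// Illustrative sketch of the Items workflow used by the listing checkers:
// make an Items from the expected items, Find each object returned by a
// listing, then call Done to assert nothing was missed. objs stands for a
// slice of fs.Object obtained elsewhere (an assumption of the example).
//
//	is := fstest.NewItems(items)
//	for _, obj := range objs {
//		is.Find(t, obj, precision)
//	}
//	is.Done(t)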

// makeListingFromItems returns a string representation of the items
func makeListingFromItems(items []Item) string {
	nameLengths := make([]string, len(items))
	for i, item := range items {
		remote := Normalize(item.Path)
		nameLengths[i] = fmt.Sprintf("%s (%d)", remote, item.Size)
	}
	sort.Strings(nameLengths)
	return strings.Join(nameLengths, ", ")
}

// makeListingFromObjects returns a string representation of the objects
func makeListingFromObjects(objs []fs.Object) string {
	nameLengths := make([]string, len(objs))
	for i, obj := range objs {
		nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size())
	}
	sort.Strings(nameLengths)
	return strings.Join(nameLengths, ", ")
}

// filterEmptyDirs removes from expectedDirs any directories which are
// empty (or which contain only other directories)
func filterEmptyDirs(t *testing.T, items []Item, expectedDirs []string) (newExpectedDirs []string) {
	dirs := map[string]struct{}{"": {}}
	for _, item := range items {
		base := item.Path
		for {
			base = path.Dir(base)
			if base == "." || base == "/" {
				break
			}
			dirs[base] = struct{}{}
		}
	}
	for _, expectedDir := range expectedDirs {
		if _, found := dirs[expectedDir]; found {
			newExpectedDirs = append(newExpectedDirs, expectedDir)
		} else {
			t.Logf("Filtering empty directory %q", expectedDir)
		}
	}
	return newExpectedDirs
}

// CheckListingWithRoot checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non-nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
//
// dir is the directory used for the listing.
func CheckListingWithRoot(t *testing.T, f fs.Fs, dir string, items []Item, expectedDirs []string, precision time.Duration) {
	if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories {
		expectedDirs = filterEmptyDirs(t, items, expectedDirs)
	}
	is := NewItems(items)
	ctx := context.Background()
	oldErrors := accounting.Stats(ctx).GetErrors()
	var objs []fs.Object
	var dirs []fs.Directory
	var err error
	var retries = *ListRetries
	sleep := time.Second / 2
	wantListing := makeListingFromItems(items)
	gotListing := "<unset>"
	listingOK := false
	for i := 1; i <= retries; i++ {
		objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
		if err != nil && err != fs.ErrorDirNotFound {
			t.Fatalf("Error listing: %v", err)
		}
		gotListing = makeListingFromObjects(objs)
		listingOK = wantListing == gotListing
		if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) {
			// Put an extra sleep in if we did any retries just to make sure it really
			// is consistent
			if i != 1 {
				extraSleep := 5*time.Second + sleep
				t.Logf("Sleeping for %v just to make sure", extraSleep)
				time.Sleep(extraSleep)
			}
			break
		}
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
		if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
			t.Logf("Flushing the directory cache")
			doDirCacheFlush()
		}
	}
	assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n %s got\n %s", wantListing, gotListing))
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Don't notice an error when listing an empty directory
	if len(items) == 0 && oldErrors == 0 && accounting.Stats(ctx).GetErrors() == 1 {
		accounting.Stats(ctx).ResetErrors()
	}
	// Check the directories
	if expectedDirs != nil {
		expectedDirsCopy := make([]string, len(expectedDirs))
		for i, dir := range expectedDirs {
			expectedDirsCopy[i] = Normalize(dir)
		}
		actualDirs := []string{}
		for _, dir := range dirs {
			actualDirs = append(actualDirs, Normalize(dir.Remote()))
		}
		sort.Strings(actualDirs)
		sort.Strings(expectedDirsCopy)
		assert.Equal(t, expectedDirsCopy, actualDirs, "directories")
	}
}

// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non-nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
	CheckListingWithRoot(t, f, "", items, expectedDirs, precision)
}

// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
	precision := f.Precision()
	CheckListingWithPrecision(t, f, items, nil, precision)
}

// CheckItemsWithPrecision checks the fs with the specified precision
// to see if it has the expected items.
func CheckItemsWithPrecision(t *testing.T, f fs.Fs, precision time.Duration, items ...Item) {
	CheckListingWithPrecision(t, f, items, nil, precision)
}

// CheckItems checks the fs to see if it has only the items passed in
// using a precision of the fs's modify window
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
	CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(context.TODO(), f))
}
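
// Illustrative sketch: a typical integration-test check after writing two
// files to the remote under test. The file names, contents and the remote
// f are assumptions of the example, not values from this package.
//
//	file1 := fstest.NewItem("dir/file1.txt", "content1", fstest.Time("2001-02-03T04:05:06.499999999Z"))
//	file2 := fstest.NewItem("file2.txt", "content2", fstest.Time("2011-12-25T12:59:59.123456789Z"))
//	fstest.CheckItems(t, f, file1, file2)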

// CompareItems compares a set of DirEntries to a slice of items and a list of dirs
// The modtimes are compared with the precision supplied
func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDirs []string, precision time.Duration, what string) {
	is := NewItems(items)
	var objs []fs.Object
	var dirs []fs.Directory
	wantListing := makeListingFromItems(items)
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Directory:
			dirs = append(dirs, x)
		case fs.Object:
			objs = append(objs, x)
		default:
			t.Fatalf("unknown object type %T", entry)
		}
	}
	gotListing := makeListingFromObjects(objs)
	listingOK := wantListing == gotListing
	assert.True(t, listingOK, fmt.Sprintf("%s not equal, want\n %s got\n %s", what, wantListing, gotListing))
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Check the directories
	if expectedDirs != nil {
		expectedDirsCopy := make([]string, len(expectedDirs))
		for i, dir := range expectedDirs {
			expectedDirsCopy[i] = Normalize(dir)
		}
		actualDirs := []string{}
		for _, dir := range dirs {
			actualDirs = append(actualDirs, Normalize(dir.Remote()))
		}
		sort.Strings(actualDirs)
		sort.Strings(expectedDirsCopy)
		assert.Equal(t, expectedDirsCopy, actualDirs, "directories not equal")
	}
}
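
// Illustrative sketch: comparing the raw entries from a single List call
// against the expected items. The "List" label is only used in failure
// messages; ctx, f and items are assumed to exist in the calling test.
//
//	entries, err := f.List(ctx, "")
//	require.NoError(t, err)
//	fstest.CompareItems(t, entries, items, nil, f.Precision(), "List")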

// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, timeString)
	if err != nil {
		log.Fatalf("Failed to parse time %q: %v", timeString, err)
	}
	return t
}
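
// Illustrative sketch: Time is handy for literal timestamps in test
// fixtures; it fatals on malformed input rather than returning an error.
//
//	modTime := fstest.Time("2011-12-25T12:59:59.123456789Z")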

// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
	path, err = os.MkdirTemp("", "rclone")
	if err == nil {
		// Now remove the directory
		err = os.Remove(path)
	}
	path = filepath.ToSlash(path)
	return
}

// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
	var err error
	var leafName string
	// Make a directory if remote name is empty
	if remoteName == "" {
		remoteName, err = LocalRemote()
		if err != nil {
			return "", "", err
		}
	} else {
		if !strings.HasSuffix(remoteName, ":") {
			remoteName += "/"
		}
		leafName = "rclone-test-" + random.String(24)
		if !MatchTestRemote.MatchString(leafName) {
			log.Fatalf("%q didn't match the test remote name regexp", leafName)
		}
		remoteName += leafName
	}
	return remoteName, leafName, nil
}

// RandomRemote makes a random bucket or subdirectory on the remote
// from the -remote parameter
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
//
// Returns the remote, its name, a finaliser and an error
func RandomRemote() (fs.Fs, string, func(), error) {
	var err error
	var parentRemote fs.Fs
	remoteName := *RemoteName
	remoteName, _, err = RandomRemoteName(remoteName)
	if err != nil {
		return nil, "", nil, err
	}
	remote, err := fs.NewFs(context.Background(), remoteName)
	if err != nil {
		return nil, "", nil, err
	}
	finalise := func() {
		Purge(remote)
		if parentRemote != nil {
			Purge(parentRemote)
			if err != nil {
				log.Printf("Failed to purge %v: %v", parentRemote, err)
			}
		}
	}
	return remote, remoteName, finalise, nil
}
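
// Illustrative sketch: creating a scratch remote for a test and making
// sure it is purged afterwards.
//
//	remote, remoteName, finalise, err := fstest.RandomRemote()
//	require.NoError(t, err)
//	defer finalise()
//	t.Logf("testing against %s", remoteName)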

// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
	ctx := context.Background()
	var err error
	doFallbackPurge := true
	if doPurge := f.Features().Purge; doPurge != nil {
		doFallbackPurge = false
		fs.Debugf(f, "Purge remote")
		err = doPurge(ctx, "")
		if err == fs.ErrorCantPurge {
			doFallbackPurge = true
		}
	}
	if doFallbackPurge {
		dirs := []string{""}
		err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
			var err error
			entries.ForObject(func(obj fs.Object) {
				fs.Debugf(f, "Purge object %q", obj.Remote())
				err = obj.Remove(ctx)
				if err != nil {
					log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
				}
			})
			entries.ForDir(func(dir fs.Directory) {
				dirs = append(dirs, dir.Remote())
			})
			return nil
		})
		sort.Strings(dirs)
		for i := len(dirs) - 1; i >= 0; i-- {
			dir := dirs[i]
			fs.Debugf(f, "Purge dir %q", dir)
			err := f.Rmdir(ctx, dir)
			if err != nil {
				log.Printf("purge failed to rmdir %q: %v", dir, err)
			}
		}
	}
	if err != nil {
		log.Printf("purge failed: %v", err)
	}
}

// NewObject finds the object on the remote
func NewObject(ctx context.Context, t *testing.T, f fs.Fs, remote string) fs.Object {
	var obj fs.Object
	var err error
	sleepTime := 1 * time.Second
	for i := 1; i <= *ListRetries; i++ {
		obj, err = f.NewObject(ctx, remote)
		if err == nil {
			break
		}
		t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *ListRetries, err)
		time.Sleep(sleepTime)
		sleepTime = (sleepTime * 3) / 2
	}
	require.NoError(t, err)
	return obj
}
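
// Illustrative sketch: fetching an object that was just uploaded, allowing
// for eventual consistency in the remote's listings. The name and expected
// size are made up for the example.
//
//	obj := fstest.NewObject(ctx, t, f, "dir/file1.txt")
//	assert.Equal(t, int64(8), obj.Size())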

// NewDirectory finds the directory with remote in f
//
// One day this will be an rclone primitive
func NewDirectory(ctx context.Context, t *testing.T, f fs.Fs, remote string) fs.Directory {
	var err error
	var dir fs.Directory
	sleepTime := 1 * time.Second
	root := path.Dir(remote)
	if root == "." {
		root = ""
	}
	for i := 1; i <= *ListRetries; i++ {
		var entries fs.DirEntries
		entries, err = f.List(ctx, root)
		if err != nil {
			continue
		}
		for _, entry := range entries {
			var ok bool
			dir, ok = entry.(fs.Directory)
			if ok && dir.Remote() == remote {
				return dir
			}
		}
		err = fmt.Errorf("directory %q not found in %q", remote, root)
		t.Logf("Sleeping for %v for findDir eventual consistency: %d/%d (%v)", sleepTime, i, *ListRetries, err)
		time.Sleep(sleepTime)
		sleepTime = (sleepTime * 3) / 2
	}
	require.NoError(t, err)
	return dir
}

// CheckEntryMetadata checks the metadata on the entry
//
// This checks a limited set of metadata keys only
func CheckEntryMetadata(ctx context.Context, t *testing.T, f fs.Fs, entry fs.DirEntry, wantMeta fs.Metadata) {
	features := f.Features()
	do, ok := entry.(fs.Metadataer)
	require.True(t, ok, "Didn't find expected Metadata() method on %T", entry)
	gotMeta, err := do.Metadata(ctx)
	require.NoError(t, err)
	for k, v := range wantMeta {
		switch k {
		case "mtime", "atime", "btime", "ctime":
			// Check the system time metadata
			wantT, err := time.Parse(time.RFC3339, v)
			require.NoError(t, err)
			gotT, err := time.Parse(time.RFC3339, gotMeta[k])
			require.NoError(t, err)
			AssertTimeEqualWithPrecision(t, entry.Remote(), wantT, gotT, f.Precision())
		default:
			// Check the user metadata if we can
			_, isDir := entry.(fs.Directory)
			if (isDir && features.UserDirMetadata) || (!isDir && features.UserMetadata) {
				assert.Equal(t, v, gotMeta[k])
			}
		}
	}
}
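
// Illustrative sketch: checking that an entry carries an expected mtime and
// a user metadata key. The key/value pair "potato": "sausage" is an
// arbitrary example; ctx, f and entry are assumed to exist in the test.
//
//	wantMeta := fs.Metadata{
//		"mtime":  "2011-12-25T12:59:59.123456789Z",
//		"potato": "sausage",
//	}
//	fstest.CheckEntryMetadata(ctx, t, f, entry, wantMeta)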

// CheckDirModTime checks the modtime on the directory
func CheckDirModTime(ctx context.Context, t *testing.T, f fs.Fs, dir fs.Directory, wantT time.Time) {
	if f.Features().DirSetModTime == nil && f.Features().MkdirMetadata == nil {
		fs.Debugf(f, "Skipping modtime test as remote does not support DirSetModTime or MkdirMetadata")
		return
	}
	gotT := dir.ModTime(ctx)
	AssertTimeEqualWithPrecision(t, dir.Remote(), wantT, gotT, f.Precision())
}

// Gz returns a compressed version of its input string
func Gz(t *testing.T, s string) string {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, err := zw.Write([]byte(s))
	require.NoError(t, err)
	err = zw.Close()
	require.NoError(t, err)
	return buf.String()
}
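
// Illustrative sketch: Gz is useful for producing expected content when a
// backend or wrapper under test stores data gzip-compressed, for example
// when building an Item whose stored form is compressed.
//
//	contents := random.String(100)
//	compressed := fstest.Gz(t, contents)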