zstd.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016-present, Facebook, Inc.
  4. * All rights reserved.
  5. *
  6. */
  7. #include <linux/bio.h>
  8. #include <linux/bitmap.h>
  9. #include <linux/err.h>
  10. #include <linux/init.h>
  11. #include <linux/kernel.h>
  12. #include <linux/mm.h>
  13. #include <linux/sched/mm.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/refcount.h>
  16. #include <linux/sched.h>
  17. #include <linux/slab.h>
  18. #include <linux/zstd.h>
  19. #include "misc.h"
  20. #include "compression.h"
  21. #include "ctree.h"
  22. #define ZSTD_BTRFS_MAX_WINDOWLOG 17
  23. #define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
  24. #define ZSTD_BTRFS_DEFAULT_LEVEL 3
  25. #define ZSTD_BTRFS_MAX_LEVEL 15
  26. /* 307s to avoid pathologically clashing with transaction commit */
  27. #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
  28. static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
  29. size_t src_len)
  30. {
  31. ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
  32. if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
  33. params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
  34. WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
  35. return params;
  36. }
/* One zstd workspace: enough memory for either a CStream or a DStream. */
struct workspace {
	void *mem;		/* zstd context memory, kvmalloc'd to @size */
	size_t size;		/* bytes in @mem, from zstd_ws_mem_sizes[] */
	char *buf;		/* one-page bounce buffer for decompression */
	unsigned int level;	/* level this workspace was sized for */
	unsigned int req_level;	/* level of the current request; 0 when idle */
	unsigned long last_used; /* jiffies */
	struct list_head list;		/* link in wsm.idle_ws[level - 1] */
	struct list_head lru_list;	/* link in wsm.lru_list for reclaim */
	ZSTD_inBuffer in_buf;		/* streaming input descriptor */
	ZSTD_outBuffer out_buf;		/* streaming output descriptor */
};
/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru. Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces and scanning up. This lets us recycle higher level
 * workspaces because of the monotonic memory guarantee. A workspace's
 * last_used is only updated if it is being used by the corresponding memory
 * level. Putting a workspace involves adding it back to the appropriate places
 * and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */
struct zstd_workspace_manager {
	const struct btrfs_compress_op *ops;	/* btrfs compression callbacks */
	spinlock_t lock;		/* protects the lists, map and timer */
	struct list_head lru_list;	/* idle workspaces, most recent first */
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];	/* per-level idle lists */
	unsigned long active_map;	/* bit N set: idle_ws[N] is non-empty */
	wait_queue_head_t wait;		/* waiters for a max level workspace */
	struct timer_list timer;	/* periodic reclaim of stale workspaces */
};
/* Single global manager instance; all mutable state guarded by wsm.lock. */
static struct zstd_workspace_manager wsm;

/* Monotonic per-level sizes, filled once by zstd_calc_ws_mem_sizes(). */
static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
  79. static inline struct workspace *list_to_workspace(struct list_head *list)
  80. {
  81. return container_of(list, struct workspace, list);
  82. }
  83. static void zstd_free_workspace(struct list_head *ws);
  84. static struct list_head *zstd_alloc_workspace(unsigned int level);
/*
 * zstd_reclaim_timer_fn - reclaim timer
 * @timer: timer
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock_bh(&wsm.lock);

	if (list_empty(&wsm.lru_list)) {
		spin_unlock_bh(&wsm.lock);
		return;
	}

	/* Walk from the tail: least recently used entries first. */
	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		unsigned int level;

		/* Everything closer to the head is younger; stop scanning. */
		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		/* Last idle workspace of this level gone: clear its map bit. */
		if (list_empty(&wsm.idle_ws[level - 1]))
			clear_bit(level - 1, &wsm.active_map);

	}

	/* Re-arm only while there is something left to age out. */
	if (!list_empty(&wsm.lru_list))
		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock_bh(&wsm.lock);
}
  121. /*
  122. * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
  123. *
  124. * It is possible based on the level configurations that a higher level
  125. * workspace uses less memory than a lower level workspace. In order to reuse
  126. * workspaces, this must be made a monotonic relationship. This precomputes
  127. * the required memory for each level and enforces the monotonicity between
  128. * level and memory required.
  129. */
  130. static void zstd_calc_ws_mem_sizes(void)
  131. {
  132. size_t max_size = 0;
  133. unsigned int level;
  134. for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
  135. ZSTD_parameters params =
  136. zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
  137. size_t level_size =
  138. max_t(size_t,
  139. ZSTD_CStreamWorkspaceBound(params.cParams),
  140. ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
  141. max_size = max_t(size_t, max_size, level_size);
  142. zstd_ws_mem_sizes[level - 1] = max_size;
  143. }
  144. }
  145. static void zstd_init_workspace_manager(void)
  146. {
  147. struct list_head *ws;
  148. int i;
  149. zstd_calc_ws_mem_sizes();
  150. wsm.ops = &btrfs_zstd_compress;
  151. spin_lock_init(&wsm.lock);
  152. init_waitqueue_head(&wsm.wait);
  153. timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
  154. INIT_LIST_HEAD(&wsm.lru_list);
  155. for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
  156. INIT_LIST_HEAD(&wsm.idle_ws[i]);
  157. ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
  158. if (IS_ERR(ws)) {
  159. pr_warn(
  160. "BTRFS: cannot preallocate zstd compression workspace\n");
  161. } else {
  162. set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
  163. list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
  164. }
  165. }
/* Free every idle workspace at every level, then stop the reclaim timer. */
static void zstd_cleanup_workspace_manager(void)
{
	struct workspace *workspace;
	int i;

	spin_lock_bh(&wsm.lock);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&wsm.idle_ws[i])) {
			workspace = container_of(wsm.idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			/*
			 * NOTE(review): zstd_free_workspace() calls kvfree()
			 * here while wsm.lock is held with BHs disabled —
			 * confirm this cannot reach a sleeping vfree() path.
			 */
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&wsm.lock);

	/* mod_timer() is only called under wsm.lock; stop the timer last. */
	del_timer_sync(&wsm.timer);
}
/*
 * zstd_find_workspace - find workspace
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 *
 * Returns the workspace's list node, or NULL when no idle workspace of
 * this level or higher exists.
 */
static struct list_head *zstd_find_workspace(unsigned int level)
{
	struct list_head *ws;
	struct workspace *workspace;
	int i = level - 1;

	spin_lock_bh(&wsm.lock);
	/* Scan this level and every larger one for an idle workspace. */
	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&wsm.idle_ws[i])) {
			ws = wsm.idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its place if it's a lower level using this */
			workspace->req_level = level;
			if (level == workspace->level)
				list_del(&workspace->lru_list);
			/* Level emptied out: drop it from the scan bitmap. */
			if (list_empty(&wsm.idle_ws[i]))
				clear_bit(i, &wsm.active_map);
			spin_unlock_bh(&wsm.lock);
			return ws;
		}
	}
	spin_unlock_bh(&wsm.lock);

	return NULL;
}
/*
 * zstd_get_workspace - zstd's get_workspace
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used. Therefore, we begin
 * scanning from 1. We first scan through possible workspaces and then after
 * attempt to allocate a new workspace. If we fail to allocate one due to
 * memory pressure, go to sleep waiting for the max level workspace to free up.
 */
static struct list_head *zstd_get_workspace(unsigned int level)
{
	struct list_head *ws;
	unsigned int nofs_flag;

	/* level == 0 means we can use any workspace */
	if (!level)
		level = 1;

again:
	ws = zstd_find_workspace(level);
	if (ws)
		return ws;

	/* Nothing idle fits; allocate without triggering fs reclaim. */
	nofs_flag = memalloc_nofs_save();
	ws = zstd_alloc_workspace(level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(ws)) {
		DEFINE_WAIT(wait);

		/* Sleep until zstd_put_workspace() wakes us, then retry. */
		prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&wsm.wait, &wait);

		goto again;
	}

	return ws;
}
/*
 * zstd_put_workspace - zstd put_workspace
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only the max level workspace tries and wakes
 * up waiting workspaces.
 */
static void zstd_put_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_to_workspace(ws);

	spin_lock_bh(&wsm.lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (workspace->req_level == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			/* Reclaimable: refresh age and (re)arm the timer. */
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &wsm.lru_list);
			if (!timer_pending(&wsm.timer))
				mod_timer(&wsm.timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	set_bit(workspace->level - 1, &wsm.active_map);
	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
	workspace->req_level = 0;

	spin_unlock_bh(&wsm.lock);

	/* Max level workspace returned: wake anyone stuck in get_workspace. */
	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
		cond_wake_up(&wsm.wait);
}
  284. static void zstd_free_workspace(struct list_head *ws)
  285. {
  286. struct workspace *workspace = list_entry(ws, struct workspace, list);
  287. kvfree(workspace->mem);
  288. kfree(workspace->buf);
  289. kfree(workspace);
  290. }
  291. static struct list_head *zstd_alloc_workspace(unsigned int level)
  292. {
  293. struct workspace *workspace;
  294. workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
  295. if (!workspace)
  296. return ERR_PTR(-ENOMEM);
  297. workspace->size = zstd_ws_mem_sizes[level - 1];
  298. workspace->level = level;
  299. workspace->req_level = level;
  300. workspace->last_used = jiffies;
  301. workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
  302. workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  303. if (!workspace->mem || !workspace->buf)
  304. goto fail;
  305. INIT_LIST_HEAD(&workspace->list);
  306. INIT_LIST_HEAD(&workspace->lru_list);
  307. return &workspace->list;
  308. fail:
  309. zstd_free_workspace(&workspace->list);
  310. return ERR_PTR(-ENOMEM);
  311. }
/*
 * Compress a range of file data into freshly allocated pages.
 * @ws:        workspace list node (struct workspace)
 * @mapping:   address space of the input pages
 * @start:     byte offset of the input range
 * @pages:     array that receives the allocated output pages
 * @out_pages: in: capacity of @pages; out: number of pages filled
 * @total_in:  out: input bytes consumed
 * @total_out: in: length of the input range; out: compressed bytes produced
 *
 * Returns 0 on success, -E2BIG when compression would not shrink the data
 * or output space ran out, -ENOMEM or -EIO on failure. On every exit path
 * *out_pages reflects the pages already placed in @pages so the caller can
 * free them.
 */
static int zstd_compress_pages(struct list_head *ws,
		struct address_space *mapping,
		u64 start,
		struct page **pages,
		unsigned long *out_pages,
		unsigned long *total_in,
		unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	ZSTD_CStream *stream;
	int ret = 0;
	int nr_pages = 0;
	struct page *in_page = NULL;  /* The current page to read */
	struct page *out_page = NULL; /* The current page to write to */
	unsigned long tot_in = 0;	/* input bytes consumed so far */
	unsigned long tot_out = 0;	/* output bytes produced so far */
	unsigned long len = *total_out;	/* remaining input length */
	const unsigned long nr_dest_pages = *out_pages;
	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
	ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
							   len);

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/* Initialize the stream */
	stream = ZSTD_initCStream(params, len, workspace->mem,
			workspace->size);
	if (!stream) {
		pr_warn("BTRFS: ZSTD_initCStream failed\n");
		ret = -EIO;
		goto out;
	}

	/* map in the first page of input data */
	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	workspace->in_buf.src = kmap(in_page);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

	/* Allocate and map in the output buffer */
	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pages[nr_pages++] = out_page;
	workspace->out_buf.dst = kmap(out_page);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

	/* Main loop: feed input pages, swap in output pages as they fill. */
	while (1) {
		size_t ret2;

		ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto out;
		}

		/* Check to see if we are making it bigger */
		if (tot_in + workspace->in_buf.pos > 8192 &&
				tot_in + workspace->in_buf.pos <
				tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* We've reached the end of our output range */
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space */
		if (workspace->out_buf.pos == workspace->out_buf.size) {
			tot_out += PAGE_SIZE;
			max_out -= PAGE_SIZE;
			kunmap(out_page);
			/* Out of destination pages: give up, nothing to unmap. */
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			pages[nr_pages++] = out_page;
			workspace->out_buf.dst = kmap(out_page);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_t(size_t, max_out,
							PAGE_SIZE);
		}

		/* We've reached the end of the input */
		if (workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			tot_in += PAGE_SIZE;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			len -= PAGE_SIZE;
			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
			workspace->in_buf.src = kmap(in_page);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
		}
	}
	/* Finish the frame; ZSTD_endStream may need several output pages. */
	while (1) {
		size_t ret2;

		ret2 = ZSTD_endStream(stream, &workspace->out_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_endStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto out;
		}
		/* ret2 == 0 means the frame epilogue is fully flushed. */
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			break;
		}
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		tot_out += PAGE_SIZE;
		max_out -= PAGE_SIZE;
		kunmap(out_page);
		if (nr_pages == nr_dest_pages) {
			out_page = NULL;
			ret = -E2BIG;
			goto out;
		}
		out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (out_page == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		pages[nr_pages++] = out_page;
		workspace->out_buf.dst = kmap(out_page);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
	}

	/* Reject results that did not actually shrink the data. */
	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_in = tot_in;
	*total_out = tot_out;
out:
	*out_pages = nr_pages;
	/* Cleanup */
	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	if (out_page)
		kunmap(out_page);
	return ret;
}
/*
 * Decompress a whole compressed extent into the pages of @cb->orig_bio.
 * @ws: workspace list node (struct workspace)
 * @cb: compressed bio describing input pages and the target bio
 *
 * Streams the compressed pages through the DStream one page at a time,
 * copying each output chunk into the bio via btrfs_decompress_buf2page().
 * Returns 0 on success, -EIO on stream errors or truncated input.
 */
static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;
	size_t srclen = cb->compressed_len;
	ZSTD_DStream *stream;
	int ret = 0;
	unsigned long page_in_index = 0;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	unsigned long total_out = 0;

	stream = ZSTD_initDStream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_debug("BTRFS: ZSTD_initDStream failed\n");
		ret = -EIO;
		goto done;
	}

	workspace->in_buf.src = kmap(pages_in[page_in_index]);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

	/* Output goes through the one-page bounce buffer, then into the bio. */
	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;

	while (1) {
		size_t ret2;

		ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto done;
		}
		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		/* Copy this chunk into the bio; 0 means the bio is full. */
		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
				buf_start, total_out, disk_start, orig_bio);
		if (ret == 0)
			break;

		if (workspace->in_buf.pos >= srclen)
			break;

		/* Check if we've hit the end of a frame */
		if (ret2 == 0)
			break;

		/* Current input page exhausted; advance to the next one. */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			kunmap(pages_in[page_in_index++]);
			/* Stream wants more input than the extent provides. */
			if (page_in_index >= total_pages_in) {
				workspace->in_buf.src = NULL;
				ret = -EIO;
				goto done;
			}
			srclen -= PAGE_SIZE;
			workspace->in_buf.src = kmap(pages_in[page_in_index]);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
		}
	}
	ret = 0;
	/* Zero any remainder of the bio that decompression did not cover. */
	zero_fill_bio(orig_bio);
done:
	if (workspace->in_buf.src)
		kunmap(pages_in[page_in_index]);
	return ret;
}
/*
 * Decompress a single destination page from an in-memory buffer.
 * @ws:         workspace list node (struct workspace)
 * @data_in:    contiguous compressed input
 * @dest_page:  page receiving the decompressed bytes
 * @start_byte: offset in the decompressed stream where @dest_page begins
 * @srclen:     bytes available in @data_in
 * @destlen:    bytes wanted in @dest_page (clamped to PAGE_SIZE)
 *
 * Output before @start_byte is decompressed and discarded. Any tail of
 * @dest_page that is not produced gets zero-filled. Returns 0 or -EIO.
 */
static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page,
		unsigned long start_byte,
		size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	ZSTD_DStream *stream;
	int ret = 0;
	size_t ret2;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	stream = ZSTD_initDStream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_warn("BTRFS: ZSTD_initDStream failed\n");
		ret = -EIO;
		goto finish;
	}

	destlen = min_t(size_t, destlen, PAGE_SIZE);

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	/* Decompress through the one-page bounce buffer. */
	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;

	ret2 = 1;
	while (pg_offset < destlen
	       && workspace->in_buf.pos < workspace->in_buf.size) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		/* Check if the frame is over and we still need more input */
		if (ret2 == 0) {
			pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
			ret = -EIO;
			goto finish;
		}
		ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (ZSTD_isError(ret2)) {
			pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
					ZSTD_getErrorCode(ret2));
			ret = -EIO;
			goto finish;
		}

		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		/* Output so far lies entirely before the requested range. */
		if (total_out <= start_byte)
			continue;

		/* First chunk straddling start_byte: skip its leading part. */
		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min_t(unsigned long, destlen - pg_offset,
				workspace->out_buf.size - buf_offset);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->out_buf.dst + buf_offset,
				bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
	}
	ret = 0;
finish:
	/* Zero-fill whatever part of the destination was not written. */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}
/* Callback table wiring zstd into btrfs' generic compression layer. */
const struct btrfs_compress_op btrfs_zstd_compress = {
	.init_workspace_manager = zstd_init_workspace_manager,
	.cleanup_workspace_manager = zstd_cleanup_workspace_manager,
	.get_workspace = zstd_get_workspace,
	.put_workspace = zstd_put_workspace,
	.alloc_workspace = zstd_alloc_workspace,
	.free_workspace = zstd_free_workspace,
	.compress_pages = zstd_compress_pages,
	.decompress_bio = zstd_decompress_bio,
	.decompress = zstd_decompress,
	.max_level = ZSTD_BTRFS_MAX_LEVEL,
	.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};