/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
/*****************
 * Structures
 *****************/

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. As one
 * chunk in an allocated page is occupied by the z3fold header, NCHUNKS
 * works out to 63, the maximum number of free chunks in a z3fold page,
 * and there will accordingly be 63 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	((1 << NCHUNKS_ORDER) - 1)
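
/*
 * A worked example of the chunk arithmetic above, assuming a common 4K
 * page (PAGE_SHIFT == 12; the real values are architecture-dependent):
 *
 *	CHUNK_SHIFT = 12 - 6 = 6
 *	CHUNK_SIZE  = 1 << 6 = 64 bytes
 *	NCHUNKS     = (4096 - 64) >> 6 = 63 chunks usable for objects
 *	BUDDY_MASK  = 0x3f
 *
 * Later comments in this file reuse this 4K geometry for illustration.
 */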

struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock:	protects all pool fields and first|last_chunk fields of any
 *		z3fold page in the pool
 * @unbuddied:	array of lists tracking z3fold pages that contain at most
 *		2 buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @buddied:	list tracking the z3fold pages that contain 3 buddies;
 *		these z3fold pages are full
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver back-reference
 * @zpool_ops:	zpool operations structure carrying the evict callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunk of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:NCHUNKS_ORDER;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	UNDER_RECLAIM = 0,
	PAGE_HEADLESS,
	MIDDLE_CHUNK_MAPPED,
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
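
/*
 * E.g. with the 4K example geometry (CHUNK_SIZE == 64), a 100-byte
 * allocation rounds up to (100 + 63) >> 6 = 2 chunks, while 64 bytes
 * needs exactly (64 + 63) >> 6 = 1 chunk.
 */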

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(UNDER_RECLAIM, &page->private);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct z3fold_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/* Returns the buddy number of a handle */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
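
/*
 * Note on the encoding (illustrative): a handle is the page-aligned
 * address of the z3fold header with a small per-buddy offset in its low
 * bits, so handle_to_z3fold_header() only has to mask with PAGE_MASK.
 * first_num is folded into the offset so that an existing handle keeps
 * decoding correctly across compaction: when z3fold_compact_page() below
 * moves a lone middle buddy into the first slot and increments first_num,
 * (handle - first_num) for the old middle handle then yields MIDDLE - 1,
 * i.e. FIRST, which is exactly where the object now lives.
 */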

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - 1;
		int nfree_after = zhdr->last_chunks ?
			0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
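
/*
 * A worked example with the 4K geometry (NCHUNKS == 63): given a 10-chunk
 * first buddy, a 5-chunk middle buddy at start_middle == 11 and a free
 * last slot, nfree_before is 0 (the first buddy is present) and
 * nfree_after is 63 - 11 - 5 = 47, so num_free_chunks() reports 47. Only
 * the larger of the two gaps counts, since a new buddy must fit in one
 * contiguous free region.
 */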

/*****************
 * API Functions
 *****************/

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kfree(pool);
}

/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);
	void *beg = zhdr;

	if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
	    zhdr->middle_chunks != 0 &&
	    zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		memmove(beg + ZHDR_SIZE_ALIGNED,
			beg + (zhdr->start_middle << CHUNK_SHIFT),
			zhdr->middle_chunks << CHUNK_SHIFT);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}
	return 0;
}
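
/*
 * Illustration of the compaction above, using the 4K example geometry: a
 * page holding only a 5-chunk middle buddy at start_middle == 20 has two
 * free fragments which num_free_chunks() reports as 19 (before) and 38
 * (after). The memmove() slides the buddy down to the chunk right after
 * the header, making it the first buddy, so the page ends up with one
 * free region of 63 - 5 = 58 chunks. first_num is bumped so the buddy's
 * existing handle keeps decoding to the right slot (see the note after
 * handle_to_buddy() above).
 */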

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds PAGE_SIZE, or
 * -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		chunks = size_to_chunks(size);
		spin_lock(&pool->lock);

		/* First, try to find an unbuddied z3fold page. */
		zhdr = NULL;
		for_each_unbuddied_list(i, chunks) {
			if (!list_empty(&pool->unbuddied[i])) {
				zhdr = list_first_entry(&pool->unbuddied[i],
						struct z3fold_header, buddy);
				page = virt_to_page(zhdr);
				if (zhdr->first_chunks == 0) {
					if (zhdr->middle_chunks != 0 &&
					    chunks >= zhdr->start_middle)
						bud = LAST;
					else
						bud = FIRST;
				} else if (zhdr->last_chunks == 0)
					bud = LAST;
				else if (zhdr->middle_chunks == 0)
					bud = MIDDLE;
				else {
					pr_err("No free chunks in unbuddied\n");
					WARN_ON(1);
					continue;
				}
				list_del(&zhdr->buddy);
				goto found;
			}
		}
		bud = FIRST;
		spin_unlock(&pool->lock);
	}

	/* Couldn't find unbuddied z3fold page, create new one */
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_z3fold_page(page);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + 1;
	}

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

headless:
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM flag being set on the page,
 * this function only sets the first|last_chunks to 0. The page is actually
 * freed once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	int freechunks;
	struct page *page;
	enum buddy bud;

	spin_lock(&pool->lock);
	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			spin_unlock(&pool->lock);
			return;
		}
	}

	if (test_bit(UNDER_RECLAIM, &page->private)) {
		/* z3fold page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	if (bud != HEADLESS) {
		/* Remove from existing buddy list */
		list_del(&zhdr->buddy);
	}

	if (bud == HEADLESS ||
	    (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
	     zhdr->last_chunks == 0)) {
		/* z3fold page is empty, free */
		list_del(&page->lru);
		clear_bit(PAGE_HEADLESS, &page->private);
		free_z3fold_page(zhdr);
		pool->pages_nr--;
	} else {
		z3fold_compact_page(zhdr);
		/* Add to the unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0, freechunks;
	struct z3fold_header *zhdr;
	struct page *page;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
	    retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		page = list_last_entry(&pool->lru, struct page, lru);
		list_del(&page->lru);

		/* Protect z3fold page against free */
		set_bit(UNDER_RECLAIM, &page->private);
		zhdr = page_address(page);
		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			list_del(&zhdr->buddy);
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		clear_bit(UNDER_RECLAIM, &page->private);
		if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
		    (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
		     zhdr->middle_chunks == 0)) {
			/*
			 * All buddies are now free, free the z3fold page and
			 * return success.
			 */
			clear_bit(PAGE_HEADLESS, &page->private);
			free_z3fold_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
			if (zhdr->first_chunks != 0 &&
			    zhdr->last_chunks != 0 &&
			    zhdr->middle_chunks != 0) {
				/* Full, add to buddied list */
				list_add(&zhdr->buddy, &pool->buddied);
			} else {
				z3fold_compact_page(zhdr);
				/* add to unbuddied list */
				freechunks = num_free_chunks(zhdr);
				list_add(&zhdr->buddy,
					 &pool->unbuddied[freechunks]);
			}
		}

		/* add to beginning of LRU */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
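
/*
 * A minimal sketch (not part of this file, hence the #if 0) of an eviction
 * handler following the protocol described above, written against the
 * zpool_ops interface through which z3fold users register callbacks. The
 * names my_evict and my_writeback are hypothetical; a real user such as
 * zswap would write the object out to the backing swap device here.
 */
#if 0
static int my_writeback(void *data);	/* hypothetical backing store */

static int my_evict(struct zpool *pool, unsigned long handle)
{
	void *data = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	int err = my_writeback(data);

	zpool_unmap_handle(pool, handle);
	if (err)
		return err;	/* non-zero: z3fold requeues the page */

	/* on success the handler itself must free the handle */
	zpool_free(pool, handle);
	return 0;
}

static const struct zpool_ops my_evict_ops = {
	.evict = my_evict,
};
#endif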

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	spin_lock(&pool->lock);
	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}
out:
	spin_unlock(&pool->lock);
	return addr;
}
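
/*
 * Offsets returned by z3fold_map(), using the 4K example geometry: FIRST
 * always maps at offset 64 (just past the header chunk), MIDDLE at
 * start_middle * 64, and LAST is packed against the end of the page, e.g.
 * at 4096 - 5 * 64 = 3776 for a 5-chunk last buddy. A HEADLESS page maps
 * at offset 0, since the whole page belongs to the single allocation.
 */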

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	spin_lock(&pool->lock);
	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		spin_unlock(&pool->lock);
		return;
	}

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	spin_unlock(&pool->lock);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool. The pool lock need not be
 * taken to access pages_nr.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return pool->pages_nr;
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
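
/*
 * A minimal sketch (again #if 0'd, error handling elided) of how a client
 * reaches this driver through the zpool API from <linux/zpool.h>, since
 * z3fold exports nothing directly. The zpool calls are real; my_pool,
 * my_store_example and my_evict_ops (sketched after z3fold_reclaim_page()
 * above) are made up for illustration.
 */
#if 0
static void my_store_example(const void *buf, size_t len)
{
	struct zpool *my_pool;
	unsigned long handle;
	void *dst;

	my_pool = zpool_create_pool("z3fold", "demo", GFP_KERNEL,
				    &my_evict_ops);
	zpool_malloc(my_pool, len, GFP_KERNEL, &handle);

	dst = zpool_map_handle(my_pool, handle, ZPOOL_MM_WO);
	memcpy(dst, buf, len);		/* store the compressed data */
	zpool_unmap_handle(my_pool, handle);

	/* later: zpool_free(my_pool, handle); zpool_destroy_pool(my_pool); */
}
#endif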

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");