mali_kbase_mem_pool.c

/*
 *
 * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>

/* This function is only provided for backwards compatibility with kernels
 * which use the old carveout allocator.
 *
 * The forward declaration is to keep sparse happy.
 */
int __init kbase_carveout_mem_reserve(phys_addr_t size);
int __init kbase_carveout_mem_reserve(phys_addr_t size)
{
	return 0;
}

#define pool_dbg(pool, format, ...) \
	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
		(pool->next_pool) ? "kctx" : "kbdev", \
		kbase_mem_pool_size(pool), \
		kbase_mem_pool_max_size(pool), \
		##__VA_ARGS__)

#define NOT_DIRTY false
#define NOT_RECLAIMED false

static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
	spin_lock(&pool->pool_lock);
}

static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
	spin_unlock(&pool->pool_lock);
}

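/* Number of additional pages the pool can hold before reaching max_size. */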
static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
	ssize_t max_size = kbase_mem_pool_max_size(pool);
	ssize_t cur_size = kbase_mem_pool_size(pool);

	return max(max_size - cur_size, (ssize_t)0);
}

static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
}

static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) == 0;
}

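/* Add one page to the pool's free list and account it as reclaimable;
 * caller must hold pool->pool_lock. */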
static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
		struct page *p)
{
	lockdep_assert_held(&pool->pool_lock);

	list_add(&p->lru, &pool->page_list);
	pool->cur_size++;

	zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "added page\n");
}

static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_locked(pool, p);
	kbase_mem_pool_unlock(pool);
}

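/* Splice a list of nr_pages pages onto the pool's free list; caller must
 * hold pool->pool_lock. */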
static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	list_for_each_entry(p, page_list, lru) {
		zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
	}

	list_splice(page_list, &pool->page_list);
	pool->cur_size += nr_pages;

	pool_dbg(pool, "added %zu pages\n", nr_pages);
}

static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
	kbase_mem_pool_unlock(pool);
}

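/* Take one page off the pool's free list, or return NULL if the pool is
 * empty; caller must hold pool->pool_lock. */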
static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	if (kbase_mem_pool_is_empty(pool))
		return NULL;

	p = list_first_entry(&pool->page_list, struct page, lru);
	list_del_init(&p->lru);
	pool->cur_size--;

	zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "removed page\n");

	return p;
}

static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
	struct page *p;

	kbase_mem_pool_lock(pool);
	p = kbase_mem_pool_remove_locked(pool);
	kbase_mem_pool_unlock(pool);

	return p;
}

static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;

	dma_sync_single_for_device(dev, kbase_dma_addr(p),
			PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	clear_highpage(p);
	kbase_mem_pool_sync_page(pool, p);
}

static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
		struct page *p)
{
	/* Zero page before spilling */
	kbase_mem_pool_zero_page(next_pool, p);

	kbase_mem_pool_add(next_pool, p);
}

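/* Allocate a fresh, zeroed page from the kernel and DMA-map it for the
 * device. Returns NULL if allocation or mapping fails. */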
static struct page *kbase_mem_pool_alloc_page(struct kbase_mem_pool *pool)
{
	struct page *p;
	gfp_t gfp;
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr;

#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
	/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
	gfp = GFP_USER | __GFP_ZERO;
#else
	gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif

	if (current->flags & PF_KTHREAD) {
		/* Don't trigger OOM killer from kernel threads, e.g. when
		 * growing memory on GPU page fault */
		gfp |= __GFP_NORETRY;
	}

	p = alloc_page(gfp);
	if (!p)
		return NULL;

	dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		__free_page(p);
		return NULL;
	}

	WARN_ON(dma_addr != page_to_phys(p));

	kbase_set_dma_addr(p, dma_addr);

	pool_dbg(pool, "alloced page from kernel\n");

	return p;
}

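/* Unmap a page from the device and return it to the kernel allocator. */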
static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr = kbase_dma_addr(p);

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	kbase_clear_dma_addr(p);
	__free_page(p);

	pool_dbg(pool, "freed page to kernel\n");
}

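/* Free up to nr_to_shrink pages from the pool back to the kernel; caller
 * must hold pool->pool_lock. Returns the number of pages freed. */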
static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	struct page *p;
	size_t i;

	lockdep_assert_held(&pool->pool_lock);

	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	return i;
}

static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	size_t nr_freed;

	kbase_mem_pool_lock(pool);
	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	kbase_mem_pool_unlock(pool);

	return nr_freed;
}

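/* Grow the pool by up to nr_to_grow pages allocated from the kernel,
 * stopping early if the pool becomes full or an allocation fails.
 * Returns the number of pages added. */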
static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool,
		size_t nr_to_grow)
{
	struct page *p;
	size_t i;

	for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) {
		p = kbase_mem_pool_alloc_page(pool);
		if (!p)
			break;
		kbase_mem_pool_add(pool, p);
	}

	return i;
}

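/* Resize the pool towards new_size by shrinking or growing as needed.
 * Returns the resulting size, which may fall short of new_size if growing
 * fails part way. */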
size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
	size_t cur_size;

	cur_size = kbase_mem_pool_size(pool);

	if (new_size < cur_size)
		kbase_mem_pool_shrink(pool, cur_size - new_size);
	else if (new_size > cur_size)
		kbase_mem_pool_grow(pool, new_size - cur_size);

	cur_size = kbase_mem_pool_size(pool);

	return cur_size;
}

void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
{
	size_t cur_size;
	size_t nr_to_shrink;

	kbase_mem_pool_lock(pool);

	pool->max_size = max_size;

	cur_size = kbase_mem_pool_size(pool);
	if (max_size < cur_size) {
		nr_to_shrink = cur_size - max_size;
		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	}

	kbase_mem_pool_unlock(pool);
}

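/* Shrinker callbacks: report the current pool size to the kernel memory
 * shrinker and release pool pages when asked to scan. */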
static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;

	pool = container_of(s, struct kbase_mem_pool, reclaim);
	pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
	return kbase_mem_pool_size(pool);
}

static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;
	unsigned long freed;

	pool = container_of(s, struct kbase_mem_pool, reclaim);

	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);

	freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);

	pool_dbg(pool, "reclaim freed %ld pages\n", freed);

	return freed;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
		struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return kbase_mem_pool_reclaim_count_objects(s, sc);

	return kbase_mem_pool_reclaim_scan_objects(s, sc);
}
#endif

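/* Initialise an empty pool, chain it to next_pool (may be NULL) and
 * register it with the kernel shrinker so its pages can be reclaimed
 * under memory pressure. */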
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool)
{
	pool->cur_size = 0;
	pool->max_size = max_size;
	pool->kbdev = kbdev;
	pool->next_pool = next_pool;

	spin_lock_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->page_list);

	/* Register shrinker */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
#else
	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
#endif
	pool->reclaim.seeks = DEFAULT_SEEKS;
	/* Kernel versions prior to 3.1:
	 * struct shrinker does not define batch */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
	pool->reclaim.batch = 0;
#endif
	register_shrinker(&pool->reclaim);

	pool_dbg(pool, "initialized\n");

	return 0;
}

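/* Tear down a pool: unregister the shrinker, spill as many pages as fit
 * into next_pool (zeroing them first) and free the rest to the kernel. */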
void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_spill = 0;
	LIST_HEAD(spill_list);
	int i;

	pool_dbg(pool, "terminate()\n");

	unregister_shrinker(&pool->reclaim);

	kbase_mem_pool_lock(pool);
	pool->max_size = 0;

	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool (may overspill) */
		nr_to_spill = kbase_mem_pool_capacity(next_pool);
		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);

		/* Zero pages first without holding the next_pool lock */
		for (i = 0; i < nr_to_spill; i++) {
			p = kbase_mem_pool_remove_locked(pool);
			kbase_mem_pool_zero_page(pool, p);
			list_add(&p->lru, &spill_list);
		}
	}

	while (!kbase_mem_pool_is_empty(pool)) {
		/* Free remaining pages to kernel */
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	kbase_mem_pool_unlock(pool);

	if (next_pool && nr_to_spill) {
		/* Add new page list to next_pool */
		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);

		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
	}

	pool_dbg(pool, "terminated\n");
}

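/* Allocate a single page, preferring this pool, then next_pool, and
 * finally the kernel allocator. Returns NULL if all of these fail. */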
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
	struct page *p;

	pool_dbg(pool, "alloc()\n");

	p = kbase_mem_pool_remove(pool);

	if (!p && pool->next_pool) {
		/* Allocate via next pool */
		return kbase_mem_pool_alloc(pool->next_pool);
	}

	if (!p) {
		/* Get page from kernel */
		p = kbase_mem_pool_alloc_page(pool);
	}

	return p;
}

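/* Return a single page to this pool, spill it to next_pool if this pool is
 * full, or free it to the kernel if both are full. Dirty pages are synced
 * for the device before being pooled. */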
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
		bool dirty)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;

	pool_dbg(pool, "free()\n");

	if (!kbase_mem_pool_is_full(pool)) {
		/* Add to our own pool */
		if (dirty)
			kbase_mem_pool_sync_page(pool, p);

		kbase_mem_pool_add(pool, p);
	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool */
		kbase_mem_pool_spill(next_pool, p);
	} else {
		/* Free page */
		kbase_mem_pool_free_page(pool, p);
	}
}

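/* Allocate nr_pages physical pages, drawing from this pool first, then
 * next_pool, then the kernel. On failure everything allocated so far is
 * rolled back and an error is returned. */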
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages)
{
	struct page *p;
	size_t nr_from_pool;
	size_t i;
	int err = -ENOMEM;

	pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);

	/* Get pages from this pool */
	kbase_mem_pool_lock(pool);
	nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
	for (i = 0; i < nr_from_pool; i++) {
		p = kbase_mem_pool_remove_locked(pool);
		pages[i] = page_to_phys(p);
	}
	kbase_mem_pool_unlock(pool);

	if (i != nr_pages && pool->next_pool) {
		/* Allocate via next pool */
		err = kbase_mem_pool_alloc_pages(pool->next_pool,
				nr_pages - i, pages + i);
		if (err)
			goto err_rollback;

		i += nr_pages - i;
	}

	/* Get any remaining pages from kernel */
	for (; i < nr_pages; i++) {
		p = kbase_mem_pool_alloc_page(pool);
		if (!p)
			goto err_rollback;
		pages[i] = page_to_phys(p);
	}

	pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);

	return 0;

err_rollback:
	kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
	return err;
}

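/* Move an array of physical pages into the pool, optionally zeroing or
 * syncing each page first (outside the pool lock). Entries that are taken
 * are cleared to 0 in the pages array. */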
static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
		size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
{
	struct page *p;
	size_t nr_to_pool = 0;
	LIST_HEAD(new_page_list);
	size_t i;

	if (!nr_pages)
		return;

	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
			nr_pages, zero, sync);

	/* Zero/sync pages first without holding the pool lock */
	for (i = 0; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);

		if (zero)
			kbase_mem_pool_zero_page(pool, p);
		else if (sync)
			kbase_mem_pool_sync_page(pool, p);

		list_add(&p->lru, &new_page_list);
		nr_to_pool++;
		pages[i] = 0;
	}

	/* Add new page list to pool */
	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);

	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
			nr_pages, nr_to_pool);
}

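/* Free nr_pages physical pages: fill this pool up to its capacity, spill
 * the overflow into next_pool, and release anything left over to the
 * kernel. Pages flagged as reclaimed bypass the pools; their reclaimable
 * accounting is dropped and they are freed straight to the kernel. */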
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty, bool reclaimed)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_pool;
	LIST_HEAD(to_pool_list);
	size_t i = 0;

	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);

	if (!reclaimed) {
		/* Add to this pool */
		nr_to_pool = kbase_mem_pool_capacity(pool);
		nr_to_pool = min(nr_pages, nr_to_pool);

		kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);

		i += nr_to_pool;

		if (i != nr_pages && next_pool) {
			/* Spill to next pool (may overspill) */
			nr_to_pool = kbase_mem_pool_capacity(next_pool);
			nr_to_pool = min(nr_pages - i, nr_to_pool);

			kbase_mem_pool_add_array(next_pool, nr_to_pool,
					pages + i, true, dirty);
			i += nr_to_pool;
		}
	}

	/* Free any remaining pages to kernel */
	for (; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);
		if (reclaimed)
			zone_page_state_add(-1, page_zone(p),
					NR_SLAB_RECLAIMABLE);

		kbase_mem_pool_free_page(pool, p);
		pages[i] = 0;
	}

	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}