/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010	SUSE Linux Products GmbH
 * Copyright (C) 2010	Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

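/*
 * Roughly, the helpers in this file fall into three groups:
 *
 *  - pcpu_get_pages(), pcpu_alloc_pages(), pcpu_free_pages(): manage the
 *    backing pages and the shared temp page pointer array.
 *  - __pcpu_map_pages(), __pcpu_unmap_pages() and the pcpu_*_flush()
 *    helpers: establish and tear down the vmalloc mappings.
 *  - pcpu_populate_chunk(), pcpu_depopulate_chunk(), pcpu_create_chunk(),
 *    pcpu_destroy_chunk(): the chunk-level operations built on top.
 */
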
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
                                    unsigned int cpu, int page_idx)
{
        /* must not be used on pre-mapped chunk */
        WARN_ON(chunk->immutable);

        return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 * @chunk_alloc: chunk of interest
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)
{
        static struct page **pages;
        size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

        lockdep_assert_held(&pcpu_alloc_mutex);

        if (!pages)
                pages = pcpu_mem_zalloc(pages_size);
        return pages;
}

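/*
 * Note: the array above is a lazily allocated singleton sized for
 * pcpu_nr_units * pcpu_unit_pages entries and is never freed.  It is
 * shared by every populate/depopulate operation, which is why callers
 * must hold pcpu_alloc_mutex (see the lockdep assertion above).
 */
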
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
                            struct page **pages, int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page = pages[pcpu_page_idx(cpu, i)];

                        if (page)
                                __free_page(page);
                }
        }
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            struct page **pages, int page_start, int page_end)
{
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
        unsigned int cpu, tcpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                        if (!*pagep)
                                goto err;
                }
        }
        return 0;

err:
        while (--i >= page_start)
                __free_page(pages[pcpu_page_idx(cpu, i)]);

        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                for (i = page_start; i < page_end; i++)
                        __free_page(pages[pcpu_page_idx(tcpu, i)]);
        }
        return -ENOMEM;
}

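/*
 * Error unwind in pcpu_alloc_pages() above: first free the pages already
 * allocated for the cpu whose allocation failed ([page_start, i)), then
 * free the full [page_start, page_end) range for every cpu that completed
 * before it.  Nothing has been mapped at this point, so plain
 * __free_page() is sufficient.
 */
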
/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        flush_cache_vunmap(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

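/*
 * The flush above covers one contiguous range from the lowest to the
 * highest unit address rather than one range per cpu; as the comment
 * above notes, a single wide flush is cheaper than many narrow ones
 * even if it covers more than strictly necessary.
 */
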
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
        unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                             struct page **pages, int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page;

                        page = pcpu_chunk_page(chunk, cpu, i);
                        WARN_ON(!page);
                        pages[pcpu_page_idx(cpu, i)] = page;
                }
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                   page_end - page_start);
        }
}

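/*
 * pcpu_unmap_pages() stashes each backing page into @pages before tearing
 * down the mapping so that pcpu_free_pages() can release them once all
 * units are unmapped.  Cache and TLB maintenance is left to the caller's
 * pre/post flush calls.
 */
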
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        flush_tlb_kernel_range(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
                            int nr_pages)
{
        return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
                                        PAGE_KERNEL, pages);
}

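/*
 * Both __pcpu_map_pages() and __pcpu_unmap_pages() use the _noflush
 * variants of the kernel range mappers; cache and TLB flushes are batched
 * separately by the pcpu_*_flush() helpers so the whole region can be
 * flushed once instead of once per cpu.
 */
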
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
                          struct page **pages, int page_start, int page_end)
{
        unsigned int cpu, tcpu;
        int i, err;

        for_each_possible_cpu(cpu) {
                err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                       &pages[pcpu_page_idx(cpu, page_start)],
                                       page_end - page_start);
                if (err < 0)
                        goto err;

                for (i = page_start; i < page_end; i++)
                        pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                            chunk);
        }
        return 0;

err:
        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
        pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
        return err;
}

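/*
 * Error unwind in pcpu_map_pages() above: units that were already mapped
 * are unmapped again and the TLB is flushed for the whole range so that
 * no stale mappings are left behind.  Freeing the backing pages is the
 * caller's job (see pcpu_populate_chunk()).
 */
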
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
{
        flush_cache_vmap(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
                               int page_start, int page_end)
{
        struct page **pages;

        pages = pcpu_get_pages(chunk);
        if (!pages)
                return -ENOMEM;

        if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
                return -ENOMEM;

        if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
                pcpu_free_pages(chunk, pages, page_start, page_end);
                return -ENOMEM;
        }
        pcpu_post_map_flush(chunk, page_start, page_end);

        return 0;
}

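/*
 * Populate is thus a three step sequence: allocate backing pages, map them
 * into every unit's vmalloc area, then flush the cache for the new
 * mappings.  A rough sketch of how the percpu core is expected to drive
 * this (illustrative only; see mm/percpu.c for the real call sites):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	rc = pcpu_populate_chunk(chunk, page_start, page_end);
 *	if (!rc)
 *		... mark [page_start, page_end) populated in the chunk ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 */
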
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
                                  int page_start, int page_end)
{
        struct page **pages;

        /*
         * If control reaches here, there must have been at least one
         * successful population attempt so the temp pages array must
         * be available now.
         */
        pages = pcpu_get_pages(chunk);
        BUG_ON(!pages);

        /* unmap and free */
        pcpu_pre_unmap_flush(chunk, page_start, page_end);

        pcpu_unmap_pages(chunk, pages, page_start, page_end);

        /* no need to flush tlb, vmalloc will handle it lazily */

        pcpu_free_pages(chunk, pages, page_start, page_end);
}

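/*
 * Depopulate is the mirror image: flush the cache, unmap every unit, then
 * free the backing pages.  The TLB flush is intentionally skipped here;
 * as noted above, the area goes back to vmalloc, which handles TLB
 * flushing lazily.
 */
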
static struct pcpu_chunk *pcpu_create_chunk(void)
{
        struct pcpu_chunk *chunk;
        struct vm_struct **vms;

        chunk = pcpu_alloc_chunk();
        if (!chunk)
                return NULL;

        vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                pcpu_nr_groups, pcpu_atom_size);
        if (!vms) {
                pcpu_free_chunk(chunk);
                return NULL;
        }

        chunk->data = vms;
        chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
        return chunk;
}

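/*
 * Note: base_addr is the first group's vm area address minus that group's
 * offset, so per-unit addresses computed from base_addr plus an offset
 * land back inside the vm areas reserved for each group.
 */
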
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
        if (chunk && chunk->data)
                pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
        pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
        return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
        /* no extra restriction */
        return 0;
}