// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_alloc(size, 0);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns the physical address of the allocated memory map on
 * success, zero on failure.
 */
phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
{
	unsigned long size = num_entries * efi.memmap.desc_size;

	if (slab_is_available())
		return __efi_memmap_alloc_late(size);

	return __efi_memmap_alloc_early(size);
}
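
/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * allocate room for a slightly larger memory map. The growth by two entries
 * and the helper name are assumptions for demonstration only; real callers
 * (e.g. the x86 memory-reservation paths) follow this same pattern.
 */
static int __init __maybe_unused example_grow_memmap(void)
{
	unsigned int new_nr = efi.memmap.nr_map + 2;	/* assumed growth */
	phys_addr_t new_phys;

	new_phys = efi_memmap_alloc(new_nr);
	if (!new_phys)
		return -ENOMEM;	/* allocation failed */

	/* ... populate the buffer, then efi_memmap_install(new_phys, new_nr) ... */
	return 0;
}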

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map
 * @data: EFI memory map data
 * @late: Use early or late mapping function?
 *
 * This function takes care of figuring out which function to use to
 * map the EFI memory map in efi.memmap based on how far into the boot
 * we are.
 *
 * During bootup @late should be %false since we only have access to
 * the early_memremap*() functions, as the vmalloc space hasn't been
 * set up yet. Once the kernel is fully booted we can fall back to the
 * more robust memremap*() API.
 *
 * Returns zero on success, a negative error code on failure.
 */
static int __init
__efi_memmap_init(struct efi_memory_map_data *data, bool late)
{
	struct efi_memory_map map;
	phys_addr_t phys_map;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	phys_map = data->phys_map;

	if (late)
		map.map = memremap(phys_map, data->size, MEMREMAP_WB);
	else
		map.map = early_memremap(phys_map, data->size);

	if (!map.map) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}

	map.phys_map = data->phys_map;
	map.nr_map = data->size / data->desc_size;
	map.map_end = map.map + data->size;

	map.desc_version = data->desc_version;
	map.desc_size = data->desc_size;
	map.late = late;

	set_bit(EFI_MEMMAP, &efi.flags);

	efi.memmap = map;

	return 0;
}

/**
 * efi_memmap_init_early - Map the EFI memory map data structure
 * @data: EFI memory map data
 *
 * Use early_memremap() to map the passed in EFI memory map and assign
 * it to efi.memmap.
 */
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
	/* Cannot go backwards */
	WARN_ON(efi.memmap.late);

	return __efi_memmap_init(data, false);
}
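
/*
 * Illustrative sketch, not part of the original file: how architecture code
 * typically performs the early mapping during efi_init(). The 'boot_*'
 * parameters are hypothetical stand-ins for values handed over by the EFI
 * stub or boot loader.
 */
static void __init __maybe_unused example_early_init(phys_addr_t boot_memmap_phys,
						     unsigned long boot_memmap_size,
						     unsigned long boot_desc_size,
						     unsigned int boot_desc_ver)
{
	struct efi_memory_map_data data = {
		.phys_map	= boot_memmap_phys,
		.size		= boot_memmap_size,
		.desc_size	= boot_desc_size,
		.desc_version	= boot_desc_ver,
	};

	if (efi_memmap_init_early(&data) < 0)
		pr_warn("Failed to map the EFI memory map\n");
}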

void __init efi_memmap_unmap(void)
{
	if (!efi_enabled(EFI_MEMMAP))
		return;

	if (!efi.memmap.late) {
		unsigned long size;

		size = efi.memmap.desc_size * efi.memmap.nr_map;
		early_memunmap(efi.memmap.map, size);
	} else {
		memunmap(efi.memmap.map);
	}

	efi.memmap.map = NULL;
	clear_bit(EFI_MEMMAP, &efi.flags);
}

/**
 * efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr: Physical address of the new EFI memory map
 * @size: Size in bytes of the new EFI memory map
 *
 * Setup a mapping of the EFI memory map using memremap(). This
 * function should only be called once the vmalloc space has been
 * setup and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
 * efi_memmap_init_early() to have already been called.
 *
 * The reason there are two EFI memmap initialisation functions
 * (efi_memmap_init_early() and this late version) is that the
 * early EFI memmap should be explicitly unmapped once EFI
 * initialisation is complete, as the fixmap space used to map the EFI
 * memmap (via early_memremap()) is a scarce resource.
 *
 * This late mapping is intended to persist for the duration of
 * runtime so that things like efi_mem_desc_lookup() and
 * efi_mem_attributes() always work.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
	struct efi_memory_map_data data = {
		.phys_map = addr,
		.size = size,
	};

	/* Did we forget to unmap the early EFI memmap? */
	WARN_ON(efi.memmap.map);

	/* Were we already called? */
	WARN_ON(efi.memmap.late);

	/*
	 * It makes no sense to allow callers to register different
	 * values for the following fields. Copy them out of the
	 * existing early EFI memmap.
	 */
	data.desc_version = efi.memmap.desc_version;
	data.desc_size = efi.memmap.desc_size;

	return __efi_memmap_init(&data, true);
}
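
/*
 * Illustrative sketch, not part of the original file: the early -> late
 * hand-over once memremap() is usable. It assumes the early map is still
 * installed and simply reuses its geometry; efi_memmap_unmap() leaves
 * phys_map, desc_size and nr_map intact, so they can be read afterwards.
 */
static int __init __maybe_unused example_switch_to_late_map(void)
{
	phys_addr_t phys = efi.memmap.phys_map;
	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;

	/* Release the scarce early (fixmap-based) mapping first. */
	efi_memmap_unmap();

	/* Re-establish a permanent mapping via memremap(). */
	return efi_memmap_init_late(phys, size);
}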

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @addr: Physical address of the memory map
 * @nr_map: Number of entries in the memory map
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
{
	struct efi_memory_map_data data;

	efi_memmap_unmap();

	data.phys_map = addr;
	data.size = efi.memmap.desc_size * nr_map;
	data.desc_version = efi.memmap.desc_version;
	data.desc_size = efi.memmap.desc_size;

	return __efi_memmap_init(&data, efi.memmap.late);
}
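
/*
 * Illustrative sketch, not part of the original file: installing a copy of
 * the current map from a freshly allocated buffer. The early/late mapping
 * choice is handled inside efi_memmap_install(); the phys_to_virt() call
 * here is a simplifying assumption that only holds for directly mapped
 * (lowmem) allocations.
 */
static int __init __maybe_unused example_reinstall_copy(void)
{
	unsigned int nr = efi.memmap.nr_map;
	unsigned long size = nr * efi.memmap.desc_size;
	phys_addr_t new_phys = efi_memmap_alloc(nr);

	if (!new_phys)
		return -ENOMEM;

	/* Copy the existing descriptors into the new buffer. */
	memcpy(phys_to_virt(new_phys), efi.memmap.map, size);

	return efi_memmap_install(new_phys, nr);
}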

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}
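
/*
 * Illustrative sketch, not part of the original file: counting how many
 * extra descriptors a reservation would require across the whole map,
 * using the for_each_efi_memory_desc() iterator from <linux/efi.h>.
 */
static int __init __maybe_unused example_count_extra(struct range *r)
{
	efi_memory_desc_t *md;
	int extra = 0;

	for_each_efi_memory_desc(md)
		extra += efi_memmap_split_count(md, r);

	return extra;
}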

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
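
/*
 * Illustrative sketch, not part of the original file: the typical
 * split/alloc/insert/install sequence used when tagging a region, modelled
 * on x86 efi_arch_mem_reserve()-style callers. The use of __va() on the new
 * buffer is a simplifying assumption for directly mapped allocations.
 */
static int __init __maybe_unused example_mark_region(u64 start, u64 end, u64 attr)
{
	struct efi_mem_range mr = {
		.range.start	= start,
		.range.end	= end,
		.attribute	= attr,
	};
	efi_memory_desc_t *md;
	unsigned int nr_new;
	phys_addr_t new_phys;
	int extra = 0;

	/* How many extra descriptors does the split require? */
	for_each_efi_memory_desc(md)
		extra += efi_memmap_split_count(md, &mr.range);

	nr_new = efi.memmap.nr_map + extra;
	new_phys = efi_memmap_alloc(nr_new);
	if (!new_phys)
		return -ENOMEM;

	/* Build the new map in the buffer, then make it the live one. */
	efi_memmap_insert(&efi.memmap, __va(new_phys), &mr);

	return efi_memmap_install(new_phys, nr_new);
}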