// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
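
/*
 * Allocate 2^order pages. Once the slab allocator is up this goes
 * through the buddy allocator; during early boot the pages come from
 * memblock instead, requested with size alignment so that multi-page
 * tables are naturally aligned.
 */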
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}
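
/*
 * Allocate a crst (region or segment) table and initialize each entry
 * to @val, e.g. _REGION2_ENTRY_EMPTY. On s390 these tables have 2048
 * eight-byte entries, i.e. four pages, which is what CRST_ALLOC_ORDER
 * accounts for.
 */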
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}
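
/*
 * Allocate a page table with PTRS_PER_PTE entries and mark each entry
 * invalid, so that accesses to unmapped addresses fault. Like
 * vmem_alloc_pages() this works both before and after the slab
 * allocator is available; the slab path uses the page table allocator
 * of &init_mm.
 */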
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
		/* Map a 2GB block if EDAT2 is available and the remaining
		 * range is large enough and properly aligned. */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		/* Map a 1MB segment if EDAT1 is available and the remaining
		 * range is large enough and properly aligned. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}
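
/*
 * Example for vmem_add_mem(): with EDAT1 and EDAT2 available, adding
 * the range [4G, 6G + 3M + 8K) results in one 2GB region third table
 * entry, three 1MB segment table entries and two 4KB page table
 * entries; without EDAT support the same range is mapped entirely with
 * 4KB pages. Note that the "address &&" tests above keep address zero
 * away from large mappings, so the first block of memory always uses
 * smaller entries.
 */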
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries; the page tables
 * themselves stay in place and the TLB is flushed once for the whole
 * range at the end.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used. Otherwise we would also end up
			 * with page tables, since vmemmap_populate() is
			 * called for each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}
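
/*
 * vmemmap_free() below is empty: blocks allocated by vmemmap_populate()
 * are simply left in place when memory is unplugged again.
 */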
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
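
/*
 * Example for insert_memory_segment(): with an existing segment
 * [0x10000000, 0x20000000), inserting [0x18000000, 0x28000000) fails
 * with -ENOSPC because the ranges overlap, while [0x20000000,
 * 0x30000000) succeeds since the intervals merely touch. Segments
 * reaching beyond VMEM_MAX_PHYS or wrapping around are rejected with
 * -ERANGE.
 */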
/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}
	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
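
/*
 * Usage sketch for the two interfaces above (illustrative only; seg_start
 * and seg_size are placeholders for a caller's segment):
 *
 *	rc = vmem_add_mapping(seg_start, seg_size);
 *	if (rc)
 *		return rc;
 *	... access the now mapped range ...
 *	vmem_remove_mapping(seg_start, seg_size);
 */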
/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to allow
 * hotplugging additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}
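
/*
 * After vmem_map_init() the kernel image is protected as follows:
 * kernel text (_stext.._etext) and init text (_sinittext.._einittext)
 * are read-only and executable, and everything from _etext up to
 * __end_rodata is read-only. The pr_info() above reports the total
 * amount of write protected memory.
 */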
/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}
core_initcall(vmem_convert_memory_chunk);