- /*
- * linux/arch/arm/mm/init.c
- *
- * Copyright (C) 1995-2005 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
- #include <linux/kernel.h>
- #include <linux/errno.h>
- #include <linux/swap.h>
- #include <linux/init.h>
- #include <linux/bootmem.h>
- #include <linux/mman.h>
- #include <linux/sched/signal.h>
- #include <linux/sched/task.h>
- #include <linux/export.h>
- #include <linux/nodemask.h>
- #include <linux/initrd.h>
- #include <linux/of_fdt.h>
- #include <linux/highmem.h>
- #include <linux/gfp.h>
- #include <linux/memblock.h>
- #include <linux/dma-contiguous.h>
- #include <linux/sizes.h>
- #include <linux/stop_machine.h>
- #include <asm/cp15.h>
- #include <asm/mach-types.h>
- #include <asm/memblock.h>
- #include <asm/memory.h>
- #include <asm/prom.h>
- #include <asm/sections.h>
- #include <asm/setup.h>
- #include <asm/system_info.h>
- #include <asm/tlb.h>
- #include <asm/fixmap.h>
- #include <asm/ptdump.h>
- #include <asm/mach/arch.h>
- #include <asm/mach/map.h>
- #include "mm.h"
- #ifdef CONFIG_CPU_CP15_MMU
- unsigned long __init __clear_cr(unsigned long mask)
- {
- cr_alignment = cr_alignment & ~mask;
- return cr_alignment;
- }
- #endif
- static phys_addr_t phys_initrd_start __initdata;
- static unsigned long phys_initrd_size __initdata;
- static int __init early_initrd(char *p)
- {
- phys_addr_t start;
- unsigned long size;
- char *endp;
- start = memparse(p, &endp);
- if (*endp == ',') {
- size = memparse(endp + 1, NULL);
- phys_initrd_start = start;
- phys_initrd_size = size;
- }
- return 0;
- }
- early_param("initrd", early_initrd);
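- /*
- * Example (illustrative values, not from this file): a command line
- * containing "initrd=0x60800000,8M" would set phys_initrd_start to
- * 0x60800000 and phys_initrd_size to 8 MiB here; memparse() accepts
- * the usual K/M/G suffixes for both fields.
- */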
- static int __init parse_tag_initrd(const struct tag *tag)
- {
- pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
- phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
- phys_initrd_size = tag->u.initrd.size;
- return 0;
- }
- __tagtable(ATAG_INITRD, parse_tag_initrd);
- static int __init parse_tag_initrd2(const struct tag *tag)
- {
- phys_initrd_start = tag->u.initrd.start;
- phys_initrd_size = tag->u.initrd.size;
- return 0;
- }
- __tagtable(ATAG_INITRD2, parse_tag_initrd2);
- static void __init find_limits(unsigned long *min, unsigned long *max_low,
- unsigned long *max_high)
- {
- *max_low = PFN_DOWN(memblock_get_current_limit());
- *min = PFN_UP(memblock_start_of_DRAM());
- *max_high = PFN_DOWN(memblock_end_of_DRAM());
- }
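- /*
- * PFN_UP()/PFN_DOWN() round conservatively, so partially covered pages
- * at either end of DRAM are left out. Illustrative example only: with
- * 4 KiB pages, a DRAM start of 0x80000400 gives *min = 0x80001 and a
- * DRAM end of 0x8ffff000 gives *max_high = 0x8ffff.
- */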
- #ifdef CONFIG_ZONE_DMA
- phys_addr_t arm_dma_zone_size __read_mostly;
- EXPORT_SYMBOL(arm_dma_zone_size);
- /*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA. The default here places no restriction on DMA
- * allocations. This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
- phys_addr_t arm_dma_limit;
- unsigned long arm_dma_pfn_limit;
- static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
- unsigned long dma_size)
- {
- if (size[0] <= dma_size)
- return;
- size[ZONE_NORMAL] = size[0] - dma_size;
- size[ZONE_DMA] = dma_size;
- hole[ZONE_NORMAL] = hole[0];
- hole[ZONE_DMA] = 0;
- }
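- /*
- * Illustrative example: with 512 MiB of lowmem in size[0] (131072
- * pages of 4 KiB) and a 64 MiB DMA zone (dma_size = 16384 pages),
- * ZONE_DMA ends up with 16384 pages, ZONE_NORMAL with the remaining
- * 114688, and any pre-existing hole is attributed to ZONE_NORMAL.
- */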
- #endif
- void __init setup_dma_zone(const struct machine_desc *mdesc)
- {
- #ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- arm_dma_zone_size = mdesc->dma_zone_size;
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
- } else
- arm_dma_limit = 0xffffffff;
- arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
- #endif
- }
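- /*
- * Illustrative example: a machine with mdesc->dma_zone_size = SZ_64M
- * and PHYS_OFFSET = 0x80000000 ends up with arm_dma_limit = 0x83ffffff
- * and arm_dma_pfn_limit = 0x83fff (4 KiB pages).
- */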
- static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
- unsigned long max_high)
- {
- unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- struct memblock_region *reg;
- /*
- * initialise the zones.
- */
- memset(zone_size, 0, sizeof(zone_size));
- /*
- * The memory size has already been determined. If we need
- * to do anything fancy with the allocation of this memory
- * to the zones, now is the time to do it.
- */
- zone_size[0] = max_low - min;
- #ifdef CONFIG_HIGHMEM
- zone_size[ZONE_HIGHMEM] = max_high - max_low;
- #endif
- /*
- * Calculate the size of the holes.
- * holes = node_size - sum(bank_sizes)
- */
- memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_memblock(memory, reg) {
- unsigned long start = memblock_region_memory_base_pfn(reg);
- unsigned long end = memblock_region_memory_end_pfn(reg);
- if (start < max_low) {
- unsigned long low_end = min(end, max_low);
- zhole_size[0] -= low_end - start;
- }
- #ifdef CONFIG_HIGHMEM
- if (end > max_low) {
- unsigned long high_start = max(start, max_low);
- zhole_size[ZONE_HIGHMEM] -= end - high_start;
- }
- #endif
- }
- #ifdef CONFIG_ZONE_DMA
- /*
- * Adjust the sizes according to any special requirements for
- * this machine type.
- */
- if (arm_dma_zone_size)
- arm_adjust_dma_zone(zone_size, zhole_size,
- arm_dma_zone_size >> PAGE_SHIFT);
- #endif
- free_area_init_node(0, zone_size, min, zhole_size);
- }
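- /*
- * Net effect: zone_size[] holds the spanned pages of each zone and
- * zhole_size[] the pages in that span not backed by any memblock
- * memory region; free_area_init_node() subtracts the holes when it
- * accounts the pages actually present in each zone.
- */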
- #ifdef CONFIG_HAVE_ARCH_PFN_VALID
- int pfn_valid(unsigned long pfn)
- {
- phys_addr_t addr = __pfn_to_phys(pfn);
- if (__phys_to_pfn(addr) != pfn)
- return 0;
- return memblock_is_map_memory(__pfn_to_phys(pfn));
- }
- EXPORT_SYMBOL(pfn_valid);
- #endif
- #ifndef CONFIG_SPARSEMEM
- static void __init arm_memory_present(void)
- {
- }
- #else
- static void __init arm_memory_present(void)
- {
- struct memblock_region *reg;
- for_each_memblock(memory, reg)
- memory_present(0, memblock_region_memory_base_pfn(reg),
- memblock_region_memory_end_pfn(reg));
- }
- #endif
- static bool arm_memblock_steal_permitted = true;
- phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
- {
- phys_addr_t phys;
- BUG_ON(!arm_memblock_steal_permitted);
- phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
- memblock_free(phys, size);
- memblock_remove(phys, size);
- return phys;
- }
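- /*
- * Note that the stolen region is freed *and* removed from memblock, so
- * it vanishes from the kernel's view of RAM and is never handed to the
- * page allocator. This must happen before arm_memblock_init() clears
- * arm_memblock_steal_permitted below.
- */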
- static void __init arm_initrd_init(void)
- {
- #ifdef CONFIG_BLK_DEV_INITRD
- phys_addr_t start;
- unsigned long size;
- /* FDT scan will populate initrd_start */
- if (initrd_start && !phys_initrd_size) {
- phys_initrd_start = __virt_to_phys(initrd_start);
- phys_initrd_size = initrd_end - initrd_start;
- }
- initrd_start = initrd_end = 0;
- if (!phys_initrd_size)
- return;
- /*
- * Round the memory region to page boundaries as per free_initrd_mem().
- * This allows us to detect whether the pages overlapping the initrd
- * are in use, but more importantly, reserves the entire set of pages
- * as we don't want these pages allocated for other purposes.
- */
- start = round_down(phys_initrd_start, PAGE_SIZE);
- size = phys_initrd_size + (phys_initrd_start - start);
- size = round_up(size, PAGE_SIZE);
- if (!memblock_is_region_memory(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
- (u64)start, size);
- return;
- }
- if (memblock_is_region_reserved(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
- (u64)start, size);
- return;
- }
- memblock_reserve(start, size);
- /* Now convert initrd to virtual addresses */
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- #endif
- }
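- /*
- * Example of the rounding above (illustrative values): an initrd at
- * physical 0x60800123 with size 0x300000 is reserved as start =
- * 0x60800000 and size = 0x301000, i.e. every page the initrd touches.
- */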
- void __init arm_memblock_init(const struct machine_desc *mdesc)
- {
- /* Register the kernel text, kernel data and initrd with memblock. */
- memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
- arm_initrd_init();
- arm_mm_memblock_reserve();
- /* reserve any platform specific memblock areas */
- if (mdesc->reserve)
- mdesc->reserve();
- early_init_fdt_reserve_self();
- early_init_fdt_scan_reserved_mem();
- /* reserve memory for DMA contiguous allocations */
- dma_contiguous_reserve(arm_dma_limit);
- arm_memblock_steal_permitted = false;
- memblock_dump_all();
- }
- void __init bootmem_init(void)
- {
- unsigned long min, max_low, max_high;
- memblock_allow_resize();
- max_low = max_high = 0;
- find_limits(&min, &max_low, &max_high);
- early_memtest((phys_addr_t)min << PAGE_SHIFT,
- (phys_addr_t)max_low << PAGE_SHIFT);
- /*
- * Sparsemem tries to allocate bootmem in memory_present(),
- * so must be done after the fixed reservations
- */
- arm_memory_present();
- /*
- * sparse_init() needs the bootmem allocator up and running.
- */
- sparse_init();
- /*
- * Now free the memory - free_area_init_node needs
- * the sparse mem_map arrays initialized by sparse_init()
- * for memmap_init_zone(), otherwise all PFNs are invalid.
- */
- zone_sizes_init(min, max_low, max_high);
- /*
- * This doesn't seem to be used by the Linux memory manager any
- * more, but is used by ll_rw_block. If we can get rid of it, we
- * also get rid of some of the stuff above as well.
- */
- min_low_pfn = min;
- max_low_pfn = max_low;
- max_pfn = max_high;
- }
- /*
- * Poison init memory with an undefined instruction (ARM) or a branch to an
- * undefined instruction (Thumb).
- */
- static inline void poison_init_mem(void *s, size_t count)
- {
- u32 *p = (u32 *)s;
- for (; count != 0; count -= 4)
- *p++ = 0xe7fddef0;
- }
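- /*
- * The poison value is chosen so that a stray jump into freed init
- * memory faults immediately instead of executing stale code (see the
- * comment above). Callers are expected to pass sizes that are
- * multiples of four bytes, which the loop relies on.
- */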
- static inline void __init
- free_memmap(unsigned long start_pfn, unsigned long end_pfn)
- {
- struct page *start_pg, *end_pg;
- phys_addr_t pg, pgend;
- /*
- * Convert start_pfn/end_pfn to a struct page pointer.
- */
- start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn - 1) + 1;
- /*
- * Convert to physical addresses, and
- * round start upwards and end downwards.
- */
- pg = PAGE_ALIGN(__pa(start_pg));
- pgend = __pa(end_pg) & PAGE_MASK;
- /*
- * If there are free pages between these,
- * free the section of the memmap array.
- */
- if (pg < pgend)
- memblock_free_early(pg, pgend - pg);
- }
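- /*
- * Rough scale (illustrative, assuming a struct page of about 32 bytes
- * and 4 KiB pages): a 16 MiB gap between banks spans 4096 page frames,
- * so roughly 128 KiB of memmap can be handed back to memblock here.
- */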
- /*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
- static void __init free_unused_memmap(void)
- {
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
- /*
- * This relies on each bank being in address order.
- * The banks are sorted previously in bootmem_init().
- */
- for_each_memblock(memory, reg) {
- start = memblock_region_memory_base_pfn(reg);
- #ifdef CONFIG_SPARSEMEM
- /*
- * Take care not to free memmap entries that don't exist
- * due to SPARSEMEM sections which aren't present.
- */
- start = min(start,
- ALIGN(prev_end, PAGES_PER_SECTION));
- #else
- /*
- * Align down here since the VM subsystem insists that the
- * memmap entries are valid from the bank start aligned to
- * MAX_ORDER_NR_PAGES.
- */
- start = round_down(start, MAX_ORDER_NR_PAGES);
- #endif
- /*
- * If we had a previous bank, and there is a space
- * between the current bank and the previous, free it.
- */
- if (prev_end && prev_end < start)
- free_memmap(prev_end, start);
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
- MAX_ORDER_NR_PAGES);
- }
- #ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
- free_memmap(prev_end,
- ALIGN(prev_end, PAGES_PER_SECTION));
- #endif
- }
- #ifdef CONFIG_HIGHMEM
- static inline void free_area_high(unsigned long pfn, unsigned long end)
- {
- for (; pfn < end; pfn++)
- free_highmem_page(pfn_to_page(pfn));
- }
- #endif
- static void __init free_highpages(void)
- {
- #ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn;
- struct memblock_region *mem, *res;
- /* set highmem page free */
- for_each_memblock(memory, mem) {
- unsigned long start = memblock_region_memory_base_pfn(mem);
- unsigned long end = memblock_region_memory_end_pfn(mem);
- /* Ignore complete lowmem entries */
- if (end <= max_low)
- continue;
- if (memblock_is_nomap(mem))
- continue;
- /* Truncate partial highmem entries */
- if (start < max_low)
- start = max_low;
- /* Find and exclude any reserved regions */
- for_each_memblock(reserved, res) {
- unsigned long res_start, res_end;
- res_start = memblock_region_reserved_base_pfn(res);
- res_end = memblock_region_reserved_end_pfn(res);
- if (res_end < start)
- continue;
- if (res_start < start)
- res_start = start;
- if (res_start > end)
- res_start = end;
- if (res_end > end)
- res_end = end;
- if (res_start != start)
- free_area_high(start, res_start);
- start = res_end;
- if (start == end)
- break;
- }
- /* And now free anything which remains */
- if (start < end)
- free_area_high(start, end);
- }
- #endif
- }
- /*
- * mem_init() marks the free areas in the mem_map and tells us how much
- * memory is free. This is done after various parts of the system have
- * claimed their memory after the kernel image.
- */
- void __init mem_init(void)
- {
- #ifdef CONFIG_HAVE_TCM
- /* These pointers are filled in on TCM detection */
- extern u32 dtcm_end;
- extern u32 itcm_end;
- #endif
- set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
- /* this will put all unused low memory onto the freelists */
- free_unused_memmap();
- free_all_bootmem();
- #ifdef CONFIG_SA1111
- /* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
- #endif
- free_highpages();
- mem_init_print_info(NULL);
- #define MLK(b, t) b, t, ((t) - (b)) >> 10
- #define MLM(b, t) b, t, ((t) - (b)) >> 20
- #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
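- /*
- * Each macro expands to the three values one layout line below
- * consumes: base, top and the size in KiB or MiB. For example,
- * MLM(PAGE_OFFSET, (unsigned long)high_memory) supplies the lowmem
- * base, top and size in MiB for the "lowmem" line.
- */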
- pr_notice("Virtual kernel memory layout:\n"
- " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
- #ifdef CONFIG_HAVE_TCM
- " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
- #endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
- #ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
- #endif
- #ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
- #endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
- MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
- #ifdef CONFIG_HAVE_TCM
- MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
- MLK(ITCM_OFFSET, (unsigned long) itcm_end),
- #endif
- MLK(FIXADDR_START, FIXADDR_END),
- MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
- #ifdef CONFIG_HIGHMEM
- MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
- (PAGE_SIZE)),
- #endif
- #ifdef CONFIG_MODULES
- MLM(MODULES_VADDR, MODULES_END),
- #endif
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
- #undef MLK
- #undef MLM
- #undef MLK_ROUNDUP
- /*
- * Check boundaries twice: Some fundamental inconsistencies can
- * be detected at build time already.
- */
- #ifdef CONFIG_MMU
- BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
- BUG_ON(TASK_SIZE > MODULES_VADDR);
- #endif
- #ifdef CONFIG_HIGHMEM
- BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
- BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
- #endif
- }
- #ifdef CONFIG_STRICT_KERNEL_RWX
- struct section_perm {
- const char *name;
- unsigned long start;
- unsigned long end;
- pmdval_t mask;
- pmdval_t prot;
- pmdval_t clear;
- };
- /* First section-aligned location at or after __start_rodata. */
- extern char __start_rodata_section_aligned[];
- static struct section_perm nx_perms[] = {
- /* Make page tables, etc. before _stext RW (set NX). */
- {
- .name = "pre-text NX",
- .start = PAGE_OFFSET,
- .end = (unsigned long)_stext,
- .mask = ~PMD_SECT_XN,
- .prot = PMD_SECT_XN,
- },
- /* Make init RW (set NX). */
- {
- .name = "init NX",
- .start = (unsigned long)__init_begin,
- .end = (unsigned long)_sdata,
- .mask = ~PMD_SECT_XN,
- .prot = PMD_SECT_XN,
- },
- /* Make rodata NX (set RO in ro_perms below). */
- {
- .name = "rodata NX",
- .start = (unsigned long)__start_rodata_section_aligned,
- .end = (unsigned long)__init_begin,
- .mask = ~PMD_SECT_XN,
- .prot = PMD_SECT_XN,
- },
- };
- static struct section_perm ro_perms[] = {
- /* Make kernel code and rodata RX (set RO). */
- {
- .name = "text/rodata RO",
- .start = (unsigned long)_stext,
- .end = (unsigned long)__init_begin,
- #ifdef CONFIG_ARM_LPAE
- .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
- .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
- #else
- .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
- .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
- .clear = PMD_SECT_AP_WRITE,
- #endif
- },
- };
- /*
- * Updates section permissions only for the current mm (sections are
- * copied into each mm). During startup, this is the init_mm. It is only
- * safe to call this with preemption disabled, as under stop_machine().
- */
- static inline void section_update(unsigned long addr, pmdval_t mask,
- pmdval_t prot, struct mm_struct *mm)
- {
- pmd_t *pmd;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
- #ifdef CONFIG_ARM_LPAE
- pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
- #else
- if (addr & SECTION_SIZE)
- pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
- else
- pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
- #endif
- flush_pmd_entry(pmd);
- local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
- }
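- /*
- * On classic (non-LPAE) page tables the pmd fetched above is really a
- * pair of 1 MiB section descriptors covering 2 MiB of address space,
- * which is why the "addr & SECTION_SIZE" test selects the odd or even
- * half before rewriting it.
- */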
- /* Make sure extended page tables are in use. */
- static inline bool arch_has_strict_perms(void)
- {
- if (cpu_architecture() < CPU_ARCH_ARMv6)
- return false;
- return !!(get_cr() & CR_XP);
- }
- void set_section_perms(struct section_perm *perms, int n, bool set,
- struct mm_struct *mm)
- {
- size_t i;
- unsigned long addr;
- if (!arch_has_strict_perms())
- return;
- for (i = 0; i < n; i++) {
- if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
- !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
- pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
- perms[i].name, perms[i].start, perms[i].end,
- SECTION_SIZE);
- continue;
- }
- for (addr = perms[i].start;
- addr < perms[i].end;
- addr += SECTION_SIZE)
- section_update(addr, perms[i].mask,
- set ? perms[i].prot : perms[i].clear, mm);
- }
- }
- /**
- * update_sections_early() is intended to be called only through the
- * stop_machine() framework, executed by a single CPU while all other CPUs
- * spin and wait, so no locking is required in this function.
- */
- static void update_sections_early(struct section_perm perms[], int n)
- {
- struct task_struct *t, *s;
- for_each_process(t) {
- if (t->flags & PF_KTHREAD)
- continue;
- for_each_thread(t, s)
- if (s->mm)
- set_section_perms(perms, n, true, s->mm);
- }
- set_section_perms(perms, n, true, current->active_mm);
- set_section_perms(perms, n, true, &init_mm);
- }
- static int __fix_kernmem_perms(void *unused)
- {
- update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
- return 0;
- }
- static void fix_kernmem_perms(void)
- {
- stop_machine(__fix_kernmem_perms, NULL, NULL);
- }
- static int __mark_rodata_ro(void *unused)
- {
- update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
- return 0;
- }
- static int kernel_set_to_readonly __read_mostly;
- void mark_rodata_ro(void)
- {
- kernel_set_to_readonly = 1;
- stop_machine(__mark_rodata_ro, NULL, NULL);
- debug_checkwx();
- }
- void set_kernel_text_rw(void)
- {
- if (!kernel_set_to_readonly)
- return;
- set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
- current->active_mm);
- }
- void set_kernel_text_ro(void)
- {
- if (!kernel_set_to_readonly)
- return;
- set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
- current->active_mm);
- }
- #else
- static inline void fix_kernmem_perms(void) { }
- #endif /* CONFIG_STRICT_KERNEL_RWX */
- void free_initmem(void)
- {
- fix_kernmem_perms();
- poison_init_mem(__init_begin, __init_end - __init_begin);
- if (!machine_is_integrator() && !machine_is_cintegrator())
- free_initmem_default(-1);
- }
- #ifdef CONFIG_BLK_DEV_INITRD
- static int keep_initrd;
- void free_initrd_mem(unsigned long start, unsigned long end)
- {
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
- poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
- }
- }
- static int __init keepinitrd_setup(char *__unused)
- {
- keep_initrd = 1;
- return 1;
- }
- __setup("keepinitrd", keepinitrd_setup);
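- /*
- * Booting with "keepinitrd" on the command line therefore leaves the
- * initrd pages reserved and unpoisoned, e.g. so the image can still be
- * inspected after the system is up.
- */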
- #endif