/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)
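
/*
 * Illustrative sketch (hypothetical variable names, not part of this
 * header): a helper that allocates on behalf of a caller can take only the
 * reclaim-related bits of the caller's mask and supply its own placement:
 *
 *	gfp_t eff = (caller_gfp & GFP_RECLAIM_MASK) | __GFP_HIGHMEM | __GFP_MOVABLE;
 *
 * Bits outside GFP_RECLAIM_MASK (e.g. the caller's zone modifiers) are
 * dropped, while the IO/FS/watermark-related bits are preserved.
 */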

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			struct zap_details *details);

extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
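
/*
 * Worked example (illustrative only): the order-2 buddy of pfn 0x1000 is
 *
 *	__find_buddy_pfn(0x1000, 2) == 0x1000 ^ (1 << 2) == 0x1004
 *
 * and XOR-ing again with (1 << 2) gets back to 0x1000, so each buddy of a
 * pair finds the other with the same call.
 */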

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
					unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	struct zone *zone;
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool finishing_block;		/* Finishing current pageblock */
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
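
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * read the order once into a local, then validate the snapshot before
 * trusting it.
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = page_order_unsafe(page);
 *
 *		if (order < MAX_ORDER)
 *			low_pfn += (1UL << order) - 1;
 *	}
 *
 * The value may still be stale, so it is only ever used as a bounded hint
 * (here, how far a scanner may skip ahead), never as ground truth.
 */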

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
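
/*
 * Worked example (illustrative only): a typical private, writable heap
 * mapping with flags VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE satisfies
 * is_data_mapping() (VM_WRITE set, VM_SHARED and VM_STACK clear) but not
 * is_exec_mapping() or is_stack_mapping(). The three helpers are mutually
 * exclusive for any given flags value, since each requires a distinct
 * combination of VM_EXEC, VM_WRITE, VM_SHARED and VM_STACK.
 */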

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
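
/*
 * Worked example (illustrative, assumes 4 KiB pages): for a VMA with
 * vm_start == 0x400000 and vm_pgoff == 0, a base page whose page_to_pgoff()
 * is 3 is expected at
 *
 *	0x400000 + ((3 - 0) << PAGE_SHIFT) == 0x403000
 *
 * For a compound page, vma_address() additionally checks that the whole
 * subpage range overlaps the VMA and clamps the result to vm_start.
 */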

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;

		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
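
/*
 * Illustrative usage sketch (hypothetical loop, not part of this header):
 * walking every subpage of a gigantic page without assuming the mem_map is
 * contiguous across MAX_ORDER_NR_PAGES boundaries.
 *
 *	struct page *subpage = base;
 *	int i;
 *
 *	for (i = 0; i < pages_per_huge_page; i++) {
 *		... operate on subpage ...
 *		subpage = mem_map_next(subpage, base, i + 1);
 *	}
 *
 * Note the "i + 1": mem_map_next() takes the offset of the page being
 * advanced to, and recomputes the pointer from the pfn whenever that
 * offset crosses a MAX_ORDER_NR_PAGES boundary.
 */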

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
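
/*
 * Illustrative call (hypothetical prefix and message): trace-level output
 * is only emitted when mminit_loglevel has been raised above MMINIT_TRACE,
 * e.g. via the mminit_loglevel= boot parameter.
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init", "node %d pfn %lu\n",
 *		       nid, pfn);
 */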

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
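
/*
 * Illustrative sketch (hypothetical variable names): selecting the
 * watermark to test against from the allocation flags. Because
 * ALLOC_NO_WATERMARKS is 0x04, the mask 0x03 covers exactly the
 * WMARK_MIN/LOW/HIGH index values.
 *
 *	unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 */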

/*
 * Only MMU archs have async oom victim reclaim (the oom_reaper), so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif /* __MM_INTERNAL_H */