khugepaged.c

  1. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  2. #include <linux/mm.h>
  3. #include <linux/sched.h>
  4. #include <linux/mmu_notifier.h>
  5. #include <linux/rmap.h>
  6. #include <linux/swap.h>
  7. #include <linux/mm_inline.h>
  8. #include <linux/kthread.h>
  9. #include <linux/khugepaged.h>
  10. #include <linux/freezer.h>
  11. #include <linux/mman.h>
  12. #include <linux/hashtable.h>
  13. #include <linux/userfaultfd_k.h>
  14. #include <linux/page_idle.h>
  15. #include <linux/swapops.h>
  16. #include <linux/shmem_fs.h>
  17. #include <asm/tlb.h>
  18. #include <asm/pgalloc.h>
  19. #include "internal.h"
  20. enum scan_result {
  21. SCAN_FAIL,
  22. SCAN_SUCCEED,
  23. SCAN_PMD_NULL,
  24. SCAN_EXCEED_NONE_PTE,
  25. SCAN_PTE_NON_PRESENT,
  26. SCAN_PAGE_RO,
  27. SCAN_LACK_REFERENCED_PAGE,
  28. SCAN_PAGE_NULL,
  29. SCAN_SCAN_ABORT,
  30. SCAN_PAGE_COUNT,
  31. SCAN_PAGE_LRU,
  32. SCAN_PAGE_LOCK,
  33. SCAN_PAGE_ANON,
  34. SCAN_PAGE_COMPOUND,
  35. SCAN_ANY_PROCESS,
  36. SCAN_VMA_NULL,
  37. SCAN_VMA_CHECK,
  38. SCAN_ADDRESS_RANGE,
  39. SCAN_SWAP_CACHE_PAGE,
  40. SCAN_DEL_PAGE_LRU,
  41. SCAN_ALLOC_HUGE_PAGE_FAIL,
  42. SCAN_CGROUP_CHARGE_FAIL,
  43. SCAN_EXCEED_SWAP_PTE,
  44. SCAN_TRUNCATED,
  45. };
  46. #define CREATE_TRACE_POINTS
  47. #include <trace/events/huge_memory.h>
  48. /* default scan 8*512 ptes (or vmas) every 10 seconds */
  49. static unsigned int khugepaged_pages_to_scan __read_mostly;
  50. static unsigned int khugepaged_pages_collapsed;
  51. static unsigned int khugepaged_full_scans;
  52. static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  53. /* during fragmentation poll the hugepage allocator once every minute */
  54. static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  55. static unsigned long khugepaged_sleep_expire;
  56. static DEFINE_SPINLOCK(khugepaged_mm_lock);
  57. static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  58. /*
  59. * By default, collapse hugepages if there is at least one pte mapped,
  60. * just as would have happened if the vma had been large enough during
  61. * the page fault.
  62. */
  63. static unsigned int khugepaged_max_ptes_none __read_mostly;
  64. static unsigned int khugepaged_max_ptes_swap __read_mostly;
  65. #define MM_SLOTS_HASH_BITS 10
  66. static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  67. static struct kmem_cache *mm_slot_cache __read_mostly;
  68. /**
  69. * struct mm_slot - hash lookup from mm to mm_slot
  70. * @hash: hash collision list
  71. * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  72. * @mm: the mm that this information is valid for
  73. */
  74. struct mm_slot {
  75. struct hlist_node hash;
  76. struct list_head mm_node;
  77. struct mm_struct *mm;
  78. };
  79. /**
  80. * struct khugepaged_scan - cursor for scanning
  81. * @mm_head: the head of the mm list to scan
  82. * @mm_slot: the current mm_slot we are scanning
  83. * @address: the next address inside that to be scanned
  84. *
  85. * There is only one khugepaged_scan instance of this cursor structure.
  86. */
  87. struct khugepaged_scan {
  88. struct list_head mm_head;
  89. struct mm_slot *mm_slot;
  90. unsigned long address;
  91. };
  92. static struct khugepaged_scan khugepaged_scan = {
  93. .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
  94. };
  95. #ifdef CONFIG_SYSFS
  96. static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
  97. struct kobj_attribute *attr,
  98. char *buf)
  99. {
  100. return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
  101. }
  102. static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
  103. struct kobj_attribute *attr,
  104. const char *buf, size_t count)
  105. {
  106. unsigned long msecs;
  107. int err;
  108. err = kstrtoul(buf, 10, &msecs);
  109. if (err || msecs > UINT_MAX)
  110. return -EINVAL;
  111. khugepaged_scan_sleep_millisecs = msecs;
  112. khugepaged_sleep_expire = 0;
  113. wake_up_interruptible(&khugepaged_wait);
  114. return count;
  115. }
  116. static struct kobj_attribute scan_sleep_millisecs_attr =
  117. __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
  118. scan_sleep_millisecs_store);
  119. static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
  120. struct kobj_attribute *attr,
  121. char *buf)
  122. {
  123. return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
  124. }
  125. static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
  126. struct kobj_attribute *attr,
  127. const char *buf, size_t count)
  128. {
  129. unsigned long msecs;
  130. int err;
  131. err = kstrtoul(buf, 10, &msecs);
  132. if (err || msecs > UINT_MAX)
  133. return -EINVAL;
  134. khugepaged_alloc_sleep_millisecs = msecs;
  135. khugepaged_sleep_expire = 0;
  136. wake_up_interruptible(&khugepaged_wait);
  137. return count;
  138. }
  139. static struct kobj_attribute alloc_sleep_millisecs_attr =
  140. __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
  141. alloc_sleep_millisecs_store);
  142. static ssize_t pages_to_scan_show(struct kobject *kobj,
  143. struct kobj_attribute *attr,
  144. char *buf)
  145. {
  146. return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
  147. }
  148. static ssize_t pages_to_scan_store(struct kobject *kobj,
  149. struct kobj_attribute *attr,
  150. const char *buf, size_t count)
  151. {
  152. int err;
  153. unsigned long pages;
  154. err = kstrtoul(buf, 10, &pages);
  155. if (err || !pages || pages > UINT_MAX)
  156. return -EINVAL;
  157. khugepaged_pages_to_scan = pages;
  158. return count;
  159. }
  160. static struct kobj_attribute pages_to_scan_attr =
  161. __ATTR(pages_to_scan, 0644, pages_to_scan_show,
  162. pages_to_scan_store);
  163. static ssize_t pages_collapsed_show(struct kobject *kobj,
  164. struct kobj_attribute *attr,
  165. char *buf)
  166. {
  167. return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
  168. }
  169. static struct kobj_attribute pages_collapsed_attr =
  170. __ATTR_RO(pages_collapsed);
  171. static ssize_t full_scans_show(struct kobject *kobj,
  172. struct kobj_attribute *attr,
  173. char *buf)
  174. {
  175. return sprintf(buf, "%u\n", khugepaged_full_scans);
  176. }
  177. static struct kobj_attribute full_scans_attr =
  178. __ATTR_RO(full_scans);
  179. static ssize_t khugepaged_defrag_show(struct kobject *kobj,
  180. struct kobj_attribute *attr, char *buf)
  181. {
  182. return single_hugepage_flag_show(kobj, attr, buf,
  183. TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  184. }
  185. static ssize_t khugepaged_defrag_store(struct kobject *kobj,
  186. struct kobj_attribute *attr,
  187. const char *buf, size_t count)
  188. {
  189. return single_hugepage_flag_store(kobj, attr, buf, count,
  190. TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  191. }
  192. static struct kobj_attribute khugepaged_defrag_attr =
  193. __ATTR(defrag, 0644, khugepaged_defrag_show,
  194. khugepaged_defrag_store);
  195. /*
  196. * max_ptes_none controls whether khugepaged should collapse hugepages
  197. * over unmapped ptes, potentially increasing the memory footprint of
  198. * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
  199. * available free memory in the system as it runs. Increasing
  200. * max_ptes_none will instead potentially reduce the free memory in
  201. * the system during the khugepaged scan.
  202. */
  203. static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
  204. struct kobj_attribute *attr,
  205. char *buf)
  206. {
  207. return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
  208. }
  209. static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
  210. struct kobj_attribute *attr,
  211. const char *buf, size_t count)
  212. {
  213. int err;
  214. unsigned long max_ptes_none;
  215. err = kstrtoul(buf, 10, &max_ptes_none);
  216. if (err || max_ptes_none > HPAGE_PMD_NR-1)
  217. return -EINVAL;
  218. khugepaged_max_ptes_none = max_ptes_none;
  219. return count;
  220. }
  221. static struct kobj_attribute khugepaged_max_ptes_none_attr =
  222. __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
  223. khugepaged_max_ptes_none_store);
  224. static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
  225. struct kobj_attribute *attr,
  226. char *buf)
  227. {
  228. return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
  229. }
  230. static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
  231. struct kobj_attribute *attr,
  232. const char *buf, size_t count)
  233. {
  234. int err;
  235. unsigned long max_ptes_swap;
  236. err = kstrtoul(buf, 10, &max_ptes_swap);
  237. if (err || max_ptes_swap > HPAGE_PMD_NR-1)
  238. return -EINVAL;
  239. khugepaged_max_ptes_swap = max_ptes_swap;
  240. return count;
  241. }
  242. static struct kobj_attribute khugepaged_max_ptes_swap_attr =
  243. __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
  244. khugepaged_max_ptes_swap_store);
  245. static struct attribute *khugepaged_attr[] = {
  246. &khugepaged_defrag_attr.attr,
  247. &khugepaged_max_ptes_none_attr.attr,
  248. &pages_to_scan_attr.attr,
  249. &pages_collapsed_attr.attr,
  250. &full_scans_attr.attr,
  251. &scan_sleep_millisecs_attr.attr,
  252. &alloc_sleep_millisecs_attr.attr,
  253. &khugepaged_max_ptes_swap_attr.attr,
  254. NULL,
  255. };
  256. struct attribute_group khugepaged_attr_group = {
  257. .attrs = khugepaged_attr,
  258. .name = "khugepaged",
  259. };
  260. #endif /* CONFIG_SYSFS */
  261. #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
  262. int hugepage_madvise(struct vm_area_struct *vma,
  263. unsigned long *vm_flags, int advice)
  264. {
  265. switch (advice) {
  266. case MADV_HUGEPAGE:
  267. #ifdef CONFIG_S390
  268. /*
  269. * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
  270. * can't handle this properly after s390_enable_sie, so we simply
  271. * ignore the madvise to prevent qemu from causing a SIGSEGV.
  272. */
  273. if (mm_has_pgste(vma->vm_mm))
  274. return 0;
  275. #endif
  276. *vm_flags &= ~VM_NOHUGEPAGE;
  277. *vm_flags |= VM_HUGEPAGE;
  278. /*
  279. * If the vma becomes suitable for khugepaged to scan,
  280. * register it here without waiting for a page fault that
  281. * may not happen any time soon.
  282. */
  283. if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
  284. khugepaged_enter_vma_merge(vma, *vm_flags))
  285. return -ENOMEM;
  286. break;
  287. case MADV_NOHUGEPAGE:
  288. *vm_flags &= ~VM_HUGEPAGE;
  289. *vm_flags |= VM_NOHUGEPAGE;
  290. /*
  291. * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
  292. * this vma, even if we leave the mm registered in khugepaged
  293. * (it may have been registered before VM_NOHUGEPAGE was set).
  294. */
  295. break;
  296. }
  297. return 0;
  298. }
  299. int __init khugepaged_init(void)
  300. {
  301. mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
  302. sizeof(struct mm_slot),
  303. __alignof__(struct mm_slot), 0, NULL);
  304. if (!mm_slot_cache)
  305. return -ENOMEM;
  306. khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
  307. khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
  308. khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
  309. return 0;
  310. }
  311. void __init khugepaged_destroy(void)
  312. {
  313. kmem_cache_destroy(mm_slot_cache);
  314. }
  315. static inline struct mm_slot *alloc_mm_slot(void)
  316. {
  317. if (!mm_slot_cache) /* initialization failed */
  318. return NULL;
  319. return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
  320. }
  321. static inline void free_mm_slot(struct mm_slot *mm_slot)
  322. {
  323. kmem_cache_free(mm_slot_cache, mm_slot);
  324. }
  325. static struct mm_slot *get_mm_slot(struct mm_struct *mm)
  326. {
  327. struct mm_slot *mm_slot;
  328. hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
  329. if (mm == mm_slot->mm)
  330. return mm_slot;
  331. return NULL;
  332. }
  333. static void insert_to_mm_slots_hash(struct mm_struct *mm,
  334. struct mm_slot *mm_slot)
  335. {
  336. mm_slot->mm = mm;
  337. hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
  338. }
  339. static inline int khugepaged_test_exit(struct mm_struct *mm)
  340. {
  341. return atomic_read(&mm->mm_users) == 0;
  342. }
  343. int __khugepaged_enter(struct mm_struct *mm)
  344. {
  345. struct mm_slot *mm_slot;
  346. int wakeup;
  347. mm_slot = alloc_mm_slot();
  348. if (!mm_slot)
  349. return -ENOMEM;
  350. /* __khugepaged_exit() must not run from under us */
  351. VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
  352. if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
  353. free_mm_slot(mm_slot);
  354. return 0;
  355. }
  356. spin_lock(&khugepaged_mm_lock);
  357. insert_to_mm_slots_hash(mm, mm_slot);
  358. /*
  359. * Insert just behind the scanning cursor, to let the area settle
  360. * down a little.
  361. */
  362. wakeup = list_empty(&khugepaged_scan.mm_head);
  363. list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
  364. spin_unlock(&khugepaged_mm_lock);
  365. atomic_inc(&mm->mm_count);
  366. if (wakeup)
  367. wake_up_interruptible(&khugepaged_wait);
  368. return 0;
  369. }
  370. int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
  371. unsigned long vm_flags)
  372. {
  373. unsigned long hstart, hend;
  374. if (!vma->anon_vma)
  375. /*
  376. * Not yet faulted in so we will register later in the
  377. * page fault if needed.
  378. */
  379. return 0;
  380. if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
  381. /* khugepaged not yet working on file or special mappings */
  382. return 0;
  383. hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
  384. hend = vma->vm_end & HPAGE_PMD_MASK;
  385. if (hstart < hend)
  386. return khugepaged_enter(vma, vm_flags);
  387. return 0;
  388. }
  389. void __khugepaged_exit(struct mm_struct *mm)
  390. {
  391. struct mm_slot *mm_slot;
  392. int free = 0;
  393. spin_lock(&khugepaged_mm_lock);
  394. mm_slot = get_mm_slot(mm);
  395. if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
  396. hash_del(&mm_slot->hash);
  397. list_del(&mm_slot->mm_node);
  398. free = 1;
  399. }
  400. spin_unlock(&khugepaged_mm_lock);
  401. if (free) {
  402. clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
  403. free_mm_slot(mm_slot);
  404. mmdrop(mm);
  405. } else if (mm_slot) {
  406. /*
  407. * This is required to serialize against
  408. * khugepaged_test_exit() (which is guaranteed to run
  409. * under mmap_sem read mode). Stop here (after we
  410. * return, all pagetables will be destroyed) until
  411. * khugepaged has finished working on the pagetables
  412. * under the mmap_sem.
  413. */
  414. down_write(&mm->mmap_sem);
  415. up_write(&mm->mmap_sem);
  416. }
  417. }
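/*
 * Undo the isolation done by __collapse_huge_page_isolate(): drop the
 * NR_ISOLATED_ANON accounting, unlock each page and put it back on
 * its LRU list.
 */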
  418. static void release_pte_page(struct page *page)
  419. {
  420. /* 0 stands for page_is_file_cache(page) == false */
  421. dec_node_page_state(page, NR_ISOLATED_ANON + 0);
  422. unlock_page(page);
  423. putback_lru_page(page);
  424. }
  425. static void release_pte_pages(pte_t *pte, pte_t *_pte)
  426. {
  427. while (--_pte >= pte) {
  428. pte_t pteval = *_pte;
  429. if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
  430. release_pte_page(pte_page(pteval));
  431. }
  432. }
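/*
 * Walk the HPAGE_PMD_NR ptes under one pmd, locking and isolating each
 * mapped page from the LRU so the collapse can proceed without the
 * pages changing under us. Returns 1 if the range qualifies (referenced
 * ptes, at least one writable pte, and no more than max_ptes_none
 * empty/zero entries), 0 otherwise, recording a scan_result for tracing.
 */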
  433. static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
  434. unsigned long address,
  435. pte_t *pte)
  436. {
  437. struct page *page = NULL;
  438. pte_t *_pte;
  439. int none_or_zero = 0, result = 0, referenced = 0;
  440. bool writable = false;
  441. for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
  442. _pte++, address += PAGE_SIZE) {
  443. pte_t pteval = *_pte;
  444. if (pte_none(pteval) || (pte_present(pteval) &&
  445. is_zero_pfn(pte_pfn(pteval)))) {
  446. if (!userfaultfd_armed(vma) &&
  447. ++none_or_zero <= khugepaged_max_ptes_none) {
  448. continue;
  449. } else {
  450. result = SCAN_EXCEED_NONE_PTE;
  451. goto out;
  452. }
  453. }
  454. if (!pte_present(pteval)) {
  455. result = SCAN_PTE_NON_PRESENT;
  456. goto out;
  457. }
  458. page = vm_normal_page(vma, address, pteval);
  459. if (unlikely(!page)) {
  460. result = SCAN_PAGE_NULL;
  461. goto out;
  462. }
  463. /* TODO: teach khugepaged to collapse THP mapped with pte */
  464. if (PageCompound(page)) {
  465. result = SCAN_PAGE_COMPOUND;
  466. goto out;
  467. }
  468. VM_BUG_ON_PAGE(!PageAnon(page), page);
  469. VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
  470. /*
  471. * We can do it before isolate_lru_page because the
  472. * page can't be freed from under us. NOTE: PG_lock
  473. * is needed to serialize against split_huge_page
  474. * when invoked from the VM.
  475. */
  476. if (!trylock_page(page)) {
  477. result = SCAN_PAGE_LOCK;
  478. goto out;
  479. }
  480. /*
  481. * cannot use mapcount: can't collapse if there's a gup pin.
  482. * The page must only be referenced by the scanned process
  483. * and page swap cache.
  484. */
  485. if (page_count(page) != 1 + !!PageSwapCache(page)) {
  486. unlock_page(page);
  487. result = SCAN_PAGE_COUNT;
  488. goto out;
  489. }
  490. if (pte_write(pteval)) {
  491. writable = true;
  492. } else {
  493. if (PageSwapCache(page) &&
  494. !reuse_swap_page(page, NULL)) {
  495. unlock_page(page);
  496. result = SCAN_SWAP_CACHE_PAGE;
  497. goto out;
  498. }
  499. /*
  500. * Page is not in the swap cache. It can be collapsed
  501. * into a THP.
  502. */
  503. }
  504. /*
  505. * Isolate the page to avoid collapsing a hugepage
  506. * currently in use by the VM.
  507. */
  508. if (isolate_lru_page(page)) {
  509. unlock_page(page);
  510. result = SCAN_DEL_PAGE_LRU;
  511. goto out;
  512. }
  513. /* 0 stands for page_is_file_cache(page) == false */
  514. inc_node_page_state(page, NR_ISOLATED_ANON + 0);
  515. VM_BUG_ON_PAGE(!PageLocked(page), page);
  516. VM_BUG_ON_PAGE(PageLRU(page), page);
  517. /* There should be enough young ptes to collapse the page */
  518. if (pte_young(pteval) ||
  519. page_is_young(page) || PageReferenced(page) ||
  520. mmu_notifier_test_young(vma->vm_mm, address))
  521. referenced++;
  522. }
  523. if (likely(writable)) {
  524. if (likely(referenced)) {
  525. result = SCAN_SUCCEED;
  526. trace_mm_collapse_huge_page_isolate(page, none_or_zero,
  527. referenced, writable, result);
  528. return 1;
  529. }
  530. } else {
  531. result = SCAN_PAGE_RO;
  532. }
  533. out:
  534. release_pte_pages(pte, _pte);
  535. trace_mm_collapse_huge_page_isolate(page, none_or_zero,
  536. referenced, writable, result);
  537. return 0;
  538. }
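/*
 * Copy each isolated small page into the new huge page, clear the old
 * ptes and release the source pages; pte_none/zero-pfn entries simply
 * become cleared ranges of the huge page.
 */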
  539. static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
  540. struct vm_area_struct *vma,
  541. unsigned long address,
  542. spinlock_t *ptl)
  543. {
  544. pte_t *_pte;
  545. for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
  546. pte_t pteval = *_pte;
  547. struct page *src_page;
  548. if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
  549. clear_user_highpage(page, address);
  550. add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
  551. if (is_zero_pfn(pte_pfn(pteval))) {
  552. /*
  553. * ptl mostly unnecessary.
  554. */
  555. spin_lock(ptl);
  556. /*
  557. * paravirt calls inside pte_clear here are
  558. * superfluous.
  559. */
  560. pte_clear(vma->vm_mm, address, _pte);
  561. spin_unlock(ptl);
  562. }
  563. } else {
  564. src_page = pte_page(pteval);
  565. copy_user_highpage(page, src_page, address, vma);
  566. VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
  567. release_pte_page(src_page);
  568. /*
  569. * ptl mostly unnecessary, but preempt has to
  570. * be disabled to update the per-cpu stats
  571. * inside page_remove_rmap().
  572. */
  573. spin_lock(ptl);
  574. /*
  575. * paravirt calls inside pte_clear here are
  576. * superfluous.
  577. */
  578. pte_clear(vma->vm_mm, address, _pte);
  579. page_remove_rmap(src_page, false);
  580. spin_unlock(ptl);
  581. free_page_and_swap_cache(src_page);
  582. }
  583. address += PAGE_SIZE;
  584. page++;
  585. }
  586. }
  587. static void khugepaged_alloc_sleep(void)
  588. {
  589. DEFINE_WAIT(wait);
  590. add_wait_queue(&khugepaged_wait, &wait);
  591. freezable_schedule_timeout_interruptible(
  592. msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
  593. remove_wait_queue(&khugepaged_wait, &wait);
  594. }
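/*
 * khugepaged_node_load[] counts, for the pmd currently being scanned,
 * how many small pages live on each NUMA node; khugepaged_scan_abort()
 * aborts a scan that would mix nodes further apart than
 * RECLAIM_DISTANCE when node_reclaim_mode is enabled.
 */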
  595. static int khugepaged_node_load[MAX_NUMNODES];
  596. static bool khugepaged_scan_abort(int nid)
  597. {
  598. int i;
  599. /*
  600. * If node_reclaim_mode is disabled, then no extra effort is made to
  601. * allocate memory locally.
  602. */
  603. if (!node_reclaim_mode)
  604. return false;
  605. /* If there is a count for this node already, it must be acceptable */
  606. if (khugepaged_node_load[nid])
  607. return false;
  608. for (i = 0; i < MAX_NUMNODES; i++) {
  609. if (!khugepaged_node_load[i])
  610. continue;
  611. if (node_distance(nid, i) > RECLAIM_DISTANCE)
  612. return true;
  613. }
  614. return false;
  615. }
  616. /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
  617. static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
  618. {
  619. return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
  620. }
  621. #ifdef CONFIG_NUMA
  622. static int khugepaged_find_target_node(void)
  623. {
  624. static int last_khugepaged_target_node = NUMA_NO_NODE;
  625. int nid, target_node = 0, max_value = 0;
  626. /* find first node with max normal pages hit */
  627. for (nid = 0; nid < MAX_NUMNODES; nid++)
  628. if (khugepaged_node_load[nid] > max_value) {
  629. max_value = khugepaged_node_load[nid];
  630. target_node = nid;
  631. }
  632. /* do some balancing if several nodes have the same hit count */
  633. if (target_node <= last_khugepaged_target_node)
  634. for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
  635. nid++)
  636. if (max_value == khugepaged_node_load[nid]) {
  637. target_node = nid;
  638. break;
  639. }
  640. last_khugepaged_target_node = target_node;
  641. return target_node;
  642. }
  643. static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
  644. {
  645. if (IS_ERR(*hpage)) {
  646. if (!*wait)
  647. return false;
  648. *wait = false;
  649. *hpage = NULL;
  650. khugepaged_alloc_sleep();
  651. } else if (*hpage) {
  652. put_page(*hpage);
  653. *hpage = NULL;
  654. }
  655. return true;
  656. }
  657. static struct page *
  658. khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
  659. {
  660. VM_BUG_ON_PAGE(*hpage, *hpage);
  661. *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
  662. if (unlikely(!*hpage)) {
  663. count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
  664. *hpage = ERR_PTR(-ENOMEM);
  665. return NULL;
  666. }
  667. prep_transhuge_page(*hpage);
  668. count_vm_event(THP_COLLAPSE_ALLOC);
  669. return *hpage;
  670. }
  671. #else
  672. static int khugepaged_find_target_node(void)
  673. {
  674. return 0;
  675. }
  676. static inline struct page *alloc_khugepaged_hugepage(void)
  677. {
  678. struct page *page;
  679. page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
  680. HPAGE_PMD_ORDER);
  681. if (page)
  682. prep_transhuge_page(page);
  683. return page;
  684. }
  685. static struct page *khugepaged_alloc_hugepage(bool *wait)
  686. {
  687. struct page *hpage;
  688. do {
  689. hpage = alloc_khugepaged_hugepage();
  690. if (!hpage) {
  691. count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
  692. if (!*wait)
  693. return NULL;
  694. *wait = false;
  695. khugepaged_alloc_sleep();
  696. } else
  697. count_vm_event(THP_COLLAPSE_ALLOC);
  698. } while (unlikely(!hpage) && likely(khugepaged_enabled()));
  699. return hpage;
  700. }
  701. static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
  702. {
  703. if (!*hpage)
  704. *hpage = khugepaged_alloc_hugepage(wait);
  705. if (unlikely(!*hpage))
  706. return false;
  707. return true;
  708. }
  709. static struct page *
  710. khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
  711. {
  712. VM_BUG_ON(!*hpage);
  713. return *hpage;
  714. }
  715. #endif
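/*
 * Decide whether a vma is eligible for collapse at all: honour
 * VM_HUGEPAGE/VM_NOHUGEPAGE, require suitably aligned shmem mappings
 * when huge pagecache is enabled, and reject other file-backed,
 * special and temporary-stack vmas.
 */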
  716. static bool hugepage_vma_check(struct vm_area_struct *vma)
  717. {
  718. if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
  719. (vma->vm_flags & VM_NOHUGEPAGE))
  720. return false;
  721. if (shmem_file(vma->vm_file)) {
  722. if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
  723. return false;
  724. return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
  725. HPAGE_PMD_NR);
  726. }
  727. if (!vma->anon_vma || vma->vm_ops)
  728. return false;
  729. if (is_vma_temporary_stack(vma))
  730. return false;
  731. return !(vma->vm_flags & VM_NO_KHUGEPAGED);
  732. }
  733. /*
  734. * If the mmap_sem was temporarily dropped, revalidate the vma
  735. * after re-taking mmap_sem.
  736. * Return 0 on success, otherwise return a non-zero
  737. * value (scan code).
  738. */
  739. static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
  740. struct vm_area_struct **vmap)
  741. {
  742. struct vm_area_struct *vma;
  743. unsigned long hstart, hend;
  744. if (unlikely(khugepaged_test_exit(mm)))
  745. return SCAN_ANY_PROCESS;
  746. *vmap = vma = find_vma(mm, address);
  747. if (!vma)
  748. return SCAN_VMA_NULL;
  749. hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
  750. hend = vma->vm_end & HPAGE_PMD_MASK;
  751. if (address < hstart || address + HPAGE_PMD_SIZE > hend)
  752. return SCAN_ADDRESS_RANGE;
  753. if (!hugepage_vma_check(vma))
  754. return SCAN_VMA_CHECK;
  755. return 0;
  756. }
  757. /*
  758. * Bring missing pages in from swap, to complete THP collapse.
  759. * Only done if khugepaged_scan_pmd believes it is worthwhile.
  760. *
  761. * Called and returns without pte mapped or spinlocks held,
  762. * but with mmap_sem held to protect against vma changes.
  763. */
  764. static bool __collapse_huge_page_swapin(struct mm_struct *mm,
  765. struct vm_area_struct *vma,
  766. unsigned long address, pmd_t *pmd,
  767. int referenced)
  768. {
  769. pte_t pteval;
  770. int swapped_in = 0, ret = 0;
  771. struct fault_env fe = {
  772. .vma = vma,
  773. .address = address,
  774. .flags = FAULT_FLAG_ALLOW_RETRY,
  775. .pmd = pmd,
  776. };
  777. /* we only decide to swap in if there are enough young ptes */
  778. if (referenced < HPAGE_PMD_NR/2) {
  779. trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
  780. return false;
  781. }
  782. fe.pte = pte_offset_map(pmd, address);
  783. for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
  784. fe.pte++, fe.address += PAGE_SIZE) {
  785. pteval = *fe.pte;
  786. if (!is_swap_pte(pteval))
  787. continue;
  788. swapped_in++;
  789. ret = do_swap_page(&fe, pteval);
  790. /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
  791. if (ret & VM_FAULT_RETRY) {
  792. down_read(&mm->mmap_sem);
  793. if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
  794. /* vma is no longer available, don't continue to swapin */
  795. trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
  796. return false;
  797. }
  798. /* check if the pmd is still valid */
  799. if (mm_find_pmd(mm, address) != pmd)
  800. return false;
  801. }
  802. if (ret & VM_FAULT_ERROR) {
  803. trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
  804. return false;
  805. }
  806. /* pte is unmapped now, we need to map it */
  807. fe.pte = pte_offset_map(pmd, fe.address);
  808. }
  809. fe.pte--;
  810. pte_unmap(fe.pte);
  811. trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
  812. return true;
  813. }
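/*
 * Collapse one pmd-sized anonymous range into a huge page: allocate the
 * huge page with mmap_sem dropped, charge it to the memcg, swap in any
 * missing ptes, then, under mmap_sem held for write and the anon_vma
 * lock, clear the pmd, isolate and copy the small pages, and install
 * the new huge pmd.
 */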
  814. static void collapse_huge_page(struct mm_struct *mm,
  815. unsigned long address,
  816. struct page **hpage,
  817. int node, int referenced)
  818. {
  819. pmd_t *pmd, _pmd;
  820. pte_t *pte;
  821. pgtable_t pgtable;
  822. struct page *new_page;
  823. spinlock_t *pmd_ptl, *pte_ptl;
  824. int isolated = 0, result = 0;
  825. struct mem_cgroup *memcg;
  826. struct vm_area_struct *vma;
  827. unsigned long mmun_start; /* For mmu_notifiers */
  828. unsigned long mmun_end; /* For mmu_notifiers */
  829. gfp_t gfp;
  830. VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  831. /* Only allocate from the target node */
  832. gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
  833. /*
  834. * Before allocating the hugepage, release the mmap_sem read lock.
  835. * The allocation can take potentially a long time if it involves
  836. * sync compaction, and we do not need to hold the mmap_sem during
  837. * that. We will recheck the vma after taking it again in write mode.
  838. */
  839. up_read(&mm->mmap_sem);
  840. new_page = khugepaged_alloc_page(hpage, gfp, node);
  841. if (!new_page) {
  842. result = SCAN_ALLOC_HUGE_PAGE_FAIL;
  843. goto out_nolock;
  844. }
  845. /* Do not oom kill for khugepaged charges */
  846. if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
  847. &memcg, true))) {
  848. result = SCAN_CGROUP_CHARGE_FAIL;
  849. goto out_nolock;
  850. }
  851. down_read(&mm->mmap_sem);
  852. result = hugepage_vma_revalidate(mm, address, &vma);
  853. if (result) {
  854. mem_cgroup_cancel_charge(new_page, memcg, true);
  855. up_read(&mm->mmap_sem);
  856. goto out_nolock;
  857. }
  858. pmd = mm_find_pmd(mm, address);
  859. if (!pmd) {
  860. result = SCAN_PMD_NULL;
  861. mem_cgroup_cancel_charge(new_page, memcg, true);
  862. up_read(&mm->mmap_sem);
  863. goto out_nolock;
  864. }
  865. /*
  866. * __collapse_huge_page_swapin always returns with mmap_sem locked.
  867. * If it fails, we release mmap_sem and jump out_nolock.
  868. * Continuing to collapse causes inconsistency.
  869. */
  870. if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
  871. mem_cgroup_cancel_charge(new_page, memcg, true);
  872. up_read(&mm->mmap_sem);
  873. goto out_nolock;
  874. }
  875. up_read(&mm->mmap_sem);
  876. /*
  877. * Prevent all access to the pagetables, with the exception of
  878. * gup_fast (handled later by the ptep_clear_flush) and the VM
  879. * (handled by the anon_vma lock + PG_lock).
  880. */
  881. down_write(&mm->mmap_sem);
  882. result = hugepage_vma_revalidate(mm, address, &vma);
  883. if (result)
  884. goto out;
  885. /* check if the pmd is still valid */
  886. if (mm_find_pmd(mm, address) != pmd)
  887. goto out;
  888. anon_vma_lock_write(vma->anon_vma);
  889. pte = pte_offset_map(pmd, address);
  890. pte_ptl = pte_lockptr(mm, pmd);
  891. mmun_start = address;
  892. mmun_end = address + HPAGE_PMD_SIZE;
  893. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  894. pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
  895. /*
  896. * After this gup_fast can't run anymore. This also removes
  897. * any huge TLB entry from the CPU so we won't allow
  898. * huge and small TLB entries for the same virtual address
  899. * to avoid the risk of CPU bugs in that area.
  900. */
  901. _pmd = pmdp_collapse_flush(vma, address, pmd);
  902. spin_unlock(pmd_ptl);
  903. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  904. spin_lock(pte_ptl);
  905. isolated = __collapse_huge_page_isolate(vma, address, pte);
  906. spin_unlock(pte_ptl);
  907. if (unlikely(!isolated)) {
  908. pte_unmap(pte);
  909. spin_lock(pmd_ptl);
  910. BUG_ON(!pmd_none(*pmd));
  911. /*
  912. * We can only use set_pmd_at when establishing
  913. * hugepmds and never for establishing regular pmds that
  914. * point to regular pagetables. Use pmd_populate for that.
  915. */
  916. pmd_populate(mm, pmd, pmd_pgtable(_pmd));
  917. spin_unlock(pmd_ptl);
  918. anon_vma_unlock_write(vma->anon_vma);
  919. result = SCAN_FAIL;
  920. goto out;
  921. }
  922. /*
  923. * All pages are isolated and locked so anon_vma rmap
  924. * can't run anymore.
  925. */
  926. anon_vma_unlock_write(vma->anon_vma);
  927. __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
  928. pte_unmap(pte);
  929. __SetPageUptodate(new_page);
  930. pgtable = pmd_pgtable(_pmd);
  931. _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
  932. _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
  933. /*
  934. * spin_lock() below is not the equivalent of smp_wmb(), so
  935. * this is needed to prevent the copy_huge_page writes from
  936. * becoming visible after the set_pmd_at() write.
  937. */
  938. smp_wmb();
  939. spin_lock(pmd_ptl);
  940. BUG_ON(!pmd_none(*pmd));
  941. page_add_new_anon_rmap(new_page, vma, address, true);
  942. mem_cgroup_commit_charge(new_page, memcg, false, true);
  943. lru_cache_add_active_or_unevictable(new_page, vma);
  944. pgtable_trans_huge_deposit(mm, pmd, pgtable);
  945. set_pmd_at(mm, address, pmd, _pmd);
  946. update_mmu_cache_pmd(vma, address, pmd);
  947. spin_unlock(pmd_ptl);
  948. *hpage = NULL;
  949. khugepaged_pages_collapsed++;
  950. result = SCAN_SUCCEED;
  951. out_up_write:
  952. up_write(&mm->mmap_sem);
  953. out_nolock:
  954. trace_mm_collapse_huge_page(mm, isolated, result);
  955. return;
  956. out:
  957. mem_cgroup_cancel_charge(new_page, memcg, true);
  958. goto out_up_write;
  959. }
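/*
 * Scan one pmd-sized range: count swap and none/zero ptes against the
 * max_ptes_swap/max_ptes_none limits, require every mapped page to be
 * an unpinned, unlocked anonymous LRU page, and record its NUMA node.
 * On success call collapse_huge_page() towards the most-loaded node.
 * Returns 1 if a collapse was attempted (mmap_sem released), 0 otherwise.
 */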
  960. static int khugepaged_scan_pmd(struct mm_struct *mm,
  961. struct vm_area_struct *vma,
  962. unsigned long address,
  963. struct page **hpage)
  964. {
  965. pmd_t *pmd;
  966. pte_t *pte, *_pte;
  967. int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
  968. struct page *page = NULL;
  969. unsigned long _address;
  970. spinlock_t *ptl;
  971. int node = NUMA_NO_NODE, unmapped = 0;
  972. bool writable = false;
  973. VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  974. pmd = mm_find_pmd(mm, address);
  975. if (!pmd) {
  976. result = SCAN_PMD_NULL;
  977. goto out;
  978. }
  979. memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
  980. pte = pte_offset_map_lock(mm, pmd, address, &ptl);
  981. for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
  982. _pte++, _address += PAGE_SIZE) {
  983. pte_t pteval = *_pte;
  984. if (is_swap_pte(pteval)) {
  985. if (++unmapped <= khugepaged_max_ptes_swap) {
  986. continue;
  987. } else {
  988. result = SCAN_EXCEED_SWAP_PTE;
  989. goto out_unmap;
  990. }
  991. }
  992. if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
  993. if (!userfaultfd_armed(vma) &&
  994. ++none_or_zero <= khugepaged_max_ptes_none) {
  995. continue;
  996. } else {
  997. result = SCAN_EXCEED_NONE_PTE;
  998. goto out_unmap;
  999. }
  1000. }
  1001. if (!pte_present(pteval)) {
  1002. result = SCAN_PTE_NON_PRESENT;
  1003. goto out_unmap;
  1004. }
  1005. if (pte_write(pteval))
  1006. writable = true;
  1007. page = vm_normal_page(vma, _address, pteval);
  1008. if (unlikely(!page)) {
  1009. result = SCAN_PAGE_NULL;
  1010. goto out_unmap;
  1011. }
  1012. /* TODO: teach khugepaged to collapse THP mapped with pte */
  1013. if (PageCompound(page)) {
  1014. result = SCAN_PAGE_COMPOUND;
  1015. goto out_unmap;
  1016. }
  1017. /*
  1018. * Record which node the original page is from and save this
  1019. * information in khugepaged_node_load[].
  1020. * Khugepaged will allocate a hugepage from the node with the
  1021. * maximum hit count.
  1022. */
  1023. node = page_to_nid(page);
  1024. if (khugepaged_scan_abort(node)) {
  1025. result = SCAN_SCAN_ABORT;
  1026. goto out_unmap;
  1027. }
  1028. khugepaged_node_load[node]++;
  1029. if (!PageLRU(page)) {
  1030. result = SCAN_PAGE_LRU;
  1031. goto out_unmap;
  1032. }
  1033. if (PageLocked(page)) {
  1034. result = SCAN_PAGE_LOCK;
  1035. goto out_unmap;
  1036. }
  1037. if (!PageAnon(page)) {
  1038. result = SCAN_PAGE_ANON;
  1039. goto out_unmap;
  1040. }
  1041. /*
  1042. * cannot use mapcount: can't collapse if there's a gup pin.
  1043. * The page must only be referenced by the scanned process
  1044. * and page swap cache.
  1045. */
  1046. if (page_count(page) != 1 + !!PageSwapCache(page)) {
  1047. result = SCAN_PAGE_COUNT;
  1048. goto out_unmap;
  1049. }
  1050. if (pte_young(pteval) ||
  1051. page_is_young(page) || PageReferenced(page) ||
  1052. mmu_notifier_test_young(vma->vm_mm, address))
  1053. referenced++;
  1054. }
  1055. if (writable) {
  1056. if (referenced) {
  1057. result = SCAN_SUCCEED;
  1058. ret = 1;
  1059. } else {
  1060. result = SCAN_LACK_REFERENCED_PAGE;
  1061. }
  1062. } else {
  1063. result = SCAN_PAGE_RO;
  1064. }
  1065. out_unmap:
  1066. pte_unmap_unlock(pte, ptl);
  1067. if (ret) {
  1068. node = khugepaged_find_target_node();
  1069. /* collapse_huge_page will return with the mmap_sem released */
  1070. collapse_huge_page(mm, address, hpage, node, referenced);
  1071. }
  1072. out:
  1073. trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
  1074. none_or_zero, result, unmapped);
  1075. return ret;
  1076. }
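/*
 * Drop an mm from the scan list once its owner has exited; called with
 * khugepaged_mm_lock held.
 */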
  1077. static void collect_mm_slot(struct mm_slot *mm_slot)
  1078. {
  1079. struct mm_struct *mm = mm_slot->mm;
  1080. VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
  1081. if (khugepaged_test_exit(mm)) {
  1082. /* free mm_slot */
  1083. hash_del(&mm_slot->hash);
  1084. list_del(&mm_slot->mm_node);
  1085. /*
  1086. * Not strictly needed because the mm exited already.
  1087. *
  1088. * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
  1089. */
  1090. /* khugepaged_mm_lock actually not necessary for the below */
  1091. free_mm_slot(mm_slot);
  1092. mmdrop(mm);
  1093. }
  1094. }
  1095. #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
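/*
 * After a shmem range has been collapsed, walk every non-anonymous vma
 * mapping it and, where mmap_sem can be taken without blocking, clear
 * the now-unused pte table so the next fault can map the range with a
 * huge pmd.
 */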
  1096. static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  1097. {
  1098. struct vm_area_struct *vma;
  1099. unsigned long addr;
  1100. pmd_t *pmd, _pmd;
  1101. i_mmap_lock_write(mapping);
  1102. vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
  1103. /* probably overkill */
  1104. if (vma->anon_vma)
  1105. continue;
  1106. addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
  1107. if (addr & ~HPAGE_PMD_MASK)
  1108. continue;
  1109. if (vma->vm_end < addr + HPAGE_PMD_SIZE)
  1110. continue;
  1111. pmd = mm_find_pmd(vma->vm_mm, addr);
  1112. if (!pmd)
  1113. continue;
  1114. /*
  1115. * We need exclusive mmap_sem to retract the page table.
  1116. * If trylock fails we would end up with a pte-mapped THP after
  1117. * re-fault. Not ideal, but it's more important to not disturb
  1118. * the system too much.
  1119. */
  1120. if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
  1121. spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
  1122. /* assume page table is clear */
  1123. _pmd = pmdp_collapse_flush(vma, addr, pmd);
  1124. spin_unlock(ptl);
  1125. up_write(&vma->vm_mm->mmap_sem);
  1126. atomic_long_dec(&vma->vm_mm->nr_ptes);
  1127. pte_free(vma->vm_mm, pmd_pgtable(_pmd));
  1128. }
  1129. }
  1130. i_mmap_unlock_write(mapping);
  1131. }
  1132. /**
  1133. * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
  1134. *
  1135. * Basic scheme is simple, details are more complex:
  1136. * - allocate and lock a new huge page;
  1137. * - scan over the radix tree, replacing old pages with the new one
  1138. * + swap in pages if necessary;
  1139. * + fill in gaps;
  1140. * + keep old pages around in case rollback is required;
  1141. * - if replacing succeeds:
  1142. * + copy data over;
  1143. * + free old pages;
  1144. * + unlock huge page;
  1145. * - if replacing fails:
  1146. * + put all pages back and unfreeze them;
  1147. * + restore gaps in the radix tree;
  1148. * + unlock and free huge page;
  1149. */
  1150. static void collapse_shmem(struct mm_struct *mm,
  1151. struct address_space *mapping, pgoff_t start,
  1152. struct page **hpage, int node)
  1153. {
  1154. gfp_t gfp;
  1155. struct page *page, *new_page, *tmp;
  1156. struct mem_cgroup *memcg;
  1157. pgoff_t index, end = start + HPAGE_PMD_NR;
  1158. LIST_HEAD(pagelist);
  1159. struct radix_tree_iter iter;
  1160. void **slot;
  1161. int nr_none = 0, result = SCAN_SUCCEED;
  1162. VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
  1163. /* Only allocate from the target node */
  1164. gfp = alloc_hugepage_khugepaged_gfpmask() |
  1165. __GFP_OTHER_NODE | __GFP_THISNODE;
  1166. new_page = khugepaged_alloc_page(hpage, gfp, node);
  1167. if (!new_page) {
  1168. result = SCAN_ALLOC_HUGE_PAGE_FAIL;
  1169. goto out;
  1170. }
  1171. /* Do not oom kill for khugepaged charges */
  1172. if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
  1173. &memcg, true))) {
  1174. result = SCAN_CGROUP_CHARGE_FAIL;
  1175. goto out;
  1176. }
  1177. __SetPageLocked(new_page);
  1178. __SetPageSwapBacked(new_page);
  1179. new_page->index = start;
  1180. new_page->mapping = mapping;
  1181. /*
  1182. * At this point the new_page is locked and not up-to-date.
  1183. * It's safe to insert it into the page cache, because nobody would
  1184. * be able to map it or use it in another way until we unlock it.
  1185. */
  1186. index = start;
  1187. spin_lock_irq(&mapping->tree_lock);
  1188. radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
  1189. int n = min(iter.index, end) - index;
  1190. /*
  1191. * Stop if extent has been hole-punched, and is now completely
  1192. * empty (the more obvious i_size_read() check would take an
  1193. * irq-unsafe seqlock on 32-bit).
  1194. */
  1195. if (n >= HPAGE_PMD_NR) {
  1196. result = SCAN_TRUNCATED;
  1197. goto tree_locked;
  1198. }
  1199. /*
  1200. * Handle holes in the radix tree: charge them to shmem and
  1201. * insert the relevant subpages of new_page into the radix tree.
  1202. */
  1203. if (n && !shmem_charge(mapping->host, n)) {
  1204. result = SCAN_FAIL;
  1205. goto tree_locked;
  1206. }
  1207. for (; index < min(iter.index, end); index++) {
  1208. radix_tree_insert(&mapping->page_tree, index,
  1209. new_page + (index % HPAGE_PMD_NR));
  1210. }
  1211. nr_none += n;
  1212. /* We are done. */
  1213. if (index >= end)
  1214. break;
  1215. page = radix_tree_deref_slot_protected(slot,
  1216. &mapping->tree_lock);
  1217. if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
  1218. spin_unlock_irq(&mapping->tree_lock);
  1219. /* swap in or instantiate fallocated page */
  1220. if (shmem_getpage(mapping->host, index, &page,
  1221. SGP_NOHUGE)) {
  1222. result = SCAN_FAIL;
  1223. goto tree_unlocked;
  1224. }
  1225. } else if (trylock_page(page)) {
  1226. get_page(page);
  1227. spin_unlock_irq(&mapping->tree_lock);
  1228. } else {
  1229. result = SCAN_PAGE_LOCK;
  1230. goto tree_locked;
  1231. }
  1232. /*
  1233. * The page must be locked, so we can drop the tree_lock
  1234. * without racing with truncate.
  1235. */
  1236. VM_BUG_ON_PAGE(!PageLocked(page), page);
  1237. VM_BUG_ON_PAGE(!PageUptodate(page), page);
  1238. /*
  1239. * If file was truncated then extended, or hole-punched, before
  1240. * we locked the first page, then a THP might be there already.
  1241. */
  1242. if (PageTransCompound(page)) {
  1243. result = SCAN_PAGE_COMPOUND;
  1244. goto out_unlock;
  1245. }
  1246. if (page_mapping(page) != mapping) {
  1247. result = SCAN_TRUNCATED;
  1248. goto out_unlock;
  1249. }
  1250. if (isolate_lru_page(page)) {
  1251. result = SCAN_DEL_PAGE_LRU;
  1252. goto out_unlock;
  1253. }
  1254. if (page_mapped(page))
  1255. unmap_mapping_range(mapping, index << PAGE_SHIFT,
  1256. PAGE_SIZE, 0);
  1257. spin_lock_irq(&mapping->tree_lock);
  1258. slot = radix_tree_lookup_slot(&mapping->page_tree, index);
  1259. VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
  1260. &mapping->tree_lock), page);
  1261. VM_BUG_ON_PAGE(page_mapped(page), page);
  1262. /*
  1263. * The page is expected to have page_count() == 3:
  1264. * - we hold a pin on it;
  1265. * - one reference from radix tree;
  1266. * - one from isolate_lru_page;
  1267. */
  1268. if (!page_ref_freeze(page, 3)) {
  1269. result = SCAN_PAGE_COUNT;
  1270. spin_unlock_irq(&mapping->tree_lock);
  1271. putback_lru_page(page);
  1272. goto out_unlock;
  1273. }
  1274. /*
  1275. * Add the page to the list so we can undo the collapse if
  1276. * something goes wrong.
  1277. */
  1278. list_add_tail(&page->lru, &pagelist);
  1279. /* Finally, replace with the new page. */
  1280. radix_tree_replace_slot(slot,
  1281. new_page + (index % HPAGE_PMD_NR));
  1282. slot = radix_tree_iter_next(&iter);
  1283. index++;
  1284. continue;
  1285. out_unlock:
  1286. unlock_page(page);
  1287. put_page(page);
  1288. goto tree_unlocked;
  1289. }
  1290. /*
  1291. * Handle hole in radix tree at the end of the range.
  1292. * This code only triggers if there's nothing in radix tree
  1293. * beyond 'end'.
  1294. */
  1295. if (index < end) {
  1296. int n = end - index;
  1297. /* Stop if extent has been truncated, and is now empty */
  1298. if (n >= HPAGE_PMD_NR) {
  1299. result = SCAN_TRUNCATED;
  1300. goto tree_locked;
  1301. }
  1302. if (!shmem_charge(mapping->host, n)) {
  1303. result = SCAN_FAIL;
  1304. goto tree_locked;
  1305. }
  1306. for (; index < end; index++) {
  1307. radix_tree_insert(&mapping->page_tree, index,
  1308. new_page + (index % HPAGE_PMD_NR));
  1309. }
  1310. nr_none += n;
  1311. }
  1312. __inc_node_page_state(new_page, NR_SHMEM_THPS);
  1313. if (nr_none) {
  1314. struct zone *zone = page_zone(new_page);
  1315. __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
  1316. __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
  1317. }
  1318. tree_locked:
  1319. spin_unlock_irq(&mapping->tree_lock);
  1320. tree_unlocked:
  1321. if (result == SCAN_SUCCEED) {
  1322. /*
  1323. * Replacing old pages with the new one has succeeded; now we
  1324. * need to copy the content and free the old pages.
  1325. */
  1326. index = start;
  1327. list_for_each_entry_safe(page, tmp, &pagelist, lru) {
  1328. while (index < page->index) {
  1329. clear_highpage(new_page + (index % HPAGE_PMD_NR));
  1330. index++;
  1331. }
  1332. copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
  1333. page);
  1334. list_del(&page->lru);
  1335. page->mapping = NULL;
  1336. page_ref_unfreeze(page, 1);
  1337. ClearPageActive(page);
  1338. ClearPageUnevictable(page);
  1339. unlock_page(page);
  1340. put_page(page);
  1341. index++;
  1342. }
  1343. while (index < end) {
  1344. clear_highpage(new_page + (index % HPAGE_PMD_NR));
  1345. index++;
  1346. }
  1347. SetPageUptodate(new_page);
  1348. page_ref_add(new_page, HPAGE_PMD_NR - 1);
  1349. set_page_dirty(new_page);
  1350. mem_cgroup_commit_charge(new_page, memcg, false, true);
  1351. lru_cache_add_anon(new_page);
  1352. /*
  1353. * Remove pte page tables, so we can re-fault the page as huge.
  1354. */
  1355. retract_page_tables(mapping, start);
  1356. *hpage = NULL;
  1357. } else {
  1358. /* Something went wrong: roll back changes to the radix tree */
  1359. spin_lock_irq(&mapping->tree_lock);
  1360. mapping->nrpages -= nr_none;
  1361. shmem_uncharge(mapping->host, nr_none);
  1362. radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
  1363. start) {
  1364. if (iter.index >= end)
  1365. break;
  1366. page = list_first_entry_or_null(&pagelist,
  1367. struct page, lru);
  1368. if (!page || iter.index < page->index) {
  1369. if (!nr_none)
  1370. break;
  1371. nr_none--;
  1372. /* Put holes back where they were */
  1373. radix_tree_delete(&mapping->page_tree,
  1374. iter.index);
  1375. slot = radix_tree_iter_next(&iter);
  1376. continue;
  1377. }
  1378. VM_BUG_ON_PAGE(page->index != iter.index, page);
  1379. /* Unfreeze the page. */
  1380. list_del(&page->lru);
  1381. page_ref_unfreeze(page, 2);
  1382. radix_tree_replace_slot(slot, page);
  1383. spin_unlock_irq(&mapping->tree_lock);
  1384. unlock_page(page);
  1385. putback_lru_page(page);
  1386. spin_lock_irq(&mapping->tree_lock);
  1387. slot = radix_tree_iter_next(&iter);
  1388. }
  1389. VM_BUG_ON(nr_none);
  1390. spin_unlock_irq(&mapping->tree_lock);
  1391. mem_cgroup_cancel_charge(new_page, memcg, true);
  1392. new_page->mapping = NULL;
  1393. }
  1394. unlock_page(new_page);
  1395. out:
  1396. VM_BUG_ON(!list_empty(&pagelist));
  1397. /* TODO: tracepoints */
  1398. }
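/*
 * Scan one pmd-sized extent of a shmem mapping under RCU: count swap
 * entries against max_ptes_swap, give up on compound or over-referenced
 * pages, and record NUMA node loads. If enough pages are present,
 * collapse the extent via collapse_shmem().
 */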
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct radix_tree_iter iter;
	void **slot;
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= start + HPAGE_PMD_NR)
			break;

		page = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exception(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) != 1 + page_mapcount(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			cond_resched_rcu();
			slot = radix_tree_iter_next(&iter);
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_shmem(mm, mapping, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}
#endif
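
/*
 * Scan one mm from the khugepaged list, resuming at khugepaged_scan.address
 * and advancing in PMD-sized steps until roughly @pages pages have been
 * covered.  Entered and left with khugepaged_mm_lock held; the lock is
 * dropped while the mm itself is scanned under the mmap_sem read lock.
 * Returns the amount of scan progress made, so the caller can budget work.
 */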
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times).  Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;

			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (shmem_file(vma->vm_file)) {
				struct file *file;
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				if (!shmem_huge_enabled(vma))
					goto skip;
				file = get_file(vma->vm_file);
				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_shmem(mm, file->f_mapping,
						pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
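
/*
 * Sleep policy between scan passes: if there is pending work, sleep for
 * the configured scan-sleep interval (waking early if the thread is asked
 * to stop); otherwise block until an mm is registered for scanning or the
 * thread should stop.
 */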
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
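
/*
 * Main loop of the khugepaged kernel thread: alternate between scanning
 * and sleeping until asked to stop, then release whatever mm_slot the
 * scan cursor was parked on.
 */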
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
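
/*
 * Raise min_free_kbytes so that enough whole pageblocks stay free for
 * huge page allocation and fragmentation avoidance, capped at 5% of
 * low memory.  The value is only ever raised here, never lowered.
 */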
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types.  There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
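
/*
 * Start or stop the khugepaged thread to match the current "enabled"
 * setting.  Serialized by khugepaged_mutex so concurrent callers cannot
 * race the thread creation and teardown.
 */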
int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}