123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921 |
- /*
- * linux/mm/vmscan.c
- *
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * Swap reorganised 29.12.95, Stephen Tweedie.
- * kswapd added: 7.1.96 sct
- * Removed kswapd_ctl limits, and swap out as many pages as needed
- * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
- * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
- * Multiqueue VM started 5.8.00, Rik van Riel.
- */
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- #include <linux/mm.h>
- #include <linux/module.h>
- #include <linux/gfp.h>
- #include <linux/kernel_stat.h>
- #include <linux/swap.h>
- #include <linux/pagemap.h>
- #include <linux/init.h>
- #include <linux/highmem.h>
- #include <linux/vmpressure.h>
- #include <linux/vmstat.h>
- #include <linux/file.h>
- #include <linux/writeback.h>
- #include <linux/blkdev.h>
- #include <linux/buffer_head.h> /* for try_to_release_page(),
- buffer_heads_over_limit */
- #include <linux/mm_inline.h>
- #include <linux/backing-dev.h>
- #include <linux/rmap.h>
- #include <linux/topology.h>
- #include <linux/cpu.h>
- #include <linux/cpuset.h>
- #include <linux/compaction.h>
- #include <linux/notifier.h>
- #include <linux/rwsem.h>
- #include <linux/delay.h>
- #include <linux/kthread.h>
- #include <linux/freezer.h>
- #include <linux/memcontrol.h>
- #include <linux/delayacct.h>
- #include <linux/sysctl.h>
- #include <linux/oom.h>
- #include <linux/prefetch.h>
- #include <linux/printk.h>
- #include <linux/dax.h>
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
- #include <linux/swapops.h>
- #include <linux/balloon_compaction.h>
- #include "internal.h"
- #define CREATE_TRACE_POINTS
- #include <trace/events/vmscan.h>
- struct scan_control {
- /* How many pages shrink_list() should reclaim */
- unsigned long nr_to_reclaim;
- /* This context's GFP mask */
- gfp_t gfp_mask;
- /* Allocation order */
- int order;
- /*
- * Nodemask of nodes allowed by the caller. If NULL, all nodes
- * are scanned.
- */
- nodemask_t *nodemask;
- /*
- * The memory cgroup that hit its limit and as a result is the
- * primary target of this reclaim invocation.
- */
- struct mem_cgroup *target_mem_cgroup;
- /* Scan (total_size >> priority) pages at once */
- int priority;
- /* The highest zone to isolate pages for reclaim from */
- enum zone_type reclaim_idx;
- unsigned int may_writepage:1;
- /* Can mapped pages be reclaimed? */
- unsigned int may_unmap:1;
- /* Can pages be swapped as part of reclaim? */
- unsigned int may_swap:1;
- /* Can cgroups be reclaimed below their normal consumption range? */
- unsigned int may_thrash:1;
- unsigned int hibernation_mode:1;
- /* One of the zones is ready for compaction */
- unsigned int compaction_ready:1;
- /* Incremented by the number of inactive pages that were scanned */
- unsigned long nr_scanned;
- /* Number of pages freed so far during a call to shrink_zones() */
- unsigned long nr_reclaimed;
- };
- #ifdef ARCH_HAS_PREFETCH
- #define prefetch_prev_lru_page(_page, _base, _field) \
- do { \
- if ((_page)->lru.prev != _base) { \
- struct page *prev; \
- \
- prev = lru_to_page(&(_page->lru)); \
- prefetch(&prev->_field); \
- } \
- } while (0)
- #else
- #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
- #endif
- #ifdef ARCH_HAS_PREFETCHW
- #define prefetchw_prev_lru_page(_page, _base, _field) \
- do { \
- if ((_page)->lru.prev != _base) { \
- struct page *prev; \
- \
- prev = lru_to_page(&(_page->lru)); \
- prefetchw(&prev->_field); \
- } \
- } while (0)
- #else
- #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
- #endif
- /*
- * From 0 .. 100. Higher means more swappy.
- */
- int vm_swappiness = 60;
- /*
- * The total number of pages which are beyond the high watermark within all
- * zones.
- */
- unsigned long vm_total_pages;
- static LIST_HEAD(shrinker_list);
- static DECLARE_RWSEM(shrinker_rwsem);
- #ifdef CONFIG_MEMCG
- static bool global_reclaim(struct scan_control *sc)
- {
- return !sc->target_mem_cgroup;
- }
- /**
- * sane_reclaim - is the usual dirty throttling mechanism operational?
- * @sc: scan_control in question
- *
- * The normal page dirty throttling mechanism in balance_dirty_pages() is
- * completely broken with the legacy memcg and direct stalling in
- * shrink_page_list() is used for throttling instead, which lacks all the
- * niceties such as fairness, adaptive pausing, bandwidth proportional
- * allocation and configurability.
- *
- * This function tests whether the vmscan currently in progress can assume
- * that the normal dirty throttling mechanism is operational.
- */
- static bool sane_reclaim(struct scan_control *sc)
- {
- struct mem_cgroup *memcg = sc->target_mem_cgroup;
- if (!memcg)
- return true;
- #ifdef CONFIG_CGROUP_WRITEBACK
- if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return true;
- #endif
- return false;
- }
- #else
- static bool global_reclaim(struct scan_control *sc)
- {
- return true;
- }
- static bool sane_reclaim(struct scan_control *sc)
- {
- return true;
- }
- #endif
- /*
- * This misses isolated pages which are not accounted for to save counters.
- * As the data only determines if reclaim or compaction continues, it is
- * not expected that isolated pages will be a dominating factor.
- */
- unsigned long zone_reclaimable_pages(struct zone *zone)
- {
- unsigned long nr;
- nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
- zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
- if (get_nr_swap_pages() > 0)
- nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
- zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
- return nr;
- }
- unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
- {
- unsigned long nr;
- nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
- node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) +
- node_page_state_snapshot(pgdat, NR_ISOLATED_FILE);
- if (get_nr_swap_pages() > 0)
- nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
- node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) +
- node_page_state_snapshot(pgdat, NR_ISOLATED_ANON);
- return nr;
- }
- bool pgdat_reclaimable(struct pglist_data *pgdat)
- {
- return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
- pgdat_reclaimable_pages(pgdat) * 6;
- }
- /**
- * lruvec_lru_size - Returns the number of pages on the given LRU list.
- * @lruvec: lru vector
- * @lru: lru to use
- * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
- */
- unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
- {
- unsigned long lru_size;
- int zid;
- if (!mem_cgroup_disabled())
- lru_size = mem_cgroup_get_lru_size(lruvec, lru);
- else
- lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
- for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
- struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
- unsigned long size;
- if (!managed_zone(zone))
- continue;
- if (!mem_cgroup_disabled())
- size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
- else
- size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
- NR_ZONE_LRU_BASE + lru);
- lru_size -= min(size, lru_size);
- }
- return lru_size;
- }
- /*
- * Add a shrinker callback to be called from the vm.
- */
- int register_shrinker(struct shrinker *shrinker)
- {
- size_t size = sizeof(*shrinker->nr_deferred);
- if (shrinker->flags & SHRINKER_NUMA_AWARE)
- size *= nr_node_ids;
- shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
- if (!shrinker->nr_deferred)
- return -ENOMEM;
- down_write(&shrinker_rwsem);
- list_add_tail(&shrinker->list, &shrinker_list);
- up_write(&shrinker_rwsem);
- return 0;
- }
- EXPORT_SYMBOL(register_shrinker);
- /*
- * Remove one
- */
- void unregister_shrinker(struct shrinker *shrinker)
- {
- if (!shrinker->nr_deferred)
- return;
- down_write(&shrinker_rwsem);
- list_del(&shrinker->list);
- up_write(&shrinker_rwsem);
- kfree(shrinker->nr_deferred);
- shrinker->nr_deferred = NULL;
- }
- EXPORT_SYMBOL(unregister_shrinker);
- #define SHRINK_BATCH 128
- static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
- struct shrinker *shrinker,
- unsigned long nr_scanned,
- unsigned long nr_eligible)
- {
- unsigned long freed = 0;
- unsigned long long delta;
- long total_scan;
- long freeable;
- long nr;
- long new_nr;
- int nid = shrinkctl->nid;
- long batch_size = shrinker->batch ? shrinker->batch
- : SHRINK_BATCH;
- long scanned = 0, next_deferred;
- freeable = shrinker->count_objects(shrinker, shrinkctl);
- if (freeable == 0)
- return 0;
- /*
- * copy the current shrinker scan count into a local variable
- * and zero it so that other concurrent shrinker invocations
- * don't also do this scanning work.
- */
- nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
- total_scan = nr;
- delta = (4 * nr_scanned) / shrinker->seeks;
- delta *= freeable;
- do_div(delta, nr_eligible + 1);
- total_scan += delta;
- if (total_scan < 0) {
- pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
- shrinker->scan_objects, total_scan);
- total_scan = freeable;
- next_deferred = nr;
- } else
- next_deferred = total_scan;
- /*
- * We need to avoid excessive windup on filesystem shrinkers
- * due to large numbers of GFP_NOFS allocations causing the
- * shrinkers to return -1 all the time. This results in a large
- * nr being built up so when a shrink that can do some work
- * comes along it empties the entire cache due to nr >>>
- * freeable. This is bad for sustaining a working set in
- * memory.
- *
- * Hence only allow the shrinker to scan the entire cache when
- * a large delta change is calculated directly.
- */
- if (delta < freeable / 4)
- total_scan = min(total_scan, freeable / 2);
- /*
- * Avoid risking looping forever due to too large nr value:
- * never try to free more than twice the estimate number of
- * freeable entries.
- */
- if (total_scan > freeable * 2)
- total_scan = freeable * 2;
- trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
- nr_scanned, nr_eligible,
- freeable, delta, total_scan);
- /*
- * Normally, we should not scan less than batch_size objects in one
- * pass to avoid too frequent shrinker calls, but if the slab has less
- * than batch_size objects in total and we are really tight on memory,
- * we will try to reclaim all available objects, otherwise we can end
- * up failing allocations although there are plenty of reclaimable
- * objects spread over several slabs with usage less than the
- * batch_size.
- *
- * We detect the "tight on memory" situations by looking at the total
- * number of objects we want to scan (total_scan). If it is greater
- * than the total number of objects on slab (freeable), we must be
- * scanning at high prio and therefore should try to reclaim as much as
- * possible.
- */
- while (total_scan >= batch_size ||
- total_scan >= freeable) {
- unsigned long ret;
- unsigned long nr_to_scan = min(batch_size, total_scan);
- shrinkctl->nr_to_scan = nr_to_scan;
- ret = shrinker->scan_objects(shrinker, shrinkctl);
- if (ret == SHRINK_STOP)
- break;
- freed += ret;
- count_vm_events(SLABS_SCANNED, nr_to_scan);
- total_scan -= nr_to_scan;
- scanned += nr_to_scan;
- cond_resched();
- }
- if (next_deferred >= scanned)
- next_deferred -= scanned;
- else
- next_deferred = 0;
- /*
- * move the unused scan count back into the shrinker in a
- * manner that handles concurrent updates. If we exhausted the
- * scan, there is no need to do an update.
- */
- if (next_deferred > 0)
- new_nr = atomic_long_add_return(next_deferred,
- &shrinker->nr_deferred[nid]);
- else
- new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
- trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
- return freed;
- }
- /**
- * shrink_slab - shrink slab caches
- * @gfp_mask: allocation context
- * @nid: node whose slab caches to target
- * @memcg: memory cgroup whose slab caches to target
- * @nr_scanned: pressure numerator
- * @nr_eligible: pressure denominator
- *
- * Call the shrink functions to age shrinkable caches.
- *
- * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
- * unaware shrinkers will receive a node id of 0 instead.
- *
- * @memcg specifies the memory cgroup to target. If it is not NULL,
- * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
- * objects from the memory cgroup specified. Otherwise, only unaware
- * shrinkers are called.
- *
- * @nr_scanned and @nr_eligible form a ratio that indicate how much of
- * the available objects should be scanned. Page reclaim for example
- * passes the number of pages scanned and the number of pages on the
- * LRU lists that it considered on @nid, plus a bias in @nr_scanned
- * when it encountered mapped pages. The ratio is further biased by
- * the ->seeks setting of the shrink function, which indicates the
- * cost to recreate an object relative to that of an LRU page.
- *
- * Returns the number of reclaimed slab objects.
- */
- static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
- struct mem_cgroup *memcg,
- unsigned long nr_scanned,
- unsigned long nr_eligible)
- {
- struct shrinker *shrinker;
- unsigned long freed = 0;
- if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
- return 0;
- if (nr_scanned == 0)
- nr_scanned = SWAP_CLUSTER_MAX;
- if (!down_read_trylock(&shrinker_rwsem)) {
- /*
- * If we would return 0, our callers would understand that we
- * have nothing else to shrink and give up trying. By returning
- * 1 we keep it going and assume we'll be able to shrink next
- * time.
- */
- freed = 1;
- goto out;
- }
- list_for_each_entry(shrinker, &shrinker_list, list) {
- struct shrink_control sc = {
- .gfp_mask = gfp_mask,
- .nid = nid,
- .memcg = memcg,
- };
- /*
- * If kernel memory accounting is disabled, we ignore
- * SHRINKER_MEMCG_AWARE flag and call all shrinkers
- * passing NULL for memcg.
- */
- if (memcg_kmem_enabled() &&
- !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
- continue;
- if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
- sc.nid = 0;
- freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
- }
- up_read(&shrinker_rwsem);
- out:
- cond_resched();
- return freed;
- }
- void drop_slab_node(int nid)
- {
- unsigned long freed;
- do {
- struct mem_cgroup *memcg = NULL;
- freed = 0;
- do {
- freed += shrink_slab(GFP_KERNEL, nid, memcg,
- 1000, 1000);
- } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
- } while (freed > 10);
- }
- void drop_slab(void)
- {
- int nid;
- for_each_online_node(nid)
- drop_slab_node(nid);
- }
- static inline int is_page_cache_freeable(struct page *page)
- {
- /*
- * A freeable page cache page is referenced only by the caller
- * that isolated the page, the page cache radix tree and
- * optional buffer heads at page->private.
- */
- return page_count(page) - page_has_private(page) == 2;
- }
- static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
- {
- if (current->flags & PF_SWAPWRITE)
- return 1;
- if (!inode_write_congested(inode))
- return 1;
- if (inode_to_bdi(inode) == current->backing_dev_info)
- return 1;
- return 0;
- }
- /*
- * We detected a synchronous write error writing a page out. Probably
- * -ENOSPC. We need to propagate that into the address_space for a subsequent
- * fsync(), msync() or close().
- *
- * The tricky part is that after writepage we cannot touch the mapping: nothing
- * prevents it from being freed up. But we have a ref on the page and once
- * that page is locked, the mapping is pinned.
- *
- * We're allowed to run sleeping lock_page() here because we know the caller has
- * __GFP_FS.
- */
- static void handle_write_error(struct address_space *mapping,
- struct page *page, int error)
- {
- lock_page(page);
- if (page_mapping(page) == mapping)
- mapping_set_error(mapping, error);
- unlock_page(page);
- }
- /* possible outcome of pageout() */
- typedef enum {
- /* failed to write page out, page is locked */
- PAGE_KEEP,
- /* move page to the active list, page is locked */
- PAGE_ACTIVATE,
- /* page has been sent to the disk successfully, page is unlocked */
- PAGE_SUCCESS,
- /* page is clean and locked */
- PAGE_CLEAN,
- } pageout_t;
- /*
- * pageout is called by shrink_page_list() for each dirty page.
- * Calls ->writepage().
- */
- static pageout_t pageout(struct page *page, struct address_space *mapping,
- struct scan_control *sc)
- {
- /*
- * If the page is dirty, only perform writeback if that write
- * will be non-blocking. To prevent this allocation from being
- * stalled by pagecache activity. But note that there may be
- * stalls if we need to run get_block(). We could test
- * PagePrivate for that.
- *
- * If this process is currently in __generic_file_write_iter() against
- * this page's queue, we can perform writeback even if that
- * will block.
- *
- * If the page is swapcache, write it back even if that would
- * block, for some throttling. This happens by accident, because
- * swap_backing_dev_info is bust: it doesn't reflect the
- * congestion state of the swapdevs. Easy to fix, if needed.
- */
- if (!is_page_cache_freeable(page))
- return PAGE_KEEP;
- if (!mapping) {
- /*
- * Some data journaling orphaned pages can have
- * page->mapping == NULL while being dirty with clean buffers.
- */
- if (page_has_private(page)) {
- if (try_to_free_buffers(page)) {
- ClearPageDirty(page);
- pr_info("%s: orphaned page\n", __func__);
- return PAGE_CLEAN;
- }
- }
- return PAGE_KEEP;
- }
- if (mapping->a_ops->writepage == NULL)
- return PAGE_ACTIVATE;
- if (!may_write_to_inode(mapping->host, sc))
- return PAGE_KEEP;
- if (clear_page_dirty_for_io(page)) {
- int res;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = SWAP_CLUSTER_MAX,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1,
- };
- SetPageReclaim(page);
- res = mapping->a_ops->writepage(page, &wbc);
- if (res < 0)
- handle_write_error(mapping, page, res);
- if (res == AOP_WRITEPAGE_ACTIVATE) {
- ClearPageReclaim(page);
- return PAGE_ACTIVATE;
- }
- if (!PageWriteback(page)) {
- /* synchronous write or broken a_ops? */
- ClearPageReclaim(page);
- }
- trace_mm_vmscan_writepage(page);
- inc_node_page_state(page, NR_VMSCAN_WRITE);
- return PAGE_SUCCESS;
- }
- return PAGE_CLEAN;
- }
- /*
- * Same as remove_mapping, but if the page is removed from the mapping, it
- * gets returned with a refcount of 0.
- */
- static int __remove_mapping(struct address_space *mapping, struct page *page,
- bool reclaimed)
- {
- unsigned long flags;
- BUG_ON(!PageLocked(page));
- BUG_ON(mapping != page_mapping(page));
- spin_lock_irqsave(&mapping->tree_lock, flags);
- /*
- * The non racy check for a busy page.
- *
- * Must be careful with the order of the tests. When someone has
- * a ref to the page, it may be possible that they dirty it then
- * drop the reference. So if PageDirty is tested before page_count
- * here, then the following race may occur:
- *
- * get_user_pages(&page);
- * [user mapping goes away]
- * write_to(page);
- * !PageDirty(page) [good]
- * SetPageDirty(page);
- * put_page(page);
- * !page_count(page) [good, discard it]
- *
- * [oops, our write_to data is lost]
- *
- * Reversing the order of the tests ensures such a situation cannot
- * escape unnoticed. The smp_rmb is needed to ensure the page->flags
- * load is not satisfied before that of page->_refcount.
- *
- * Note that if SetPageDirty is always performed via set_page_dirty,
- * and thus under tree_lock, then this ordering is not required.
- */
- if (!page_ref_freeze(page, 2))
- goto cannot_free;
- /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
- if (unlikely(PageDirty(page))) {
- page_ref_unfreeze(page, 2);
- goto cannot_free;
- }
- if (PageSwapCache(page)) {
- swp_entry_t swap = { .val = page_private(page) };
- mem_cgroup_swapout(page, swap);
- __delete_from_swap_cache(page);
- spin_unlock_irqrestore(&mapping->tree_lock, flags);
- swapcache_free(swap);
- } else {
- void (*freepage)(struct page *);
- void *shadow = NULL;
- freepage = mapping->a_ops->freepage;
- /*
- * Remember a shadow entry for reclaimed file cache in
- * order to detect refaults, thus thrashing, later on.
- *
- * But don't store shadows in an address space that is
- * already exiting. This is not just an optizimation,
- * inode reclaim needs to empty out the radix tree or
- * the nodes are lost. Don't plant shadows behind its
- * back.
- *
- * We also don't store shadows for DAX mappings because the
- * only page cache pages found in these are zero pages
- * covering holes, and because we don't want to mix DAX
- * exceptional entries and shadow exceptional entries in the
- * same page_tree.
- */
- if (reclaimed && page_is_file_cache(page) &&
- !mapping_exiting(mapping) && !dax_mapping(mapping))
- shadow = workingset_eviction(mapping, page);
- __delete_from_page_cache(page, shadow);
- spin_unlock_irqrestore(&mapping->tree_lock, flags);
- if (freepage != NULL)
- freepage(page);
- }
- return 1;
- cannot_free:
- spin_unlock_irqrestore(&mapping->tree_lock, flags);
- return 0;
- }
- /*
- * Attempt to detach a locked page from its ->mapping. If it is dirty or if
- * someone else has a ref on the page, abort and return 0. If it was
- * successfully detached, return 1. Assumes the caller has a single ref on
- * this page.
- */
- int remove_mapping(struct address_space *mapping, struct page *page)
- {
- if (__remove_mapping(mapping, page, false)) {
- /*
- * Unfreezing the refcount with 1 rather than 2 effectively
- * drops the pagecache ref for us without requiring another
- * atomic operation.
- */
- page_ref_unfreeze(page, 1);
- return 1;
- }
- return 0;
- }
- /**
- * putback_lru_page - put previously isolated page onto appropriate LRU list
- * @page: page to be put back to appropriate lru list
- *
- * Add previously isolated @page to appropriate LRU list.
- * Page may still be unevictable for other reasons.
- *
- * lru_lock must not be held, interrupts must be enabled.
- */
- void putback_lru_page(struct page *page)
- {
- bool is_unevictable;
- int was_unevictable = PageUnevictable(page);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- redo:
- ClearPageUnevictable(page);
- if (page_evictable(page)) {
- /*
- * For evictable pages, we can use the cache.
- * In event of a race, worst case is we end up with an
- * unevictable page on [in]active list.
- * We know how to handle that.
- */
- is_unevictable = false;
- lru_cache_add(page);
- } else {
- /*
- * Put unevictable pages directly on zone's unevictable
- * list.
- */
- is_unevictable = true;
- add_page_to_unevictable_list(page);
- /*
- * When racing with an mlock or AS_UNEVICTABLE clearing
- * (page is unlocked) make sure that if the other thread
- * does not observe our setting of PG_lru and fails
- * isolation/check_move_unevictable_pages,
- * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
- * the page back to the evictable list.
- *
- * The other side is TestClearPageMlocked() or shmem_lock().
- */
- smp_mb();
- }
- /*
- * page's status can change while we move it among lru. If an evictable
- * page is on unevictable list, it never be freed. To avoid that,
- * check after we added it to the list, again.
- */
- if (is_unevictable && page_evictable(page)) {
- if (!isolate_lru_page(page)) {
- put_page(page);
- goto redo;
- }
- /* This means someone else dropped this page from LRU
- * So, it will be freed or putback to LRU again. There is
- * nothing to do here.
- */
- }
- if (was_unevictable && !is_unevictable)
- count_vm_event(UNEVICTABLE_PGRESCUED);
- else if (!was_unevictable && is_unevictable)
- count_vm_event(UNEVICTABLE_PGCULLED);
- put_page(page); /* drop ref from isolate */
- }
- enum page_references {
- PAGEREF_RECLAIM,
- PAGEREF_RECLAIM_CLEAN,
- PAGEREF_KEEP,
- PAGEREF_ACTIVATE,
- };
- static enum page_references page_check_references(struct page *page,
- struct scan_control *sc)
- {
- int referenced_ptes, referenced_page;
- unsigned long vm_flags;
- referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
- &vm_flags);
- referenced_page = TestClearPageReferenced(page);
- /*
- * Mlock lost the isolation race with us. Let try_to_unmap()
- * move the page to the unevictable list.
- */
- if (vm_flags & VM_LOCKED)
- return PAGEREF_RECLAIM;
- if (referenced_ptes) {
- if (PageSwapBacked(page))
- return PAGEREF_ACTIVATE;
- /*
- * All mapped pages start out with page table
- * references from the instantiating fault, so we need
- * to look twice if a mapped file page is used more
- * than once.
- *
- * Mark it and spare it for another trip around the
- * inactive list. Another page table reference will
- * lead to its activation.
- *
- * Note: the mark is set for activated pages as well
- * so that recently deactivated but used pages are
- * quickly recovered.
- */
- SetPageReferenced(page);
- if (referenced_page || referenced_ptes > 1)
- return PAGEREF_ACTIVATE;
- /*
- * Activate file-backed executable pages after first usage.
- */
- if (vm_flags & VM_EXEC)
- return PAGEREF_ACTIVATE;
- return PAGEREF_KEEP;
- }
- /* Reclaim if clean, defer dirty pages to writeback */
- if (referenced_page && !PageSwapBacked(page))
- return PAGEREF_RECLAIM_CLEAN;
- return PAGEREF_RECLAIM;
- }
- /* Check if a page is dirty or under writeback */
- static void page_check_dirty_writeback(struct page *page,
- bool *dirty, bool *writeback)
- {
- struct address_space *mapping;
- /*
- * Anonymous pages are not handled by flushers and must be written
- * from reclaim context. Do not stall reclaim based on them
- */
- if (!page_is_file_cache(page)) {
- *dirty = false;
- *writeback = false;
- return;
- }
- /* By default assume that the page flags are accurate */
- *dirty = PageDirty(page);
- *writeback = PageWriteback(page);
- /* Verify dirty/writeback state if the filesystem supports it */
- if (!page_has_private(page))
- return;
- mapping = page_mapping(page);
- if (mapping && mapping->a_ops->is_dirty_writeback)
- mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
- }
- /*
- * shrink_page_list() returns the number of reclaimed pages
- */
- static unsigned long shrink_page_list(struct list_head *page_list,
- struct pglist_data *pgdat,
- struct scan_control *sc,
- enum ttu_flags ttu_flags,
- unsigned long *ret_nr_dirty,
- unsigned long *ret_nr_unqueued_dirty,
- unsigned long *ret_nr_congested,
- unsigned long *ret_nr_writeback,
- unsigned long *ret_nr_immediate,
- bool force_reclaim)
- {
- LIST_HEAD(ret_pages);
- LIST_HEAD(free_pages);
- int pgactivate = 0;
- unsigned long nr_unqueued_dirty = 0;
- unsigned long nr_dirty = 0;
- unsigned long nr_congested = 0;
- unsigned long nr_reclaimed = 0;
- unsigned long nr_writeback = 0;
- unsigned long nr_immediate = 0;
- cond_resched();
- while (!list_empty(page_list)) {
- struct address_space *mapping;
- struct page *page;
- int may_enter_fs;
- enum page_references references = PAGEREF_RECLAIM_CLEAN;
- bool dirty, writeback;
- bool lazyfree = false;
- int ret = SWAP_SUCCESS;
- cond_resched();
- page = lru_to_page(page_list);
- list_del(&page->lru);
- if (!trylock_page(page))
- goto keep;
- VM_BUG_ON_PAGE(PageActive(page), page);
- sc->nr_scanned++;
- if (unlikely(!page_evictable(page)))
- goto cull_mlocked;
- if (!sc->may_unmap && page_mapped(page))
- goto keep_locked;
- /* Double the slab pressure for mapped and swapcache pages */
- if (page_mapped(page) || PageSwapCache(page))
- sc->nr_scanned++;
- may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
- (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
- /*
- * The number of dirty pages determines if a zone is marked
- * reclaim_congested which affects wait_iff_congested. kswapd
- * will stall and start writing pages if the tail of the LRU
- * is all dirty unqueued pages.
- */
- page_check_dirty_writeback(page, &dirty, &writeback);
- if (dirty || writeback)
- nr_dirty++;
- if (dirty && !writeback)
- nr_unqueued_dirty++;
- /*
- * Treat this page as congested if the underlying BDI is or if
- * pages are cycling through the LRU so quickly that the
- * pages marked for immediate reclaim are making it to the
- * end of the LRU a second time.
- */
- mapping = page_mapping(page);
- if (((dirty || writeback) && mapping &&
- inode_write_congested(mapping->host)) ||
- (writeback && PageReclaim(page)))
- nr_congested++;
- /*
- * If a page at the tail of the LRU is under writeback, there
- * are three cases to consider.
- *
- * 1) If reclaim is encountering an excessive number of pages
- * under writeback and this page is both under writeback and
- * PageReclaim then it indicates that pages are being queued
- * for IO but are being recycled through the LRU before the
- * IO can complete. Waiting on the page itself risks an
- * indefinite stall if it is impossible to writeback the
- * page due to IO error or disconnected storage so instead
- * note that the LRU is being scanned too quickly and the
- * caller can stall after page list has been processed.
- *
- * 2) Global or new memcg reclaim encounters a page that is
- * not marked for immediate reclaim, or the caller does not
- * have __GFP_FS (or __GFP_IO if it's simply going to swap,
- * not to fs). In this case mark the page for immediate
- * reclaim and continue scanning.
- *
- * Require may_enter_fs because we would wait on fs, which
- * may not have submitted IO yet. And the loop driver might
- * enter reclaim, and deadlock if it waits on a page for
- * which it is needed to do the write (loop masks off
- * __GFP_IO|__GFP_FS for this reason); but more thought
- * would probably show more reasons.
- *
- * 3) Legacy memcg encounters a page that is already marked
- * PageReclaim. memcg does not have any dirty pages
- * throttling so we could easily OOM just because too many
- * pages are in writeback and there is nothing else to
- * reclaim. Wait for the writeback to complete.
- */
- if (PageWriteback(page)) {
- /* Case 1 above */
- if (current_is_kswapd() &&
- PageReclaim(page) &&
- test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
- nr_immediate++;
- goto keep_locked;
- /* Case 2 above */
- } else if (sane_reclaim(sc) ||
- !PageReclaim(page) || !may_enter_fs) {
- /*
- * This is slightly racy - end_page_writeback()
- * might have just cleared PageReclaim, then
- * setting PageReclaim here end up interpreted
- * as PageReadahead - but that does not matter
- * enough to care. What we do want is for this
- * page to have PageReclaim set next time memcg
- * reclaim reaches the tests above, so it will
- * then wait_on_page_writeback() to avoid OOM;
- * and it's also appropriate in global reclaim.
- */
- SetPageReclaim(page);
- nr_writeback++;
- goto keep_locked;
- /* Case 3 above */
- } else {
- unlock_page(page);
- wait_on_page_writeback(page);
- /* then go back and try same page again */
- list_add_tail(&page->lru, page_list);
- continue;
- }
- }
- if (!force_reclaim)
- references = page_check_references(page, sc);
- switch (references) {
- case PAGEREF_ACTIVATE:
- goto activate_locked;
- case PAGEREF_KEEP:
- goto keep_locked;
- case PAGEREF_RECLAIM:
- case PAGEREF_RECLAIM_CLEAN:
- ; /* try to reclaim the page below */
- }
- /*
- * Anonymous process memory has backing store?
- * Try to allocate it some swap space here.
- */
- if (PageAnon(page) && !PageSwapCache(page)) {
- if (!(sc->gfp_mask & __GFP_IO))
- goto keep_locked;
- if (!add_to_swap(page, page_list))
- goto activate_locked;
- lazyfree = true;
- may_enter_fs = 1;
- /* Adding to swap updated mapping */
- mapping = page_mapping(page);
- } else if (unlikely(PageTransHuge(page))) {
- /* Split file THP */
- if (split_huge_page_to_list(page, page_list))
- goto keep_locked;
- }
- VM_BUG_ON_PAGE(PageTransHuge(page), page);
- /*
- * The page is mapped into the page tables of one or more
- * processes. Try to unmap it here.
- */
- if (page_mapped(page) && mapping) {
- switch (ret = try_to_unmap(page, lazyfree ?
- (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
- (ttu_flags | TTU_BATCH_FLUSH))) {
- case SWAP_FAIL:
- goto activate_locked;
- case SWAP_AGAIN:
- goto keep_locked;
- case SWAP_MLOCK:
- goto cull_mlocked;
- case SWAP_LZFREE:
- goto lazyfree;
- case SWAP_SUCCESS:
- ; /* try to free the page below */
- }
- }
- if (PageDirty(page)) {
- /*
- * Only kswapd can writeback filesystem pages to
- * avoid risk of stack overflow but only writeback
- * if many dirty pages have been encountered.
- */
- if (page_is_file_cache(page) &&
- (!current_is_kswapd() ||
- !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
- /*
- * Immediately reclaim when written back.
- * Similar in principal to deactivate_page()
- * except we already have the page isolated
- * and know it's dirty
- */
- inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
- SetPageReclaim(page);
- goto keep_locked;
- }
- if (references == PAGEREF_RECLAIM_CLEAN)
- goto keep_locked;
- if (!may_enter_fs)
- goto keep_locked;
- if (!sc->may_writepage)
- goto keep_locked;
- /*
- * Page is dirty. Flush the TLB if a writable entry
- * potentially exists to avoid CPU writes after IO
- * starts and then write it out here.
- */
- try_to_unmap_flush_dirty();
- switch (pageout(page, mapping, sc)) {
- case PAGE_KEEP:
- goto keep_locked;
- case PAGE_ACTIVATE:
- goto activate_locked;
- case PAGE_SUCCESS:
- if (PageWriteback(page))
- goto keep;
- if (PageDirty(page))
- goto keep;
- /*
- * A synchronous write - probably a ramdisk. Go
- * ahead and try to reclaim the page.
- */
- if (!trylock_page(page))
- goto keep;
- if (PageDirty(page) || PageWriteback(page))
- goto keep_locked;
- mapping = page_mapping(page);
- case PAGE_CLEAN:
- ; /* try to free the page below */
- }
- }
- /*
- * If the page has buffers, try to free the buffer mappings
- * associated with this page. If we succeed we try to free
- * the page as well.
- *
- * We do this even if the page is PageDirty().
- * try_to_release_page() does not perform I/O, but it is
- * possible for a page to have PageDirty set, but it is actually
- * clean (all its buffers are clean). This happens if the
- * buffers were written out directly, with submit_bh(). ext3
- * will do this, as well as the blockdev mapping.
- * try_to_release_page() will discover that cleanness and will
- * drop the buffers and mark the page clean - it can be freed.
- *
- * Rarely, pages can have buffers and no ->mapping. These are
- * the pages which were not successfully invalidated in
- * truncate_complete_page(). We try to drop those buffers here
- * and if that worked, and the page is no longer mapped into
- * process address space (page_count == 1) it can be freed.
- * Otherwise, leave the page on the LRU so it is swappable.
- */
- if (page_has_private(page)) {
- if (!try_to_release_page(page, sc->gfp_mask))
- goto activate_locked;
- if (!mapping && page_count(page) == 1) {
- unlock_page(page);
- if (put_page_testzero(page))
- goto free_it;
- else {
- /*
- * rare race with speculative reference.
- * the speculative reference will free
- * this page shortly, so we may
- * increment nr_reclaimed here (and
- * leave it off the LRU).
- */
- nr_reclaimed++;
- continue;
- }
- }
- }
- lazyfree:
- if (!mapping || !__remove_mapping(mapping, page, true))
- goto keep_locked;
- /*
- * At this point, we have no other references and there is
- * no way to pick any more up (removed from LRU, removed
- * from pagecache). Can use non-atomic bitops now (and
- * we obviously don't have to worry about waking up a process
- * waiting on the page lock, because there are no references.
- */
- __ClearPageLocked(page);
- free_it:
- if (ret == SWAP_LZFREE)
- count_vm_event(PGLAZYFREED);
- nr_reclaimed++;
- /*
- * Is there need to periodically free_page_list? It would
- * appear not as the counts should be low
- */
- list_add(&page->lru, &free_pages);
- continue;
- cull_mlocked:
- if (PageSwapCache(page))
- try_to_free_swap(page);
- unlock_page(page);
- list_add(&page->lru, &ret_pages);
- continue;
- activate_locked:
- /* Not a candidate for swapping, so reclaim swap space. */
- if (PageSwapCache(page) && mem_cgroup_swap_full(page))
- try_to_free_swap(page);
- VM_BUG_ON_PAGE(PageActive(page), page);
- SetPageActive(page);
- pgactivate++;
- keep_locked:
- unlock_page(page);
- keep:
- list_add(&page->lru, &ret_pages);
- VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
- }
- mem_cgroup_uncharge_list(&free_pages);
- try_to_unmap_flush();
- free_hot_cold_page_list(&free_pages, true);
- list_splice(&ret_pages, page_list);
- count_vm_events(PGACTIVATE, pgactivate);
- *ret_nr_dirty += nr_dirty;
- *ret_nr_congested += nr_congested;
- *ret_nr_unqueued_dirty += nr_unqueued_dirty;
- *ret_nr_writeback += nr_writeback;
- *ret_nr_immediate += nr_immediate;
- return nr_reclaimed;
- }
- unsigned long reclaim_clean_pages_from_list(struct zone *zone,
- struct list_head *page_list)
- {
- struct scan_control sc = {
- .gfp_mask = GFP_KERNEL,
- .priority = DEF_PRIORITY,
- .may_unmap = 1,
- };
- unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
- struct page *page, *next;
- LIST_HEAD(clean_pages);
- list_for_each_entry_safe(page, next, page_list, lru) {
- if (page_is_file_cache(page) && !PageDirty(page) &&
- !__PageMovable(page)) {
- ClearPageActive(page);
- list_move(&page->lru, &clean_pages);
- }
- }
- ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
- TTU_UNMAP|TTU_IGNORE_ACCESS,
- &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
- list_splice(&clean_pages, page_list);
- mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
- return ret;
- }
- /*
- * Attempt to remove the specified page from its LRU. Only take this page
- * if it is of the appropriate PageActive status. Pages which are being
- * freed elsewhere are also ignored.
- *
- * page: page to consider
- * mode: one of the LRU isolation modes defined above
- *
- * returns 0 on success, -ve errno on failure.
- */
- int __isolate_lru_page(struct page *page, isolate_mode_t mode)
- {
- int ret = -EINVAL;
- /* Only take pages on the LRU. */
- if (!PageLRU(page))
- return ret;
- /* Compaction should not handle unevictable pages but CMA can do so */
- if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
- return ret;
- ret = -EBUSY;
- /*
- * To minimise LRU disruption, the caller can indicate that it only
- * wants to isolate pages it will be able to operate on without
- * blocking - clean pages for the most part.
- *
- * ISOLATE_CLEAN means that only clean pages should be isolated. This
- * is used by reclaim when it is cannot write to backing storage
- *
- * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
- * that it is possible to migrate without blocking
- */
- if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
- /* All the caller can do on PageWriteback is block */
- if (PageWriteback(page))
- return ret;
- if (PageDirty(page)) {
- struct address_space *mapping;
- bool migrate_dirty;
- /* ISOLATE_CLEAN means only clean pages */
- if (mode & ISOLATE_CLEAN)
- return ret;
- /*
- * Only pages without mappings or that have a
- * ->migratepage callback are possible to migrate
- * without blocking. However, we can be racing with
- * truncation so it's necessary to lock the page
- * to stabilise the mapping as truncation holds
- * the page lock until after the page is removed
- * from the page cache.
- */
- if (!trylock_page(page))
- return ret;
- mapping = page_mapping(page);
- migrate_dirty = !mapping || mapping->a_ops->migratepage;
- unlock_page(page);
- if (!migrate_dirty)
- return ret;
- }
- }
- if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
- return ret;
- if (likely(get_page_unless_zero(page))) {
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- ClearPageLRU(page);
- ret = 0;
- }
- return ret;
- }
- /*
- * Update LRU sizes after isolating pages. The LRU size updates must
- * be complete before mem_cgroup_update_lru_size due to a santity check.
- */
- static __always_inline void update_lru_sizes(struct lruvec *lruvec,
- enum lru_list lru, unsigned long *nr_zone_taken)
- {
- int zid;
- for (zid = 0; zid < MAX_NR_ZONES; zid++) {
- if (!nr_zone_taken[zid])
- continue;
- __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
- #ifdef CONFIG_MEMCG
- mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
- #endif
- }
- }
- /*
- * zone_lru_lock is heavily contended. Some of the functions that
- * shrink the lists perform better by taking out a batch of pages
- * and working on them outside the LRU lock.
- *
- * For pagecache intensive workloads, this function is the hottest
- * spot in the kernel (apart from copy_*_user functions).
- *
- * Appropriate locks must be held before calling this function.
- *
- * @nr_to_scan: The number of pages to look through on the list.
- * @lruvec: The LRU vector to pull pages from.
- * @dst: The temp list to put pages on to.
- * @nr_scanned: The number of pages that were scanned.
- * @sc: The scan_control struct for this reclaim session
- * @mode: One of the LRU isolation modes
- * @lru: LRU list id for isolating
- *
- * returns how many pages were moved onto *@dst.
- */
- static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
- struct lruvec *lruvec, struct list_head *dst,
- unsigned long *nr_scanned, struct scan_control *sc,
- isolate_mode_t mode, enum lru_list lru)
- {
- struct list_head *src = &lruvec->lists[lru];
- unsigned long nr_taken = 0;
- unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
- unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
- unsigned long scan, nr_pages;
- LIST_HEAD(pages_skipped);
- for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
- !list_empty(src);) {
- struct page *page;
- page = lru_to_page(src);
- prefetchw_prev_lru_page(page, src, flags);
- VM_BUG_ON_PAGE(!PageLRU(page), page);
- if (page_zonenum(page) > sc->reclaim_idx) {
- list_move(&page->lru, &pages_skipped);
- nr_skipped[page_zonenum(page)]++;
- continue;
- }
- /*
- * Account for scanned and skipped separetly to avoid the pgdat
- * being prematurely marked unreclaimable by pgdat_reclaimable.
- */
- scan++;
- switch (__isolate_lru_page(page, mode)) {
- case 0:
- nr_pages = hpage_nr_pages(page);
- nr_taken += nr_pages;
- nr_zone_taken[page_zonenum(page)] += nr_pages;
- list_move(&page->lru, dst);
- break;
- case -EBUSY:
- /* else it is being freed elsewhere */
- list_move(&page->lru, src);
- continue;
- default:
- BUG();
- }
- }
- /*
- * Splice any skipped pages to the start of the LRU list. Note that
- * this disrupts the LRU order when reclaiming for lower zones but
- * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
- * scanning would soon rescan the same pages to skip and put the
- * system at risk of premature OOM.
- */
- if (!list_empty(&pages_skipped)) {
- int zid;
- unsigned long total_skipped = 0;
- for (zid = 0; zid < MAX_NR_ZONES; zid++) {
- if (!nr_skipped[zid])
- continue;
- __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
- total_skipped += nr_skipped[zid];
- }
- /*
- * Account skipped pages as a partial scan as the pgdat may be
- * close to unreclaimable. If the LRU list is empty, account
- * skipped pages as a full scan.
- */
- scan += list_empty(src) ? total_skipped : total_skipped >> 2;
- list_splice(&pages_skipped, src);
- }
- *nr_scanned = scan;
- trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
- nr_taken, mode, is_file_lru(lru));
- update_lru_sizes(lruvec, lru, nr_zone_taken);
- return nr_taken;
- }
- /**
- * isolate_lru_page - tries to isolate a page from its LRU list
- * @page: page to isolate from its LRU list
- *
- * Isolates a @page from an LRU list, clears PageLRU and adjusts the
- * vmstat statistic corresponding to whatever LRU list the page was on.
- *
- * Returns 0 if the page was removed from an LRU list.
- * Returns -EBUSY if the page was not on an LRU list.
- *
- * The returned page will have PageLRU() cleared. If it was found on
- * the active list, it will have PageActive set. If it was found on
- * the unevictable list, it will have the PageUnevictable bit set. That flag
- * may need to be cleared by the caller before letting the page go.
- *
- * The vmstat statistic corresponding to the list on which the page was
- * found will be decremented.
- *
- * Restrictions:
- * (1) Must be called with an elevated refcount on the page. This is a
- * fundamentnal difference from isolate_lru_pages (which is called
- * without a stable reference).
- * (2) the lru_lock must not be held.
- * (3) interrupts must be enabled.
- */
- int isolate_lru_page(struct page *page)
- {
- int ret = -EBUSY;
- VM_BUG_ON_PAGE(!page_count(page), page);
- WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
- if (PageLRU(page)) {
- struct zone *zone = page_zone(page);
- struct lruvec *lruvec;
- spin_lock_irq(zone_lru_lock(zone));
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
- if (PageLRU(page)) {
- int lru = page_lru(page);
- get_page(page);
- ClearPageLRU(page);
- del_page_from_lru_list(page, lruvec, lru);
- ret = 0;
- }
- spin_unlock_irq(zone_lru_lock(zone));
- }
- return ret;
- }
- /*
- * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get resheduled. When there are massive number of tasks doing page
- * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
- * the LRU list will go small and be scanned faster than necessary, leading to
- * unnecessary swapping, thrashing and OOM.
- */
- static int too_many_isolated(struct pglist_data *pgdat, int file,
- struct scan_control *sc)
- {
- unsigned long inactive, isolated;
- if (current_is_kswapd())
- return 0;
- if (!sane_reclaim(sc))
- return 0;
- if (file) {
- inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
- isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
- } else {
- inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
- isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
- }
- /*
- * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
- * won't get blocked by normal direct-reclaimers, forming a circular
- * deadlock.
- */
- if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
- inactive >>= 3;
- return isolated > inactive;
- }
- static noinline_for_stack void
- putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
- {
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- LIST_HEAD(pages_to_free);
- /*
- * Put back any unfreeable pages.
- */
- while (!list_empty(page_list)) {
- struct page *page = lru_to_page(page_list);
- int lru;
- VM_BUG_ON_PAGE(PageLRU(page), page);
- list_del(&page->lru);
- if (unlikely(!page_evictable(page))) {
- spin_unlock_irq(&pgdat->lru_lock);
- putback_lru_page(page);
- spin_lock_irq(&pgdat->lru_lock);
- continue;
- }
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- SetPageLRU(page);
- lru = page_lru(page);
- add_page_to_lru_list(page, lruvec, lru);
- if (is_active_lru(lru)) {
- int file = is_file_lru(lru);
- int numpages = hpage_nr_pages(page);
- reclaim_stat->recent_rotated[file] += numpages;
- }
- if (put_page_testzero(page)) {
- __ClearPageLRU(page);
- __ClearPageActive(page);
- del_page_from_lru_list(page, lruvec, lru);
- if (unlikely(PageCompound(page))) {
- spin_unlock_irq(&pgdat->lru_lock);
- mem_cgroup_uncharge(page);
- (*get_compound_page_dtor(page))(page);
- spin_lock_irq(&pgdat->lru_lock);
- } else
- list_add(&page->lru, &pages_to_free);
- }
- }
- /*
- * To save our caller's stack, now use input list for pages to free.
- */
- list_splice(&pages_to_free, page_list);
- }
- /*
- * If a kernel thread (such as nfsd for loop-back mounts) services
- * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
- * In that case we should only throttle if the backing device it is
- * writing to is congested. In other cases it is safe to throttle.
- */
- static int current_may_throttle(void)
- {
- return !(current->flags & PF_LESS_THROTTLE) ||
- current->backing_dev_info == NULL ||
- bdi_write_congested(current->backing_dev_info);
- }
- static bool inactive_reclaimable_pages(struct lruvec *lruvec,
- struct scan_control *sc, enum lru_list lru)
- {
- int zid;
- struct zone *zone;
- int file = is_file_lru(lru);
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- if (!global_reclaim(sc))
- return true;
- for (zid = sc->reclaim_idx; zid >= 0; zid--) {
- zone = &pgdat->node_zones[zid];
- if (!managed_zone(zone))
- continue;
- if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
- LRU_FILE * file) >= SWAP_CLUSTER_MAX)
- return true;
- }
- return false;
- }
- /*
- * shrink_inactive_list() is a helper for shrink_node(). It returns the number
- * of reclaimed pages
- */
- static noinline_for_stack unsigned long
- shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
- struct scan_control *sc, enum lru_list lru)
- {
- LIST_HEAD(page_list);
- unsigned long nr_scanned;
- unsigned long nr_reclaimed = 0;
- unsigned long nr_taken;
- unsigned long nr_dirty = 0;
- unsigned long nr_congested = 0;
- unsigned long nr_unqueued_dirty = 0;
- unsigned long nr_writeback = 0;
- unsigned long nr_immediate = 0;
- isolate_mode_t isolate_mode = 0;
- int file = is_file_lru(lru);
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- if (!inactive_reclaimable_pages(lruvec, sc, lru))
- return 0;
- while (unlikely(too_many_isolated(pgdat, file, sc))) {
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- /* We are about to die and free our memory. Return now. */
- if (fatal_signal_pending(current))
- return SWAP_CLUSTER_MAX;
- }
- lru_add_drain();
- if (!sc->may_unmap)
- isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
- spin_lock_irq(&pgdat->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
- &nr_scanned, sc, isolate_mode, lru);
- __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
- reclaim_stat->recent_scanned[file] += nr_taken;
- if (global_reclaim(sc)) {
- __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
- if (current_is_kswapd())
- __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
- else
- __count_vm_events(PGSCAN_DIRECT, nr_scanned);
- }
- spin_unlock_irq(&pgdat->lru_lock);
- if (nr_taken == 0)
- return 0;
- nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
- &nr_dirty, &nr_unqueued_dirty, &nr_congested,
- &nr_writeback, &nr_immediate,
- false);
- spin_lock_irq(&pgdat->lru_lock);
- if (global_reclaim(sc)) {
- if (current_is_kswapd())
- __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
- else
- __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
- }
- putback_inactive_pages(lruvec, &page_list);
- __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
- spin_unlock_irq(&pgdat->lru_lock);
- mem_cgroup_uncharge_list(&page_list);
- free_hot_cold_page_list(&page_list, true);
- /*
- * If reclaim is isolating dirty pages under writeback, it implies
- * that the long-lived page allocation rate is exceeding the page
- * laundering rate. Either the global limits are not being effective
- * at throttling processes due to the page distribution throughout
- * zones or there is heavy usage of a slow backing device. The
- * only option is to throttle from reclaim context which is not ideal
- * as there is no guarantee the dirtying process is throttled in the
- * same way balance_dirty_pages() manages.
- *
- * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
- * of pages under pages flagged for immediate reclaim and stall if any
- * are encountered in the nr_immediate check below.
- */
- if (nr_writeback && nr_writeback == nr_taken)
- set_bit(PGDAT_WRITEBACK, &pgdat->flags);
- /*
- * Legacy memcg will stall in page writeback so avoid forcibly
- * stalling here.
- */
- if (sane_reclaim(sc)) {
- /*
- * Tag a zone as congested if all the dirty pages scanned were
- * backed by a congested BDI and wait_iff_congested will stall.
- */
- if (nr_dirty && nr_dirty == nr_congested)
- set_bit(PGDAT_CONGESTED, &pgdat->flags);
- /*
- * If dirty pages are scanned that are not queued for IO, it
- * implies that flushers are not keeping up. In this case, flag
- * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
- * reclaim context.
- */
- if (nr_unqueued_dirty == nr_taken)
- set_bit(PGDAT_DIRTY, &pgdat->flags);
- /*
- * If kswapd scans pages marked marked for immediate
- * reclaim and under writeback (nr_immediate), it implies
- * that pages are cycling through the LRU faster than
- * they are written so also forcibly stall.
- */
- if (nr_immediate && current_may_throttle())
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- }
- /*
- * Stall direct reclaim for IO completions if underlying BDIs or zone
- * is congested. Allow kswapd to continue until it starts encountering
- * unqueued dirty pages or cycling through the LRU too quickly.
- */
- if (!sc->hibernation_mode && !current_is_kswapd() &&
- current_may_throttle())
- wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
- trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
- nr_scanned, nr_reclaimed,
- sc->priority, file);
- return nr_reclaimed;
- }
- /*
- * This moves pages from the active list to the inactive list.
- *
- * We move them the other way if the page is referenced by one or more
- * processes, from rmap.
- *
- * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone_lru_lock across the whole operation. But if
- * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone_lru_lock around each page. It's impossible to balance
- * this, so instead we remove the pages from the LRU while processing them.
- * It is safe to rely on PG_active against the non-LRU pages in here because
- * nobody will play with that bit on a non-LRU page.
- *
- * The downside is that we have to touch page->_refcount against each page.
- * But we had to alter page->flags anyway.
- */
- static void move_active_pages_to_lru(struct lruvec *lruvec,
- struct list_head *list,
- struct list_head *pages_to_free,
- enum lru_list lru)
- {
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- unsigned long pgmoved = 0;
- struct page *page;
- int nr_pages;
- while (!list_empty(list)) {
- page = lru_to_page(list);
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- SetPageLRU(page);
- nr_pages = hpage_nr_pages(page);
- update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
- list_move(&page->lru, &lruvec->lists[lru]);
- pgmoved += nr_pages;
- if (put_page_testzero(page)) {
- __ClearPageLRU(page);
- __ClearPageActive(page);
- del_page_from_lru_list(page, lruvec, lru);
- if (unlikely(PageCompound(page))) {
- spin_unlock_irq(&pgdat->lru_lock);
- mem_cgroup_uncharge(page);
- (*get_compound_page_dtor(page))(page);
- spin_lock_irq(&pgdat->lru_lock);
- } else
- list_add(&page->lru, pages_to_free);
- }
- }
- if (!is_active_lru(lru))
- __count_vm_events(PGDEACTIVATE, pgmoved);
- }
- static void shrink_active_list(unsigned long nr_to_scan,
- struct lruvec *lruvec,
- struct scan_control *sc,
- enum lru_list lru)
- {
- unsigned long nr_taken;
- unsigned long nr_scanned;
- unsigned long vm_flags;
- LIST_HEAD(l_hold); /* The pages which were snipped off */
- LIST_HEAD(l_active);
- LIST_HEAD(l_inactive);
- struct page *page;
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- unsigned long nr_rotated = 0;
- isolate_mode_t isolate_mode = 0;
- int file = is_file_lru(lru);
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- lru_add_drain();
- if (!sc->may_unmap)
- isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
- spin_lock_irq(&pgdat->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
- &nr_scanned, sc, isolate_mode, lru);
- __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
- reclaim_stat->recent_scanned[file] += nr_taken;
- if (global_reclaim(sc))
- __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
- __count_vm_events(PGREFILL, nr_scanned);
- spin_unlock_irq(&pgdat->lru_lock);
- while (!list_empty(&l_hold)) {
- cond_resched();
- page = lru_to_page(&l_hold);
- list_del(&page->lru);
- if (unlikely(!page_evictable(page))) {
- putback_lru_page(page);
- continue;
- }
- if (unlikely(buffer_heads_over_limit)) {
- if (page_has_private(page) && trylock_page(page)) {
- if (page_has_private(page))
- try_to_release_page(page, 0);
- unlock_page(page);
- }
- }
- if (page_referenced(page, 0, sc->target_mem_cgroup,
- &vm_flags)) {
- nr_rotated += hpage_nr_pages(page);
- /*
- * Identify referenced, file-backed active pages and
- * give them one more trip around the active list. So
- * that executable code get better chances to stay in
- * memory under moderate memory pressure. Anon pages
- * are not likely to be evicted by use-once streaming
- * IO, plus JVM can create lots of anon VM_EXEC pages,
- * so we ignore them here.
- */
- if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
- list_add(&page->lru, &l_active);
- continue;
- }
- }
- ClearPageActive(page); /* we are de-activating */
- list_add(&page->lru, &l_inactive);
- }
- /*
- * Move pages back to the lru list.
- */
- spin_lock_irq(&pgdat->lru_lock);
- /*
- * Count referenced pages from currently used mappings as rotated,
- * even though only some of them are actually re-activated. This
- * helps balance scan pressure between file and anonymous pages in
- * get_scan_count.
- */
- reclaim_stat->recent_rotated[file] += nr_rotated;
- move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
- move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
- __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
- spin_unlock_irq(&pgdat->lru_lock);
- mem_cgroup_uncharge_list(&l_hold);
- free_hot_cold_page_list(&l_hold, true);
- }
- /*
- * The inactive anon list should be small enough that the VM never has
- * to do too much work.
- *
- * The inactive file list should be small enough to leave most memory
- * to the established workingset on the scan-resistant active list,
- * but large enough to avoid thrashing the aggregate readahead window.
- *
- * Both inactive lists should also be large enough that each inactive
- * page has a chance to be referenced again before it is reclaimed.
- *
- * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
- * on this LRU, maintained by the pageout code. A zone->inactive_ratio
- * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
- *
- * total target max
- * memory ratio inactive
- * -------------------------------------
- * 10MB 1 5MB
- * 100MB 1 50MB
- * 1GB 3 250MB
- * 10GB 10 0.9GB
- * 100GB 31 3GB
- * 1TB 101 10GB
- * 10TB 320 32GB
- */
- static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
- struct scan_control *sc)
- {
- unsigned long inactive_ratio;
- unsigned long inactive, active;
- enum lru_list inactive_lru = file * LRU_FILE;
- enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
- unsigned long gb;
- /*
- * If we don't have swap space, anonymous page deactivation
- * is pointless.
- */
- if (!file && !total_swap_pages)
- return false;
- inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
- active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
- gb = (inactive + active) >> (30 - PAGE_SHIFT);
- if (gb)
- inactive_ratio = int_sqrt(10 * gb);
- else
- inactive_ratio = 1;
- return inactive * inactive_ratio < active;
- }
- static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
- struct lruvec *lruvec, struct scan_control *sc)
- {
- if (is_active_lru(lru)) {
- if (inactive_list_is_low(lruvec, is_file_lru(lru), sc))
- shrink_active_list(nr_to_scan, lruvec, sc, lru);
- return 0;
- }
- return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
- }
- enum scan_balance {
- SCAN_EQUAL,
- SCAN_FRACT,
- SCAN_ANON,
- SCAN_FILE,
- };
- /*
- * Determine how aggressively the anon and file LRU lists should be
- * scanned. The relative value of each set of LRU lists is determined
- * by looking at the fraction of the pages scanned we did rotate back
- * onto the active list instead of evict.
- *
- * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
- * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
- */
- static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
- struct scan_control *sc, unsigned long *nr,
- unsigned long *lru_pages)
- {
- int swappiness = mem_cgroup_swappiness(memcg);
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- u64 fraction[2];
- u64 denominator = 0; /* gcc */
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- unsigned long anon_prio, file_prio;
- enum scan_balance scan_balance;
- unsigned long anon, file;
- bool force_scan = false;
- unsigned long ap, fp;
- enum lru_list lru;
- bool some_scanned;
- int pass;
- /*
- * If the zone or memcg is small, nr[l] can be 0. This
- * results in no scanning on this priority and a potential
- * priority drop. Global direct reclaim can go to the next
- * zone and tends to have no problems. Global kswapd is for
- * zone balancing and it needs to scan a minimum amount. When
- * reclaiming for a memcg, a priority drop can cause high
- * latencies, so it's better to scan a minimum amount there as
- * well.
- */
- if (current_is_kswapd()) {
- if (!pgdat_reclaimable(pgdat))
- force_scan = true;
- if (!mem_cgroup_online(memcg))
- force_scan = true;
- }
- if (!global_reclaim(sc))
- force_scan = true;
- /* If we have no swap space, do not bother scanning anon pages. */
- if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
- scan_balance = SCAN_FILE;
- goto out;
- }
- /*
- * Global reclaim will swap to prevent OOM even with no
- * swappiness, but memcg users want to use this knob to
- * disable swapping for individual groups completely when
- * using the memory controller's swap limit feature would be
- * too expensive.
- */
- if (!global_reclaim(sc) && !swappiness) {
- scan_balance = SCAN_FILE;
- goto out;
- }
- /*
- * Do not apply any pressure balancing cleverness when the
- * system is close to OOM, scan both anon and file equally
- * (unless the swappiness setting disagrees with swapping).
- */
- if (!sc->priority && swappiness) {
- scan_balance = SCAN_EQUAL;
- goto out;
- }
- /*
- * Prevent the reclaimer from falling into the cache trap: as
- * cache pages start out inactive, every cache fault will tip
- * the scan balance towards the file LRU. And as the file LRU
- * shrinks, so does the window for rotation from references.
- * This means we have a runaway feedback loop where a tiny
- * thrashing file LRU becomes infinitely more attractive than
- * anon pages. Try to detect this based on file LRU size.
- */
- if (global_reclaim(sc)) {
- unsigned long pgdatfile;
- unsigned long pgdatfree;
- int z;
- unsigned long total_high_wmark = 0;
- pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
- pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
- node_page_state(pgdat, NR_INACTIVE_FILE);
- for (z = 0; z < MAX_NR_ZONES; z++) {
- struct zone *zone = &pgdat->node_zones[z];
- if (!managed_zone(zone))
- continue;
- total_high_wmark += high_wmark_pages(zone);
- }
- if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
- scan_balance = SCAN_ANON;
- goto out;
- }
- }
- /*
- * If there is enough inactive page cache, i.e. if the size of the
- * inactive list is greater than that of the active list *and* the
- * inactive list actually has some pages to scan on this priority, we
- * do not reclaim anything from the anonymous working set right now.
- * Without the second condition we could end up never scanning an
- * lruvec even if it has plenty of old anonymous pages unless the
- * system is under heavy pressure.
- */
- if (!inactive_list_is_low(lruvec, true, sc) &&
- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
- scan_balance = SCAN_FILE;
- goto out;
- }
- scan_balance = SCAN_FRACT;
- /*
- * With swappiness at 100, anonymous and file have the same priority.
- * This scanning priority is essentially the inverse of IO cost.
- */
- anon_prio = swappiness;
- file_prio = 200 - anon_prio;
- /*
- * OK, so we have swap space and a fair amount of page cache
- * pages. We use the recently rotated / recently scanned
- * ratios to determine how valuable each cache is.
- *
- * Because workloads change over time (and to avoid overflow)
- * we keep these statistics as a floating average, which ends
- * up weighing recent references more than old ones.
- *
- * anon in [0], file in [1]
- */
- anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
- file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
- spin_lock_irq(&pgdat->lru_lock);
- if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
- reclaim_stat->recent_scanned[0] /= 2;
- reclaim_stat->recent_rotated[0] /= 2;
- }
- if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
- reclaim_stat->recent_scanned[1] /= 2;
- reclaim_stat->recent_rotated[1] /= 2;
- }
- /*
- * The amount of pressure on anon vs file pages is inversely
- * proportional to the fraction of recently scanned pages on
- * each list that were recently referenced and in active use.
- */
- ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
- ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
- fp /= reclaim_stat->recent_rotated[1] + 1;
- spin_unlock_irq(&pgdat->lru_lock);
- fraction[0] = ap;
- fraction[1] = fp;
- denominator = ap + fp + 1;
- out:
- some_scanned = false;
- /* Only use force_scan on second pass. */
- for (pass = 0; !some_scanned && pass < 2; pass++) {
- *lru_pages = 0;
- for_each_evictable_lru(lru) {
- int file = is_file_lru(lru);
- unsigned long size;
- unsigned long scan;
- size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
- scan = size >> sc->priority;
- if (!scan && pass && force_scan)
- scan = min(size, SWAP_CLUSTER_MAX);
- switch (scan_balance) {
- case SCAN_EQUAL:
- /* Scan lists relative to size */
- break;
- case SCAN_FRACT:
- /*
- * Scan types proportional to swappiness and
- * their relative recent reclaim efficiency.
- */
- scan = div64_u64(scan * fraction[file],
- denominator);
- break;
- case SCAN_FILE:
- case SCAN_ANON:
- /* Scan one type exclusively */
- if ((scan_balance == SCAN_FILE) != file) {
- size = 0;
- scan = 0;
- }
- break;
- default:
- /* Look ma, no brain */
- BUG();
- }
- *lru_pages += size;
- nr[lru] = scan;
- /*
- * Skip the second pass and don't force_scan,
- * if we found something to scan.
- */
- some_scanned |= !!scan;
- }
- }
- }
- /*
- * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
- */
- static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
- struct scan_control *sc, unsigned long *lru_pages)
- {
- struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
- unsigned long nr[NR_LRU_LISTS];
- unsigned long targets[NR_LRU_LISTS];
- unsigned long nr_to_scan;
- enum lru_list lru;
- unsigned long nr_reclaimed = 0;
- unsigned long nr_to_reclaim = sc->nr_to_reclaim;
- struct blk_plug plug;
- bool scan_adjusted;
- get_scan_count(lruvec, memcg, sc, nr, lru_pages);
- /* Record the original scan target for proportional adjustments later */
- memcpy(targets, nr, sizeof(nr));
- /*
- * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
- * event that can occur when there is little memory pressure e.g.
- * multiple streaming readers/writers. Hence, we do not abort scanning
- * when the requested number of pages are reclaimed when scanning at
- * DEF_PRIORITY on the assumption that the fact we are direct
- * reclaiming implies that kswapd is not keeping up and it is best to
- * do a batch of work at once. For memcg reclaim one check is made to
- * abort proportional reclaim if either the file or anon lru has already
- * dropped to zero at the first pass.
- */
- scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
- sc->priority == DEF_PRIORITY);
- blk_start_plug(&plug);
- while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
- nr[LRU_INACTIVE_FILE]) {
- unsigned long nr_anon, nr_file, percentage;
- unsigned long nr_scanned;
- for_each_evictable_lru(lru) {
- if (nr[lru]) {
- nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
- nr[lru] -= nr_to_scan;
- nr_reclaimed += shrink_list(lru, nr_to_scan,
- lruvec, sc);
- }
- }
- cond_resched();
- if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
- continue;
- /*
- * For kswapd and memcg, reclaim at least the number of pages
- * requested. Ensure that the anon and file LRUs are scanned
- * proportionally what was requested by get_scan_count(). We
- * stop reclaiming one LRU and reduce the amount scanning
- * proportional to the original scan target.
- */
- nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
- nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
- /*
- * It's just vindictive to attack the larger once the smaller
- * has gone to zero. And given the way we stop scanning the
- * smaller below, this makes sure that we only make one nudge
- * towards proportionality once we've got nr_to_reclaim.
- */
- if (!nr_file || !nr_anon)
- break;
- if (nr_file > nr_anon) {
- unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
- targets[LRU_ACTIVE_ANON] + 1;
- lru = LRU_BASE;
- percentage = nr_anon * 100 / scan_target;
- } else {
- unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
- targets[LRU_ACTIVE_FILE] + 1;
- lru = LRU_FILE;
- percentage = nr_file * 100 / scan_target;
- }
- /* Stop scanning the smaller of the LRU */
- nr[lru] = 0;
- nr[lru + LRU_ACTIVE] = 0;
- /*
- * Recalculate the other LRU scan count based on its original
- * scan target and the percentage scanning already complete
- */
- lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
- nr_scanned = targets[lru] - nr[lru];
- nr[lru] = targets[lru] * (100 - percentage) / 100;
- nr[lru] -= min(nr[lru], nr_scanned);
- lru += LRU_ACTIVE;
- nr_scanned = targets[lru] - nr[lru];
- nr[lru] = targets[lru] * (100 - percentage) / 100;
- nr[lru] -= min(nr[lru], nr_scanned);
- scan_adjusted = true;
- }
- blk_finish_plug(&plug);
- sc->nr_reclaimed += nr_reclaimed;
- /*
- * Even if we did not try to evict anon pages at all, we want to
- * rebalance the anon lru active/inactive ratio.
- */
- if (inactive_list_is_low(lruvec, false, sc))
- shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
- sc, LRU_ACTIVE_ANON);
- }
- /* Use reclaim/compaction for costly allocs or under memory pressure */
- static bool in_reclaim_compaction(struct scan_control *sc)
- {
- if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
- (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
- sc->priority < DEF_PRIORITY - 2))
- return true;
- return false;
- }
- /*
- * Reclaim/compaction is used for high-order allocation requests. It reclaims
- * order-0 pages before compacting the zone. should_continue_reclaim() returns
- * true if more pages should be reclaimed such that when the page allocator
- * calls try_to_compact_zone() that it will have enough free pages to succeed.
- * It will give up earlier than that if there is difficulty reclaiming pages.
- */
- static inline bool should_continue_reclaim(struct pglist_data *pgdat,
- unsigned long nr_reclaimed,
- unsigned long nr_scanned,
- struct scan_control *sc)
- {
- unsigned long pages_for_compaction;
- unsigned long inactive_lru_pages;
- int z;
- /* If not in reclaim/compaction mode, stop */
- if (!in_reclaim_compaction(sc))
- return false;
- /* Consider stopping depending on scan and reclaim activity */
- if (sc->gfp_mask & __GFP_REPEAT) {
- /*
- * For __GFP_REPEAT allocations, stop reclaiming if the
- * full LRU list has been scanned and we are still failing
- * to reclaim pages. This full LRU scan is potentially
- * expensive but a __GFP_REPEAT caller really wants to succeed
- */
- if (!nr_reclaimed && !nr_scanned)
- return false;
- } else {
- /*
- * For non-__GFP_REPEAT allocations which can presumably
- * fail without consequence, stop if we failed to reclaim
- * any pages from the last SWAP_CLUSTER_MAX number of
- * pages that were scanned. This will return to the
- * caller faster at the risk reclaim/compaction and
- * the resulting allocation attempt fails
- */
- if (!nr_reclaimed)
- return false;
- }
- /*
- * If we have not reclaimed enough pages for compaction and the
- * inactive lists are large enough, continue reclaiming
- */
- pages_for_compaction = compact_gap(sc->order);
- inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
- if (get_nr_swap_pages() > 0)
- inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
- if (sc->nr_reclaimed < pages_for_compaction &&
- inactive_lru_pages > pages_for_compaction)
- return true;
- /* If compaction would go ahead or the allocation would succeed, stop */
- for (z = 0; z <= sc->reclaim_idx; z++) {
- struct zone *zone = &pgdat->node_zones[z];
- if (!managed_zone(zone))
- continue;
- switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
- case COMPACT_SUCCESS:
- case COMPACT_CONTINUE:
- return false;
- default:
- /* check next zone */
- ;
- }
- }
- return true;
- }
- static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
- {
- struct reclaim_state *reclaim_state = current->reclaim_state;
- unsigned long nr_reclaimed, nr_scanned;
- bool reclaimable = false;
- do {
- struct mem_cgroup *root = sc->target_mem_cgroup;
- struct mem_cgroup_reclaim_cookie reclaim = {
- .pgdat = pgdat,
- .priority = sc->priority,
- };
- unsigned long node_lru_pages = 0;
- struct mem_cgroup *memcg;
- nr_reclaimed = sc->nr_reclaimed;
- nr_scanned = sc->nr_scanned;
- memcg = mem_cgroup_iter(root, NULL, &reclaim);
- do {
- unsigned long lru_pages;
- unsigned long reclaimed;
- unsigned long scanned;
- if (mem_cgroup_low(root, memcg)) {
- if (!sc->may_thrash)
- continue;
- mem_cgroup_events(memcg, MEMCG_LOW, 1);
- }
- reclaimed = sc->nr_reclaimed;
- scanned = sc->nr_scanned;
- shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
- node_lru_pages += lru_pages;
- if (memcg)
- shrink_slab(sc->gfp_mask, pgdat->node_id,
- memcg, sc->nr_scanned - scanned,
- lru_pages);
- /* Record the group's reclaim efficiency */
- vmpressure(sc->gfp_mask, memcg, false,
- sc->nr_scanned - scanned,
- sc->nr_reclaimed - reclaimed);
- /*
- * Direct reclaim and kswapd have to scan all memory
- * cgroups to fulfill the overall scan target for the
- * node.
- *
- * Limit reclaim, on the other hand, only cares about
- * nr_to_reclaim pages to be reclaimed and it will
- * retry with decreasing priority if one round over the
- * whole hierarchy is not sufficient.
- */
- if (!global_reclaim(sc) &&
- sc->nr_reclaimed >= sc->nr_to_reclaim) {
- mem_cgroup_iter_break(root, memcg);
- break;
- }
- } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
- /*
- * Shrink the slab caches in the same proportion that
- * the eligible LRU pages were scanned.
- */
- if (global_reclaim(sc))
- shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
- sc->nr_scanned - nr_scanned,
- node_lru_pages);
- if (reclaim_state) {
- sc->nr_reclaimed += reclaim_state->reclaimed_slab;
- reclaim_state->reclaimed_slab = 0;
- }
- /* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
- if (sc->nr_reclaimed - nr_reclaimed)
- reclaimable = true;
- } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
- sc->nr_scanned - nr_scanned, sc));
- /*
- * Kswapd gives up on balancing particular nodes after too
- * many failures to reclaim anything from them and goes to
- * sleep. On reclaim progress, reset the failure counter. A
- * successful direct reclaim run will revive a dormant kswapd.
- */
- if (reclaimable)
- pgdat->kswapd_failures = 0;
- return reclaimable;
- }
- /*
- * Returns true if compaction should go ahead for a costly-order request, or
- * the allocation would already succeed without compaction. Return false if we
- * should reclaim first.
- */
- static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
- {
- unsigned long watermark;
- enum compact_result suitable;
- suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
- if (suitable == COMPACT_SUCCESS)
- /* Allocation should succeed already. Don't reclaim. */
- return true;
- if (suitable == COMPACT_SKIPPED)
- /* Compaction cannot yet proceed. Do reclaim. */
- return false;
- /*
- * Compaction is already possible, but it takes time to run and there
- * are potentially other callers using the pages just freed. So proceed
- * with reclaim to make a buffer of free pages available to give
- * compaction a reasonable chance of completing and allocating the page.
- * Note that we won't actually reclaim the whole buffer in one attempt
- * as the target watermark in should_continue_reclaim() is lower. But if
- * we are already above the high+gap watermark, don't reclaim at all.
- */
- watermark = high_wmark_pages(zone) + compact_gap(sc->order);
- return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
- }
- /*
- * This is the direct reclaim path, for page-allocating processes. We only
- * try to reclaim pages from zones which will satisfy the caller's allocation
- * request.
- *
- * If a zone is deemed to be full of pinned pages then just give it a light
- * scan then give up on it.
- */
- static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
- {
- struct zoneref *z;
- struct zone *zone;
- unsigned long nr_soft_reclaimed;
- unsigned long nr_soft_scanned;
- gfp_t orig_mask;
- pg_data_t *last_pgdat = NULL;
- /*
- * If the number of buffer_heads in the machine exceeds the maximum
- * allowed level, force direct reclaim to scan the highmem zone as
- * highmem pages could be pinning lowmem pages storing buffer_heads
- */
- orig_mask = sc->gfp_mask;
- if (buffer_heads_over_limit) {
- sc->gfp_mask |= __GFP_HIGHMEM;
- sc->reclaim_idx = gfp_zone(sc->gfp_mask);
- }
- for_each_zone_zonelist_nodemask(zone, z, zonelist,
- sc->reclaim_idx, sc->nodemask) {
- /*
- * Take care memory controller reclaiming has small influence
- * to global LRU.
- */
- if (global_reclaim(sc)) {
- if (!cpuset_zone_allowed(zone,
- GFP_KERNEL | __GFP_HARDWALL))
- continue;
- /*
- * If we already have plenty of memory free for
- * compaction in this zone, don't free any more.
- * Even though compaction is invoked for any
- * non-zero order, only frequent costly order
- * reclamation is disruptive enough to become a
- * noticeable problem, like transparent huge
- * page allocations.
- */
- if (IS_ENABLED(CONFIG_COMPACTION) &&
- sc->order > PAGE_ALLOC_COSTLY_ORDER &&
- compaction_ready(zone, sc)) {
- sc->compaction_ready = true;
- continue;
- }
- /*
- * Shrink each node in the zonelist once. If the
- * zonelist is ordered by zone (not the default) then a
- * node may be shrunk multiple times but in that case
- * the user prefers lower zones being preserved.
- */
- if (zone->zone_pgdat == last_pgdat)
- continue;
- /*
- * This steals pages from memory cgroups over softlimit
- * and returns the number of reclaimed pages and
- * scanned pages. This works for global memory pressure
- * and balancing, not for a memcg's limit.
- */
- nr_soft_scanned = 0;
- nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
- sc->order, sc->gfp_mask,
- &nr_soft_scanned);
- sc->nr_reclaimed += nr_soft_reclaimed;
- sc->nr_scanned += nr_soft_scanned;
- /* need some check for avoid more shrink_zone() */
- }
- /* See comment about same check for global reclaim above */
- if (zone->zone_pgdat == last_pgdat)
- continue;
- last_pgdat = zone->zone_pgdat;
- shrink_node(zone->zone_pgdat, sc);
- }
- /*
- * Restore to original mask to avoid the impact on the caller if we
- * promoted it to __GFP_HIGHMEM.
- */
- sc->gfp_mask = orig_mask;
- }
- /*
- * This is the main entry point to direct page reclaim.
- *
- * If a full scan of the inactive list fails to free enough memory then we
- * are "out of memory" and something needs to be killed.
- *
- * If the caller is !__GFP_FS then the probability of a failure is reasonably
- * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about. We kick the writeback threads and take explicit
- * naps in the hope that some of these pages can be written. But if the
- * allocating task holds filesystem locks which prevent writeout this might not
- * work, and the allocation attempt will fail.
- *
- * returns: 0, if no pages reclaimed
- * else, the number of pages reclaimed
- */
- static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
- struct scan_control *sc)
- {
- int initial_priority = sc->priority;
- unsigned long total_scanned = 0;
- unsigned long writeback_threshold;
- retry:
- delayacct_freepages_start();
- if (global_reclaim(sc))
- __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
- do {
- vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
- sc->priority);
- sc->nr_scanned = 0;
- shrink_zones(zonelist, sc);
- total_scanned += sc->nr_scanned;
- if (sc->nr_reclaimed >= sc->nr_to_reclaim)
- break;
- if (sc->compaction_ready)
- break;
- /*
- * If we're getting trouble reclaiming, start doing
- * writepage even in laptop mode.
- */
- if (sc->priority < DEF_PRIORITY - 2)
- sc->may_writepage = 1;
- /*
- * Try to write back as many pages as we just scanned. This
- * tends to cause slow streaming writers to write data to the
- * disk smoothly, at the dirtying rate, which is nice. But
- * that's undesirable in laptop mode, where we *want* lumpy
- * writeout. So in laptop mode, write out the whole world.
- */
- writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
- if (total_scanned > writeback_threshold) {
- wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
- WB_REASON_TRY_TO_FREE_PAGES);
- sc->may_writepage = 1;
- }
- } while (--sc->priority >= 0);
- delayacct_freepages_end();
- if (sc->nr_reclaimed)
- return sc->nr_reclaimed;
- /* Aborted reclaim to try compaction? don't OOM, then */
- if (sc->compaction_ready)
- return 1;
- /* Untapped cgroup reserves? Don't OOM, retry. */
- if (!sc->may_thrash) {
- sc->priority = initial_priority;
- sc->may_thrash = 1;
- goto retry;
- }
- return 0;
- }
- static bool allow_direct_reclaim(pg_data_t *pgdat)
- {
- struct zone *zone;
- unsigned long pfmemalloc_reserve = 0;
- unsigned long free_pages = 0;
- int i;
- bool wmark_ok;
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
- return true;
- for (i = 0; i <= ZONE_NORMAL; i++) {
- zone = &pgdat->node_zones[i];
- if (!managed_zone(zone))
- continue;
- if (!zone_reclaimable_pages(zone))
- continue;
- pfmemalloc_reserve += min_wmark_pages(zone);
- free_pages += zone_page_state(zone, NR_FREE_PAGES);
- }
- /* If there are no reserves (unexpected config) then do not throttle */
- if (!pfmemalloc_reserve)
- return true;
- wmark_ok = free_pages > pfmemalloc_reserve / 2;
- /* kswapd must be awake if processes are being throttled */
- if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
- (enum zone_type)ZONE_NORMAL);
- wake_up_interruptible(&pgdat->kswapd_wait);
- }
- return wmark_ok;
- }
- /*
- * Throttle direct reclaimers if backing storage is backed by the network
- * and the PFMEMALLOC reserve for the preferred node is getting dangerously
- * depleted. kswapd will continue to make progress and wake the processes
- * when the low watermark is reached.
- *
- * Returns true if a fatal signal was delivered during throttling. If this
- * happens, the page allocator should not consider triggering the OOM killer.
- */
- static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
- nodemask_t *nodemask)
- {
- struct zoneref *z;
- struct zone *zone;
- pg_data_t *pgdat = NULL;
- /*
- * Kernel threads should not be throttled as they may be indirectly
- * responsible for cleaning pages necessary for reclaim to make forward
- * progress. kjournald for example may enter direct reclaim while
- * committing a transaction where throttling it could forcing other
- * processes to block on log_wait_commit().
- */
- if (current->flags & PF_KTHREAD)
- goto out;
- /*
- * If a fatal signal is pending, this process should not throttle.
- * It should return quickly so it can exit and free its memory
- */
- if (fatal_signal_pending(current))
- goto out;
- /*
- * Check if the pfmemalloc reserves are ok by finding the first node
- * with a usable ZONE_NORMAL or lower zone. The expectation is that
- * GFP_KERNEL will be required for allocating network buffers when
- * swapping over the network so ZONE_HIGHMEM is unusable.
- *
- * Throttling is based on the first usable node and throttled processes
- * wait on a queue until kswapd makes progress and wakes them. There
- * is an affinity then between processes waking up and where reclaim
- * progress has been made assuming the process wakes on the same node.
- * More importantly, processes running on remote nodes will not compete
- * for remote pfmemalloc reserves and processes on different nodes
- * should make reasonable progress.
- */
- for_each_zone_zonelist_nodemask(zone, z, zonelist,
- gfp_zone(gfp_mask), nodemask) {
- if (zone_idx(zone) > ZONE_NORMAL)
- continue;
- /* Throttle based on the first usable node */
- pgdat = zone->zone_pgdat;
- if (allow_direct_reclaim(pgdat))
- goto out;
- break;
- }
- /* If no zone was usable by the allocation flags then do not throttle */
- if (!pgdat)
- goto out;
- /* Account for the throttling */
- count_vm_event(PGSCAN_DIRECT_THROTTLE);
- /*
- * If the caller cannot enter the filesystem, it's possible that it
- * is due to the caller holding an FS lock or performing a journal
- * transaction in the case of a filesystem like ext[3|4]. In this case,
- * it is not safe to block on pfmemalloc_wait as kswapd could be
- * blocked waiting on the same lock. Instead, throttle for up to a
- * second before continuing.
- */
- if (!(gfp_mask & __GFP_FS)) {
- wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
- allow_direct_reclaim(pgdat), HZ);
- goto check_pending;
- }
- /* Throttle until kswapd wakes the process */
- wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
- allow_direct_reclaim(pgdat));
- check_pending:
- if (fatal_signal_pending(current))
- return true;
- out:
- return false;
- }
- unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
- gfp_t gfp_mask, nodemask_t *nodemask)
- {
- unsigned long nr_reclaimed;
- struct scan_control sc = {
- .nr_to_reclaim = SWAP_CLUSTER_MAX,
- .gfp_mask = memalloc_noio_flags(gfp_mask),
- .reclaim_idx = gfp_zone(gfp_mask),
- .order = order,
- .nodemask = nodemask,
- .priority = DEF_PRIORITY,
- .may_writepage = !laptop_mode,
- .may_unmap = 1,
- .may_swap = 1,
- };
- /*
- * Do not enter reclaim if fatal signal was delivered while throttled.
- * 1 is returned so that the page allocator does not OOM kill at this
- * point.
- */
- if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
- return 1;
- trace_mm_vmscan_direct_reclaim_begin(order,
- sc.may_writepage,
- sc.gfp_mask,
- sc.reclaim_idx);
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
- trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
- return nr_reclaimed;
- }
- #ifdef CONFIG_MEMCG
- unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
- gfp_t gfp_mask, bool noswap,
- pg_data_t *pgdat,
- unsigned long *nr_scanned)
- {
- struct scan_control sc = {
- .nr_to_reclaim = SWAP_CLUSTER_MAX,
- .target_mem_cgroup = memcg,
- .may_writepage = !laptop_mode,
- .may_unmap = 1,
- .reclaim_idx = MAX_NR_ZONES - 1,
- .may_swap = !noswap,
- };
- unsigned long lru_pages;
- sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
- (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
- trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
- sc.may_writepage,
- sc.gfp_mask,
- sc.reclaim_idx);
- /*
- * NOTE: Although we can get the priority field, using it
- * here is not a good idea, since it limits the pages we can scan.
- * if we don't reclaim here, the shrink_node from balance_pgdat
- * will pick up pages from other mem cgroup's as well. We hack
- * the priority and make it zero.
- */
- shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
- trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
- *nr_scanned = sc.nr_scanned;
- return sc.nr_reclaimed;
- }
- unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
- unsigned long nr_pages,
- gfp_t gfp_mask,
- bool may_swap)
- {
- struct zonelist *zonelist;
- unsigned long nr_reclaimed;
- int nid;
- struct scan_control sc = {
- .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
- .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
- (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
- .reclaim_idx = MAX_NR_ZONES - 1,
- .target_mem_cgroup = memcg,
- .priority = DEF_PRIORITY,
- .may_writepage = !laptop_mode,
- .may_unmap = 1,
- .may_swap = may_swap,
- };
- /*
- * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
- * take care of from where we get pages. So the node where we start the
- * scan does not need to be the current node.
- */
- nid = mem_cgroup_select_victim_node(memcg);
- zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
- trace_mm_vmscan_memcg_reclaim_begin(0,
- sc.may_writepage,
- sc.gfp_mask,
- sc.reclaim_idx);
- current->flags |= PF_MEMALLOC;
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
- current->flags &= ~PF_MEMALLOC;
- trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
- return nr_reclaimed;
- }
- #endif
- static void age_active_anon(struct pglist_data *pgdat,
- struct scan_control *sc)
- {
- struct mem_cgroup *memcg;
- if (!total_swap_pages)
- return;
- memcg = mem_cgroup_iter(NULL, NULL, NULL);
- do {
- struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
- if (inactive_list_is_low(lruvec, false, sc))
- shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
- sc, LRU_ACTIVE_ANON);
- memcg = mem_cgroup_iter(NULL, memcg, NULL);
- } while (memcg);
- }
- static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
- {
- unsigned long mark = high_wmark_pages(zone);
- if (!zone_watermark_ok_safe(zone, order, mark, classzone_idx))
- return false;
- /*
- * If any eligible zone is balanced then the node is not considered
- * to be congested or dirty
- */
- clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
- clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
- return true;
- }
- /*
- * Prepare kswapd for sleeping. This verifies that there are no processes
- * waiting in throttle_direct_reclaim() and that watermarks have been met.
- *
- * Returns true if kswapd is ready to sleep
- */
- static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
- {
- int i;
- /*
- * The throttled processes are normally woken up in balance_pgdat() as
- * soon as allow_direct_reclaim() is true. But there is a potential
- * race between when kswapd checks the watermarks and a process gets
- * throttled. There is also a potential race if processes get
- * throttled, kswapd wakes, a large process exits thereby balancing the
- * zones, which causes kswapd to exit balance_pgdat() before reaching
- * the wake up checks. If kswapd is going to sleep, no process should
- * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
- * the wake up is premature, processes will wake kswapd and get
- * throttled again. The difference from wake ups in balance_pgdat() is
- * that here we are under prepare_to_wait().
- */
- if (waitqueue_active(&pgdat->pfmemalloc_wait))
- wake_up_all(&pgdat->pfmemalloc_wait);
- /* Hopeless node, leave it to direct reclaim */
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
- return true;
- for (i = 0; i <= classzone_idx; i++) {
- struct zone *zone = pgdat->node_zones + i;
- if (!managed_zone(zone))
- continue;
- if (!zone_balanced(zone, order, classzone_idx))
- return false;
- }
- return true;
- }
- /*
- * kswapd shrinks a node of pages that are at or below the highest usable
- * zone that is currently unbalanced.
- *
- * Returns true if kswapd scanned at least the requested number of pages to
- * reclaim or if the lack of progress was due to pages under writeback.
- * This is used to determine if the scanning priority needs to be raised.
- */
- static bool kswapd_shrink_node(pg_data_t *pgdat,
- struct scan_control *sc)
- {
- struct zone *zone;
- int z;
- /* Reclaim a number of pages proportional to the number of zones */
- sc->nr_to_reclaim = 0;
- for (z = 0; z <= sc->reclaim_idx; z++) {
- zone = pgdat->node_zones + z;
- if (!managed_zone(zone))
- continue;
- sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
- }
- /*
- * Historically care was taken to put equal pressure on all zones but
- * now pressure is applied based on node LRU order.
- */
- shrink_node(pgdat, sc);
- /*
- * Fragmentation may mean that the system cannot be rebalanced for
- * high-order allocations. If twice the allocation size has been
- * reclaimed then recheck watermarks only at order-0 to prevent
- * excessive reclaim. Assume that a process requested a high-order
- * can direct reclaim/compact.
- */
- if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
- sc->order = 0;
- return sc->nr_scanned >= sc->nr_to_reclaim;
- }
- /*
- * For kswapd, balance_pgdat() will reclaim pages across a node from zones
- * that are eligible for use by the caller until at least one zone is
- * balanced.
- *
- * Returns the order kswapd finished reclaiming at.
- *
- * kswapd scans the zones in the highmem->normal->dma direction. It skips
- * zones which have free_pages > high_wmark_pages(zone), but once a zone is
- * found to have free_pages <= high_wmark_pages(zone), any page is that zone
- * or lower is eligible for reclaim until at least one usable zone is
- * balanced.
- */
- static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
- {
- int i;
- unsigned long nr_soft_reclaimed;
- unsigned long nr_soft_scanned;
- struct zone *zone;
- struct scan_control sc = {
- .gfp_mask = GFP_KERNEL,
- .order = order,
- .priority = DEF_PRIORITY,
- .may_writepage = !laptop_mode,
- .may_unmap = 1,
- .may_swap = 1,
- };
- count_vm_event(PAGEOUTRUN);
- do {
- unsigned long nr_reclaimed = sc.nr_reclaimed;
- bool raise_priority = true;
- sc.reclaim_idx = classzone_idx;
- /*
- * If the number of buffer_heads exceeds the maximum allowed
- * then consider reclaiming from all zones. This has a dual
- * purpose -- on 64-bit systems it is expected that
- * buffer_heads are stripped during active rotation. On 32-bit
- * systems, highmem pages can pin lowmem memory and shrinking
- * buffers can relieve lowmem pressure. Reclaim may still not
- * go ahead if all eligible zones for the original allocation
- * request are balanced to avoid excessive reclaim from kswapd.
- */
- if (buffer_heads_over_limit) {
- for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
- zone = pgdat->node_zones + i;
- if (!managed_zone(zone))
- continue;
- sc.reclaim_idx = i;
- break;
- }
- }
- /*
- * Only reclaim if there are no eligible zones. Check from
- * high to low zone as allocations prefer higher zones.
- * Scanning from low to high zone would allow congestion to be
- * cleared during a very small window when a small low
- * zone was balanced even under extreme pressure when the
- * overall node may be congested. Note that sc.reclaim_idx
- * is not used as buffer_heads_over_limit may have adjusted
- * it.
- */
- for (i = classzone_idx; i >= 0; i--) {
- zone = pgdat->node_zones + i;
- if (!managed_zone(zone))
- continue;
- if (zone_balanced(zone, sc.order, classzone_idx))
- goto out;
- }
- /*
- * Do some background aging of the anon list, to give
- * pages a chance to be referenced before reclaiming. All
- * pages are rotated regardless of classzone as this is
- * about consistent aging.
- */
- age_active_anon(pgdat, &sc);
- /*
- * If we're getting trouble reclaiming, start doing writepage
- * even in laptop mode.
- */
- if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
- sc.may_writepage = 1;
- /* Call soft limit reclaim before calling shrink_node. */
- sc.nr_scanned = 0;
- nr_soft_scanned = 0;
- nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
- sc.gfp_mask, &nr_soft_scanned);
- sc.nr_reclaimed += nr_soft_reclaimed;
- /*
- * There should be no need to raise the scanning priority if
- * enough pages are already being scanned that that high
- * watermark would be met at 100% efficiency.
- */
- if (kswapd_shrink_node(pgdat, &sc))
- raise_priority = false;
- /*
- * If the low watermark is met there is no need for processes
- * to be throttled on pfmemalloc_wait as they should not be
- * able to safely make forward progress. Wake them
- */
- if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
- allow_direct_reclaim(pgdat))
- wake_up_all(&pgdat->pfmemalloc_wait);
- /* Check if kswapd should be suspending */
- if (try_to_freeze() || kthread_should_stop())
- break;
- /*
- * Raise priority if scanning rate is too low or there was no
- * progress in reclaiming pages
- */
- nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
- if (raise_priority || !nr_reclaimed)
- sc.priority--;
- } while (sc.priority >= 1);
- if (!sc.nr_reclaimed)
- pgdat->kswapd_failures++;
- out:
- /*
- * Return the order kswapd stopped reclaiming at as
- * prepare_kswapd_sleep() takes it into account. If another caller
- * entered the allocator slow path while kswapd was awake, order will
- * remain at the higher level.
- */
- return sc.order;
- }
- static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
- unsigned int classzone_idx)
- {
- long remaining = 0;
- DEFINE_WAIT(wait);
- if (freezing(current) || kthread_should_stop())
- return;
- prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
- /* Try to sleep for a short interval */
- if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
- /*
- * Compaction records what page blocks it recently failed to
- * isolate pages from and skips them in the future scanning.
- * When kswapd is going to sleep, it is reasonable to assume
- * that pages and compaction may succeed so reset the cache.
- */
- reset_isolation_suitable(pgdat);
- /*
- * We have freed the memory, now we should compact it to make
- * allocation of the requested order possible.
- */
- wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
- remaining = schedule_timeout(HZ/10);
- /*
- * If woken prematurely then reset kswapd_classzone_idx and
- * order. The values will either be from a wakeup request or
- * the previous request that slept prematurely.
- */
- if (remaining) {
- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
- }
- finish_wait(&pgdat->kswapd_wait, &wait);
- prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
- }
- /*
- * After a short sleep, check if it was a premature sleep. If not, then
- * go fully to sleep until explicitly woken up.
- */
- if (!remaining &&
- prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
- trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
- /*
- * vmstat counters are not perfectly accurate and the estimated
- * value for counters such as NR_FREE_PAGES can deviate from the
- * true value by nr_online_cpus * threshold. To avoid the zone
- * watermarks being breached while under pressure, we reduce the
- * per-cpu vmstat threshold while kswapd is awake and restore
- * them before going back to sleep.
- */
- set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- if (!kthread_should_stop())
- schedule();
- set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
- } else {
- if (remaining)
- count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
- else
- count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
- }
- finish_wait(&pgdat->kswapd_wait, &wait);
- }
- /*
- * The background pageout daemon, started as a kernel thread
- * from the init process.
- *
- * This basically trickles out pages so that we have _some_
- * free memory available even if there is no other activity
- * that frees anything up. This is needed for things like routing
- * etc, where we otherwise might have all activity going on in
- * asynchronous contexts that cannot page things out.
- *
- * If there are applications that are active memory-allocators
- * (most normal use), this basically shouldn't matter.
- */
- static int kswapd(void *p)
- {
- unsigned int alloc_order, reclaim_order, classzone_idx;
- pg_data_t *pgdat = (pg_data_t*)p;
- struct task_struct *tsk = current;
- struct reclaim_state reclaim_state = {
- .reclaimed_slab = 0,
- };
- const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
- lockdep_set_current_reclaim_state(GFP_KERNEL);
- if (!cpumask_empty(cpumask))
- set_cpus_allowed_ptr(tsk, cpumask);
- current->reclaim_state = &reclaim_state;
- /*
- * Tell the memory management that we're a "memory allocator",
- * and that if we need more memory we should get access to it
- * regardless (see "__alloc_pages()"). "kswapd" should
- * never get caught in the normal page freeing logic.
- *
- * (Kswapd normally doesn't need memory anyway, but sometimes
- * you need a small amount of memory in order to be able to
- * page out something else, and this flag essentially protects
- * us from recursively trying to free more memory as we're
- * trying to free the first piece of memory in the first place).
- */
- tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
- set_freezable();
- pgdat->kswapd_order = alloc_order = reclaim_order = 0;
- pgdat->kswapd_classzone_idx = classzone_idx = 0;
- for ( ; ; ) {
- bool ret;
- kswapd_try_sleep:
- kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
- classzone_idx);
- /* Read the new order and classzone_idx */
- alloc_order = reclaim_order = pgdat->kswapd_order;
- classzone_idx = pgdat->kswapd_classzone_idx;
- pgdat->kswapd_order = 0;
- pgdat->kswapd_classzone_idx = 0;
- ret = try_to_freeze();
- if (kthread_should_stop())
- break;
- /*
- * We can speed up thawing tasks if we don't call balance_pgdat
- * after returning from the refrigerator
- */
- if (ret)
- continue;
- /*
- * Reclaim begins at the requested order but if a high-order
- * reclaim fails then kswapd falls back to reclaiming for
- * order-0. If that happens, kswapd will consider sleeping
- * for the order it finished reclaiming at (reclaim_order)
- * but kcompactd is woken to compact for the original
- * request (alloc_order).
- */
- trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
- alloc_order);
- reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
- if (reclaim_order < alloc_order)
- goto kswapd_try_sleep;
- alloc_order = reclaim_order = pgdat->kswapd_order;
- classzone_idx = pgdat->kswapd_classzone_idx;
- }
- tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
- current->reclaim_state = NULL;
- lockdep_clear_current_reclaim_state();
- return 0;
- }
- /*
- * A zone is low on free memory, so wake its kswapd task to service it.
- */
- void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
- {
- pg_data_t *pgdat;
- int z;
- if (!managed_zone(zone))
- return;
- if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
- return;
- pgdat = zone->zone_pgdat;
- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
- if (!waitqueue_active(&pgdat->kswapd_wait))
- return;
- /* Hopeless node, leave it to direct reclaim */
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
- return;
- /* Only wake kswapd if all zones are unbalanced */
- for (z = 0; z <= classzone_idx; z++) {
- zone = pgdat->node_zones + z;
- if (!managed_zone(zone))
- continue;
- if (zone_balanced(zone, order, classzone_idx))
- return;
- }
- trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
- wake_up_interruptible(&pgdat->kswapd_wait);
- }
- #ifdef CONFIG_HIBERNATION
- /*
- * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
- * freed pages.
- *
- * Rather than trying to age LRUs the aim is to preserve the overall
- * LRU order by reclaiming preferentially
- * inactive > active > active referenced > active mapped
- */
- unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
- {
- struct reclaim_state reclaim_state;
- struct scan_control sc = {
- .nr_to_reclaim = nr_to_reclaim,
- .gfp_mask = GFP_HIGHUSER_MOVABLE,
- .reclaim_idx = MAX_NR_ZONES - 1,
- .priority = DEF_PRIORITY,
- .may_writepage = 1,
- .may_unmap = 1,
- .may_swap = 1,
- .hibernation_mode = 1,
- };
- struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
- struct task_struct *p = current;
- unsigned long nr_reclaimed;
- p->flags |= PF_MEMALLOC;
- lockdep_set_current_reclaim_state(sc.gfp_mask);
- reclaim_state.reclaimed_slab = 0;
- p->reclaim_state = &reclaim_state;
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
- p->reclaim_state = NULL;
- lockdep_clear_current_reclaim_state();
- p->flags &= ~PF_MEMALLOC;
- return nr_reclaimed;
- }
- #endif /* CONFIG_HIBERNATION */
- /* It's optimal to keep kswapds on the same CPUs as their memory, but
- not required for correctness. So if the last cpu in a node goes
- away, we get changed to run anywhere: as the first one comes back,
- restore their cpu bindings. */
- static int cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
- {
- int nid;
- if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
- for_each_node_state(nid, N_MEMORY) {
- pg_data_t *pgdat = NODE_DATA(nid);
- const struct cpumask *mask;
- mask = cpumask_of_node(pgdat->node_id);
- if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
- /* One of our CPUs online: restore mask */
- set_cpus_allowed_ptr(pgdat->kswapd, mask);
- }
- }
- return NOTIFY_OK;
- }
- /*
- * This kswapd start function will be called by init and node-hot-add.
- * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
- */
- int kswapd_run(int nid)
- {
- pg_data_t *pgdat = NODE_DATA(nid);
- int ret = 0;
- if (pgdat->kswapd)
- return 0;
- pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
- if (IS_ERR(pgdat->kswapd)) {
- /* failure at boot is fatal */
- BUG_ON(system_state == SYSTEM_BOOTING);
- pr_err("Failed to start kswapd on node %d\n", nid);
- ret = PTR_ERR(pgdat->kswapd);
- pgdat->kswapd = NULL;
- }
- return ret;
- }
- /*
- * Called by memory hotplug when all memory in a node is offlined. Caller must
- * hold mem_hotplug_begin/end().
- */
- void kswapd_stop(int nid)
- {
- struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
- if (kswapd) {
- kthread_stop(kswapd);
- NODE_DATA(nid)->kswapd = NULL;
- }
- }
- static int __init kswapd_init(void)
- {
- int nid;
- swap_setup();
- for_each_node_state(nid, N_MEMORY)
- kswapd_run(nid);
- hotcpu_notifier(cpu_callback, 0);
- return 0;
- }
- module_init(kswapd_init)
- #ifdef CONFIG_NUMA
- /*
- * Node reclaim mode
- *
- * If non-zero call node_reclaim when the number of free pages falls below
- * the watermarks.
- */
- int node_reclaim_mode __read_mostly;
- #define RECLAIM_OFF 0
- #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
- #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
- #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
- /*
- * Priority for NODE_RECLAIM. This determines the fraction of pages
- * of a node considered for each zone_reclaim. 4 scans 1/16th of
- * a zone.
- */
- #define NODE_RECLAIM_PRIORITY 4
- /*
- * Percentage of pages in a zone that must be unmapped for node_reclaim to
- * occur.
- */
- int sysctl_min_unmapped_ratio = 1;
- /*
- * If the number of slab pages in a zone grows beyond this percentage then
- * slab reclaim needs to occur.
- */
- int sysctl_min_slab_ratio = 5;
- static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
- {
- unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
- unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
- node_page_state(pgdat, NR_ACTIVE_FILE);
- /*
- * It's possible for there to be more file mapped pages than
- * accounted for by the pages on the file LRU lists because
- * tmpfs pages accounted for as ANON can also be FILE_MAPPED
- */
- return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
- }
- /* Work out how many page cache pages we can reclaim in this reclaim_mode */
- static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
- {
- unsigned long nr_pagecache_reclaimable;
- unsigned long delta = 0;
- /*
- * If RECLAIM_UNMAP is set, then all file pages are considered
- * potentially reclaimable. Otherwise, we have to worry about
- * pages like swapcache and node_unmapped_file_pages() provides
- * a better estimate
- */
- if (node_reclaim_mode & RECLAIM_UNMAP)
- nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
- else
- nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
- /* If we can't clean pages, remove dirty pages from consideration */
- if (!(node_reclaim_mode & RECLAIM_WRITE))
- delta += node_page_state(pgdat, NR_FILE_DIRTY);
- /* Watch for any possible underflows due to delta */
- if (unlikely(delta > nr_pagecache_reclaimable))
- delta = nr_pagecache_reclaimable;
- return nr_pagecache_reclaimable - delta;
- }
- /*
- * Try to free up some pages from this node through reclaim.
- */
- static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
- {
- /* Minimum pages needed in order to stay on node */
- const unsigned long nr_pages = 1 << order;
- struct task_struct *p = current;
- struct reclaim_state reclaim_state;
- struct scan_control sc = {
- .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
- .gfp_mask = memalloc_noio_flags(gfp_mask),
- .order = order,
- .priority = NODE_RECLAIM_PRIORITY,
- .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
- .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
- .may_swap = 1,
- .reclaim_idx = gfp_zone(gfp_mask),
- };
- cond_resched();
- /*
- * We need to be able to allocate from the reserves for RECLAIM_UNMAP
- * and we also need to be able to write out pages for RECLAIM_WRITE
- * and RECLAIM_UNMAP.
- */
- p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
- lockdep_set_current_reclaim_state(sc.gfp_mask);
- reclaim_state.reclaimed_slab = 0;
- p->reclaim_state = &reclaim_state;
- if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
- /*
- * Free memory by calling shrink zone with increasing
- * priorities until we have enough memory freed.
- */
- do {
- shrink_node(pgdat, &sc);
- } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
- }
- p->reclaim_state = NULL;
- current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
- lockdep_clear_current_reclaim_state();
- return sc.nr_reclaimed >= nr_pages;
- }
- int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
- {
- int ret;
- /*
- * Node reclaim reclaims unmapped file backed pages and
- * slab pages if we are over the defined limits.
- *
- * A small portion of unmapped file backed pages is needed for
- * file I/O otherwise pages read by file I/O will be immediately
- * thrown out if the node is overallocated. So we do not reclaim
- * if less than a specified percentage of the node is used by
- * unmapped file backed pages.
- */
- if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
- sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
- return NODE_RECLAIM_FULL;
- /*
- * Do not scan if the allocation should not be delayed.
- */
- if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
- return NODE_RECLAIM_NOSCAN;
- /*
- * Only run node reclaim on the local node or on nodes that do not
- * have associated processors. This will favor the local processor
- * over remote processors and spread off node memory allocations
- * as wide as possible.
- */
- if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
- return NODE_RECLAIM_NOSCAN;
- if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
- return NODE_RECLAIM_NOSCAN;
- ret = __node_reclaim(pgdat, gfp_mask, order);
- clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
- if (!ret)
- count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
- return ret;
- }
- #endif
- /*
- * page_evictable - test whether a page is evictable
- * @page: the page to test
- *
- * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.
- *
- * Reasons page might not be evictable:
- * (1) page's mapping marked unevictable
- * (2) page is part of an mlocked VMA
- *
- */
- int page_evictable(struct page *page)
- {
- int ret;
- /* Prevent address_space of inode and swap cache from being freed */
- rcu_read_lock();
- ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
- rcu_read_unlock();
- return ret;
- }
- #ifdef CONFIG_SHMEM
- /**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages: array of pages to check
- * @nr_pages: number of pages to check
- *
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
- */
- void check_move_unevictable_pages(struct page **pages, int nr_pages)
- {
- struct lruvec *lruvec;
- struct pglist_data *pgdat = NULL;
- int pgscanned = 0;
- int pgrescued = 0;
- int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pages[i];
- struct pglist_data *pagepgdat = page_pgdat(page);
- pgscanned++;
- if (pagepgdat != pgdat) {
- if (pgdat)
- spin_unlock_irq(&pgdat->lru_lock);
- pgdat = pagepgdat;
- spin_lock_irq(&pgdat->lru_lock);
- }
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- if (!PageLRU(page) || !PageUnevictable(page))
- continue;
- if (page_evictable(page)) {
- enum lru_list lru = page_lru_base_type(page);
- VM_BUG_ON_PAGE(PageActive(page), page);
- ClearPageUnevictable(page);
- del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
- add_page_to_lru_list(page, lruvec, lru);
- pgrescued++;
- }
- }
- if (pgdat) {
- __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
- __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
- spin_unlock_irq(&pgdat->lru_lock);
- }
- }
- #endif /* CONFIG_SHMEM */
|