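- /*
- * Completely Fair Scheduling (CFS) class: per-entity vruntime accounting,
- * the red-black runqueue timeline, group-share tracking, and CFS bandwidth
- * control for the fair scheduling policies.
- */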
- #include <linux/latencytop.h>
- #include <linux/sched.h>
- #include <linux/cpumask.h>
- #include <linux/slab.h>
- #include <linux/profile.h>
- #include <linux/interrupt.h>
- #include <trace/events/sched.h>
- #include "sched.h"
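- /*
- * Targeted preemption latency for CPU-bound tasks
- * (default: 6ms, rescaled at boot by the tunable-scaling factor in
- * update_sysctl(); units: nanoseconds).
- */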
- unsigned int sysctl_sched_latency = 6000000ULL;
- unsigned int normalized_sysctl_sched_latency = 6000000ULL;
- enum sched_tunable_scaling sysctl_sched_tunable_scaling
- = SCHED_TUNABLESCALING_LOG;
- unsigned int sysctl_sched_min_granularity = 750000ULL;
- unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
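- /*
- * Number of tasks the latency period can accommodate before it is stretched;
- * recomputed as sysctl_sched_latency / sysctl_sched_min_granularity
- * (default: 8).
- */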
- static unsigned int sched_nr_latency = 8;
- unsigned int sysctl_sched_child_runs_first __read_mostly;
- unsigned int __read_mostly sysctl_sched_wake_to_idle;
- unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
- unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
- const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
- unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
- #ifdef CONFIG_CFS_BANDWIDTH
- unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
- #endif
- static int get_update_sysctl_factor(void)
- {
- unsigned int cpus = min_t(int, num_online_cpus(), 8);
- unsigned int factor;
- switch (sysctl_sched_tunable_scaling) {
- case SCHED_TUNABLESCALING_NONE:
- factor = 1;
- break;
- case SCHED_TUNABLESCALING_LINEAR:
- factor = cpus;
- break;
- case SCHED_TUNABLESCALING_LOG:
- default:
- factor = 1 + ilog2(cpus);
- break;
- }
- return factor;
- }
- static void update_sysctl(void)
- {
- unsigned int factor = get_update_sysctl_factor();
- #define SET_SYSCTL(name) \
- (sysctl_##name = (factor) * normalized_sysctl_##name)
- SET_SYSCTL(sched_min_granularity);
- SET_SYSCTL(sched_latency);
- SET_SYSCTL(sched_wakeup_granularity);
- #undef SET_SYSCTL
- }
- void sched_init_granularity(void)
- {
- update_sysctl();
- }
- #if BITS_PER_LONG == 32
- # define WMULT_CONST (~0UL)
- #else
- # define WMULT_CONST (1UL << 32)
- #endif
- #define WMULT_SHIFT 32
- /*
- * Shift right and round:
- */
- #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
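- /*
- * Compute delta_exec * weight / lw->weight using a cached fixed-point
- * inverse weight (WMULT_CONST / lw->weight) and shift-right-and-round,
- * avoiding a 64-bit division on every call.
- */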
- static unsigned long
- calc_delta_mine(unsigned long delta_exec, unsigned long weight,
- struct load_weight *lw)
- {
- u64 tmp;
-
- if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
- tmp = (u64)delta_exec * scale_load_down(weight);
- else
- tmp = (u64)delta_exec;
- if (!lw->inv_weight) {
- unsigned long w = scale_load_down(lw->weight);
- if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
- lw->inv_weight = 1;
- else if (unlikely(!w))
- lw->inv_weight = WMULT_CONST;
- else
- lw->inv_weight = WMULT_CONST / w;
- }
-
- if (unlikely(tmp > WMULT_CONST))
- tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
- WMULT_SHIFT/2);
- else
- tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
- return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
- }
- const struct sched_class fair_sched_class;
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
- {
- return cfs_rq->rq;
- }
- #define entity_is_task(se) (!se->my_q)
- static inline struct task_struct *task_of(struct sched_entity *se)
- {
- #ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(!entity_is_task(se));
- #endif
- return container_of(se, struct task_struct, se);
- }
- #define for_each_sched_entity(se) \
- for (; se; se = se->parent)
- static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
- {
- return p->se.cfs_rq;
- }
- static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
- {
- return se->cfs_rq;
- }
- static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
- {
- return grp->my_q;
- }
- static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- if (!cfs_rq->on_list) {
-
- if (cfs_rq->tg->parent &&
- cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq_of(cfs_rq)->leaf_cfs_rq_list);
- } else {
- list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq_of(cfs_rq)->leaf_cfs_rq_list);
- }
- cfs_rq->on_list = 1;
- }
- }
- static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- if (cfs_rq->on_list) {
- list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
- cfs_rq->on_list = 0;
- }
- }
- #define for_each_leaf_cfs_rq(rq, cfs_rq) \
- list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
- static inline int
- is_same_group(struct sched_entity *se, struct sched_entity *pse)
- {
- if (se->cfs_rq == pse->cfs_rq)
- return 1;
- return 0;
- }
- static inline struct sched_entity *parent_entity(struct sched_entity *se)
- {
- return se->parent;
- }
- static inline int depth_se(struct sched_entity *se)
- {
- int depth = 0;
- for_each_sched_entity(se)
- depth++;
- return depth;
- }
- static void
- find_matching_se(struct sched_entity **se, struct sched_entity **pse)
- {
- int se_depth, pse_depth;
-
-
- se_depth = depth_se(*se);
- pse_depth = depth_se(*pse);
- while (se_depth > pse_depth) {
- se_depth--;
- *se = parent_entity(*se);
- }
- while (pse_depth > se_depth) {
- pse_depth--;
- *pse = parent_entity(*pse);
- }
- while (!is_same_group(*se, *pse)) {
- *se = parent_entity(*se);
- *pse = parent_entity(*pse);
- }
- }
- #else
- static inline struct task_struct *task_of(struct sched_entity *se)
- {
- return container_of(se, struct task_struct, se);
- }
- static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
- {
- return container_of(cfs_rq, struct rq, cfs);
- }
- #define entity_is_task(se) 1
- #define for_each_sched_entity(se) \
- for (; se; se = NULL)
- static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
- {
- return &task_rq(p)->cfs;
- }
- static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
- {
- struct task_struct *p = task_of(se);
- struct rq *rq = task_rq(p);
- return &rq->cfs;
- }
- static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
- {
- return NULL;
- }
- static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- }
- static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- }
- #define for_each_leaf_cfs_rq(rq, cfs_rq) \
- for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
- static inline int
- is_same_group(struct sched_entity *se, struct sched_entity *pse)
- {
- return 1;
- }
- static inline struct sched_entity *parent_entity(struct sched_entity *se)
- {
- return NULL;
- }
- static inline void
- find_matching_se(struct sched_entity **se, struct sched_entity **pse)
- {
- }
- #endif
- static __always_inline
- void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
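- /*
- * Wrap-safe min/max helpers for vruntime: comparisons are made on the
- * signed difference so u64 wraparound is handled correctly.
- */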
- static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
- {
- s64 delta = (s64)(vruntime - min_vruntime);
- if (delta > 0)
- min_vruntime = vruntime;
- return min_vruntime;
- }
- static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
- {
- s64 delta = (s64)(vruntime - min_vruntime);
- if (delta < 0)
- min_vruntime = vruntime;
- return min_vruntime;
- }
- static inline int entity_before(struct sched_entity *a,
- struct sched_entity *b)
- {
- return (s64)(a->vruntime - b->vruntime) < 0;
- }
- static void update_min_vruntime(struct cfs_rq *cfs_rq)
- {
- u64 vruntime = cfs_rq->min_vruntime;
- if (cfs_rq->curr)
- vruntime = cfs_rq->curr->vruntime;
- if (cfs_rq->rb_leftmost) {
- struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
- struct sched_entity,
- run_node);
- if (!cfs_rq->curr)
- vruntime = se->vruntime;
- else
- vruntime = min_vruntime(vruntime, se->vruntime);
- }
- cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
- #ifndef CONFIG_64BIT
- smp_wmb();
- cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
- #endif
- }
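- /*
- * Enqueue an entity into the rb-tree timeline, ordered by vruntime; the
- * leftmost (smallest-vruntime) node is cached in cfs_rq->rb_leftmost.
- */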
- static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
- struct rb_node *parent = NULL;
- struct sched_entity *entry;
- int leftmost = 1;
-
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct sched_entity, run_node);
-
- if (entity_before(se, entry)) {
- link = &parent->rb_left;
- } else {
- link = &parent->rb_right;
- leftmost = 0;
- }
- }
-
- if (leftmost)
- cfs_rq->rb_leftmost = &se->run_node;
- rb_link_node(&se->run_node, parent, link);
- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
- }
- static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- if (cfs_rq->rb_leftmost == &se->run_node) {
- struct rb_node *next_node;
- next_node = rb_next(&se->run_node);
- cfs_rq->rb_leftmost = next_node;
- }
- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
- }
- struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
- {
- struct rb_node *left = cfs_rq->rb_leftmost;
- if (!left)
- return NULL;
- return rb_entry(left, struct sched_entity, run_node);
- }
- static struct sched_entity *__pick_next_entity(struct sched_entity *se)
- {
- struct rb_node *next = rb_next(&se->run_node);
- if (!next)
- return NULL;
- return rb_entry(next, struct sched_entity, run_node);
- }
- #ifdef CONFIG_SCHED_DEBUG
- struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
- {
- struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
- if (!last)
- return NULL;
- return rb_entry(last, struct sched_entity, run_node);
- }
- int sched_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- int factor = get_update_sysctl_factor();
- if (ret || !write)
- return ret;
- sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
- sysctl_sched_min_granularity);
- #define WRT_SYSCTL(name) \
- (normalized_sysctl_##name = sysctl_##name / (factor))
- WRT_SYSCTL(sched_min_granularity);
- WRT_SYSCTL(sched_latency);
- WRT_SYSCTL(sched_wakeup_granularity);
- #undef WRT_SYSCTL
- return 0;
- }
- #endif
- static inline unsigned long
- calc_delta_fair(unsigned long delta, struct sched_entity *se)
- {
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
- return delta;
- }
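- /*
- * The scheduling period is the window in which each runnable task is
- * expected to run once.  With more than sched_nr_latency tasks the period
- * is stretched to nr_running * sysctl_sched_min_granularity.
- */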
- static u64 __sched_period(unsigned long nr_running)
- {
- u64 period = sysctl_sched_latency;
- unsigned long nr_latency = sched_nr_latency;
- if (unlikely(nr_running > nr_latency)) {
- period = sysctl_sched_min_granularity;
- period *= nr_running;
- }
- return period;
- }
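- /*
- * Wall-clock slice for an entity: its weight's proportional share of the
- * period, applied at every level of the group hierarchy.
- */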
- static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
- for_each_sched_entity(se) {
- struct load_weight *load;
- struct load_weight lw;
- cfs_rq = cfs_rq_of(se);
- load = &cfs_rq->load;
- if (unlikely(!se->on_rq)) {
- lw = cfs_rq->load;
- update_load_add(&lw, se->load.weight);
- load = &lw;
- }
- slice = calc_delta_mine(slice, se->load.weight, load);
- }
- return slice;
- }
- static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- return calc_delta_fair(sched_slice(cfs_rq, se), se);
- }
- static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
- static void update_cfs_shares(struct cfs_rq *cfs_rq);
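- /*
- * Update the current entity's runtime statistics and advance its vruntime
- * by the weighted execution delta.
- */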
- static inline void
- __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
- unsigned long delta_exec)
- {
- unsigned long delta_exec_weighted;
- schedstat_set(curr->statistics.exec_max,
- max((u64)delta_exec, curr->statistics.exec_max));
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq, exec_clock, delta_exec);
- delta_exec_weighted = calc_delta_fair(delta_exec, curr);
- curr->vruntime += delta_exec_weighted;
- update_min_vruntime(cfs_rq);
- #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
- cfs_rq->load_unacc_exec_time += delta_exec;
- #endif
- }
- static void update_curr(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_of(cfs_rq)->clock_task;
- unsigned long delta_exec;
- if (unlikely(!curr))
- return;
-
- delta_exec = (unsigned long)(now - curr->exec_start);
- if (!delta_exec)
- return;
- __update_curr(cfs_rq, curr, delta_exec);
- curr->exec_start = now;
- if (entity_is_task(curr)) {
- struct task_struct *curtask = task_of(curr);
- trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
- cpuacct_charge(curtask, delta_exec);
- account_group_exec_runtime(curtask, delta_exec);
- }
- account_cfs_rq_runtime(cfs_rq, delta_exec);
- }
- static inline void
- update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
- }
- static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-
- if (se != cfs_rq->curr)
- update_stats_wait_start(cfs_rq, se);
- }
- static void
- update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
- rq_of(cfs_rq)->clock - se->statistics.wait_start));
- schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
- schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
- rq_of(cfs_rq)->clock - se->statistics.wait_start);
- #ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- trace_sched_stat_wait(task_of(se),
- rq_of(cfs_rq)->clock - se->statistics.wait_start);
- }
- #endif
- schedstat_set(se->statistics.wait_start, 0);
- }
- static inline void
- update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-
- if (se != cfs_rq->curr)
- update_stats_wait_end(cfs_rq, se);
- }
- static inline void
- update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-
- se->exec_start = rq_of(cfs_rq)->clock_task;
- }
- static void
- account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- update_load_add(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
- #ifdef CONFIG_SMP
- if (entity_is_task(se))
- list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
- #endif
- cfs_rq->nr_running++;
- }
- static void
- account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- update_load_sub(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
- if (entity_is_task(se))
- list_del_init(&se->group_node);
- cfs_rq->nr_running--;
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
- # ifdef CONFIG_SMP
- static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
- int global_update)
- {
- struct task_group *tg = cfs_rq->tg;
- long load_avg;
- load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
- load_avg -= cfs_rq->load_contribution;
- if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
- atomic_add(load_avg, &tg->load_weight);
- cfs_rq->load_contribution += load_avg;
- }
- }
- static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
- {
- u64 period = sysctl_sched_shares_window;
- u64 now, delta;
- unsigned long load = cfs_rq->load.weight;
- if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
- return;
- now = rq_of(cfs_rq)->clock_task;
- delta = now - cfs_rq->load_stamp;
-
- if (cfs_rq->load_stamp > cfs_rq->load_last &&
- now - cfs_rq->load_last > 4 * period) {
- cfs_rq->load_period = 0;
- cfs_rq->load_avg = 0;
- delta = period - 1;
- }
- cfs_rq->load_stamp = now;
- cfs_rq->load_unacc_exec_time = 0;
- cfs_rq->load_period += delta;
- if (load) {
- cfs_rq->load_last = now;
- cfs_rq->load_avg += delta * load;
- }
-
- if (global_update || cfs_rq->load_period > period
- || !cfs_rq->load_period)
- update_cfs_rq_load_contribution(cfs_rq, global_update);
- while (cfs_rq->load_period > period) {
-
- asm("" : "+rm" (cfs_rq->load_period));
- cfs_rq->load_period /= 2;
- cfs_rq->load_avg /= 2;
- }
- if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
- list_del_leaf_cfs_rq(cfs_rq);
- }
- static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
- {
- long tg_weight;
-
- tg_weight = atomic_read(&tg->load_weight);
- tg_weight -= cfs_rq->load_contribution;
- tg_weight += cfs_rq->load.weight;
- return tg_weight;
- }
- static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
- {
- long tg_weight, load, shares;
- tg_weight = calc_tg_weight(tg, cfs_rq);
- load = cfs_rq->load.weight;
- shares = (tg->shares * load);
- if (tg_weight)
- shares /= tg_weight;
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- if (shares > tg->shares)
- shares = tg->shares;
- return shares;
- }
- static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
- {
- if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq);
- }
- }
- # else
- static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
- {
- }
- static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
- {
- return tg->shares;
- }
- static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
- {
- }
- # endif
- static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- unsigned long weight)
- {
- if (se->on_rq) {
-
- if (cfs_rq->curr == se)
- update_curr(cfs_rq);
- account_entity_dequeue(cfs_rq, se);
- }
- update_load_set(&se->load, weight);
- if (se->on_rq)
- account_entity_enqueue(cfs_rq, se);
- }
- static void update_cfs_shares(struct cfs_rq *cfs_rq)
- {
- struct task_group *tg;
- struct sched_entity *se;
- long shares;
- tg = cfs_rq->tg;
- se = tg->se[cpu_of(rq_of(cfs_rq))];
- if (!se || throttled_hierarchy(cfs_rq))
- return;
- #ifndef CONFIG_SMP
- if (likely(se->load.weight == tg->shares))
- return;
- #endif
- shares = calc_cfs_shares(cfs_rq, tg);
- reweight_entity(cfs_rq_of(se), se, shares);
- }
- #else
- static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
- {
- }
- static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
- {
- }
- static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
- {
- }
- #endif
- static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- #ifdef CONFIG_SCHEDSTATS
- struct task_struct *tsk = NULL;
- if (entity_is_task(se))
- tsk = task_of(se);
- if (se->statistics.sleep_start) {
- u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
- if ((s64)delta < 0)
- delta = 0;
- if (unlikely(delta > se->statistics.sleep_max))
- se->statistics.sleep_max = delta;
- se->statistics.sleep_start = 0;
- se->statistics.sum_sleep_runtime += delta;
- if (tsk) {
- account_scheduler_latency(tsk, delta >> 10, 1);
- trace_sched_stat_sleep(tsk, delta);
- }
- }
- if (se->statistics.block_start) {
- u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
- if ((s64)delta < 0)
- delta = 0;
- if (unlikely(delta > se->statistics.block_max))
- se->statistics.block_max = delta;
- se->statistics.block_start = 0;
- se->statistics.sum_sleep_runtime += delta;
- if (tsk) {
- if (tsk->in_iowait) {
- se->statistics.iowait_sum += delta;
- se->statistics.iowait_count++;
- trace_sched_stat_iowait(tsk, delta);
- }
- trace_sched_stat_blocked(tsk, delta);
-
- if (unlikely(prof_on == SLEEP_PROFILING)) {
- profile_hits(SLEEP_PROFILING,
- (void *)get_wchan(tsk),
- delta >> 20);
- }
- account_scheduler_latency(tsk, delta >> 10, 0);
- }
- }
- #endif
- }
- static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- #ifdef CONFIG_SCHED_DEBUG
- s64 d = se->vruntime - cfs_rq->min_vruntime;
- if (d < 0)
- d = -d;
- if (d > 3*sysctl_sched_latency)
- schedstat_inc(cfs_rq, nr_spread_over);
- #endif
- }
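- /*
- * Initial placement of a new or woken entity: new entities may be debited
- * a full vslice (START_DEBIT); sleepers get up to one latency period of
- * credit (halved with GENTLE_FAIR_SLEEPERS), and placement never moves an
- * entity's vruntime backwards.
- */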
- static void
- place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
- {
- u64 vruntime = cfs_rq->min_vruntime;
-
- if (initial && sched_feat(START_DEBIT))
- vruntime += sched_vslice(cfs_rq, se);
-
- if (!initial) {
- unsigned long thresh = sysctl_sched_latency;
-
- if (sched_feat(GENTLE_FAIR_SLEEPERS))
- thresh >>= 1;
- vruntime -= thresh;
- }
-
- vruntime = max_vruntime(se->vruntime, vruntime);
- se->vruntime = vruntime;
- }
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
- static void
- enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
-
- if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
- se->vruntime += cfs_rq->min_vruntime;
-
- update_curr(cfs_rq);
- update_cfs_load(cfs_rq, 0);
- account_entity_enqueue(cfs_rq, se);
- update_cfs_shares(cfs_rq);
- if (flags & ENQUEUE_WAKEUP) {
- place_entity(cfs_rq, se, 0);
- enqueue_sleeper(cfs_rq, se);
- }
- update_stats_enqueue(cfs_rq, se);
- check_spread(cfs_rq, se);
- if (se != cfs_rq->curr)
- __enqueue_entity(cfs_rq, se);
- se->on_rq = 1;
- if (cfs_rq->nr_running == 1) {
- list_add_leaf_cfs_rq(cfs_rq);
- check_enqueue_throttle(cfs_rq);
- }
- }
- static void __clear_buddies_last(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->last == se)
- cfs_rq->last = NULL;
- else
- break;
- }
- }
- static void __clear_buddies_next(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->next == se)
- cfs_rq->next = NULL;
- else
- break;
- }
- }
- static void __clear_buddies_skip(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->skip == se)
- cfs_rq->skip = NULL;
- else
- break;
- }
- }
- static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- if (cfs_rq->last == se)
- __clear_buddies_last(se);
- if (cfs_rq->next == se)
- __clear_buddies_next(se);
- if (cfs_rq->skip == se)
- __clear_buddies_skip(se);
- }
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
- static void
- dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
-
- update_curr(cfs_rq);
- update_stats_dequeue(cfs_rq, se);
- if (flags & DEQUEUE_SLEEP) {
- #ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- struct task_struct *tsk = task_of(se);
- if (tsk->state & TASK_INTERRUPTIBLE)
- se->statistics.sleep_start = rq_of(cfs_rq)->clock;
- if (tsk->state & TASK_UNINTERRUPTIBLE)
- se->statistics.block_start = rq_of(cfs_rq)->clock;
- }
- #endif
- }
- clear_buddies(cfs_rq, se);
- if (se != cfs_rq->curr)
- __dequeue_entity(cfs_rq, se);
- se->on_rq = 0;
- update_cfs_load(cfs_rq, 0);
- account_entity_dequeue(cfs_rq, se);
-
- if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
-
- return_cfs_rq_runtime(cfs_rq);
- update_min_vruntime(cfs_rq);
- update_cfs_shares(cfs_rq);
- }
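- /*
- * Preempt the current task if it has exceeded its ideal slice, or if its
- * vruntime has pulled more than a full slice ahead of the leftmost waiting
- * entity.
- */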
- static void
- check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- {
- unsigned long ideal_runtime, delta_exec;
- struct sched_entity *se;
- s64 delta;
- ideal_runtime = sched_slice(cfs_rq, curr);
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime) {
- resched_task(rq_of(cfs_rq)->curr);
-
- clear_buddies(cfs_rq, curr);
- return;
- }
-
- if (delta_exec < sysctl_sched_min_granularity)
- return;
- se = __pick_first_entity(cfs_rq);
- delta = curr->vruntime - se->vruntime;
- if (delta < 0)
- return;
- if (delta > ideal_runtime)
- resched_task(rq_of(cfs_rq)->curr);
- }
- static void
- set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-
- if (se->on_rq) {
-
- update_stats_wait_end(cfs_rq, se);
- __dequeue_entity(cfs_rq, se);
- }
- update_stats_curr_start(cfs_rq, se);
- cfs_rq->curr = se;
- #ifdef CONFIG_SCHEDSTATS
-
- if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
- se->statistics.slice_max = max(se->statistics.slice_max,
- se->sum_exec_runtime - se->prev_sum_exec_runtime);
- }
- #endif
- se->prev_sum_exec_runtime = se->sum_exec_runtime;
- }
- static int
- wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
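- /*
- * Pick the leftmost entity by default, avoid the 'skip' buddy, and prefer
- * the 'last'/'next' buddies when doing so would not be too unfair.
- */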
- static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *se = __pick_first_entity(cfs_rq);
- struct sched_entity *left = se;
-
- if (cfs_rq->skip == se) {
- struct sched_entity *second = __pick_next_entity(se);
- if (second && wakeup_preempt_entity(second, left) < 1)
- se = second;
- }
-
- if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
- se = cfs_rq->last;
-
- if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
- se = cfs_rq->next;
- clear_buddies(cfs_rq, se);
- return se;
- }
- static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
- static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
- {
-
- if (prev->on_rq)
- update_curr(cfs_rq);
-
- check_cfs_rq_runtime(cfs_rq);
- check_spread(cfs_rq, prev);
- if (prev->on_rq) {
- update_stats_wait_start(cfs_rq, prev);
-
- __enqueue_entity(cfs_rq, prev);
- }
- cfs_rq->curr = NULL;
- }
- static void
- entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
- {
-
- update_curr(cfs_rq);
-
- update_entity_shares_tick(cfs_rq);
- #ifdef CONFIG_SCHED_HRTICK
-
- if (queued) {
- resched_task(rq_of(cfs_rq)->curr);
- return;
- }
-
- if (!sched_feat(DOUBLE_TICK) &&
- hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
- return;
- #endif
- if (cfs_rq->nr_running > 1)
- check_preempt_tick(cfs_rq, curr);
- }
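- /*
- * CFS bandwidth control: throttle a group's cfs_rqs once they have consumed
- * their quota within the current period.
- */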
- #ifdef CONFIG_CFS_BANDWIDTH
- #ifdef HAVE_JUMP_LABEL
- static struct static_key __cfs_bandwidth_used;
- static inline bool cfs_bandwidth_used(void)
- {
- return static_key_false(&__cfs_bandwidth_used);
- }
- void cfs_bandwidth_usage_inc(void)
- {
- static_key_slow_inc(&__cfs_bandwidth_used);
- }
- void cfs_bandwidth_usage_dec(void)
- {
- static_key_slow_dec(&__cfs_bandwidth_used);
- }
- #else
- static bool cfs_bandwidth_used(void)
- {
- return true;
- }
- void cfs_bandwidth_usage_inc(void) {}
- void cfs_bandwidth_usage_dec(void) {}
- #endif
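- /*
- * Default period for cfs group bandwidth: 100ms, in nanoseconds.
- */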
- static inline u64 default_cfs_period(void)
- {
- return 100000000ULL;
- }
- static inline u64 sched_cfs_bandwidth_slice(void)
- {
- return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
- }
- void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
- {
- u64 now;
- if (cfs_b->quota == RUNTIME_INF)
- return;
- now = sched_clock_cpu(smp_processor_id());
- cfs_b->runtime = cfs_b->quota;
- cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
- }
- static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
- {
- return &tg->cfs_bandwidth;
- }
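- /*
- * Acquire up to one bandwidth slice of runtime from the global pool for
- * this cfs_rq; returns non-zero if local runtime remains.
- */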
- static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- struct task_group *tg = cfs_rq->tg;
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
- u64 amount = 0, min_amount, expires;
-
- min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
- raw_spin_lock(&cfs_b->lock);
- if (cfs_b->quota == RUNTIME_INF)
- amount = min_amount;
- else {
-
- if (!cfs_b->timer_active) {
- __refill_cfs_bandwidth_runtime(cfs_b);
- __start_cfs_bandwidth(cfs_b);
- }
- if (cfs_b->runtime > 0) {
- amount = min(cfs_b->runtime, min_amount);
- cfs_b->runtime -= amount;
- cfs_b->idle = 0;
- }
- }
- expires = cfs_b->runtime_expires;
- raw_spin_unlock(&cfs_b->lock);
- cfs_rq->runtime_remaining += amount;
-
- if ((s64)(expires - cfs_rq->runtime_expires) > 0)
- cfs_rq->runtime_expires = expires;
- return cfs_rq->runtime_remaining > 0;
- }
- static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct rq *rq = rq_of(cfs_rq);
-
- if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
- return;
- if (cfs_rq->runtime_remaining < 0)
- return;
-
- if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
-
- cfs_rq->runtime_expires += TICK_NSEC;
- } else {
-
- cfs_rq->runtime_remaining = 0;
- }
- }
- static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec)
- {
-
- cfs_rq->runtime_remaining -= delta_exec;
- expire_cfs_rq_runtime(cfs_rq);
- if (likely(cfs_rq->runtime_remaining > 0))
- return;
-
- if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- resched_task(rq_of(cfs_rq)->curr);
- }
- static __always_inline
- void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
- {
- if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
- return;
- __account_cfs_rq_runtime(cfs_rq, delta_exec);
- }
- static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
- {
- return cfs_bandwidth_used() && cfs_rq->throttled;
- }
- static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
- {
- return cfs_bandwidth_used() && cfs_rq->throttle_count;
- }
- static inline int throttled_lb_pair(struct task_group *tg,
- int src_cpu, int dest_cpu)
- {
- struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
- src_cfs_rq = tg->cfs_rq[src_cpu];
- dest_cfs_rq = tg->cfs_rq[dest_cpu];
- return throttled_hierarchy(src_cfs_rq) ||
- throttled_hierarchy(dest_cfs_rq);
- }
- static int tg_unthrottle_up(struct task_group *tg, void *data)
- {
- struct rq *rq = data;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
- cfs_rq->throttle_count--;
- #ifdef CONFIG_SMP
- if (!cfs_rq->throttle_count) {
- u64 delta = rq->clock_task - cfs_rq->load_stamp;
-
- cfs_rq->load_stamp += delta;
- cfs_rq->load_last += delta;
-
- update_cfs_shares(cfs_rq);
- }
- #endif
- return 0;
- }
- static int tg_throttle_down(struct task_group *tg, void *data)
- {
- struct rq *rq = data;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
-
- if (!cfs_rq->throttle_count)
- update_cfs_load(cfs_rq, 0);
- cfs_rq->throttle_count++;
- return 0;
- }
- static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
- {
- struct rq *rq = rq_of(cfs_rq);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se;
- long task_delta, dequeue = 1;
- se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
-
- rcu_read_lock();
- walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
- rcu_read_unlock();
- task_delta = cfs_rq->h_nr_running;
- for_each_sched_entity(se) {
- struct cfs_rq *qcfs_rq = cfs_rq_of(se);
-
- if (!se->on_rq)
- break;
- if (dequeue)
- dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
- qcfs_rq->h_nr_running -= task_delta;
- if (qcfs_rq->load.weight)
- dequeue = 0;
- }
- if (!se)
- rq->nr_running -= task_delta;
- cfs_rq->throttled = 1;
- cfs_rq->throttled_timestamp = rq->clock;
- raw_spin_lock(&cfs_b->lock);
- list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
- if (!cfs_b->timer_active)
- __start_cfs_bandwidth(cfs_b);
- raw_spin_unlock(&cfs_b->lock);
- }
- void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
- {
- struct rq *rq = rq_of(cfs_rq);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se;
- int enqueue = 1;
- long task_delta;
- se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
- cfs_rq->throttled = 0;
- raw_spin_lock(&cfs_b->lock);
- cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
- list_del_rcu(&cfs_rq->throttled_list);
- raw_spin_unlock(&cfs_b->lock);
- cfs_rq->throttled_timestamp = 0;
- update_rq_clock(rq);
-
- walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
- if (!cfs_rq->load.weight)
- return;
- task_delta = cfs_rq->h_nr_running;
- for_each_sched_entity(se) {
- if (se->on_rq)
- enqueue = 0;
- cfs_rq = cfs_rq_of(se);
- if (enqueue)
- enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
- cfs_rq->h_nr_running += task_delta;
- if (cfs_rq_throttled(cfs_rq))
- break;
- }
- if (!se)
- rq->nr_running += task_delta;
-
- if (rq->curr == rq->idle && rq->cfs.nr_running)
- resched_task(rq->curr);
- }
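- /*
- * Hand out refilled runtime to throttled cfs_rqs, unthrottling them as they
- * regain a positive balance; returns whatever runtime is left over.
- */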
- static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
- u64 remaining, u64 expires)
- {
- struct cfs_rq *cfs_rq;
- u64 runtime = remaining;
- rcu_read_lock();
- list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
- throttled_list) {
- struct rq *rq = rq_of(cfs_rq);
- raw_spin_lock(&rq->lock);
- if (!cfs_rq_throttled(cfs_rq))
- goto next;
- runtime = -cfs_rq->runtime_remaining + 1;
- if (runtime > remaining)
- runtime = remaining;
- remaining -= runtime;
- cfs_rq->runtime_remaining += runtime;
- cfs_rq->runtime_expires = expires;
-
- if (cfs_rq->runtime_remaining > 0)
- unthrottle_cfs_rq(cfs_rq);
- next:
- raw_spin_unlock(&rq->lock);
- if (!remaining)
- break;
- }
- rcu_read_unlock();
- return remaining;
- }
- static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
- {
- u64 runtime, runtime_expires;
- int idle = 1, throttled;
- raw_spin_lock(&cfs_b->lock);
-
- if (cfs_b->quota == RUNTIME_INF)
- goto out_unlock;
- throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-
- idle = cfs_b->idle && !throttled;
- cfs_b->nr_periods += overrun;
-
- if (idle)
- goto out_unlock;
-
- cfs_b->timer_active = 1;
- __refill_cfs_bandwidth_runtime(cfs_b);
- if (!throttled) {
-
- cfs_b->idle = 1;
- goto out_unlock;
- }
-
- cfs_b->nr_throttled += overrun;
-
- runtime = cfs_b->runtime;
- runtime_expires = cfs_b->runtime_expires;
- cfs_b->runtime = 0;
-
- while (throttled && runtime > 0) {
- raw_spin_unlock(&cfs_b->lock);
-
- runtime = distribute_cfs_runtime(cfs_b, runtime,
- runtime_expires);
- raw_spin_lock(&cfs_b->lock);
- throttled = !list_empty(&cfs_b->throttled_cfs_rq);
- }
-
- cfs_b->runtime = runtime;
-
- cfs_b->idle = 0;
- out_unlock:
- if (idle)
- cfs_b->timer_active = 0;
- raw_spin_unlock(&cfs_b->lock);
- return idle;
- }
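- /*
- * Slack handling: a cfs_rq keeps min_cfs_rq_runtime when returning unused
- * quota, and the slack timer is only armed if the period timer is not about
- * to refresh runtime anyway.
- */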
- static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
- static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
- static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
- static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
- {
- struct hrtimer *refresh_timer = &cfs_b->period_timer;
- u64 remaining;
-
- if (hrtimer_callback_running(refresh_timer))
- return 1;
-
- remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
- if (remaining < min_expire)
- return 1;
- return 0;
- }
- static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
-
- if (runtime_refresh_within(cfs_b, min_left))
- return;
- start_bandwidth_timer(&cfs_b->slack_timer,
- ns_to_ktime(cfs_bandwidth_slack_period));
- }
- static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
- if (slack_runtime <= 0)
- return;
- raw_spin_lock(&cfs_b->lock);
- if (cfs_b->quota != RUNTIME_INF &&
- cfs_rq->runtime_expires == cfs_b->runtime_expires) {
- cfs_b->runtime += slack_runtime;
-
- if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
- !list_empty(&cfs_b->throttled_cfs_rq))
- start_cfs_slack_bandwidth(cfs_b);
- }
- raw_spin_unlock(&cfs_b->lock);
-
- cfs_rq->runtime_remaining -= slack_runtime;
- }
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- if (!cfs_bandwidth_used())
- return;
- if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
- return;
- __return_cfs_rq_runtime(cfs_rq);
- }
- static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
- {
- u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
- u64 expires;
-
- raw_spin_lock(&cfs_b->lock);
- if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
- raw_spin_unlock(&cfs_b->lock);
- return;
- }
- if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
- runtime = cfs_b->runtime;
- cfs_b->runtime = 0;
- }
- expires = cfs_b->runtime_expires;
- raw_spin_unlock(&cfs_b->lock);
- if (!runtime)
- return;
- runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
- raw_spin_lock(&cfs_b->lock);
- if (expires == cfs_b->runtime_expires)
- cfs_b->runtime = runtime;
- raw_spin_unlock(&cfs_b->lock);
- }
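- /*
-  * When a group wakes up we want to make sure its quota is not already
-  * expired or exceeded, otherwise it may be allowed to steal additional ticks
-  * of runtime, since update_curr() throttling cannot trigger until it is
-  * on-rq.
-  */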
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
- {
- if (!cfs_bandwidth_used())
- return;
-
- if (!cfs_rq->runtime_enabled || cfs_rq->curr)
- return;
-
- if (cfs_rq_throttled(cfs_rq))
- return;
-
- account_cfs_rq_runtime(cfs_rq, 0);
- if (cfs_rq->runtime_remaining <= 0)
- throttle_cfs_rq(cfs_rq);
- }
- static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- if (!cfs_bandwidth_used())
- return;
- if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
- return;
-
- if (cfs_rq_throttled(cfs_rq))
- return;
- throttle_cfs_rq(cfs_rq);
- }
- static inline u64 default_cfs_period(void);
- static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
- static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
- static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
- {
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, slack_timer);
- do_sched_cfs_slack_timer(cfs_b);
- return HRTIMER_NORESTART;
- }
- extern const u64 max_cfs_quota_period;
- static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
- {
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, period_timer);
- ktime_t now;
- int overrun;
- int idle = 0;
- int count = 0;
- for (;;) {
- now = hrtimer_cb_get_time(timer);
- overrun = hrtimer_forward(timer, now, cfs_b->period);
- if (!overrun)
- break;
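- /*
-  * Back-to-back overruns mean the period is too short for the current
-  * HZ/overhead: grow the period by roughly 15% (147/128), scale the quota
-  * with it to preserve the configured ratio, and warn.
-  */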
- if (++count > 3) {
- u64 new, old = ktime_to_ns(cfs_b->period);
- new = (old * 147) / 128;
- new = min(new, max_cfs_quota_period);
- cfs_b->period = ns_to_ktime(new);
-
- cfs_b->quota *= new;
- cfs_b->quota = div64_u64(cfs_b->quota, old);
- pr_warn_ratelimited(
- "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
- smp_processor_id(),
- div_u64(new, NSEC_PER_USEC),
- div_u64(cfs_b->quota, NSEC_PER_USEC));
-
- count = 0;
- }
- idle = do_sched_cfs_period_timer(cfs_b, overrun);
- }
- return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
- }
- void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- raw_spin_lock_init(&cfs_b->lock);
- cfs_b->runtime = 0;
- cfs_b->quota = RUNTIME_INF;
- cfs_b->period = ns_to_ktime(default_cfs_period());
- INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cfs_b->period_timer.function = sched_cfs_period_timer;
- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cfs_b->slack_timer.function = sched_cfs_slack_timer;
- }
- static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- cfs_rq->runtime_enabled = 0;
- INIT_LIST_HEAD(&cfs_rq->throttled_list);
- }
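- /*
-  * Requires cfs_b->lock. The lock is dropped while spinning on a period timer
-  * that is mid-callback; if someone else restarts the timer in that window
-  * (timer_active), back off instead of re-arming it.
-  */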
- void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
- {
-
- while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
- hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
-
- raw_spin_unlock(&cfs_b->lock);
- cpu_relax();
- raw_spin_lock(&cfs_b->lock);
-
- if (cfs_b->timer_active)
- return;
- }
- cfs_b->timer_active = 1;
- start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
- }
- static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- hrtimer_cancel(&cfs_b->period_timer);
- hrtimer_cancel(&cfs_b->slack_timer);
- }
- static void unthrottle_offline_cfs_rqs(struct rq *rq)
- {
- struct cfs_rq *cfs_rq;
- for_each_leaf_cfs_rq(rq, cfs_rq) {
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- if (!cfs_rq->runtime_enabled)
- continue;
-
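- /*
-  * clock_task is no longer advancing on an offline rq, so give the cfs_rq
-  * enough runtime to stay unthrottled for the remainder of takedown.
-  */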
- cfs_rq->runtime_remaining = cfs_b->quota;
- if (cfs_rq_throttled(cfs_rq))
- unthrottle_cfs_rq(cfs_rq);
- }
- }
- #else
- static __always_inline
- void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
- static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
- static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
- {
- return 0;
- }
- static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
- {
- return 0;
- }
- static inline int throttled_lb_pair(struct task_group *tg,
- int src_cpu, int dest_cpu)
- {
- return 0;
- }
- void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
- #endif
- static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
- {
- return NULL;
- }
- static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
- static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
- #endif
- #ifdef CONFIG_SCHED_HRTICK
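- /*
-  * Arm the hrtick timer to fire when the current task's slice runs out, so
-  * preemption does not have to wait for the next regular tick. Called with
-  * rq->lock held.
-  */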
- static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- WARN_ON(task_rq(p) != rq);
- if (rq->cfs.h_nr_running > 1) {
- u64 slice = sched_slice(cfs_rq, se);
- u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
- s64 delta = slice - ran;
- if (delta < 0) {
- if (rq->curr == p)
- resched_task(p);
- return;
- }
-
- if (rq->curr != p)
- delta = max_t(s64, 10000LL, delta);
- hrtick_start(rq, delta);
- }
- }
- static void hrtick_update(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
- return;
- hrtick_start_fair(rq, curr);
- }
- #else
- static inline void
- hrtick_start_fair(struct rq *rq, struct task_struct *p)
- {
- }
- static inline void hrtick_update(struct rq *rq)
- {
- }
- #endif
- static void
- enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- for_each_sched_entity(se) {
- if (se->on_rq)
- break;
- cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, flags);
-
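- /*
-  * Stop walking up on a throttled cfs_rq; the second loop below posts the
-  * remaining h_nr_running updates.
-  */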
- if (cfs_rq_throttled(cfs_rq))
- break;
- cfs_rq->h_nr_running++;
- flags = ENQUEUE_WAKEUP;
- }
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- cfs_rq->h_nr_running++;
- if (cfs_rq_throttled(cfs_rq))
- break;
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq);
- }
- if (!se)
- inc_nr_running(rq);
- hrtick_update(rq);
- }
- static void set_next_buddy(struct sched_entity *se);
- static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- int task_sleep = flags & DEQUEUE_SLEEP;
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- dequeue_entity(cfs_rq, se, flags);
-
- if (cfs_rq_throttled(cfs_rq))
- break;
- cfs_rq->h_nr_running--;
-
- if (cfs_rq->load.weight) {
-
- if (task_sleep && parent_entity(se))
- set_next_buddy(parent_entity(se));
-
- se = parent_entity(se);
- break;
- }
- flags |= DEQUEUE_SLEEP;
- }
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- cfs_rq->h_nr_running--;
- if (cfs_rq_throttled(cfs_rq))
- break;
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq);
- }
- if (!se)
- dec_nr_running(rq);
- hrtick_update(rq);
- }
- #ifdef CONFIG_SMP
- static unsigned long weighted_cpuload(const int cpu)
- {
- return cpu_rq(cpu)->load.weight;
- }
- static unsigned long source_load(int cpu, int type)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
- return min(rq->cpu_load[type-1], total);
- }
- static unsigned long target_load(int cpu, int type)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
- return max(rq->cpu_load[type-1], total);
- }
- static unsigned long power_of(int cpu)
- {
- return cpu_rq(cpu)->cpu_power;
- }
- static unsigned long cpu_avg_load_per_task(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
- if (nr_running)
- return rq->load.weight / nr_running;
- return 0;
- }
- static void record_wakee(struct task_struct *p)
- {
-
- if (jiffies > current->wakee_flip_decay_ts + HZ) {
- current->wakee_flips = 0;
- current->wakee_flip_decay_ts = jiffies;
- }
- if (current->last_wakee != p) {
- current->last_wakee = p;
- current->wakee_flips++;
- }
- }
- static void task_waking_fair(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 min_vruntime;
- #ifndef CONFIG_64BIT
- u64 min_vruntime_copy;
- do {
- min_vruntime_copy = cfs_rq->min_vruntime_copy;
- smp_rmb();
- min_vruntime = cfs_rq->min_vruntime;
- } while (min_vruntime != min_vruntime_copy);
- #else
- min_vruntime = cfs_rq->min_vruntime;
- #endif
- se->vruntime -= min_vruntime;
- record_wakee(p);
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
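- /*
-  * effective_load() estimates how this group's load on @cpu changes if @wl is
-  * added to the local cfs_rq (with @wg the matching change in group weight),
-  * propagating the re-weighting up through the group hierarchy.
-  */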
- static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
- {
- struct sched_entity *se = tg->se[cpu];
- if (!tg->parent)
- return wl;
- for_each_sched_entity(se) {
- long w, W;
- tg = se->my_q->tg;
-
- W = wg + calc_tg_weight(tg, se->my_q);
-
- w = se->my_q->load.weight + wl;
-
- if (W > 0 && w < W)
- wl = (w * tg->shares) / W;
- else
- wl = tg->shares;
-
- if (wl < MIN_SHARES)
- wl = MIN_SHARES;
-
- wl -= se->load.weight;
-
- wg = 0;
- }
- return wl;
- }
- #else
- static inline unsigned long effective_load(struct task_group *tg, int cpu,
- unsigned long wl, unsigned long wg)
- {
- return wl;
- }
- #endif
- static int wake_wide(struct task_struct *p)
- {
- int factor = this_cpu_read(sd_llc_size);
-
- if (p->wakee_flips > factor) {
-
- if (current->wakee_flips > (factor * p->wakee_flips))
- return 1;
- }
- return 0;
- }
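- /*
-  * wake_affine() decides whether to pull the waking task to this (waker's)
-  * CPU: compare the effective load of both CPUs with the task's weight
-  * applied, biased by the domain's imbalance_pct.
-  */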
- static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
- {
- s64 this_load, load;
- int idx, this_cpu, prev_cpu;
- unsigned long tl_per_task;
- struct task_group *tg;
- unsigned long weight;
- int balanced;
-
- if (wake_wide(p))
- return 0;
- idx = sd->wake_idx;
- this_cpu = smp_processor_id();
- prev_cpu = task_cpu(p);
- load = source_load(prev_cpu, idx);
- this_load = target_load(this_cpu, idx);
-
- if (sync) {
- tg = task_group(current);
- weight = current->se.load.weight;
- this_load += effective_load(tg, this_cpu, -weight, -weight);
- load += effective_load(tg, prev_cpu, 0, -weight);
- }
- tg = task_group(p);
- weight = p->se.load.weight;
-
- if (this_load > 0) {
- s64 this_eff_load, prev_eff_load;
- this_eff_load = 100;
- this_eff_load *= power_of(prev_cpu);
- this_eff_load *= this_load +
- effective_load(tg, this_cpu, weight, weight);
- prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= power_of(this_cpu);
- prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
- balanced = this_eff_load <= prev_eff_load;
- } else
- balanced = true;
-
- if (sync && balanced)
- return 1;
- schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
- tl_per_task = cpu_avg_load_per_task(this_cpu);
- if (balanced ||
- (this_load <= load &&
- this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
-
- schedstat_inc(sd, ttwu_move_affine);
- schedstat_inc(p, se.statistics.nr_wakeups_affine);
- return 1;
- }
- return 0;
- }
- static struct sched_group *
- find_idlest_group(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int load_idx)
- {
- struct sched_group *idlest = NULL, *group = sd->groups;
- unsigned long min_load = ULONG_MAX, this_load = 0;
- int imbalance = 100 + (sd->imbalance_pct-100)/2;
- do {
- unsigned long load, avg_load;
- int local_group;
- int i;
-
- if (!cpumask_intersects(sched_group_cpus(group),
- tsk_cpus_allowed(p)))
- continue;
- local_group = cpumask_test_cpu(this_cpu,
- sched_group_cpus(group));
-
- avg_load = 0;
- for_each_cpu(i, sched_group_cpus(group)) {
-
- if (local_group)
- load = source_load(i, load_idx);
- else
- load = target_load(i, load_idx);
- avg_load += load;
- }
-
- avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
- if (local_group) {
- this_load = avg_load;
- } else if (avg_load < min_load) {
- min_load = avg_load;
- idlest = group;
- }
- } while (group = group->next, group != sd->groups);
- if (!idlest || 100*this_load < imbalance*min_load)
- return NULL;
- return idlest;
- }
- static int
- find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
- {
- unsigned long load, min_load = ULONG_MAX;
- int idlest = -1;
- int i;
-
- if (group->group_weight == 1)
- return cpumask_first(sched_group_cpus(group));
-
- for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
- load = weighted_cpuload(i);
- if (load < min_load || (load == min_load && i == this_cpu)) {
- min_load = load;
- idlest = i;
- }
- }
- return idlest;
- }
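- /*
-  * Try to find an idle CPU that shares cache with @target, preferring @target
-  * itself and the task's previous CPU before scanning the LLC domain for a
-  * fully idle group.
-  */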
- static int select_idle_sibling(struct task_struct *p, int target)
- {
- struct sched_domain *sd;
- struct sched_group *sg;
- int i = task_cpu(p);
- if (idle_cpu(target))
- return target;
-
- if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
- return i;
- if (!sysctl_sched_wake_to_idle &&
- !(current->flags & PF_WAKE_UP_IDLE) &&
- !(p->flags & PF_WAKE_UP_IDLE))
- return target;
-
- sd = rcu_dereference(per_cpu(sd_llc, target));
- for_each_lower_domain(sd) {
- sg = sd->groups;
- do {
- if (!cpumask_intersects(sched_group_cpus(sg),
- tsk_cpus_allowed(p)))
- goto next;
- for_each_cpu(i, sched_group_cpus(sg)) {
- if (i == target || !idle_cpu(i))
- goto next;
- }
- target = cpumask_first_and(sched_group_cpus(sg),
- tsk_cpus_allowed(p));
- goto done;
- next:
- sg = sg->next;
- } while (sg != sd->groups);
- }
- done:
- return target;
- }
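- /*
-  * Pick a runqueue for @p on fork/exec/wakeup (per sd_flag): either an
-  * affine/idle sibling near the waker, or the idlest CPU of the idlest group
-  * found while walking down the domain hierarchy.
-  */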
- static int
- select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
- {
- struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
- int cpu = smp_processor_id();
- int prev_cpu = task_cpu(p);
- int new_cpu = cpu;
- int want_affine = 0;
- int want_sd = 1;
- int sync = wake_flags & WF_SYNC;
- if (p->nr_cpus_allowed == 1)
- return prev_cpu;
- if (sd_flag & SD_BALANCE_WAKE) {
- if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
- want_affine = 1;
- new_cpu = prev_cpu;
- }
- rcu_read_lock();
- for_each_domain(cpu, tmp) {
- if (!(tmp->flags & SD_LOAD_BALANCE))
- continue;
-
- if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
- unsigned long power = 0;
- unsigned long nr_running = 0;
- unsigned long capacity;
- int i;
- for_each_cpu(i, sched_domain_span(tmp)) {
- power += power_of(i);
- nr_running += cpu_rq(i)->cfs.nr_running;
- }
- capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
- if (tmp->flags & SD_POWERSAVINGS_BALANCE)
- nr_running /= 2;
- if (nr_running < capacity)
- want_sd = 0;
- }
-
- if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
- cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
- affine_sd = tmp;
- want_affine = 0;
- }
- if (!want_sd && !want_affine)
- break;
- if (!(tmp->flags & sd_flag))
- continue;
- if (want_sd)
- sd = tmp;
- }
- if (affine_sd) {
- if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
- prev_cpu = cpu;
- new_cpu = select_idle_sibling(p, prev_cpu);
- goto unlock;
- }
- while (sd) {
- int load_idx = sd->forkexec_idx;
- struct sched_group *group;
- int weight;
- if (!(sd->flags & sd_flag)) {
- sd = sd->child;
- continue;
- }
- if (sd_flag & SD_BALANCE_WAKE)
- load_idx = sd->wake_idx;
- group = find_idlest_group(sd, p, cpu, load_idx);
- if (!group) {
- sd = sd->child;
- continue;
- }
- new_cpu = find_idlest_cpu(group, p, cpu);
- if (new_cpu == -1 || new_cpu == cpu) {
-
- sd = sd->child;
- continue;
- }
-
- cpu = new_cpu;
- weight = sd->span_weight;
- sd = NULL;
- for_each_domain(cpu, tmp) {
- if (weight <= tmp->span_weight)
- break;
- if (tmp->flags & sd_flag)
- sd = tmp;
- }
-
- }
- unlock:
- rcu_read_unlock();
- return new_cpu;
- }
- #endif
- static unsigned long
- wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
- {
- unsigned long gran = sysctl_sched_wakeup_granularity;
-
- return calc_delta_fair(gran, se);
- }
- static int
- wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
- {
- s64 gran, vdiff = curr->vruntime - se->vruntime;
- if (vdiff <= 0)
- return -1;
- gran = wakeup_gran(curr, se);
- if (vdiff > gran)
- return 1;
- return 0;
- }
- static void set_last_buddy(struct sched_entity *se)
- {
- if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
- return;
- for_each_sched_entity(se)
- cfs_rq_of(se)->last = se;
- }
- static void set_next_buddy(struct sched_entity *se)
- {
- if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
- return;
- for_each_sched_entity(se)
- cfs_rq_of(se)->next = se;
- }
- static void set_skip_buddy(struct sched_entity *se)
- {
- for_each_sched_entity(se)
- cfs_rq_of(se)->skip = se;
- }
- static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
- {
- struct task_struct *curr = rq->curr;
- struct sched_entity *se = &curr->se, *pse = &p->se;
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- int scale = cfs_rq->nr_running >= sched_nr_latency;
- int next_buddy_marked = 0;
- if (unlikely(se == pse))
- return;
-
- if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
- return;
- if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
- set_next_buddy(pse);
- next_buddy_marked = 1;
- }
-
- if (test_tsk_need_resched(curr))
- return;
-
- if (unlikely(curr->policy == SCHED_IDLE) &&
- likely(p->policy != SCHED_IDLE))
- goto preempt;
-
- if (unlikely(p->policy != SCHED_NORMAL))
- return;
- find_matching_se(&se, &pse);
- update_curr(cfs_rq_of(se));
- BUG_ON(!pse);
- if (wakeup_preempt_entity(se, pse) == 1) {
-
- if (!next_buddy_marked)
- set_next_buddy(pse);
- goto preempt;
- }
- return;
- preempt:
- resched_task(curr);
-
- if (unlikely(!se->on_rq || curr == rq->idle))
- return;
- if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
- set_last_buddy(se);
- }
- static struct task_struct *pick_next_task_fair(struct rq *rq)
- {
- struct task_struct *p;
- struct cfs_rq *cfs_rq = &rq->cfs;
- struct sched_entity *se;
- if (!cfs_rq->nr_running)
- return NULL;
- do {
- se = pick_next_entity(cfs_rq);
- set_next_entity(cfs_rq, se);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
- p = task_of(se);
- if (hrtick_enabled(rq))
- hrtick_start_fair(rq, p);
- return p;
- }
- static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
- {
- struct sched_entity *se = &prev->se;
- struct cfs_rq *cfs_rq;
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- put_prev_entity(cfs_rq, se);
- }
- }
- static void yield_task_fair(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- struct sched_entity *se = &curr->se;
-
- if (unlikely(rq->nr_running == 1))
- return;
- clear_buddies(cfs_rq, se);
- if (curr->policy != SCHED_BATCH) {
- update_rq_clock(rq);
-
- update_curr(cfs_rq);
-
- rq->skip_clock_update = 1;
- }
- set_skip_buddy(se);
- }
- static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
- {
- struct sched_entity *se = &p->se;
-
- if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
- return false;
-
- set_next_buddy(se);
- yield_task_fair(rq);
- return true;
- }
- #ifdef CONFIG_SMP
- static unsigned long __read_mostly max_load_balance_interval = HZ/10;
- #define LBF_ALL_PINNED 0x01
- #define LBF_NEED_BREAK 0x02
- struct lb_env {
- struct sched_domain *sd;
- int src_cpu;
- struct rq *src_rq;
- int dst_cpu;
- struct rq *dst_rq;
- enum cpu_idle_type idle;
- long load_move;
- unsigned int flags;
- unsigned int loop;
- unsigned int loop_break;
- unsigned int loop_max;
- };
- static DEFINE_PER_CPU(bool, dbs_boost_needed);
- static void move_task(struct task_struct *p, struct lb_env *env)
- {
- deactivate_task(env->src_rq, p, 0);
- set_task_cpu(p, env->dst_cpu);
- activate_task(env->dst_rq, p, 0);
- check_preempt_curr(env->dst_rq, p, 0);
- if (task_notify_on_migrate(p))
- per_cpu(dbs_boost_needed, env->dst_cpu) = true;
- }
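- /*
-  * Is this task likely cache-hot on its source CPU (ran very recently, or is
-  * a next/last buddy)? Hot tasks are only migrated once the domain has
-  * accumulated enough balance failures.
-  */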
- static int
- task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
- {
- s64 delta;
- if (p->sched_class != &fair_sched_class)
- return 0;
- if (unlikely(p->policy == SCHED_IDLE))
- return 0;
-
- if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
- (&p->se == cfs_rq_of(&p->se)->next ||
- &p->se == cfs_rq_of(&p->se)->last))
- return 1;
- if (sysctl_sched_migration_cost == -1)
- return 1;
- if (sysctl_sched_migration_cost == 0)
- return 0;
- delta = now - p->se.exec_start;
- return delta < (s64)sysctl_sched_migration_cost;
- }
- static
- int can_migrate_task(struct task_struct *p, struct lb_env *env)
- {
- int tsk_cache_hot = 0;
-
- if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
- return 0;
- }
- env->flags &= ~LBF_ALL_PINNED;
- if (task_running(env->src_rq, p)) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_running);
- return 0;
- }
-
- tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
- if (!tsk_cache_hot ||
- env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
- #ifdef CONFIG_SCHEDSTATS
- if (tsk_cache_hot) {
- schedstat_inc(env->sd, lb_hot_gained[env->idle]);
- schedstat_inc(p, se.statistics.nr_forced_migrations);
- }
- #endif
- return 1;
- }
- if (tsk_cache_hot) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
- return 0;
- }
- return 1;
- }
- static int move_one_task(struct lb_env *env)
- {
- struct task_struct *p, *n;
- list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
- if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
- continue;
- if (!can_migrate_task(p, env))
- continue;
- move_task(p, env);
-
- schedstat_inc(env->sd, lb_gained[env->idle]);
- return 1;
- }
- return 0;
- }
- static unsigned long task_h_load(struct task_struct *p);
- static const unsigned int sched_nr_migrate_break = 32;
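- /*
-  * move_tasks() tries to pull up to env->load_move weighted load from the
-  * busiest runqueue, bailing out periodically (LBF_NEED_BREAK) so the rq
-  * locks are not held for too long. Returns the number of tasks moved.
-  */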
- static int move_tasks(struct lb_env *env)
- {
- struct list_head *tasks = &env->src_rq->cfs_tasks;
- struct task_struct *p;
- unsigned long load;
- int pulled = 0;
- if (env->load_move <= 0)
- return 0;
- while (!list_empty(tasks)) {
- p = list_first_entry(tasks, struct task_struct, se.group_node);
- env->loop++;
-
- if (env->loop > env->loop_max)
- break;
-
- if (env->loop > env->loop_break) {
- env->loop_break += sched_nr_migrate_break;
- env->flags |= LBF_NEED_BREAK;
- break;
- }
- if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
- goto next;
- load = task_h_load(p);
- if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
- goto next;
- if ((load / 2) > env->load_move)
- goto next;
- if (!can_migrate_task(p, env))
- goto next;
- move_task(p, env);
- pulled++;
- env->load_move -= load;
- #ifdef CONFIG_PREEMPT
-
- if (env->idle == CPU_NEWLY_IDLE)
- break;
- #endif
-
- if (env->load_move <= 0)
- break;
- continue;
- next:
- list_move_tail(&p->se.group_node, tasks);
- }
-
- schedstat_add(env->sd, lb_gained[env->idle], pulled);
- return pulled;
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static int update_shares_cpu(struct task_group *tg, int cpu)
- {
- struct cfs_rq *cfs_rq;
- unsigned long flags;
- struct rq *rq;
- if (!tg->se[cpu])
- return 0;
- rq = cpu_rq(cpu);
- cfs_rq = tg->cfs_rq[cpu];
- raw_spin_lock_irqsave(&rq->lock, flags);
- update_rq_clock(rq);
- update_cfs_load(cfs_rq, 1);
-
- update_cfs_shares(cfs_rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- return 0;
- }
- static void update_shares(int cpu)
- {
- struct cfs_rq *cfs_rq;
- struct rq *rq = cpu_rq(cpu);
- rcu_read_lock();
-
- for_each_leaf_cfs_rq(rq, cfs_rq) {
-
- if (throttled_hierarchy(cfs_rq))
- continue;
- update_shares_cpu(cfs_rq->tg, cpu);
- }
- rcu_read_unlock();
- }
- static int tg_load_down(struct task_group *tg, void *data)
- {
- unsigned long load;
- long cpu = (long)data;
- if (!tg->parent) {
- load = cpu_rq(cpu)->load.weight;
- } else {
- load = tg->parent->cfs_rq[cpu]->h_load;
- load *= tg->se[cpu]->load.weight;
- load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
- }
- tg->cfs_rq[cpu]->h_load = load;
- return 0;
- }
- static void update_h_load(long cpu)
- {
- rcu_read_lock();
- walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
- rcu_read_unlock();
- }
- static unsigned long task_h_load(struct task_struct *p)
- {
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
- unsigned long load;
- load = p->se.load.weight;
- load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
- return load;
- }
- #else
- static inline void update_shares(int cpu)
- {
- }
- static inline void update_h_load(long cpu)
- {
- }
- static unsigned long task_h_load(struct task_struct *p)
- {
- return p->se.load.weight;
- }
- #endif
- struct sd_lb_stats {
- struct sched_group *busiest;
- struct sched_group *this;
- unsigned long total_load;
- unsigned long total_pwr;
- unsigned long avg_load;
-
- unsigned long this_load;
- unsigned long this_load_per_task;
- unsigned long this_nr_running;
- unsigned long this_has_capacity;
- unsigned int this_idle_cpus;
-
- unsigned int busiest_idle_cpus;
- unsigned long max_load;
- unsigned long busiest_load_per_task;
- unsigned long busiest_nr_running;
- unsigned long busiest_group_capacity;
- unsigned long busiest_has_capacity;
- unsigned int busiest_group_weight;
- int group_imb;
- #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- int power_savings_balance;
- struct sched_group *group_min;
- struct sched_group *group_leader;
- unsigned long min_load_per_task;
- unsigned long leader_nr_running;
- unsigned long min_nr_running;
- #endif
- };
- struct sg_lb_stats {
- unsigned long avg_load;
- unsigned long group_load;
- unsigned long sum_nr_running;
- unsigned long sum_weighted_load;
- unsigned long group_capacity;
- unsigned long idle_cpus;
- unsigned long group_weight;
- int group_imb;
- int group_has_capacity;
- };
- static inline int get_sd_load_idx(struct sched_domain *sd,
- enum cpu_idle_type idle)
- {
- int load_idx;
- switch (idle) {
- case CPU_NOT_IDLE:
- load_idx = sd->busy_idx;
- break;
- case CPU_NEWLY_IDLE:
- load_idx = sd->newidle_idx;
- break;
- default:
- load_idx = sd->idle_idx;
- break;
- }
- return load_idx;
- }
- #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- static inline void init_sd_power_savings_stats(struct sched_domain *sd,
- struct sd_lb_stats *sds, enum cpu_idle_type idle)
- {
-
- if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
- sds->power_savings_balance = 0;
- else {
- sds->power_savings_balance = 1;
- sds->min_nr_running = ULONG_MAX;
- sds->leader_nr_running = 0;
- }
- }
- static inline void update_sd_power_savings_stats(struct sched_group *group,
- struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
- {
- if (!sds->power_savings_balance)
- return;
-
- if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
- !sds->this_nr_running))
- sds->power_savings_balance = 0;
-
- if (!sds->power_savings_balance ||
- sgs->sum_nr_running >= sgs->group_capacity ||
- !sgs->sum_nr_running)
- return;
-
- if ((sgs->sum_nr_running < sds->min_nr_running) ||
- (sgs->sum_nr_running == sds->min_nr_running &&
- group_first_cpu(group) > group_first_cpu(sds->group_min))) {
- sds->group_min = group;
- sds->min_nr_running = sgs->sum_nr_running;
- sds->min_load_per_task = sgs->sum_weighted_load /
- sgs->sum_nr_running;
- }
-
- if (sgs->sum_nr_running + 1 > sgs->group_capacity)
- return;
- if (sgs->sum_nr_running > sds->leader_nr_running ||
- (sgs->sum_nr_running == sds->leader_nr_running &&
- group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
- sds->group_leader = group;
- sds->leader_nr_running = sgs->sum_nr_running;
- }
- }
- static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
- int this_cpu, unsigned long *imbalance)
- {
- if (!sds->power_savings_balance)
- return 0;
- if (sds->this != sds->group_leader ||
- sds->group_leader == sds->group_min)
- return 0;
- *imbalance = sds->min_load_per_task;
- sds->busiest = sds->group_min;
- return 1;
- }
- #else
- static inline void init_sd_power_savings_stats(struct sched_domain *sd,
- struct sd_lb_stats *sds, enum cpu_idle_type idle)
- {
- return;
- }
- static inline void update_sd_power_savings_stats(struct sched_group *group,
- struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
- {
- return;
- }
- static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
- int this_cpu, unsigned long *imbalance)
- {
- return 0;
- }
- #endif
- unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
- {
- return SCHED_POWER_SCALE;
- }
- unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
- {
- return default_scale_freq_power(sd, cpu);
- }
- unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
- {
- unsigned long weight = sd->span_weight;
- unsigned long smt_gain = sd->smt_gain;
- smt_gain /= weight;
- return smt_gain;
- }
- unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
- {
- return default_scale_smt_power(sd, cpu);
- }
- unsigned long scale_rt_power(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- u64 total, available, age_stamp, avg;
-
- age_stamp = ACCESS_ONCE(rq->age_stamp);
- avg = ACCESS_ONCE(rq->rt_avg);
- total = sched_avg_period() + (rq->clock - age_stamp);
- if (unlikely(total < avg)) {
-
- available = 0;
- } else {
- available = total - avg;
- }
- if (unlikely((s64)total < SCHED_POWER_SCALE))
- total = SCHED_POWER_SCALE;
- total >>= SCHED_POWER_SHIFT;
- return div_u64(available, total);
- }
- static void update_cpu_power(struct sched_domain *sd, int cpu)
- {
- unsigned long weight = sd->span_weight;
- unsigned long power = SCHED_POWER_SCALE;
- struct sched_group *sdg = sd->groups;
- if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
- if (sched_feat(ARCH_POWER))
- power *= arch_scale_smt_power(sd, cpu);
- else
- power *= default_scale_smt_power(sd, cpu);
- power >>= SCHED_POWER_SHIFT;
- }
- sdg->sgp->power_orig = power;
- if (sched_feat(ARCH_POWER))
- power *= arch_scale_freq_power(sd, cpu);
- else
- power *= default_scale_freq_power(sd, cpu);
- power >>= SCHED_POWER_SHIFT;
- power *= scale_rt_power(cpu);
- power >>= SCHED_POWER_SHIFT;
- if (!power)
- power = 1;
- cpu_rq(cpu)->cpu_power = power;
- sdg->sgp->power = power;
- }
- void update_group_power(struct sched_domain *sd, int cpu)
- {
- struct sched_domain *child = sd->child;
- struct sched_group *group, *sdg = sd->groups;
- unsigned long power;
- unsigned long interval;
- interval = msecs_to_jiffies(sd->balance_interval);
- interval = clamp(interval, 1UL, max_load_balance_interval);
- sdg->sgp->next_update = jiffies + interval;
- if (!child) {
- update_cpu_power(sd, cpu);
- return;
- }
- power = 0;
- if (child->flags & SD_OVERLAP) {
-
- for_each_cpu(cpu, sched_group_cpus(sdg))
- power += power_of(cpu);
- } else {
-
-
- group = child->groups;
- do {
- power += group->sgp->power;
- group = group->next;
- } while (group != child->groups);
- }
- sdg->sgp->power_orig = sdg->sgp->power = power;
- }
- static inline int
- fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
- {
-
- if (!(sd->flags & SD_SHARE_CPUPOWER))
- return 0;
-
- if (group->sgp->power * 32 > group->sgp->power_orig * 29)
- return 1;
- return 0;
- }
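- /*
-  * update_sg_lb_stats(): collect load, nr_running, capacity and imbalance
-  * statistics for one sched_group of the domain being balanced.
-  */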
- static inline void update_sg_lb_stats(struct sched_domain *sd,
- struct sched_group *group, int this_cpu,
- enum cpu_idle_type idle, int load_idx,
- int local_group, const struct cpumask *cpus,
- int *balance, struct sg_lb_stats *sgs)
- {
- unsigned long nr_running, max_nr_running, min_nr_running;
- unsigned long load, max_cpu_load, min_cpu_load;
- int i;
- unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long avg_load_per_task = 0;
- if (local_group)
- balance_cpu = group_balance_cpu(group);
-
- max_cpu_load = 0;
- min_cpu_load = ~0UL;
- max_nr_running = 0;
- min_nr_running = ~0UL;
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
- struct rq *rq = cpu_rq(i);
- nr_running = rq->nr_running;
-
- if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu &&
- cpumask_test_cpu(i, sched_group_mask(group))) {
- first_idle_cpu = 1;
- balance_cpu = i;
- }
- load = target_load(i, load_idx);
- } else {
- load = source_load(i, load_idx);
- if (load > max_cpu_load)
- max_cpu_load = load;
- if (min_cpu_load > load)
- min_cpu_load = load;
- if (nr_running > max_nr_running)
- max_nr_running = nr_running;
- if (min_nr_running > nr_running)
- min_nr_running = nr_running;
- }
- sgs->group_load += load;
- sgs->sum_nr_running += nr_running;
- sgs->sum_weighted_load += weighted_cpuload(i);
- if (idle_cpu(i))
- sgs->idle_cpus++;
- }
-
- if (local_group) {
- if (idle != CPU_NEWLY_IDLE) {
- if (balance_cpu != this_cpu) {
- *balance = 0;
- return;
- }
- update_group_power(sd, this_cpu);
- } else if (time_after_eq(jiffies, group->sgp->next_update))
- update_group_power(sd, this_cpu);
- }
-
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
-
- if (sgs->sum_nr_running)
- avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
- (max_nr_running - min_nr_running) > 1)
- sgs->group_imb = 1;
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
- SCHED_POWER_SCALE);
- if (!sgs->group_capacity)
- sgs->group_capacity = fix_small_capacity(sd, group);
- sgs->group_weight = group->group_weight;
- if (sgs->group_capacity > sgs->sum_nr_running)
- sgs->group_has_capacity = 1;
- }
- static bool update_sd_pick_busiest(struct sched_domain *sd,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs,
- int this_cpu)
- {
- if (sgs->avg_load <= sds->max_load)
- return false;
- if (sgs->sum_nr_running > sgs->group_capacity)
- return true;
- if (sgs->group_imb)
- return true;
-
- if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
- this_cpu < group_first_cpu(sg)) {
- if (!sds->busiest)
- return true;
- if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
- return true;
- }
- return false;
- }
- static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
- enum cpu_idle_type idle, const struct cpumask *cpus,
- int *balance, struct sd_lb_stats *sds)
- {
- struct sched_domain *child = sd->child;
- struct sched_group *sg = sd->groups;
- struct sg_lb_stats sgs;
- int load_idx, prefer_sibling = 0;
- if (child && child->flags & SD_PREFER_SIBLING)
- prefer_sibling = 1;
- init_sd_power_savings_stats(sd, sds, idle);
- load_idx = get_sd_load_idx(sd, idle);
- do {
- int local_group;
- local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
- memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
- local_group, cpus, balance, &sgs);
- if (local_group && !(*balance))
- return;
- sds->total_load += sgs.group_load;
- sds->total_pwr += sg->sgp->power;
-
- if (prefer_sibling && !local_group && sds->this_has_capacity)
- sgs.group_capacity = min(sgs.group_capacity, 1UL);
- if (local_group) {
- sds->this_load = sgs.avg_load;
- sds->this = sg;
- sds->this_nr_running = sgs.sum_nr_running;
- sds->this_load_per_task = sgs.sum_weighted_load;
- sds->this_has_capacity = sgs.group_has_capacity;
- sds->this_idle_cpus = sgs.idle_cpus;
- } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
- sds->max_load = sgs.avg_load;
- sds->busiest = sg;
- sds->busiest_nr_running = sgs.sum_nr_running;
- sds->busiest_idle_cpus = sgs.idle_cpus;
- sds->busiest_group_capacity = sgs.group_capacity;
- sds->busiest_load_per_task = sgs.sum_weighted_load;
- sds->busiest_has_capacity = sgs.group_has_capacity;
- sds->busiest_group_weight = sgs.group_weight;
- sds->group_imb = sgs.group_imb;
- }
- update_sd_power_savings_stats(sg, sds, local_group, &sgs);
- sg = sg->next;
- } while (sg != sd->groups);
- }
- static int check_asym_packing(struct sched_domain *sd,
- struct sd_lb_stats *sds,
- int this_cpu, unsigned long *imbalance)
- {
- int busiest_cpu;
- if (!(sd->flags & SD_ASYM_PACKING))
- return 0;
- if (!sds->busiest)
- return 0;
- busiest_cpu = group_first_cpu(sds->busiest);
- if (this_cpu > busiest_cpu)
- return 0;
- *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
- SCHED_POWER_SCALE);
- return 1;
- }
- static inline void fix_small_imbalance(struct sd_lb_stats *sds,
- int this_cpu, unsigned long *imbalance)
- {
- unsigned long tmp, pwr_now = 0, pwr_move = 0;
- unsigned int imbn = 2;
- unsigned long scaled_busy_load_per_task;
- if (sds->this_nr_running) {
- sds->this_load_per_task /= sds->this_nr_running;
- if (sds->busiest_load_per_task >
- sds->this_load_per_task)
- imbn = 1;
- } else
- sds->this_load_per_task =
- cpu_avg_load_per_task(this_cpu);
- scaled_busy_load_per_task = sds->busiest_load_per_task
- * SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->sgp->power;
- if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
- (scaled_busy_load_per_task * imbn)) {
- *imbalance = sds->busiest_load_per_task;
- return;
- }
-
- pwr_now += sds->busiest->sgp->power *
- min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->sgp->power *
- min(sds->this_load_per_task, sds->this_load);
- pwr_now /= SCHED_POWER_SCALE;
-
- tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->sgp->power;
- if (sds->max_load > tmp)
- pwr_move += sds->busiest->sgp->power *
- min(sds->busiest_load_per_task, sds->max_load - tmp);
-
- if (sds->max_load * sds->busiest->sgp->power <
- sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->sgp->power) /
- sds->this->sgp->power;
- else
- tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->sgp->power;
- pwr_move += sds->this->sgp->power *
- min(sds->this_load_per_task, sds->this_load + tmp);
- pwr_move /= SCHED_POWER_SCALE;
-
- if (pwr_move > pwr_now)
- *imbalance = sds->busiest_load_per_task;
- }
- static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
- unsigned long *imbalance)
- {
- unsigned long max_pull, load_above_capacity = ~0UL;
- sds->busiest_load_per_task /= sds->busiest_nr_running;
- if (sds->group_imb) {
- sds->busiest_load_per_task =
- min(sds->busiest_load_per_task, sds->avg_load);
- }
-
- if (sds->max_load < sds->avg_load) {
- *imbalance = 0;
- return fix_small_imbalance(sds, this_cpu, imbalance);
- }
- if (!sds->group_imb) {
-
- load_above_capacity = (sds->busiest_nr_running -
- sds->busiest_group_capacity);
- load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
- load_above_capacity /= sds->busiest->sgp->power;
- }
-
- max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
-
- *imbalance = min(max_pull * sds->busiest->sgp->power,
- (sds->avg_load - sds->this_load) * sds->this->sgp->power)
- / SCHED_POWER_SCALE;
-
- if (*imbalance < sds->busiest_load_per_task)
- return fix_small_imbalance(sds, this_cpu, imbalance);
- }
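- /*
-  * find_busiest_group(): compute per-group statistics for the domain and, if
-  * the imbalance is worth acting on, return the busiest group along with the
-  * amount of weighted load to move.
-  */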
- static struct sched_group *
- find_busiest_group(struct sched_domain *sd, int this_cpu,
- unsigned long *imbalance, enum cpu_idle_type idle,
- const struct cpumask *cpus, int *balance)
- {
- struct sd_lb_stats sds;
- memset(&sds, 0, sizeof(sds));
-
- update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
-
- if (!(*balance))
- goto ret;
- if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
- check_asym_packing(sd, &sds, this_cpu, imbalance))
- return sds.busiest;
-
- if (!sds.busiest || sds.busiest_nr_running == 0)
- goto out_balanced;
- sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
-
- if (sds.group_imb)
- goto force_balance;
-
- if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
- !sds.busiest_has_capacity)
- goto force_balance;
-
- if (sds.this_load >= sds.max_load)
- goto out_balanced;
-
- if (sds.this_load >= sds.avg_load)
- goto out_balanced;
- if (idle == CPU_IDLE) {
-
- if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
- sds.busiest_nr_running <= sds.busiest_group_weight)
- goto out_balanced;
- } else {
-
- if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
- goto out_balanced;
- }
- force_balance:
-
- calculate_imbalance(&sds, this_cpu, imbalance);
- return sds.busiest;
- out_balanced:
-
- if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
- return sds.busiest;
- ret:
- *imbalance = 0;
- return NULL;
- }
- static struct rq *
- find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
- enum cpu_idle_type idle, unsigned long imbalance,
- const struct cpumask *cpus)
- {
- struct rq *busiest = NULL, *rq;
- unsigned long max_load = 0;
- int i;
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
- unsigned long power = power_of(i);
- unsigned long capacity = DIV_ROUND_CLOSEST(power,
- SCHED_POWER_SCALE);
- unsigned long wl;
- if (!capacity)
- capacity = fix_small_capacity(sd, group);
- rq = cpu_rq(i);
- wl = weighted_cpuload(i);
-
- if (capacity && rq->nr_running == 1 && wl > imbalance)
- continue;
-
- wl = (wl * SCHED_POWER_SCALE) / power;
- if (wl > max_load) {
- max_load = wl;
- busiest = rq;
- }
- }
- return busiest;
- }
- #define MAX_PINNED_INTERVAL 512
- DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
- static int need_active_balance(struct sched_domain *sd, int idle,
- int busiest_cpu, int this_cpu)
- {
- if (idle == CPU_NEWLY_IDLE) {
-
- if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
- return 1;
-
- if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
- return 0;
- }
- return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
- }
- static int active_load_balance_cpu_stop(void *data);
- static int load_balance(int this_cpu, struct rq *this_rq,
- struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance)
- {
- int ld_moved, active_balance = 0;
- struct sched_group *group;
- unsigned long imbalance;
- struct rq *busiest = NULL;
- unsigned long flags;
- struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
- struct lb_env env = {
- .sd = sd,
- .dst_cpu = this_cpu,
- .dst_rq = this_rq,
- .idle = idle,
- .loop_break = sched_nr_migrate_break,
- };
- cpumask_copy(cpus, cpu_active_mask);
- schedstat_inc(sd, lb_count[idle]);
- redo:
- group = find_busiest_group(sd, this_cpu, &imbalance, idle,
- cpus, balance);
- if (*balance == 0)
- goto out_balanced;
- if (!group) {
- schedstat_inc(sd, lb_nobusyg[idle]);
- goto out_balanced;
- }
- busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
- if (!busiest) {
- schedstat_inc(sd, lb_nobusyq[idle]);
- goto out_balanced;
- }
- BUG_ON(busiest == this_rq);
- schedstat_add(sd, lb_imbalance[idle], imbalance);
- ld_moved = 0;
- if (busiest->nr_running > 1) {
-
- env.flags |= LBF_ALL_PINNED;
- env.load_move = imbalance;
- env.src_cpu = busiest->cpu;
- env.src_rq = busiest;
- env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
- more_balance:
- local_irq_save(flags);
- double_rq_lock(this_rq, busiest);
- if (!env.loop)
- update_h_load(env.src_cpu);
- ld_moved += move_tasks(&env);
- double_rq_unlock(this_rq, busiest);
- local_irq_restore(flags);
- if (env.flags & LBF_NEED_BREAK) {
- env.flags &= ~LBF_NEED_BREAK;
- goto more_balance;
- }
-
- if (ld_moved && this_cpu != smp_processor_id())
- resched_cpu(this_cpu);
-
- if (unlikely(env.flags & LBF_ALL_PINNED)) {
- cpumask_clear_cpu(cpu_of(busiest), cpus);
- if (!cpumask_empty(cpus))
- goto redo;
- goto out_balanced;
- }
- }
- if (!ld_moved) {
- schedstat_inc(sd, lb_failed[idle]);
-
- if (idle != CPU_NEWLY_IDLE)
- sd->nr_balance_failed++;
- if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
- raw_spin_lock_irqsave(&busiest->lock, flags);
-
- if (!cpumask_test_cpu(this_cpu,
- tsk_cpus_allowed(busiest->curr))) {
- raw_spin_unlock_irqrestore(&busiest->lock,
- flags);
- env.flags |= LBF_ALL_PINNED;
- goto out_one_pinned;
- }
-
- if (!busiest->active_balance) {
- busiest->active_balance = 1;
- busiest->push_cpu = this_cpu;
- active_balance = 1;
- }
- raw_spin_unlock_irqrestore(&busiest->lock, flags);
- if (active_balance)
- stop_one_cpu_nowait(cpu_of(busiest),
- active_load_balance_cpu_stop, busiest,
- &busiest->active_balance_work);
-
- sd->nr_balance_failed = sd->cache_nice_tries+1;
- }
- } else {
- sd->nr_balance_failed = 0;
- if (per_cpu(dbs_boost_needed, this_cpu)) {
- per_cpu(dbs_boost_needed, this_cpu) = false;
- atomic_notifier_call_chain(&migration_notifier_head,
- this_cpu,
- (void *)cpu_of(busiest));
- }
- }
- if (likely(!active_balance)) {
-
- sd->balance_interval = sd->min_interval;
- } else {
-
- if (sd->balance_interval < sd->max_interval)
- sd->balance_interval *= 2;
- }
- goto out;
- out_balanced:
- schedstat_inc(sd, lb_balanced[idle]);
- sd->nr_balance_failed = 0;
- out_one_pinned:
-
- if (((env.flags & LBF_ALL_PINNED) &&
- sd->balance_interval < MAX_PINNED_INTERVAL) ||
- (sd->balance_interval < sd->max_interval))
- sd->balance_interval *= 2;
- ld_moved = 0;
- out:
- trace_sched_load_balance(this_cpu, idle, *balance,
- group ? group->cpumask[0] : 0,
- busiest ? busiest->nr_running : 0, imbalance,
- env.flags, ld_moved, sd->balance_interval);
- return ld_moved;
- }
- void idle_balance(int this_cpu, struct rq *this_rq)
- {
- struct sched_domain *sd;
- int pulled_task = 0;
- unsigned long next_balance = jiffies + HZ;
- this_rq->idle_stamp = this_rq->clock;
- if (this_rq->avg_idle < sysctl_sched_migration_cost)
- return;
-
- raw_spin_unlock(&this_rq->lock);
- update_shares(this_cpu);
- rcu_read_lock();
- for_each_domain(this_cpu, sd) {
- unsigned long interval;
- int balance = 1;
- if (!(sd->flags & SD_LOAD_BALANCE))
- continue;
- if (sd->flags & SD_BALANCE_NEWIDLE) {
- pulled_task = load_balance(this_cpu, this_rq,
- sd, CPU_NEWLY_IDLE, &balance);
- }
- interval = msecs_to_jiffies(sd->balance_interval);
- if (time_after(next_balance, sd->last_balance + interval))
- next_balance = sd->last_balance + interval;
-
- if (pulled_task || this_rq->nr_running > 0) {
- this_rq->idle_stamp = 0;
- break;
- }
- }
- rcu_read_unlock();
- raw_spin_lock(&this_rq->lock);
-
- if (this_rq->nr_running && !pulled_task)
- return;
- if (!pulled_task || time_after(jiffies, this_rq->next_balance)) {
-
- this_rq->next_balance = next_balance;
- }
- }
- static int active_load_balance_cpu_stop(void *data)
- {
- struct rq *busiest_rq = data;
- int busiest_cpu = cpu_of(busiest_rq);
- int target_cpu = busiest_rq->push_cpu;
- struct rq *target_rq = cpu_rq(target_cpu);
- struct sched_domain *sd;
- raw_spin_lock_irq(&busiest_rq->lock);
-
- if (unlikely(busiest_cpu != smp_processor_id() ||
- !busiest_rq->active_balance))
- goto out_unlock;
-
- if (busiest_rq->nr_running <= 1)
- goto out_unlock;
-
- BUG_ON(busiest_rq == target_rq);
-
- double_lock_balance(busiest_rq, target_rq);
-
- rcu_read_lock();
- for_each_domain(target_cpu, sd) {
- if ((sd->flags & SD_LOAD_BALANCE) &&
- cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
- break;
- }
- if (likely(sd)) {
- struct lb_env env = {
- .sd = sd,
- .dst_cpu = target_cpu,
- .dst_rq = target_rq,
- .src_cpu = busiest_rq->cpu,
- .src_rq = busiest_rq,
- .idle = CPU_IDLE,
- };
- schedstat_inc(sd, alb_count);
- if (move_one_task(&env))
- schedstat_inc(sd, alb_pushed);
- else
- schedstat_inc(sd, alb_failed);
- }
- rcu_read_unlock();
- double_unlock_balance(busiest_rq, target_rq);
- out_unlock:
- busiest_rq->active_balance = 0;
- raw_spin_unlock_irq(&busiest_rq->lock);
- if (per_cpu(dbs_boost_needed, target_cpu)) {
- per_cpu(dbs_boost_needed, target_cpu) = false;
- atomic_notifier_call_chain(&migration_notifier_head,
- target_cpu,
- (void *)cpu_of(busiest_rq));
- }
- return 0;
- }
- #ifdef CONFIG_NO_HZ
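- /*
-  * NO_HZ idle load balancing: track which CPUs have stopped their tick and
-  * nominate one of them (the "ilb") to be kicked and run rebalancing on
-  * behalf of all the other idle CPUs.
-  */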
- static struct {
- cpumask_var_t idle_cpus_mask;
- atomic_t nr_cpus;
- unsigned long next_balance;
- } nohz ____cacheline_aligned;
- #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
- {
- struct sched_domain *sd;
- for_each_domain(cpu, sd)
- if (sd->flags & flag)
- break;
- return sd;
- }
- #define for_each_flag_domain(cpu, sd, flag) \
- for (sd = lowest_flag_domain(cpu, flag); \
- (sd && (sd->flags & flag)); sd = sd->parent)
- static int find_new_ilb(int cpu)
- {
- int ilb = cpumask_first(nohz.idle_cpus_mask);
- struct sched_group *ilbg;
- struct sched_domain *sd;
-
- if (!(sched_smt_power_savings || sched_mc_power_savings))
- goto out_done;
-
- if (cpumask_weight(nohz.idle_cpus_mask) < 2)
- goto out_done;
- rcu_read_lock();
- for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
- ilbg = sd->groups;
- do {
- if (ilbg->group_weight !=
- atomic_read(&ilbg->sgp->nr_busy_cpus)) {
- ilb = cpumask_first_and(nohz.idle_cpus_mask,
- sched_group_cpus(ilbg));
- goto unlock;
- }
- ilbg = ilbg->next;
- } while (ilbg != sd->groups);
- }
- unlock:
- rcu_read_unlock();
- out_done:
- if (ilb < nr_cpu_ids && idle_cpu(ilb))
- return ilb;
- return nr_cpu_ids;
- }
- #else
- static inline int find_new_ilb(int call_cpu)
- {
- return nr_cpu_ids;
- }
- #endif
- static void nohz_balancer_kick(int cpu)
- {
- int ilb_cpu;
- nohz.next_balance++;
- ilb_cpu = find_new_ilb(cpu);
- if (ilb_cpu >= nr_cpu_ids)
- return;
- if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
- return;
-
- smp_send_reschedule(ilb_cpu);
- return;
- }
- static inline void nohz_balance_exit_idle(int cpu)
- {
- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
- }
- }
- static inline void set_cpu_sd_state_busy(void)
- {
- struct sched_domain *sd;
- int cpu = smp_processor_id();
- rcu_read_lock();
- sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
- if (!sd || !sd->nohz_idle)
- goto unlock;
- sd->nohz_idle = 0;
- for (; sd; sd = sd->parent)
- atomic_inc(&sd->groups->sgp->nr_busy_cpus);
- unlock:
- rcu_read_unlock();
- }
- void set_cpu_sd_state_idle(void)
- {
- struct sched_domain *sd;
- int cpu = smp_processor_id();
- rcu_read_lock();
- sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
- if (!sd || sd->nohz_idle)
- goto unlock;
- sd->nohz_idle = 1;
- for (; sd; sd = sd->parent)
- atomic_dec(&sd->groups->sgp->nr_busy_cpus);
- unlock:
- rcu_read_unlock();
- }
- void nohz_balance_enter_idle(int cpu)
- {
-
- if (!cpu_active(cpu))
- return;
- if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
- return;
- cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
- atomic_inc(&nohz.nr_cpus);
- set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
- }
- static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
- {
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DYING:
- nohz_balance_exit_idle(smp_processor_id());
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- #endif
- static DEFINE_SPINLOCK(balancing);
- void update_max_interval(void)
- {
- max_load_balance_interval = HZ*num_online_cpus()/10;
- }
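- /*
-  * Walk this CPU's sched domains and run load_balance() in each one whose
-  * balance interval has elapsed; invoked from the SCHED_SOFTIRQ handler.
-  */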
- static void rebalance_domains(int cpu, enum cpu_idle_type idle)
- {
- int balance = 1;
- struct rq *rq = cpu_rq(cpu);
- unsigned long interval;
- struct sched_domain *sd;
-
- unsigned long next_balance = jiffies + 60*HZ;
- int update_next_balance = 0;
- int need_serialize;
- update_shares(cpu);
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- if (!(sd->flags & SD_LOAD_BALANCE))
- continue;
- interval = sd->balance_interval;
- if (idle != CPU_IDLE)
- interval *= sd->busy_factor;
-
- interval = msecs_to_jiffies(interval);
- interval = clamp(interval, 1UL, max_load_balance_interval);
- need_serialize = sd->flags & SD_SERIALIZE;
- if (need_serialize) {
- if (!spin_trylock(&balancing))
- goto out;
- }
- if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance)) {
-
- idle = CPU_NOT_IDLE;
- }
- sd->last_balance = jiffies;
- }
- if (need_serialize)
- spin_unlock(&balancing);
- out:
- if (time_after(next_balance, sd->last_balance + interval)) {
- next_balance = sd->last_balance + interval;
- update_next_balance = 1;
- }
-
- if (!balance)
- break;
- }
- rcu_read_unlock();
-
- if (likely(update_next_balance))
- rq->next_balance = next_balance;
- }
- #ifdef CONFIG_NO_HZ
- static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
- {
- struct rq *this_rq = cpu_rq(this_cpu);
- struct rq *rq;
- int balance_cpu;
- if (idle != CPU_IDLE ||
- !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
- goto end;
- for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
- if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
- continue;
-
- if (need_resched())
- break;
- rq = cpu_rq(balance_cpu);
- raw_spin_lock_irq(&rq->lock);
- update_rq_clock(rq);
- update_idle_cpu_load(rq);
- raw_spin_unlock_irq(&rq->lock);
- rebalance_domains(balance_cpu, CPU_IDLE);
- if (time_after(this_rq->next_balance, rq->next_balance))
- this_rq->next_balance = rq->next_balance;
- }
- nohz.next_balance = this_rq->next_balance;
- end:
- clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
- }
- static inline int nohz_kick_needed(struct rq *rq, int cpu)
- {
- unsigned long now = jiffies;
- struct sched_domain *sd;
- if (unlikely(idle_cpu(cpu)))
- return 0;
-
- set_cpu_sd_state_busy();
- nohz_balance_exit_idle(cpu);
-
- if (likely(!atomic_read(&nohz.nr_cpus)))
- return 0;
- if (time_before(now, nohz.next_balance))
- return 0;
- if (rq->nr_running >= 2)
- goto need_kick;
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- struct sched_group *sg = sd->groups;
- struct sched_group_power *sgp = sg->sgp;
- int nr_busy = atomic_read(&sgp->nr_busy_cpus);
- if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
- goto need_kick_unlock;
- if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
- && (cpumask_first_and(nohz.idle_cpus_mask,
- sched_domain_span(sd)) < cpu))
- goto need_kick_unlock;
- if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
- break;
- }
- rcu_read_unlock();
- return 0;
- need_kick_unlock:
- rcu_read_unlock();
- need_kick:
- return 1;
- }
- #else
- static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
- #endif
- static void run_rebalance_domains(struct softirq_action *h)
- {
- int this_cpu = smp_processor_id();
- struct rq *this_rq = cpu_rq(this_cpu);
- enum cpu_idle_type idle = this_rq->idle_balance ?
- CPU_IDLE : CPU_NOT_IDLE;
- rebalance_domains(this_cpu, idle);
-
- nohz_idle_balance(this_cpu, idle);
- }
- static inline int on_null_domain(int cpu)
- {
- return !rcu_dereference_sched(cpu_rq(cpu)->sd);
- }
- void trigger_load_balance(struct rq *rq, int cpu)
- {
-
- if (time_after_eq(jiffies, rq->next_balance) &&
- likely(!on_null_domain(cpu)))
- raise_softirq(SCHED_SOFTIRQ);
- #ifdef CONFIG_NO_HZ
- if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
- nohz_balancer_kick(cpu);
- #endif
- }
- static void rq_online_fair(struct rq *rq)
- {
- update_sysctl();
- }
- static void rq_offline_fair(struct rq *rq)
- {
- update_sysctl();
-
- unthrottle_offline_cfs_rqs(rq);
- }
- #endif
- static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &curr->se;
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- entity_tick(cfs_rq, se, queued);
- }
- }
- static void task_fork_fair(struct task_struct *p)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se, *curr;
- int this_cpu = smp_processor_id();
- struct rq *rq = this_rq();
- unsigned long flags;
- raw_spin_lock_irqsave(&rq->lock, flags);
- update_rq_clock(rq);
- cfs_rq = task_cfs_rq(current);
- curr = cfs_rq->curr;
-
- rcu_read_lock();
- __set_task_cpu(p, this_cpu);
- rcu_read_unlock();
- update_curr(cfs_rq);
- if (curr)
- se->vruntime = curr->vruntime;
- place_entity(cfs_rq, se, 1);
- if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
-
- swap(curr->vruntime, se->vruntime);
- resched_task(rq->curr);
- }
- se->vruntime -= cfs_rq->min_vruntime;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
- static void
- prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
- {
- if (!p->se.on_rq)
- return;
-
- if (rq->curr == p) {
- if (p->prio > oldprio)
- resched_task(rq->curr);
- } else
- check_preempt_curr(rq, p, 0);
- }
- static void switched_from_fair(struct rq *rq, struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- if (!p->on_rq && p->state != TASK_RUNNING) {
-
- place_entity(cfs_rq, se, 0);
- se->vruntime -= cfs_rq->min_vruntime;
- }
- }
- static void switched_to_fair(struct rq *rq, struct task_struct *p)
- {
- if (!p->se.on_rq)
- return;
-
- if (rq->curr == p)
- resched_task(rq->curr);
- else
- check_preempt_curr(rq, p, 0);
- }
- static void set_curr_task_fair(struct rq *rq)
- {
- struct sched_entity *se = &rq->curr->se;
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- set_next_entity(cfs_rq, se);
-
- account_cfs_rq_runtime(cfs_rq, 0);
- }
- }
- void init_cfs_rq(struct cfs_rq *cfs_rq)
- {
- cfs_rq->tasks_timeline = RB_ROOT;
- cfs_rq->min_vruntime = (u64)(-(1LL << 20));
- #ifndef CONFIG_64BIT
- cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
- #endif
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static void task_move_group_fair(struct task_struct *p, int on_rq)
- {
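- /*
-  * A sleeping task usually still carries a vruntime based on its old cfs_rq's
-  * min_vruntime, so shift it onto the new cfs_rq's base. Freshly forked or
-  * TASK_WAKING tasks already hold a normalized vruntime, so treat them as
-  * on_rq and skip the adjustment.
-  */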
-
-
- if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
- on_rq = 1;
- if (!on_rq)
- p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
- set_task_rq(p, task_cpu(p));
- if (!on_rq)
- p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
- }
- void free_fair_sched_group(struct task_group *tg)
- {
- int i;
- destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
- for_each_possible_cpu(i) {
- if (tg->cfs_rq)
- kfree(tg->cfs_rq[i]);
- if (tg->se)
- kfree(tg->se[i]);
- }
- kfree(tg->cfs_rq);
- kfree(tg->se);
- }
- int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se;
- int i;
- tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->cfs_rq)
- goto err;
- tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->se)
- goto err;
- tg->shares = NICE_0_LOAD;
- init_cfs_bandwidth(tg_cfs_bandwidth(tg));
- for_each_possible_cpu(i) {
- cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
- GFP_KERNEL, cpu_to_node(i));
- if (!cfs_rq)
- goto err;
- se = kzalloc_node(sizeof(struct sched_entity),
- GFP_KERNEL, cpu_to_node(i));
- if (!se)
- goto err_free_rq;
- init_cfs_rq(cfs_rq);
- init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
- }
- return 1;
- err_free_rq:
- kfree(cfs_rq);
- err:
- return 0;
- }
- void unregister_fair_sched_group(struct task_group *tg, int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
-
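- /* Only empty task groups get destroyed, so on_list can be checked speculatively without taking the rq lock first. */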
- if (!tg->cfs_rq[cpu]->on_list)
- return;
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
- void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
- struct sched_entity *se, int cpu,
- struct sched_entity *parent)
- {
- struct rq *rq = cpu_rq(cpu);
- cfs_rq->tg = tg;
- cfs_rq->rq = rq;
- #ifdef CONFIG_SMP
-
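- /* Let the initial update_cfs_load() truncate the load history. */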
- cfs_rq->load_stamp = 1;
- #endif
- init_cfs_rq_runtime(cfs_rq);
- tg->cfs_rq[cpu] = cfs_rq;
- tg->se[cpu] = se;
-
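- /* se is NULL for the root task group, which has no entity of its own. */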
- if (!se)
- return;
- if (!parent)
- se->cfs_rq = &rq->cfs;
- else
- se->cfs_rq = parent->my_q;
- se->my_q = cfs_rq;
-
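- /* Guarantee group entities always have a weight. */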
- update_load_set(&se->load, NICE_0_LOAD);
- se->parent = parent;
- }
- static DEFINE_MUTEX(shares_mutex);
- int sched_group_set_shares(struct task_group *tg, unsigned long shares)
- {
- int i;
- unsigned long flags;
-
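- /* The weight of the root cgroup cannot be changed. */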
- if (!tg->se[0])
- return -EINVAL;
- shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
- mutex_lock(&shares_mutex);
- if (tg->shares == shares)
- goto done;
- tg->shares = shares;
- for_each_possible_cpu(i) {
- struct rq *rq = cpu_rq(i);
- struct sched_entity *se;
- se = tg->se[i];
-
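- /* Propagate the new share up the hierarchy. */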
- raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_sched_entity(se)
- update_cfs_shares(group_cfs_rq(se));
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
- done:
- mutex_unlock(&shares_mutex);
- return 0;
- }
- #else
- void free_fair_sched_group(struct task_group *tg) { }
- int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
- {
- return 1;
- }
- void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
- #endif
- static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
- {
- struct sched_entity *se = &task->se;
- unsigned int rr_interval = 0;
-
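- /* The time slice is 0 for a SCHED_OTHER task on an otherwise idle runqueue. */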
- if (rq->cfs.load.weight)
- rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
- return rr_interval;
- }
- const struct sched_class fair_sched_class = {
- .next = &idle_sched_class,
- .enqueue_task = enqueue_task_fair,
- .dequeue_task = dequeue_task_fair,
- .yield_task = yield_task_fair,
- .yield_to_task = yield_to_task_fair,
- .check_preempt_curr = check_preempt_wakeup,
- .pick_next_task = pick_next_task_fair,
- .put_prev_task = put_prev_task_fair,
- #ifdef CONFIG_SMP
- .select_task_rq = select_task_rq_fair,
- .rq_online = rq_online_fair,
- .rq_offline = rq_offline_fair,
- .task_waking = task_waking_fair,
- #endif
- .set_curr_task = set_curr_task_fair,
- .task_tick = task_tick_fair,
- .task_fork = task_fork_fair,
- .prio_changed = prio_changed_fair,
- .switched_from = switched_from_fair,
- .switched_to = switched_to_fair,
- .get_rr_interval = get_rr_interval_fair,
- #ifdef CONFIG_FAIR_GROUP_SCHED
- .task_move_group = task_move_group_fair,
- #endif
- };
- #ifdef CONFIG_SCHED_DEBUG
- void print_cfs_stats(struct seq_file *m, int cpu)
- {
- struct cfs_rq *cfs_rq;
- rcu_read_lock();
- for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
- print_cfs_rq(m, cpu, cfs_rq);
- rcu_read_unlock();
- }
- #endif
- __init void init_sched_fair_class(void)
- {
- #ifdef CONFIG_SMP
- open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
- #ifdef CONFIG_NO_HZ
- nohz.next_balance = jiffies;
- zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
- cpu_notifier(sched_ilb_notifier, 0);
- #endif
- #endif
- }