/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout )
 *    use_far_sets (stored in bit 17 of layout )
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) of each chunk, and each is on a different
 * drive.  near_copies and far_copies must be at least one, and their product
 * is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
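/*
 * For example (illustrative numbers, assuming the common 'near' layout):
 * with raid_disks=4, near_copies=2, far_copies=1, chunk 0 is stored on
 * devices 0 and 1, chunk 1 on devices 2 and 3, chunk 2 on devices 0 and 1
 * again, and so on - effectively a raid0 stripe across two mirrored pairs.
 */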

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)
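
/* BIO_SPECIAL() is true for the two sentinel values above (IO_BLOCKED and
 * IO_MADE_GOOD), i.e. for 'bios' pointers that do not point at a real bio.
 */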
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio, int error);
static void end_reshape(struct r10conf *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
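/* i.e. RESYNC_DEPTH = 32 MiB / RESYNC_BLOCK_SIZE (64 KiB) = 512 requests
 * may be in flight at once, memory permitting.
 */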

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct page *page;
	struct r10bio *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0 ; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
					       &conf->mddev->recovery)) {
				/* we can share bv_page's during recovery
				 * and reshape */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				get_page(page);
			} else
				page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			if (rbio)
				rbio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;

	for (j=0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = & r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	int done;
	struct r10conf *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.  Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			r10_bio->sectors,
			!test_bit(R10BIO_Degraded, &r10_bio->state),
			0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
			dec_rdev = 0;
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		/*
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors)) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the one device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
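/*
 * Worked example (illustrative numbers, assuming a simple 'near' layout):
 * raid_disks=4, near_copies=2, far_copies=1, 64KiB chunks (128 sectors,
 * chunk_shift=7, chunk_mask=127).  For virtual sector 300:
 *   chunk  = 300 >> 7 = 2,  sector-in-chunk = 300 & 127 = 44
 *   chunk *= near_copies  -> 4
 *   dev    = 4 % raid_disks = 0,  stripe = 4 / raid_disks = 1
 *   sector = 44 + (1 << 7) = 172
 * so the data lives at sector 172 of device 0, with its near copy at
 * sector 172 of device 1.
 */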
static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n,f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}

/**
 * raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 * @mddev: the md device
 * @bvm: properties of new bio
 * @biovec: the request that could be merged to it.
 *
 * Return amount of bytes we can accept at this offset
 * This requires checking for end-of-chunk if near_copies != raid_disks,
 * and for subordinate merge_bvec_fns if merge_check_needed.
 */
static int raid10_mergeable_bvec(struct mddev *mddev,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	struct r10conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct geom *geo = &conf->geo;

	chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
	if (conf->reshape_progress != MaxSector &&
	    ((sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards))
		geo = &conf->prev;

	if (geo->near_copies < geo->raid_disks) {
		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
					+ bio_sectors)) << 9;
		if (max < 0)
			/* bio_add cannot handle a negative return */
			max = 0;
		if (max <= biovec->bv_len && bio_sectors == 0)
			return biovec->bv_len;
	} else
		max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		struct {
			struct r10bio r10_bio;
			struct r10dev devs[conf->copies];
		} on_stack;
		struct r10bio *r10_bio = &on_stack.r10_bio;
		int s;
		if (conf->reshape_progress != MaxSector) {
			/* Cannot give any guidance during reshape */
			if (max <= biovec->bv_len && bio_sectors == 0)
				return biovec->bv_len;
			return 0;
		}
		r10_bio->sector = sector;
		raid10_find_phys(conf, r10_bio);
		rcu_read_lock();
		for (s = 0; s < conf->copies; s++) {
			int disk = r10_bio->devs[s].devnum;
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
			rdev = rcu_dereference(conf->mirrors[disk].replacement);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror-based, not device-based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_rdev, *rdev = NULL;
	int do_balance;
	int best_slot;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	sectors = r10_bio->sectors;
	best_slot = -1;
	best_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_slot = slot;
					best_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
			best_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		slot = best_slot;
		rdev = best_rdev;
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}

static int raid10_congested(struct mddev *mddev, int bits)
{
	struct r10conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0;
	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
		     && ret == 0;
	     i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
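/*
 * In short: normal IO is bracketed by wait_barrier()/allow_barrier(), and
 * resync/recovery IO is bracketed by raise_barrier()/lower_barrier();
 * freeze_array()/unfreeze_array() additionally drain in-flight normal IO so
 * that error handling can run with the array quiescent.
 */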
static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

struct raid10_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
						   cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}
  1036. static void __make_request(struct mddev *mddev, struct bio *bio)
  1037. {
  1038. struct r10conf *conf = mddev->private;
  1039. struct r10bio *r10_bio;
  1040. struct bio *read_bio;
  1041. int i;
  1042. const int rw = bio_data_dir(bio);
  1043. const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
  1044. const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
  1045. const unsigned long do_discard = (bio->bi_rw
  1046. & (REQ_DISCARD | REQ_SECURE));
  1047. const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
  1048. unsigned long flags;
  1049. struct md_rdev *blocked_rdev;
  1050. struct blk_plug_cb *cb;
  1051. struct raid10_plug_cb *plug = NULL;
  1052. int sectors_handled;
  1053. int max_sectors;
  1054. int sectors;
  1055. /*
  1056. * Register the new request and wait if the reconstruction
  1057. * thread has put up a bar for new requests.
  1058. * Continue immediately if no resync is active currently.
  1059. */
  1060. wait_barrier(conf);
  1061. sectors = bio_sectors(bio);
  1062. while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  1063. bio->bi_iter.bi_sector < conf->reshape_progress &&
  1064. bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
  1065. /* IO spans the reshape position. Need to wait for
  1066. * reshape to pass
  1067. */
  1068. allow_barrier(conf);
  1069. wait_event(conf->wait_barrier,
  1070. conf->reshape_progress <= bio->bi_iter.bi_sector ||
  1071. conf->reshape_progress >= bio->bi_iter.bi_sector +
  1072. sectors);
  1073. wait_barrier(conf);
  1074. }
  1075. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  1076. bio_data_dir(bio) == WRITE &&
  1077. (mddev->reshape_backwards
  1078. ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
  1079. bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
  1080. : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
  1081. bio->bi_iter.bi_sector < conf->reshape_progress))) {
  1082. /* Need to update reshape_position in metadata */
  1083. mddev->reshape_position = conf->reshape_progress;
  1084. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1085. set_bit(MD_CHANGE_PENDING, &mddev->flags);
  1086. md_wakeup_thread(mddev->thread);
  1087. wait_event(mddev->sb_wait,
  1088. !test_bit(MD_CHANGE_PENDING, &mddev->flags));
  1089. conf->reshape_safe = mddev->reshape_position;
  1090. }
  1091. r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  1092. r10_bio->master_bio = bio;
  1093. r10_bio->sectors = sectors;
  1094. r10_bio->mddev = mddev;
  1095. r10_bio->sector = bio->bi_iter.bi_sector;
  1096. r10_bio->state = 0;
  1097. /* We might need to issue multiple reads to different
  1098. * devices if there are bad blocks around, so we keep
  1099. * track of the number of reads in bio->bi_phys_segments.
  1100. * If this is 0, there is only one r10_bio and no locking
  1101. * will be needed when the request completes. If it is
  1102. * non-zero, then it is the number of not-completed requests.
  1103. */
  1104. bio->bi_phys_segments = 0;
  1105. clear_bit(BIO_SEG_VALID, &bio->bi_flags);
  1106. if (rw == READ) {
  1107. /*
  1108. * read balancing logic:
  1109. */
  1110. struct md_rdev *rdev;
  1111. int slot;
  1112. read_again:
  1113. rdev = read_balance(conf, r10_bio, &max_sectors);
  1114. if (!rdev) {
  1115. raid_end_bio_io(r10_bio);
  1116. return;
  1117. }
  1118. slot = r10_bio->read_slot;
  1119. read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1120. bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
  1121. max_sectors);
  1122. r10_bio->devs[slot].bio = read_bio;
  1123. r10_bio->devs[slot].rdev = rdev;
  1124. read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
  1125. choose_data_offset(r10_bio, rdev);
  1126. read_bio->bi_bdev = rdev->bdev;
  1127. read_bio->bi_end_io = raid10_end_read_request;
  1128. read_bio->bi_rw = READ | do_sync;
  1129. read_bio->bi_private = r10_bio;
  1130. if (max_sectors < r10_bio->sectors) {
  1131. /* Could not read all from this device, so we will
  1132. * need another r10_bio.
  1133. */
  1134. sectors_handled = (r10_bio->sector + max_sectors
  1135. - bio->bi_iter.bi_sector);
  1136. r10_bio->sectors = max_sectors;
  1137. spin_lock_irq(&conf->device_lock);
  1138. if (bio->bi_phys_segments == 0)
  1139. bio->bi_phys_segments = 2;
  1140. else
  1141. bio->bi_phys_segments++;
  1142. spin_unlock_irq(&conf->device_lock);
  1143. /* Cannot call generic_make_request directly
  1144. * as that will be queued in __generic_make_request
  1145. * and subsequent mempool_alloc might block
1146. * waiting for it. So hand bio over to raid10d.
  1147. */
  1148. reschedule_retry(r10_bio);
  1149. r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  1150. r10_bio->master_bio = bio;
  1151. r10_bio->sectors = bio_sectors(bio) - sectors_handled;
  1152. r10_bio->state = 0;
  1153. r10_bio->mddev = mddev;
  1154. r10_bio->sector = bio->bi_iter.bi_sector +
  1155. sectors_handled;
  1156. goto read_again;
  1157. } else
  1158. generic_make_request(read_bio);
  1159. return;
  1160. }
  1161. /*
  1162. * WRITE:
  1163. */
  1164. if (conf->pending_count >= max_queued_requests) {
  1165. md_wakeup_thread(mddev->thread);
  1166. wait_event(conf->wait_barrier,
  1167. conf->pending_count < max_queued_requests);
  1168. }
  1169. /* first select target devices under rcu_lock and
  1170. * inc refcount on their rdev. Record them by setting
  1171. * bios[x] to bio
  1172. * If there are known/acknowledged bad blocks on any device
  1173. * on which we have seen a write error, we want to avoid
  1174. * writing to those blocks. This potentially requires several
  1175. * writes to write around the bad blocks. Each set of writes
  1176. * gets its own r10_bio with a set of bios attached. The number
1177. of r10_bios is recorded in bio->bi_phys_segments just as with
  1178. * the read case.
  1179. */
  1180. r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
  1181. raid10_find_phys(conf, r10_bio);
  1182. retry_write:
  1183. blocked_rdev = NULL;
  1184. rcu_read_lock();
  1185. max_sectors = r10_bio->sectors;
  1186. for (i = 0; i < conf->copies; i++) {
  1187. int d = r10_bio->devs[i].devnum;
  1188. struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
  1189. struct md_rdev *rrdev = rcu_dereference(
  1190. conf->mirrors[d].replacement);
  1191. if (rdev == rrdev)
  1192. rrdev = NULL;
  1193. if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  1194. atomic_inc(&rdev->nr_pending);
  1195. blocked_rdev = rdev;
  1196. break;
  1197. }
  1198. if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
  1199. atomic_inc(&rrdev->nr_pending);
  1200. blocked_rdev = rrdev;
  1201. break;
  1202. }
  1203. if (rdev && (test_bit(Faulty, &rdev->flags)
  1204. || test_bit(Unmerged, &rdev->flags)))
  1205. rdev = NULL;
  1206. if (rrdev && (test_bit(Faulty, &rrdev->flags)
  1207. || test_bit(Unmerged, &rrdev->flags)))
  1208. rrdev = NULL;
  1209. r10_bio->devs[i].bio = NULL;
  1210. r10_bio->devs[i].repl_bio = NULL;
  1211. if (!rdev && !rrdev) {
  1212. set_bit(R10BIO_Degraded, &r10_bio->state);
  1213. continue;
  1214. }
  1215. if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
  1216. sector_t first_bad;
  1217. sector_t dev_sector = r10_bio->devs[i].addr;
  1218. int bad_sectors;
  1219. int is_bad;
  1220. is_bad = is_badblock(rdev, dev_sector,
  1221. max_sectors,
  1222. &first_bad, &bad_sectors);
  1223. if (is_bad < 0) {
  1224. /* Mustn't write here until the bad block
  1225. * is acknowledged
  1226. */
  1227. atomic_inc(&rdev->nr_pending);
  1228. set_bit(BlockedBadBlocks, &rdev->flags);
  1229. blocked_rdev = rdev;
  1230. break;
  1231. }
  1232. if (is_bad && first_bad <= dev_sector) {
  1233. /* Cannot write here at all */
  1234. bad_sectors -= (dev_sector - first_bad);
  1235. if (bad_sectors < max_sectors)
  1236. /* Mustn't write more than bad_sectors
  1237. * to other devices yet
  1238. */
  1239. max_sectors = bad_sectors;
  1240. /* We don't set R10BIO_Degraded as that
  1241. * only applies if the disk is missing,
  1242. * so it might be re-added, and we want to
  1243. * know to recover this chunk.
  1244. * In this case the device is here, and the
  1245. * fact that this chunk is not in-sync is
  1246. * recorded in the bad block log.
  1247. */
  1248. continue;
  1249. }
  1250. if (is_bad) {
  1251. int good_sectors = first_bad - dev_sector;
  1252. if (good_sectors < max_sectors)
  1253. max_sectors = good_sectors;
  1254. }
  1255. }
  1256. if (rdev) {
  1257. r10_bio->devs[i].bio = bio;
  1258. atomic_inc(&rdev->nr_pending);
  1259. }
  1260. if (rrdev) {
  1261. r10_bio->devs[i].repl_bio = bio;
  1262. atomic_inc(&rrdev->nr_pending);
  1263. }
  1264. }
  1265. rcu_read_unlock();
  1266. if (unlikely(blocked_rdev)) {
  1267. /* Have to wait for this device to get unblocked, then retry */
  1268. int j;
  1269. int d;
  1270. for (j = 0; j < i; j++) {
  1271. if (r10_bio->devs[j].bio) {
  1272. d = r10_bio->devs[j].devnum;
  1273. rdev_dec_pending(conf->mirrors[d].rdev, mddev);
  1274. }
  1275. if (r10_bio->devs[j].repl_bio) {
  1276. struct md_rdev *rdev;
  1277. d = r10_bio->devs[j].devnum;
  1278. rdev = conf->mirrors[d].replacement;
  1279. if (!rdev) {
  1280. /* Race with remove_disk */
  1281. smp_mb();
  1282. rdev = conf->mirrors[d].rdev;
  1283. }
  1284. rdev_dec_pending(rdev, mddev);
  1285. }
  1286. }
  1287. allow_barrier(conf);
  1288. md_wait_for_blocked_rdev(blocked_rdev, mddev);
  1289. wait_barrier(conf);
  1290. goto retry_write;
  1291. }
  1292. if (max_sectors < r10_bio->sectors) {
  1293. /* We are splitting this into multiple parts, so
  1294. * we need to prepare for allocating another r10_bio.
  1295. */
  1296. r10_bio->sectors = max_sectors;
  1297. spin_lock_irq(&conf->device_lock);
  1298. if (bio->bi_phys_segments == 0)
  1299. bio->bi_phys_segments = 2;
  1300. else
  1301. bio->bi_phys_segments++;
  1302. spin_unlock_irq(&conf->device_lock);
  1303. }
  1304. sectors_handled = r10_bio->sector + max_sectors -
  1305. bio->bi_iter.bi_sector;
  1306. atomic_set(&r10_bio->remaining, 1);
  1307. bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
  1308. for (i = 0; i < conf->copies; i++) {
  1309. struct bio *mbio;
  1310. int d = r10_bio->devs[i].devnum;
  1311. if (r10_bio->devs[i].bio) {
  1312. struct md_rdev *rdev = conf->mirrors[d].rdev;
  1313. mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1314. bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
  1315. max_sectors);
  1316. r10_bio->devs[i].bio = mbio;
  1317. mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
  1318. choose_data_offset(r10_bio,
  1319. rdev));
  1320. mbio->bi_bdev = rdev->bdev;
  1321. mbio->bi_end_io = raid10_end_write_request;
  1322. mbio->bi_rw =
  1323. WRITE | do_sync | do_fua | do_discard | do_same;
  1324. mbio->bi_private = r10_bio;
  1325. atomic_inc(&r10_bio->remaining);
  1326. cb = blk_check_plugged(raid10_unplug, mddev,
  1327. sizeof(*plug));
  1328. if (cb)
  1329. plug = container_of(cb, struct raid10_plug_cb,
  1330. cb);
  1331. else
  1332. plug = NULL;
  1333. spin_lock_irqsave(&conf->device_lock, flags);
  1334. if (plug) {
  1335. bio_list_add(&plug->pending, mbio);
  1336. plug->pending_cnt++;
  1337. } else {
  1338. bio_list_add(&conf->pending_bio_list, mbio);
  1339. conf->pending_count++;
  1340. }
  1341. spin_unlock_irqrestore(&conf->device_lock, flags);
  1342. if (!plug)
  1343. md_wakeup_thread(mddev->thread);
  1344. }
  1345. if (r10_bio->devs[i].repl_bio) {
  1346. struct md_rdev *rdev = conf->mirrors[d].replacement;
  1347. if (rdev == NULL) {
  1348. /* Replacement just got moved to main 'rdev' */
  1349. smp_mb();
  1350. rdev = conf->mirrors[d].rdev;
  1351. }
  1352. mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1353. bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
  1354. max_sectors);
  1355. r10_bio->devs[i].repl_bio = mbio;
  1356. mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
  1357. choose_data_offset(
  1358. r10_bio, rdev));
  1359. mbio->bi_bdev = rdev->bdev;
  1360. mbio->bi_end_io = raid10_end_write_request;
  1361. mbio->bi_rw =
  1362. WRITE | do_sync | do_fua | do_discard | do_same;
  1363. mbio->bi_private = r10_bio;
  1364. atomic_inc(&r10_bio->remaining);
  1365. spin_lock_irqsave(&conf->device_lock, flags);
  1366. bio_list_add(&conf->pending_bio_list, mbio);
  1367. conf->pending_count++;
  1368. spin_unlock_irqrestore(&conf->device_lock, flags);
  1369. if (!mddev_check_plugged(mddev))
  1370. md_wakeup_thread(mddev->thread);
  1371. }
  1372. }
  1373. /* Don't remove the bias on 'remaining' (one_write_done) until
  1374. * after checking if we need to go around again.
  1375. */
  1376. if (sectors_handled < bio_sectors(bio)) {
  1377. one_write_done(r10_bio);
  1378. /* We need another r10_bio. It has already been counted
  1379. * in bio->bi_phys_segments.
  1380. */
  1381. r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  1382. r10_bio->master_bio = bio;
  1383. r10_bio->sectors = bio_sectors(bio) - sectors_handled;
  1384. r10_bio->mddev = mddev;
  1385. r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
  1386. r10_bio->state = 0;
  1387. goto retry_write;
  1388. }
  1389. one_write_done(r10_bio);
  1390. }
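/*
 * Entry point for normal I/O: flush requests go straight to
 * md_flush_request(), everything else is split at chunk boundaries
 * where needed and passed to __make_request().
 */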
  1391. static void make_request(struct mddev *mddev, struct bio *bio)
  1392. {
  1393. struct r10conf *conf = mddev->private;
  1394. sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
  1395. int chunk_sects = chunk_mask + 1;
  1396. struct bio *split;
  1397. if (unlikely(bio->bi_rw & REQ_FLUSH)) {
  1398. md_flush_request(mddev, bio);
  1399. return;
  1400. }
  1401. md_write_start(mddev, bio);
  1402. do {
  1403. /*
  1404. * If this request crosses a chunk boundary, we need to split
  1405. * it.
  1406. */
  1407. if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
  1408. bio_sectors(bio) > chunk_sects
  1409. && (conf->geo.near_copies < conf->geo.raid_disks
  1410. || conf->prev.near_copies <
  1411. conf->prev.raid_disks))) {
  1412. split = bio_split(bio, chunk_sects -
  1413. (bio->bi_iter.bi_sector &
  1414. (chunk_sects - 1)),
  1415. GFP_NOIO, fs_bio_set);
  1416. bio_chain(split, bio);
  1417. } else {
  1418. split = bio;
  1419. }
  1420. __make_request(mddev, split);
  1421. } while (split != bio);
  1422. /* In case raid10d snuck in to freeze_array */
  1423. wake_up(&conf->wait_barrier);
  1424. }
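/*
 * Report the layout (chunk size, near/far/offset copies) and the
 * in-sync state of each member device, e.g. for /proc/mdstat.
 */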
  1425. static void status(struct seq_file *seq, struct mddev *mddev)
  1426. {
  1427. struct r10conf *conf = mddev->private;
  1428. int i;
  1429. if (conf->geo.near_copies < conf->geo.raid_disks)
  1430. seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
  1431. if (conf->geo.near_copies > 1)
  1432. seq_printf(seq, " %d near-copies", conf->geo.near_copies);
  1433. if (conf->geo.far_copies > 1) {
  1434. if (conf->geo.far_offset)
  1435. seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
  1436. else
  1437. seq_printf(seq, " %d far-copies", conf->geo.far_copies);
  1438. }
  1439. seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
  1440. conf->geo.raid_disks - mddev->degraded);
  1441. for (i = 0; i < conf->geo.raid_disks; i++)
  1442. seq_printf(seq, "%s",
  1443. conf->mirrors[i].rdev &&
  1444. test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
  1445. seq_printf(seq, "]");
  1446. }
  1447. /* check if there are enough drives for
1448. * every block to appear on at least one.
  1449. * Don't consider the device numbered 'ignore'
  1450. * as we might be about to remove it.
  1451. */
  1452. static int _enough(struct r10conf *conf, int previous, int ignore)
  1453. {
  1454. int first = 0;
  1455. int has_enough = 0;
  1456. int disks, ncopies;
  1457. if (previous) {
  1458. disks = conf->prev.raid_disks;
  1459. ncopies = conf->prev.near_copies;
  1460. } else {
  1461. disks = conf->geo.raid_disks;
  1462. ncopies = conf->geo.near_copies;
  1463. }
  1464. rcu_read_lock();
  1465. do {
  1466. int n = conf->copies;
  1467. int cnt = 0;
  1468. int this = first;
  1469. while (n--) {
  1470. struct md_rdev *rdev;
  1471. if (this != ignore &&
  1472. (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
  1473. test_bit(In_sync, &rdev->flags))
  1474. cnt++;
  1475. this = (this+1) % disks;
  1476. }
  1477. if (cnt == 0)
  1478. goto out;
  1479. first = (first + ncopies) % disks;
  1480. } while (first != 0);
  1481. has_enough = 1;
  1482. out:
  1483. rcu_read_unlock();
  1484. return has_enough;
  1485. }
  1486. static int enough(struct r10conf *conf, int ignore)
  1487. {
  1488. /* when calling 'enough', both 'prev' and 'geo' must
  1489. * be stable.
  1490. * This is ensured if ->reconfig_mutex or ->device_lock
  1491. * is held.
  1492. */
  1493. return _enough(conf, 0, ignore) &&
  1494. _enough(conf, 1, ignore);
  1495. }
  1496. static void error(struct mddev *mddev, struct md_rdev *rdev)
  1497. {
  1498. char b[BDEVNAME_SIZE];
  1499. struct r10conf *conf = mddev->private;
  1500. unsigned long flags;
  1501. /*
  1502. * If it is not operational, then we have already marked it as dead
1503. * else if it is the last working disk, ignore the error, let the
  1504. * next level up know.
  1505. * else mark the drive as failed
  1506. */
  1507. spin_lock_irqsave(&conf->device_lock, flags);
  1508. if (test_bit(In_sync, &rdev->flags)
  1509. && !enough(conf, rdev->raid_disk)) {
  1510. /*
  1511. * Don't fail the drive, just return an IO error.
  1512. */
  1513. spin_unlock_irqrestore(&conf->device_lock, flags);
  1514. return;
  1515. }
  1516. if (test_and_clear_bit(In_sync, &rdev->flags))
  1517. mddev->degraded++;
  1518. /*
  1519. * If recovery is running, make sure it aborts.
  1520. */
  1521. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1522. set_bit(Blocked, &rdev->flags);
  1523. set_bit(Faulty, &rdev->flags);
  1524. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1525. spin_unlock_irqrestore(&conf->device_lock, flags);
  1526. printk(KERN_ALERT
  1527. "md/raid10:%s: Disk failure on %s, disabling device.\n"
  1528. "md/raid10:%s: Operation continuing on %d devices.\n",
  1529. mdname(mddev), bdevname(rdev->bdev, b),
  1530. mdname(mddev), conf->geo.raid_disks - mddev->degraded);
  1531. }
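/* Dump the current mirror configuration to the log at KERN_DEBUG. */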
  1532. static void print_conf(struct r10conf *conf)
  1533. {
  1534. int i;
  1535. struct raid10_info *tmp;
  1536. printk(KERN_DEBUG "RAID10 conf printout:\n");
  1537. if (!conf) {
  1538. printk(KERN_DEBUG "(!conf)\n");
  1539. return;
  1540. }
  1541. printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
  1542. conf->geo.raid_disks);
  1543. for (i = 0; i < conf->geo.raid_disks; i++) {
  1544. char b[BDEVNAME_SIZE];
  1545. tmp = conf->mirrors + i;
  1546. if (tmp->rdev)
  1547. printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
  1548. i, !test_bit(In_sync, &tmp->rdev->flags),
  1549. !test_bit(Faulty, &tmp->rdev->flags),
  1550. bdevname(tmp->rdev->bdev,b));
  1551. }
  1552. }
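/*
 * Tear down per-sync state once a resync/recovery pass has finished
 * and release the resync buffer pool.
 */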
  1553. static void close_sync(struct r10conf *conf)
  1554. {
  1555. wait_barrier(conf);
  1556. allow_barrier(conf);
  1557. mempool_destroy(conf->r10buf_pool);
  1558. conf->r10buf_pool = NULL;
  1559. }
  1560. static int raid10_spare_active(struct mddev *mddev)
  1561. {
  1562. int i;
  1563. struct r10conf *conf = mddev->private;
  1564. struct raid10_info *tmp;
  1565. int count = 0;
  1566. unsigned long flags;
  1567. /*
  1568. * Find all non-in_sync disks within the RAID10 configuration
  1569. * and mark them in_sync
  1570. */
  1571. for (i = 0; i < conf->geo.raid_disks; i++) {
  1572. tmp = conf->mirrors + i;
  1573. if (tmp->replacement
  1574. && tmp->replacement->recovery_offset == MaxSector
  1575. && !test_bit(Faulty, &tmp->replacement->flags)
  1576. && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
  1577. /* Replacement has just become active */
  1578. if (!tmp->rdev
  1579. || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
  1580. count++;
  1581. if (tmp->rdev) {
  1582. /* Replaced device not technically faulty,
  1583. * but we need to be sure it gets removed
  1584. * and never re-added.
  1585. */
  1586. set_bit(Faulty, &tmp->rdev->flags);
  1587. sysfs_notify_dirent_safe(
  1588. tmp->rdev->sysfs_state);
  1589. }
  1590. sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
  1591. } else if (tmp->rdev
  1592. && tmp->rdev->recovery_offset == MaxSector
  1593. && !test_bit(Faulty, &tmp->rdev->flags)
  1594. && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
  1595. count++;
  1596. sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
  1597. }
  1598. }
  1599. spin_lock_irqsave(&conf->device_lock, flags);
  1600. mddev->degraded -= count;
  1601. spin_unlock_irqrestore(&conf->device_lock, flags);
  1602. print_conf(conf);
  1603. return count;
  1604. }
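/*
 * Hot-add a spare: choose a slot (preferring the device's saved slot)
 * and install the rdev either as a replacement or as a new mirror.
 */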
  1605. static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  1606. {
  1607. struct r10conf *conf = mddev->private;
  1608. int err = -EEXIST;
  1609. int mirror;
  1610. int first = 0;
  1611. int last = conf->geo.raid_disks - 1;
  1612. struct request_queue *q = bdev_get_queue(rdev->bdev);
  1613. if (mddev->recovery_cp < MaxSector)
  1614. /* only hot-add to in-sync arrays, as recovery is
  1615. * very different from resync
  1616. */
  1617. return -EBUSY;
  1618. if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
  1619. return -EINVAL;
  1620. if (rdev->raid_disk >= 0)
  1621. first = last = rdev->raid_disk;
  1622. if (q->merge_bvec_fn) {
  1623. set_bit(Unmerged, &rdev->flags);
  1624. mddev->merge_check_needed = 1;
  1625. }
  1626. if (rdev->saved_raid_disk >= first &&
  1627. conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
  1628. mirror = rdev->saved_raid_disk;
  1629. else
  1630. mirror = first;
  1631. for ( ; mirror <= last ; mirror++) {
  1632. struct raid10_info *p = &conf->mirrors[mirror];
  1633. if (p->recovery_disabled == mddev->recovery_disabled)
  1634. continue;
  1635. if (p->rdev) {
  1636. if (!test_bit(WantReplacement, &p->rdev->flags) ||
  1637. p->replacement != NULL)
  1638. continue;
  1639. clear_bit(In_sync, &rdev->flags);
  1640. set_bit(Replacement, &rdev->flags);
  1641. rdev->raid_disk = mirror;
  1642. err = 0;
  1643. if (mddev->gendisk)
  1644. disk_stack_limits(mddev->gendisk, rdev->bdev,
  1645. rdev->data_offset << 9);
  1646. conf->fullsync = 1;
  1647. rcu_assign_pointer(p->replacement, rdev);
  1648. break;
  1649. }
  1650. if (mddev->gendisk)
  1651. disk_stack_limits(mddev->gendisk, rdev->bdev,
  1652. rdev->data_offset << 9);
  1653. p->head_position = 0;
  1654. p->recovery_disabled = mddev->recovery_disabled - 1;
  1655. rdev->raid_disk = mirror;
  1656. err = 0;
  1657. if (rdev->saved_raid_disk != mirror)
  1658. conf->fullsync = 1;
  1659. rcu_assign_pointer(p->rdev, rdev);
  1660. break;
  1661. }
  1662. if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
  1663. /* Some requests might not have seen this new
  1664. * merge_bvec_fn. We must wait for them to complete
  1665. * before merging the device fully.
  1666. * First we make sure any code which has tested
  1667. * our function has submitted the request, then
  1668. * we wait for all outstanding requests to complete.
  1669. */
  1670. synchronize_sched();
  1671. freeze_array(conf, 0);
  1672. unfreeze_array(conf);
  1673. clear_bit(Unmerged, &rdev->flags);
  1674. }
  1675. md_integrity_add_rdev(rdev, mddev);
  1676. if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
  1677. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
  1678. print_conf(conf);
  1679. return err;
  1680. }
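/*
 * Hot-remove a device. Fails with -EBUSY while the device is in_sync
 * or has I/O pending, and a non-faulty device is only removed when
 * recovering it is no longer possible.
 */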
  1681. static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  1682. {
  1683. struct r10conf *conf = mddev->private;
  1684. int err = 0;
  1685. int number = rdev->raid_disk;
  1686. struct md_rdev **rdevp;
  1687. struct raid10_info *p = conf->mirrors + number;
  1688. print_conf(conf);
  1689. if (rdev == p->rdev)
  1690. rdevp = &p->rdev;
  1691. else if (rdev == p->replacement)
  1692. rdevp = &p->replacement;
  1693. else
  1694. return 0;
  1695. if (test_bit(In_sync, &rdev->flags) ||
  1696. atomic_read(&rdev->nr_pending)) {
  1697. err = -EBUSY;
  1698. goto abort;
  1699. }
  1700. /* Only remove faulty devices if recovery
  1701. * is not possible.
  1702. */
  1703. if (!test_bit(Faulty, &rdev->flags) &&
  1704. mddev->recovery_disabled != p->recovery_disabled &&
  1705. (!p->replacement || p->replacement == rdev) &&
  1706. number < conf->geo.raid_disks &&
  1707. enough(conf, -1)) {
  1708. err = -EBUSY;
  1709. goto abort;
  1710. }
  1711. *rdevp = NULL;
  1712. synchronize_rcu();
  1713. if (atomic_read(&rdev->nr_pending)) {
  1714. /* lost the race, try later */
  1715. err = -EBUSY;
  1716. *rdevp = rdev;
  1717. goto abort;
  1718. } else if (p->replacement) {
  1719. /* We must have just cleared 'rdev' */
  1720. p->rdev = p->replacement;
  1721. clear_bit(Replacement, &p->replacement->flags);
  1722. smp_mb(); /* Make sure other CPUs may see both as identical
  1723. * but will never see neither -- if they are careful.
  1724. */
  1725. p->replacement = NULL;
  1726. clear_bit(WantReplacement, &rdev->flags);
  1727. } else
1728. /* We might have just removed the Replacement as faulty.
  1729. * Clear the flag just in case
  1730. */
  1731. clear_bit(WantReplacement, &rdev->flags);
  1732. err = md_integrity_register(mddev);
  1733. abort:
  1734. print_conf(conf);
  1735. return err;
  1736. }
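/* Completion handler for the read side of a resync/recovery request. */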
  1737. static void end_sync_read(struct bio *bio, int error)
  1738. {
  1739. struct r10bio *r10_bio = bio->bi_private;
  1740. struct r10conf *conf = r10_bio->mddev->private;
  1741. int d;
  1742. if (bio == r10_bio->master_bio) {
  1743. /* this is a reshape read */
  1744. d = r10_bio->read_slot; /* really the read dev */
  1745. } else
  1746. d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
  1747. if (test_bit(BIO_UPTODATE, &bio->bi_flags))
  1748. set_bit(R10BIO_Uptodate, &r10_bio->state);
  1749. else
  1750. /* The write handler will notice the lack of
  1751. * R10BIO_Uptodate and record any errors etc
  1752. */
  1753. atomic_add(r10_bio->sectors,
  1754. &conf->mirrors[d].rdev->corrected_errors);
  1755. /* for reconstruct, we always reschedule after a read.
  1756. * for resync, only after all reads
  1757. */
  1758. rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
  1759. if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
  1760. atomic_dec_and_test(&r10_bio->remaining)) {
  1761. /* we have read all the blocks,
  1762. * do the comparison in process context in raid10d
  1763. */
  1764. reschedule_retry(r10_bio);
  1765. }
  1766. }
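/*
 * Drop one reference on a sync r10_bio, following the chain of
 * recovery r10_bios linked through master_bio, and finish the
 * operation when the last reference goes away.
 */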
  1767. static void end_sync_request(struct r10bio *r10_bio)
  1768. {
  1769. struct mddev *mddev = r10_bio->mddev;
  1770. while (atomic_dec_and_test(&r10_bio->remaining)) {
  1771. if (r10_bio->master_bio == NULL) {
  1772. /* the primary of several recovery bios */
  1773. sector_t s = r10_bio->sectors;
  1774. if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
  1775. test_bit(R10BIO_WriteError, &r10_bio->state))
  1776. reschedule_retry(r10_bio);
  1777. else
  1778. put_buf(r10_bio);
  1779. md_done_sync(mddev, s, 1);
  1780. break;
  1781. } else {
  1782. struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
  1783. if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
  1784. test_bit(R10BIO_WriteError, &r10_bio->state))
  1785. reschedule_retry(r10_bio);
  1786. else
  1787. put_buf(r10_bio);
  1788. r10_bio = r10_bio2;
  1789. }
  1790. }
  1791. }
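/* Completion handler for the write side of a resync/recovery request. */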
  1792. static void end_sync_write(struct bio *bio, int error)
  1793. {
  1794. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1795. struct r10bio *r10_bio = bio->bi_private;
  1796. struct mddev *mddev = r10_bio->mddev;
  1797. struct r10conf *conf = mddev->private;
  1798. int d;
  1799. sector_t first_bad;
  1800. int bad_sectors;
  1801. int slot;
  1802. int repl;
  1803. struct md_rdev *rdev = NULL;
  1804. d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
  1805. if (repl)
  1806. rdev = conf->mirrors[d].replacement;
  1807. else
  1808. rdev = conf->mirrors[d].rdev;
  1809. if (!uptodate) {
  1810. if (repl)
  1811. md_error(mddev, rdev);
  1812. else {
  1813. set_bit(WriteErrorSeen, &rdev->flags);
  1814. if (!test_and_set_bit(WantReplacement, &rdev->flags))
  1815. set_bit(MD_RECOVERY_NEEDED,
  1816. &rdev->mddev->recovery);
  1817. set_bit(R10BIO_WriteError, &r10_bio->state);
  1818. }
  1819. } else if (is_badblock(rdev,
  1820. r10_bio->devs[slot].addr,
  1821. r10_bio->sectors,
  1822. &first_bad, &bad_sectors))
  1823. set_bit(R10BIO_MadeGood, &r10_bio->state);
  1824. rdev_dec_pending(rdev, mddev);
  1825. end_sync_request(r10_bio);
  1826. }
  1827. /*
1828. * Note: sync and recovery are handled very differently for raid10
  1829. * This code is for resync.
  1830. * For resync, we read through virtual addresses and read all blocks.
  1831. * If there is any error, we schedule a write. The lowest numbered
  1832. * drive is authoritative.
  1833. * However requests come for physical address, so we need to map.
  1834. * For every physical address there are raid_disks/copies virtual addresses,
1835. * which is always at least one, but is not necessarily an integer.
  1836. * This means that a physical address can span multiple chunks, so we may
  1837. * have to submit multiple io requests for a single sync request.
  1838. */
  1839. /*
  1840. * We check if all blocks are in-sync and only write to blocks that
  1841. * aren't in sync
  1842. */
  1843. static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
  1844. {
  1845. struct r10conf *conf = mddev->private;
  1846. int i, first;
  1847. struct bio *tbio, *fbio;
  1848. int vcnt;
  1849. atomic_set(&r10_bio->remaining, 1);
  1850. /* find the first device with a block */
  1851. for (i=0; i<conf->copies; i++)
  1852. if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
  1853. break;
  1854. if (i == conf->copies)
  1855. goto done;
  1856. first = i;
  1857. fbio = r10_bio->devs[i].bio;
  1858. vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
  1859. /* now find blocks with errors */
  1860. for (i=0 ; i < conf->copies ; i++) {
  1861. int j, d;
  1862. tbio = r10_bio->devs[i].bio;
  1863. if (tbio->bi_end_io != end_sync_read)
  1864. continue;
  1865. if (i == first)
  1866. continue;
  1867. if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
  1868. /* We know that the bi_io_vec layout is the same for
  1869. * both 'first' and 'i', so we just compare them.
  1870. * All vec entries are PAGE_SIZE;
  1871. */
  1872. int sectors = r10_bio->sectors;
  1873. for (j = 0; j < vcnt; j++) {
  1874. int len = PAGE_SIZE;
  1875. if (sectors < (len / 512))
  1876. len = sectors * 512;
  1877. if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
  1878. page_address(tbio->bi_io_vec[j].bv_page),
  1879. len))
  1880. break;
  1881. sectors -= len/512;
  1882. }
  1883. if (j == vcnt)
  1884. continue;
  1885. atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
  1886. if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
  1887. /* Don't fix anything. */
  1888. continue;
  1889. }
  1890. /* Ok, we need to write this bio, either to correct an
  1891. * inconsistency or to correct an unreadable block.
  1892. * First we need to fixup bv_offset, bv_len and
  1893. * bi_vecs, as the read request might have corrupted these
  1894. */
  1895. bio_reset(tbio);
  1896. tbio->bi_vcnt = vcnt;
  1897. tbio->bi_iter.bi_size = r10_bio->sectors << 9;
  1898. tbio->bi_rw = WRITE;
  1899. tbio->bi_private = r10_bio;
  1900. tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
  1901. tbio->bi_end_io = end_sync_write;
  1902. bio_copy_data(tbio, fbio);
  1903. d = r10_bio->devs[i].devnum;
  1904. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  1905. atomic_inc(&r10_bio->remaining);
  1906. md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
  1907. tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
  1908. tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
  1909. generic_make_request(tbio);
  1910. }
  1911. /* Now write out to any replacement devices
  1912. * that are active
  1913. */
  1914. for (i = 0; i < conf->copies; i++) {
  1915. int d;
  1916. tbio = r10_bio->devs[i].repl_bio;
  1917. if (!tbio || !tbio->bi_end_io)
  1918. continue;
  1919. if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
  1920. && r10_bio->devs[i].bio != fbio)
  1921. bio_copy_data(tbio, fbio);
  1922. d = r10_bio->devs[i].devnum;
  1923. atomic_inc(&r10_bio->remaining);
  1924. md_sync_acct(conf->mirrors[d].replacement->bdev,
  1925. bio_sectors(tbio));
  1926. generic_make_request(tbio);
  1927. }
  1928. done:
  1929. if (atomic_dec_and_test(&r10_bio->remaining)) {
  1930. md_done_sync(mddev, r10_bio->sectors, 1);
  1931. put_buf(r10_bio);
  1932. }
  1933. }
  1934. /*
  1935. * Now for the recovery code.
  1936. * Recovery happens across physical sectors.
1937. * We recover all non-in_sync drives by finding the virtual address of
  1938. * each, and then choose a working drive that also has that virt address.
  1939. * There is a separate r10_bio for each non-in_sync drive.
1940. * Only the first two slots are in use: the first for reading,
1941. * the second for writing.
  1942. *
  1943. */
  1944. static void fix_recovery_read_error(struct r10bio *r10_bio)
  1945. {
  1946. /* We got a read error during recovery.
  1947. * We repeat the read in smaller page-sized sections.
  1948. * If a read succeeds, write it to the new device or record
  1949. * a bad block if we cannot.
  1950. * If a read fails, record a bad block on both old and
  1951. * new devices.
  1952. */
  1953. struct mddev *mddev = r10_bio->mddev;
  1954. struct r10conf *conf = mddev->private;
  1955. struct bio *bio = r10_bio->devs[0].bio;
  1956. sector_t sect = 0;
  1957. int sectors = r10_bio->sectors;
  1958. int idx = 0;
  1959. int dr = r10_bio->devs[0].devnum;
  1960. int dw = r10_bio->devs[1].devnum;
  1961. while (sectors) {
  1962. int s = sectors;
  1963. struct md_rdev *rdev;
  1964. sector_t addr;
  1965. int ok;
  1966. if (s > (PAGE_SIZE>>9))
  1967. s = PAGE_SIZE >> 9;
  1968. rdev = conf->mirrors[dr].rdev;
  1969. addr = r10_bio->devs[0].addr + sect,
  1970. ok = sync_page_io(rdev,
  1971. addr,
  1972. s << 9,
  1973. bio->bi_io_vec[idx].bv_page,
  1974. READ, false);
  1975. if (ok) {
  1976. rdev = conf->mirrors[dw].rdev;
  1977. addr = r10_bio->devs[1].addr + sect;
  1978. ok = sync_page_io(rdev,
  1979. addr,
  1980. s << 9,
  1981. bio->bi_io_vec[idx].bv_page,
  1982. WRITE, false);
  1983. if (!ok) {
  1984. set_bit(WriteErrorSeen, &rdev->flags);
  1985. if (!test_and_set_bit(WantReplacement,
  1986. &rdev->flags))
  1987. set_bit(MD_RECOVERY_NEEDED,
  1988. &rdev->mddev->recovery);
  1989. }
  1990. }
  1991. if (!ok) {
  1992. /* We don't worry if we cannot set a bad block -
  1993. * it really is bad so there is no loss in not
  1994. * recording it yet
  1995. */
  1996. rdev_set_badblocks(rdev, addr, s, 0);
  1997. if (rdev != conf->mirrors[dw].rdev) {
  1998. /* need bad block on destination too */
  1999. struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
  2000. addr = r10_bio->devs[1].addr + sect;
  2001. ok = rdev_set_badblocks(rdev2, addr, s, 0);
  2002. if (!ok) {
  2003. /* just abort the recovery */
  2004. printk(KERN_NOTICE
  2005. "md/raid10:%s: recovery aborted"
  2006. " due to read error\n",
  2007. mdname(mddev));
  2008. conf->mirrors[dw].recovery_disabled
  2009. = mddev->recovery_disabled;
  2010. set_bit(MD_RECOVERY_INTR,
  2011. &mddev->recovery);
  2012. break;
  2013. }
  2014. }
  2015. }
  2016. sectors -= s;
  2017. sect += s;
  2018. idx++;
  2019. }
  2020. }
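/*
 * Submit the write(s) for one recovery r10_bio. If the preceding read
 * was not successful the data is instead recovered page by page via
 * fix_recovery_read_error().
 */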
  2021. static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
  2022. {
  2023. struct r10conf *conf = mddev->private;
  2024. int d;
  2025. struct bio *wbio, *wbio2;
  2026. if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
  2027. fix_recovery_read_error(r10_bio);
  2028. end_sync_request(r10_bio);
  2029. return;
  2030. }
  2031. /*
  2032. * share the pages with the first bio
  2033. * and submit the write request
  2034. */
  2035. d = r10_bio->devs[1].devnum;
  2036. wbio = r10_bio->devs[1].bio;
  2037. wbio2 = r10_bio->devs[1].repl_bio;
  2038. /* Need to test wbio2->bi_end_io before we call
  2039. * generic_make_request as if the former is NULL,
  2040. * the latter is free to free wbio2.
  2041. */
  2042. if (wbio2 && !wbio2->bi_end_io)
  2043. wbio2 = NULL;
  2044. if (wbio->bi_end_io) {
  2045. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  2046. md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
  2047. generic_make_request(wbio);
  2048. }
  2049. if (wbio2) {
  2050. atomic_inc(&conf->mirrors[d].replacement->nr_pending);
  2051. md_sync_acct(conf->mirrors[d].replacement->bdev,
  2052. bio_sectors(wbio2));
  2053. generic_make_request(wbio2);
  2054. }
  2055. }
  2056. /*
  2057. * Used by fix_read_error() to decay the per rdev read_errors.
  2058. * We halve the read error count for every hour that has elapsed
  2059. * since the last recorded read error.
  2060. *
  2061. */
  2062. static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
  2063. {
  2064. struct timespec cur_time_mon;
  2065. unsigned long hours_since_last;
  2066. unsigned int read_errors = atomic_read(&rdev->read_errors);
  2067. ktime_get_ts(&cur_time_mon);
  2068. if (rdev->last_read_error.tv_sec == 0 &&
  2069. rdev->last_read_error.tv_nsec == 0) {
  2070. /* first time we've seen a read error */
  2071. rdev->last_read_error = cur_time_mon;
  2072. return;
  2073. }
  2074. hours_since_last = (cur_time_mon.tv_sec -
  2075. rdev->last_read_error.tv_sec) / 3600;
  2076. rdev->last_read_error = cur_time_mon;
  2077. /*
  2078. * if hours_since_last is > the number of bits in read_errors
  2079. * just set read errors to 0. We do this to avoid
  2080. * overflowing the shift of read_errors by hours_since_last.
  2081. */
  2082. if (hours_since_last >= 8 * sizeof(read_errors))
  2083. atomic_set(&rdev->read_errors, 0);
  2084. else
  2085. atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
  2086. }
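/*
 * Synchronous page I/O helper used when fixing read errors: returns 1
 * on success, -1 if the range overlaps a known bad block so the I/O is
 * not attempted, and 0 on failure (after recording a bad block or
 * failing the device).
 */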
  2087. static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
  2088. int sectors, struct page *page, int rw)
  2089. {
  2090. sector_t first_bad;
  2091. int bad_sectors;
  2092. if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
  2093. && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
  2094. return -1;
  2095. if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
  2096. /* success */
  2097. return 1;
  2098. if (rw == WRITE) {
  2099. set_bit(WriteErrorSeen, &rdev->flags);
  2100. if (!test_and_set_bit(WantReplacement, &rdev->flags))
  2101. set_bit(MD_RECOVERY_NEEDED,
  2102. &rdev->mddev->recovery);
  2103. }
  2104. /* need to record an error - either for the block or the device */
  2105. if (!rdev_set_badblocks(rdev, sector, sectors, 0))
  2106. md_error(rdev->mddev, rdev);
  2107. return 0;
  2108. }
  2109. /*
  2110. * This is a kernel thread which:
  2111. *
  2112. * 1. Retries failed read operations on working mirrors.
2113. * 2. Updates the raid superblock when problems are encountered.
  2114. * 3. Performs writes following reads for array synchronising.
  2115. */
  2116. static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
  2117. {
  2118. int sect = 0; /* Offset from r10_bio->sector */
  2119. int sectors = r10_bio->sectors;
  2120. struct md_rdev*rdev;
  2121. int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
  2122. int d = r10_bio->devs[r10_bio->read_slot].devnum;
  2123. /* still own a reference to this rdev, so it cannot
  2124. * have been cleared recently.
  2125. */
  2126. rdev = conf->mirrors[d].rdev;
  2127. if (test_bit(Faulty, &rdev->flags))
  2128. /* drive has already been failed, just ignore any
  2129. more fix_read_error() attempts */
  2130. return;
  2131. check_decay_read_errors(mddev, rdev);
  2132. atomic_inc(&rdev->read_errors);
  2133. if (atomic_read(&rdev->read_errors) > max_read_errors) {
  2134. char b[BDEVNAME_SIZE];
  2135. bdevname(rdev->bdev, b);
  2136. printk(KERN_NOTICE
  2137. "md/raid10:%s: %s: Raid device exceeded "
  2138. "read_error threshold [cur %d:max %d]\n",
  2139. mdname(mddev), b,
  2140. atomic_read(&rdev->read_errors), max_read_errors);
  2141. printk(KERN_NOTICE
  2142. "md/raid10:%s: %s: Failing raid device\n",
  2143. mdname(mddev), b);
  2144. md_error(mddev, conf->mirrors[d].rdev);
  2145. r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
  2146. return;
  2147. }
  2148. while(sectors) {
  2149. int s = sectors;
  2150. int sl = r10_bio->read_slot;
  2151. int success = 0;
  2152. int start;
  2153. if (s > (PAGE_SIZE>>9))
  2154. s = PAGE_SIZE >> 9;
  2155. rcu_read_lock();
  2156. do {
  2157. sector_t first_bad;
  2158. int bad_sectors;
  2159. d = r10_bio->devs[sl].devnum;
  2160. rdev = rcu_dereference(conf->mirrors[d].rdev);
  2161. if (rdev &&
  2162. !test_bit(Unmerged, &rdev->flags) &&
  2163. test_bit(In_sync, &rdev->flags) &&
  2164. is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
  2165. &first_bad, &bad_sectors) == 0) {
  2166. atomic_inc(&rdev->nr_pending);
  2167. rcu_read_unlock();
  2168. success = sync_page_io(rdev,
  2169. r10_bio->devs[sl].addr +
  2170. sect,
  2171. s<<9,
  2172. conf->tmppage, READ, false);
  2173. rdev_dec_pending(rdev, mddev);
  2174. rcu_read_lock();
  2175. if (success)
  2176. break;
  2177. }
  2178. sl++;
  2179. if (sl == conf->copies)
  2180. sl = 0;
  2181. } while (!success && sl != r10_bio->read_slot);
  2182. rcu_read_unlock();
  2183. if (!success) {
  2184. /* Cannot read from anywhere, just mark the block
  2185. * as bad on the first device to discourage future
  2186. * reads.
  2187. */
  2188. int dn = r10_bio->devs[r10_bio->read_slot].devnum;
  2189. rdev = conf->mirrors[dn].rdev;
  2190. if (!rdev_set_badblocks(
  2191. rdev,
  2192. r10_bio->devs[r10_bio->read_slot].addr
  2193. + sect,
  2194. s, 0)) {
  2195. md_error(mddev, rdev);
  2196. r10_bio->devs[r10_bio->read_slot].bio
  2197. = IO_BLOCKED;
  2198. }
  2199. break;
  2200. }
  2201. start = sl;
  2202. /* write it back and re-read */
  2203. rcu_read_lock();
  2204. while (sl != r10_bio->read_slot) {
  2205. char b[BDEVNAME_SIZE];
  2206. if (sl==0)
  2207. sl = conf->copies;
  2208. sl--;
  2209. d = r10_bio->devs[sl].devnum;
  2210. rdev = rcu_dereference(conf->mirrors[d].rdev);
  2211. if (!rdev ||
  2212. test_bit(Unmerged, &rdev->flags) ||
  2213. !test_bit(In_sync, &rdev->flags))
  2214. continue;
  2215. atomic_inc(&rdev->nr_pending);
  2216. rcu_read_unlock();
  2217. if (r10_sync_page_io(rdev,
  2218. r10_bio->devs[sl].addr +
  2219. sect,
  2220. s, conf->tmppage, WRITE)
  2221. == 0) {
  2222. /* Well, this device is dead */
  2223. printk(KERN_NOTICE
  2224. "md/raid10:%s: read correction "
  2225. "write failed"
  2226. " (%d sectors at %llu on %s)\n",
  2227. mdname(mddev), s,
  2228. (unsigned long long)(
  2229. sect +
  2230. choose_data_offset(r10_bio,
  2231. rdev)),
  2232. bdevname(rdev->bdev, b));
  2233. printk(KERN_NOTICE "md/raid10:%s: %s: failing "
  2234. "drive\n",
  2235. mdname(mddev),
  2236. bdevname(rdev->bdev, b));
  2237. }
  2238. rdev_dec_pending(rdev, mddev);
  2239. rcu_read_lock();
  2240. }
  2241. sl = start;
  2242. while (sl != r10_bio->read_slot) {
  2243. char b[BDEVNAME_SIZE];
  2244. if (sl==0)
  2245. sl = conf->copies;
  2246. sl--;
  2247. d = r10_bio->devs[sl].devnum;
  2248. rdev = rcu_dereference(conf->mirrors[d].rdev);
  2249. if (!rdev ||
  2250. !test_bit(In_sync, &rdev->flags))
  2251. continue;
  2252. atomic_inc(&rdev->nr_pending);
  2253. rcu_read_unlock();
  2254. switch (r10_sync_page_io(rdev,
  2255. r10_bio->devs[sl].addr +
  2256. sect,
  2257. s, conf->tmppage,
  2258. READ)) {
  2259. case 0:
  2260. /* Well, this device is dead */
  2261. printk(KERN_NOTICE
  2262. "md/raid10:%s: unable to read back "
  2263. "corrected sectors"
  2264. " (%d sectors at %llu on %s)\n",
  2265. mdname(mddev), s,
  2266. (unsigned long long)(
  2267. sect +
  2268. choose_data_offset(r10_bio, rdev)),
  2269. bdevname(rdev->bdev, b));
  2270. printk(KERN_NOTICE "md/raid10:%s: %s: failing "
  2271. "drive\n",
  2272. mdname(mddev),
  2273. bdevname(rdev->bdev, b));
  2274. break;
  2275. case 1:
  2276. printk(KERN_INFO
  2277. "md/raid10:%s: read error corrected"
  2278. " (%d sectors at %llu on %s)\n",
  2279. mdname(mddev), s,
  2280. (unsigned long long)(
  2281. sect +
  2282. choose_data_offset(r10_bio, rdev)),
  2283. bdevname(rdev->bdev, b));
  2284. atomic_add(s, &rdev->corrected_errors);
  2285. }
  2286. rdev_dec_pending(rdev, mddev);
  2287. rcu_read_lock();
  2288. }
  2289. rcu_read_unlock();
  2290. sectors -= s;
  2291. sect += s;
  2292. }
  2293. }
  2294. static int narrow_write_error(struct r10bio *r10_bio, int i)
  2295. {
  2296. struct bio *bio = r10_bio->master_bio;
  2297. struct mddev *mddev = r10_bio->mddev;
  2298. struct r10conf *conf = mddev->private;
  2299. struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
  2300. /* bio has the data to be written to slot 'i' where
  2301. * we just recently had a write error.
  2302. * We repeatedly clone the bio and trim down to one block,
  2303. * then try the write. Where the write fails we record
  2304. * a bad block.
  2305. * It is conceivable that the bio doesn't exactly align with
  2306. * blocks. We must handle this.
  2307. *
  2308. * We currently own a reference to the rdev.
  2309. */
  2310. int block_sectors;
  2311. sector_t sector;
  2312. int sectors;
  2313. int sect_to_write = r10_bio->sectors;
  2314. int ok = 1;
  2315. if (rdev->badblocks.shift < 0)
  2316. return 0;
  2317. block_sectors = roundup(1 << rdev->badblocks.shift,
  2318. bdev_logical_block_size(rdev->bdev) >> 9);
  2319. sector = r10_bio->sector;
  2320. sectors = ((r10_bio->sector + block_sectors)
  2321. & ~(sector_t)(block_sectors - 1))
  2322. - sector;
  2323. while (sect_to_write) {
  2324. struct bio *wbio;
  2325. if (sectors > sect_to_write)
  2326. sectors = sect_to_write;
  2327. /* Write at 'sector' for 'sectors' */
  2328. wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  2329. bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
  2330. wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
  2331. choose_data_offset(r10_bio, rdev) +
  2332. (sector - r10_bio->sector));
  2333. wbio->bi_bdev = rdev->bdev;
  2334. if (submit_bio_wait(WRITE, wbio) == 0)
  2335. /* Failure! */
  2336. ok = rdev_set_badblocks(rdev, sector,
  2337. sectors, 0)
  2338. && ok;
  2339. bio_put(wbio);
  2340. sect_to_write -= sectors;
  2341. sector += sectors;
  2342. sectors = block_sectors;
  2343. }
  2344. return ok;
  2345. }
  2346. static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
  2347. {
  2348. int slot = r10_bio->read_slot;
  2349. struct bio *bio;
  2350. struct r10conf *conf = mddev->private;
  2351. struct md_rdev *rdev = r10_bio->devs[slot].rdev;
  2352. char b[BDEVNAME_SIZE];
  2353. unsigned long do_sync;
  2354. int max_sectors;
  2355. /* we got a read error. Maybe the drive is bad. Maybe just
  2356. * the block and we can fix it.
  2357. * We freeze all other IO, and try reading the block from
  2358. * other devices. When we find one, we re-write
2359. * and check whether that fixes the read error.
  2360. * This is all done synchronously while the array is
  2361. * frozen.
  2362. */
  2363. bio = r10_bio->devs[slot].bio;
  2364. bdevname(bio->bi_bdev, b);
  2365. bio_put(bio);
  2366. r10_bio->devs[slot].bio = NULL;
  2367. if (mddev->ro == 0) {
  2368. freeze_array(conf, 1);
  2369. fix_read_error(conf, mddev, r10_bio);
  2370. unfreeze_array(conf);
  2371. } else
  2372. r10_bio->devs[slot].bio = IO_BLOCKED;
  2373. rdev_dec_pending(rdev, mddev);
  2374. read_more:
  2375. rdev = read_balance(conf, r10_bio, &max_sectors);
  2376. if (rdev == NULL) {
  2377. printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
  2378. " read error for block %llu\n",
  2379. mdname(mddev), b,
  2380. (unsigned long long)r10_bio->sector);
  2381. raid_end_bio_io(r10_bio);
  2382. return;
  2383. }
  2384. do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
  2385. slot = r10_bio->read_slot;
  2386. printk_ratelimited(
  2387. KERN_ERR
  2388. "md/raid10:%s: %s: redirecting "
  2389. "sector %llu to another mirror\n",
  2390. mdname(mddev),
  2391. bdevname(rdev->bdev, b),
  2392. (unsigned long long)r10_bio->sector);
  2393. bio = bio_clone_mddev(r10_bio->master_bio,
  2394. GFP_NOIO, mddev);
  2395. bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
  2396. r10_bio->devs[slot].bio = bio;
  2397. r10_bio->devs[slot].rdev = rdev;
  2398. bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
  2399. + choose_data_offset(r10_bio, rdev);
  2400. bio->bi_bdev = rdev->bdev;
  2401. bio->bi_rw = READ | do_sync;
  2402. bio->bi_private = r10_bio;
  2403. bio->bi_end_io = raid10_end_read_request;
  2404. if (max_sectors < r10_bio->sectors) {
  2405. /* Drat - have to split this up more */
  2406. struct bio *mbio = r10_bio->master_bio;
  2407. int sectors_handled =
  2408. r10_bio->sector + max_sectors
  2409. - mbio->bi_iter.bi_sector;
  2410. r10_bio->sectors = max_sectors;
  2411. spin_lock_irq(&conf->device_lock);
  2412. if (mbio->bi_phys_segments == 0)
  2413. mbio->bi_phys_segments = 2;
  2414. else
  2415. mbio->bi_phys_segments++;
  2416. spin_unlock_irq(&conf->device_lock);
  2417. generic_make_request(bio);
  2418. r10_bio = mempool_alloc(conf->r10bio_pool,
  2419. GFP_NOIO);
  2420. r10_bio->master_bio = mbio;
  2421. r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
  2422. r10_bio->state = 0;
  2423. set_bit(R10BIO_ReadError,
  2424. &r10_bio->state);
  2425. r10_bio->mddev = mddev;
  2426. r10_bio->sector = mbio->bi_iter.bi_sector
  2427. + sectors_handled;
  2428. goto read_more;
  2429. } else
  2430. generic_make_request(bio);
  2431. }
  2432. static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
  2433. {
  2434. /* Some sort of write request has finished and it
  2435. * succeeded in writing where we thought there was a
  2436. * bad block. So forget the bad block.
2437. * Or possibly it failed and we need to record
  2438. * a bad block.
  2439. */
  2440. int m;
  2441. struct md_rdev *rdev;
  2442. if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
  2443. test_bit(R10BIO_IsRecover, &r10_bio->state)) {
  2444. for (m = 0; m < conf->copies; m++) {
  2445. int dev = r10_bio->devs[m].devnum;
  2446. rdev = conf->mirrors[dev].rdev;
  2447. if (r10_bio->devs[m].bio == NULL)
  2448. continue;
  2449. if (test_bit(BIO_UPTODATE,
  2450. &r10_bio->devs[m].bio->bi_flags)) {
  2451. rdev_clear_badblocks(
  2452. rdev,
  2453. r10_bio->devs[m].addr,
  2454. r10_bio->sectors, 0);
  2455. } else {
  2456. if (!rdev_set_badblocks(
  2457. rdev,
  2458. r10_bio->devs[m].addr,
  2459. r10_bio->sectors, 0))
  2460. md_error(conf->mddev, rdev);
  2461. }
  2462. rdev = conf->mirrors[dev].replacement;
  2463. if (r10_bio->devs[m].repl_bio == NULL)
  2464. continue;
  2465. if (test_bit(BIO_UPTODATE,
  2466. &r10_bio->devs[m].repl_bio->bi_flags)) {
  2467. rdev_clear_badblocks(
  2468. rdev,
  2469. r10_bio->devs[m].addr,
  2470. r10_bio->sectors, 0);
  2471. } else {
  2472. if (!rdev_set_badblocks(
  2473. rdev,
  2474. r10_bio->devs[m].addr,
  2475. r10_bio->sectors, 0))
  2476. md_error(conf->mddev, rdev);
  2477. }
  2478. }
  2479. put_buf(r10_bio);
  2480. } else {
  2481. for (m = 0; m < conf->copies; m++) {
  2482. int dev = r10_bio->devs[m].devnum;
  2483. struct bio *bio = r10_bio->devs[m].bio;
  2484. rdev = conf->mirrors[dev].rdev;
  2485. if (bio == IO_MADE_GOOD) {
  2486. rdev_clear_badblocks(
  2487. rdev,
  2488. r10_bio->devs[m].addr,
  2489. r10_bio->sectors, 0);
  2490. rdev_dec_pending(rdev, conf->mddev);
  2491. } else if (bio != NULL &&
  2492. !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
  2493. if (!narrow_write_error(r10_bio, m)) {
  2494. md_error(conf->mddev, rdev);
  2495. set_bit(R10BIO_Degraded,
  2496. &r10_bio->state);
  2497. }
  2498. rdev_dec_pending(rdev, conf->mddev);
  2499. }
  2500. bio = r10_bio->devs[m].repl_bio;
  2501. rdev = conf->mirrors[dev].replacement;
  2502. if (rdev && bio == IO_MADE_GOOD) {
  2503. rdev_clear_badblocks(
  2504. rdev,
  2505. r10_bio->devs[m].addr,
  2506. r10_bio->sectors, 0);
  2507. rdev_dec_pending(rdev, conf->mddev);
  2508. }
  2509. }
  2510. if (test_bit(R10BIO_WriteError,
  2511. &r10_bio->state))
  2512. close_write(r10_bio);
  2513. raid_end_bio_io(r10_bio);
  2514. }
  2515. }
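/*
 * The md thread for this array: flushes pending writes and processes
 * r10_bios queued on conf->retry_list (read errors, resync, recovery
 * and reshape work).
 */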
  2516. static void raid10d(struct md_thread *thread)
  2517. {
  2518. struct mddev *mddev = thread->mddev;
  2519. struct r10bio *r10_bio;
  2520. unsigned long flags;
  2521. struct r10conf *conf = mddev->private;
  2522. struct list_head *head = &conf->retry_list;
  2523. struct blk_plug plug;
  2524. md_check_recovery(mddev);
  2525. blk_start_plug(&plug);
  2526. for (;;) {
  2527. flush_pending_writes(conf);
  2528. spin_lock_irqsave(&conf->device_lock, flags);
  2529. if (list_empty(head)) {
  2530. spin_unlock_irqrestore(&conf->device_lock, flags);
  2531. break;
  2532. }
  2533. r10_bio = list_entry(head->prev, struct r10bio, retry_list);
  2534. list_del(head->prev);
  2535. conf->nr_queued--;
  2536. spin_unlock_irqrestore(&conf->device_lock, flags);
  2537. mddev = r10_bio->mddev;
  2538. conf = mddev->private;
  2539. if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
  2540. test_bit(R10BIO_WriteError, &r10_bio->state))
  2541. handle_write_completed(conf, r10_bio);
  2542. else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
  2543. reshape_request_write(mddev, r10_bio);
  2544. else if (test_bit(R10BIO_IsSync, &r10_bio->state))
  2545. sync_request_write(mddev, r10_bio);
  2546. else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
  2547. recovery_request_write(mddev, r10_bio);
  2548. else if (test_bit(R10BIO_ReadError, &r10_bio->state))
  2549. handle_read_error(mddev, r10_bio);
  2550. else {
  2551. /* just a partial read to be scheduled from a
  2552. * separate context
  2553. */
  2554. int slot = r10_bio->read_slot;
  2555. generic_make_request(r10_bio->devs[slot].bio);
  2556. }
  2557. cond_resched();
  2558. if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
  2559. md_check_recovery(mddev);
  2560. }
  2561. blk_finish_plug(&plug);
  2562. }
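/* Allocate the mempool of buffers used by resync/recovery requests. */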
  2563. static int init_resync(struct r10conf *conf)
  2564. {
  2565. int buffs;
  2566. int i;
  2567. buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
  2568. BUG_ON(conf->r10buf_pool);
  2569. conf->have_replacement = 0;
  2570. for (i = 0; i < conf->geo.raid_disks; i++)
  2571. if (conf->mirrors[i].replacement)
  2572. conf->have_replacement = 1;
  2573. conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
  2574. if (!conf->r10buf_pool)
  2575. return -ENOMEM;
  2576. conf->next_resync = 0;
  2577. return 0;
  2578. }
  2579. /*
  2580. * perform a "sync" on one "block"
  2581. *
  2582. * We need to make sure that no normal I/O request - particularly write
2583. * requests - conflicts with active sync requests.
  2584. *
  2585. * This is achieved by tracking pending requests and a 'barrier' concept
  2586. * that can be installed to exclude normal IO requests.
  2587. *
  2588. * Resync and recovery are handled very differently.
  2589. * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
  2590. *
  2591. * For resync, we iterate over virtual addresses, read all copies,
  2592. * and update if there are differences. If only one copy is live,
  2593. * skip it.
  2594. * For recovery, we iterate over physical addresses, read a good
  2595. * value for each non-in_sync drive, and over-write.
  2596. *
  2597. * So, for recovery we may have several outstanding complex requests for a
  2598. * given address, one for each out-of-sync device. We model this by allocating
  2599. * a number of r10_bio structures, one for each out-of-sync device.
  2600. * As we setup these structures, we collect all bio's together into a list
  2601. * which we then process collectively to add pages, and then process again
  2602. * to pass to generic_make_request.
  2603. *
  2604. * The r10_bio structures are linked using a borrowed master_bio pointer.
  2605. * This link is counted in ->remaining. When the r10_bio that points to NULL
  2606. * has its remaining count decremented to 0, the whole complex operation
  2607. * is complete.
  2608. *
  2609. */
  2610. static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
  2611. int *skipped)
  2612. {
  2613. struct r10conf *conf = mddev->private;
  2614. struct r10bio *r10_bio;
  2615. struct bio *biolist = NULL, *bio;
  2616. sector_t max_sector, nr_sectors;
  2617. int i;
  2618. int max_sync;
  2619. sector_t sync_blocks;
  2620. sector_t sectors_skipped = 0;
  2621. int chunks_skipped = 0;
  2622. sector_t chunk_mask = conf->geo.chunk_mask;
  2623. if (!conf->r10buf_pool)
  2624. if (init_resync(conf))
  2625. return 0;
  2626. /*
  2627. * Allow skipping a full rebuild for incremental assembly
  2628. * of a clean array, like RAID1 does.
  2629. */
  2630. if (mddev->bitmap == NULL &&
  2631. mddev->recovery_cp == MaxSector &&
  2632. mddev->reshape_position == MaxSector &&
  2633. !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
  2634. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  2635. !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  2636. conf->fullsync == 0) {
  2637. *skipped = 1;
  2638. return mddev->dev_sectors - sector_nr;
  2639. }
  2640. skipped:
  2641. max_sector = mddev->dev_sectors;
  2642. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
  2643. test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  2644. max_sector = mddev->resync_max_sectors;
  2645. if (sector_nr >= max_sector) {
  2646. /* If we aborted, we need to abort the
2647. * sync on the 'current' bitmap chunks (there can
2648. * be several when recovering multiple devices),
2649. * as we may have started syncing it but not finished.
  2650. * We can find the current address in
  2651. * mddev->curr_resync, but for recovery,
  2652. * we need to convert that to several
  2653. * virtual addresses.
  2654. */
  2655. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
  2656. end_reshape(conf);
  2657. close_sync(conf);
  2658. return 0;
  2659. }
  2660. if (mddev->curr_resync < max_sector) { /* aborted */
  2661. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
  2662. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  2663. &sync_blocks, 1);
  2664. else for (i = 0; i < conf->geo.raid_disks; i++) {
  2665. sector_t sect =
  2666. raid10_find_virt(conf, mddev->curr_resync, i);
  2667. bitmap_end_sync(mddev->bitmap, sect,
  2668. &sync_blocks, 1);
  2669. }
  2670. } else {
  2671. /* completed sync */
  2672. if ((!mddev->bitmap || conf->fullsync)
  2673. && conf->have_replacement
  2674. && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  2675. /* Completed a full sync so the replacements
  2676. * are now fully recovered.
  2677. */
  2678. for (i = 0; i < conf->geo.raid_disks; i++)
  2679. if (conf->mirrors[i].replacement)
  2680. conf->mirrors[i].replacement
  2681. ->recovery_offset
  2682. = MaxSector;
  2683. }
  2684. conf->fullsync = 0;
  2685. }
  2686. bitmap_close_sync(mddev->bitmap);
  2687. close_sync(conf);
  2688. *skipped = 1;
  2689. return sectors_skipped;
  2690. }
  2691. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  2692. return reshape_request(mddev, sector_nr, skipped);
  2693. if (chunks_skipped >= conf->geo.raid_disks) {
  2694. /* if there has been nothing to do on any drive,
  2695. * then there is nothing to do at all..
  2696. */
  2697. *skipped = 1;
  2698. return (max_sector - sector_nr) + sectors_skipped;
  2699. }
  2700. if (max_sector > mddev->resync_max)
  2701. max_sector = mddev->resync_max; /* Don't do IO beyond here */
  2702. /* make sure whole request will fit in a chunk - if chunks
  2703. * are meaningful
  2704. */
  2705. if (conf->geo.near_copies < conf->geo.raid_disks &&
  2706. max_sector > (sector_nr | chunk_mask))
  2707. max_sector = (sector_nr | chunk_mask) + 1;
  2708. /* Again, very different code for resync and recovery.
  2709. * Both must result in an r10bio with a list of bios that
  2710. * have bi_end_io, bi_sector, bi_bdev set,
  2711. * and bi_private set to the r10bio.
  2712. * For recovery, we may actually create several r10bios
  2713. * with 2 bios in each, that correspond to the bios in the main one.
  2714. * In this case, the subordinate r10bios link back through a
  2715. * borrowed master_bio pointer, and the counter in the master
  2716. * includes a ref from each subordinate.
  2717. */
  2718. /* First, we decide what to do and set ->bi_end_io
  2719. * To end_sync_read if we want to read, and
  2720. * end_sync_write if we will want to write.
  2721. */
  2722. max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
  2723. if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  2724. /* recovery... the complicated one */
  2725. int j;
  2726. r10_bio = NULL;
  2727. for (i = 0 ; i < conf->geo.raid_disks; i++) {
  2728. int still_degraded;
  2729. struct r10bio *rb2;
  2730. sector_t sect;
  2731. int must_sync;
  2732. int any_working;
  2733. struct raid10_info *mirror = &conf->mirrors[i];
  2734. if ((mirror->rdev == NULL ||
  2735. test_bit(In_sync, &mirror->rdev->flags))
  2736. &&
  2737. (mirror->replacement == NULL ||
  2738. test_bit(Faulty,
  2739. &mirror->replacement->flags)))
  2740. continue;
  2741. still_degraded = 0;
  2742. /* want to reconstruct this device */
  2743. rb2 = r10_bio;
  2744. sect = raid10_find_virt(conf, sector_nr, i);
  2745. if (sect >= mddev->resync_max_sectors) {
  2746. /* last stripe is not complete - don't
  2747. * try to recover this sector.
  2748. */
  2749. continue;
  2750. }
2751. /* Unless we are doing a full sync, or a replacement,
  2752. * we only need to recover the block if it is set in
  2753. * the bitmap
  2754. */
  2755. must_sync = bitmap_start_sync(mddev->bitmap, sect,
  2756. &sync_blocks, 1);
  2757. if (sync_blocks < max_sync)
  2758. max_sync = sync_blocks;
  2759. if (!must_sync &&
  2760. mirror->replacement == NULL &&
  2761. !conf->fullsync) {
  2762. /* yep, skip the sync_blocks here, but don't assume
  2763. * that there will never be anything to do here
  2764. */
  2765. chunks_skipped = -1;
  2766. continue;
  2767. }
  2768. r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
  2769. r10_bio->state = 0;
  2770. raise_barrier(conf, rb2 != NULL);
  2771. atomic_set(&r10_bio->remaining, 0);
  2772. r10_bio->master_bio = (struct bio*)rb2;
  2773. if (rb2)
  2774. atomic_inc(&rb2->remaining);
  2775. r10_bio->mddev = mddev;
  2776. set_bit(R10BIO_IsRecover, &r10_bio->state);
  2777. r10_bio->sector = sect;
  2778. raid10_find_phys(conf, r10_bio);
  2779. /* Need to check if the array will still be
  2780. * degraded
  2781. */
  2782. for (j = 0; j < conf->geo.raid_disks; j++)
  2783. if (conf->mirrors[j].rdev == NULL ||
  2784. test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
  2785. still_degraded = 1;
  2786. break;
  2787. }
  2788. must_sync = bitmap_start_sync(mddev->bitmap, sect,
  2789. &sync_blocks, still_degraded);
  2790. any_working = 0;
  2791. for (j=0; j<conf->copies;j++) {
  2792. int k;
  2793. int d = r10_bio->devs[j].devnum;
  2794. sector_t from_addr, to_addr;
  2795. struct md_rdev *rdev;
  2796. sector_t sector, first_bad;
  2797. int bad_sectors;
  2798. if (!conf->mirrors[d].rdev ||
  2799. !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
  2800. continue;
  2801. /* This is where we read from */
  2802. any_working = 1;
  2803. rdev = conf->mirrors[d].rdev;
  2804. sector = r10_bio->devs[j].addr;
  2805. if (is_badblock(rdev, sector, max_sync,
  2806. &first_bad, &bad_sectors)) {
  2807. if (first_bad > sector)
  2808. max_sync = first_bad - sector;
  2809. else {
  2810. bad_sectors -= (sector
  2811. - first_bad);
  2812. if (max_sync > bad_sectors)
  2813. max_sync = bad_sectors;
  2814. continue;
  2815. }
  2816. }
  2817. bio = r10_bio->devs[0].bio;
  2818. bio_reset(bio);
  2819. bio->bi_next = biolist;
  2820. biolist = bio;
  2821. bio->bi_private = r10_bio;
  2822. bio->bi_end_io = end_sync_read;
  2823. bio->bi_rw = READ;
  2824. from_addr = r10_bio->devs[j].addr;
  2825. bio->bi_iter.bi_sector = from_addr +
  2826. rdev->data_offset;
  2827. bio->bi_bdev = rdev->bdev;
  2828. atomic_inc(&rdev->nr_pending);
  2829. /* and we write to 'i' (if not in_sync) */
  2830. for (k=0; k<conf->copies; k++)
  2831. if (r10_bio->devs[k].devnum == i)
  2832. break;
  2833. BUG_ON(k == conf->copies);
  2834. to_addr = r10_bio->devs[k].addr;
  2835. r10_bio->devs[0].devnum = d;
  2836. r10_bio->devs[0].addr = from_addr;
  2837. r10_bio->devs[1].devnum = i;
  2838. r10_bio->devs[1].addr = to_addr;
  2839. rdev = mirror->rdev;
  2840. if (!test_bit(In_sync, &rdev->flags)) {
  2841. bio = r10_bio->devs[1].bio;
  2842. bio_reset(bio);
  2843. bio->bi_next = biolist;
  2844. biolist = bio;
  2845. bio->bi_private = r10_bio;
  2846. bio->bi_end_io = end_sync_write;
  2847. bio->bi_rw = WRITE;
  2848. bio->bi_iter.bi_sector = to_addr
  2849. + rdev->data_offset;
  2850. bio->bi_bdev = rdev->bdev;
  2851. atomic_inc(&r10_bio->remaining);
  2852. } else
  2853. r10_bio->devs[1].bio->bi_end_io = NULL;
  2854. /* and maybe write to replacement */
  2855. bio = r10_bio->devs[1].repl_bio;
  2856. if (bio)
  2857. bio->bi_end_io = NULL;
  2858. rdev = mirror->replacement;
  2859. /* Note: if rdev != NULL, then bio
  2860. * cannot be NULL as r10buf_pool_alloc will
  2861. * have allocated it.
  2862. * So the second test here is pointless.
  2863. * But it keeps semantic-checkers happy, and
  2864. * this comment keeps human reviewers
  2865. * happy.
  2866. */
  2867. if (rdev == NULL || bio == NULL ||
  2868. test_bit(Faulty, &rdev->flags))
  2869. break;
  2870. bio_reset(bio);
  2871. bio->bi_next = biolist;
  2872. biolist = bio;
  2873. bio->bi_private = r10_bio;
  2874. bio->bi_end_io = end_sync_write;
  2875. bio->bi_rw = WRITE;
  2876. bio->bi_iter.bi_sector = to_addr +
  2877. rdev->data_offset;
  2878. bio->bi_bdev = rdev->bdev;
  2879. atomic_inc(&r10_bio->remaining);
  2880. break;
  2881. }
  2882. if (j == conf->copies) {
  2883. /* Cannot recover, so abort the recovery or
  2884. * record a bad block */
  2885. if (any_working) {
  2886. /* problem is that there are bad blocks
  2887. * on other device(s)
  2888. */
  2889. int k;
  2890. for (k = 0; k < conf->copies; k++)
  2891. if (r10_bio->devs[k].devnum == i)
  2892. break;
  2893. if (!test_bit(In_sync,
  2894. &mirror->rdev->flags)
  2895. && !rdev_set_badblocks(
  2896. mirror->rdev,
  2897. r10_bio->devs[k].addr,
  2898. max_sync, 0))
  2899. any_working = 0;
  2900. if (mirror->replacement &&
  2901. !rdev_set_badblocks(
  2902. mirror->replacement,
  2903. r10_bio->devs[k].addr,
  2904. max_sync, 0))
  2905. any_working = 0;
  2906. }
  2907. if (!any_working) {
  2908. if (!test_and_set_bit(MD_RECOVERY_INTR,
  2909. &mddev->recovery))
  2910. printk(KERN_INFO "md/raid10:%s: insufficient "
  2911. "working devices for recovery.\n",
  2912. mdname(mddev));
  2913. mirror->recovery_disabled
  2914. = mddev->recovery_disabled;
  2915. }
  2916. put_buf(r10_bio);
  2917. if (rb2)
  2918. atomic_dec(&rb2->remaining);
  2919. r10_bio = rb2;
  2920. break;
  2921. }
  2922. }
  2923. if (biolist == NULL) {
  2924. while (r10_bio) {
  2925. struct r10bio *rb2 = r10_bio;
  2926. r10_bio = (struct r10bio*) rb2->master_bio;
  2927. rb2->master_bio = NULL;
  2928. put_buf(rb2);
  2929. }
  2930. goto giveup;
  2931. }
  2932. } else {
  2933. /* resync. Schedule a read for every block at this virt offset */
  2934. int count = 0;
  2935. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  2936. if (!bitmap_start_sync(mddev->bitmap, sector_nr,
  2937. &sync_blocks, mddev->degraded) &&
  2938. !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
  2939. &mddev->recovery)) {
  2940. /* We can skip this block */
  2941. *skipped = 1;
  2942. return sync_blocks + sectors_skipped;
  2943. }
  2944. if (sync_blocks < max_sync)
  2945. max_sync = sync_blocks;
  2946. r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
  2947. r10_bio->state = 0;
  2948. r10_bio->mddev = mddev;
  2949. atomic_set(&r10_bio->remaining, 0);
  2950. raise_barrier(conf, 0);
  2951. conf->next_resync = sector_nr;
  2952. r10_bio->master_bio = NULL;
  2953. r10_bio->sector = sector_nr;
  2954. set_bit(R10BIO_IsSync, &r10_bio->state);
  2955. raid10_find_phys(conf, r10_bio);
  2956. r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
  2957. for (i = 0; i < conf->copies; i++) {
  2958. int d = r10_bio->devs[i].devnum;
  2959. sector_t first_bad, sector;
  2960. int bad_sectors;
  2961. if (r10_bio->devs[i].repl_bio)
  2962. r10_bio->devs[i].repl_bio->bi_end_io = NULL;
  2963. bio = r10_bio->devs[i].bio;
  2964. bio_reset(bio);
  2965. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  2966. if (conf->mirrors[d].rdev == NULL ||
  2967. test_bit(Faulty, &conf->mirrors[d].rdev->flags))
  2968. continue;
  2969. sector = r10_bio->devs[i].addr;
  2970. if (is_badblock(conf->mirrors[d].rdev,
  2971. sector, max_sync,
  2972. &first_bad, &bad_sectors)) {
  2973. if (first_bad > sector)
  2974. max_sync = first_bad - sector;
  2975. else {
  2976. bad_sectors -= (sector - first_bad);
  2977. if (max_sync > bad_sectors)
  2978. max_sync = bad_sectors;
  2979. continue;
  2980. }
  2981. }
  2982. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  2983. atomic_inc(&r10_bio->remaining);
  2984. bio->bi_next = biolist;
  2985. biolist = bio;
  2986. bio->bi_private = r10_bio;
  2987. bio->bi_end_io = end_sync_read;
  2988. bio->bi_rw = READ;
  2989. bio->bi_iter.bi_sector = sector +
  2990. conf->mirrors[d].rdev->data_offset;
  2991. bio->bi_bdev = conf->mirrors[d].rdev->bdev;
  2992. count++;
  2993. if (conf->mirrors[d].replacement == NULL ||
  2994. test_bit(Faulty,
  2995. &conf->mirrors[d].replacement->flags))
  2996. continue;
  2997. /* Need to set up for writing to the replacement */
  2998. bio = r10_bio->devs[i].repl_bio;
  2999. bio_reset(bio);
  3000. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  3001. sector = r10_bio->devs[i].addr;
  3002. atomic_inc(&conf->mirrors[d].rdev->nr_pending);
  3003. bio->bi_next = biolist;
  3004. biolist = bio;
  3005. bio->bi_private = r10_bio;
  3006. bio->bi_end_io = end_sync_write;
  3007. bio->bi_rw = WRITE;
  3008. bio->bi_iter.bi_sector = sector +
  3009. conf->mirrors[d].replacement->data_offset;
  3010. bio->bi_bdev = conf->mirrors[d].replacement->bdev;
  3011. count++;
  3012. }
  3013. if (count < 2) {
  3014. for (i=0; i<conf->copies; i++) {
  3015. int d = r10_bio->devs[i].devnum;
  3016. if (r10_bio->devs[i].bio->bi_end_io)
  3017. rdev_dec_pending(conf->mirrors[d].rdev,
  3018. mddev);
  3019. if (r10_bio->devs[i].repl_bio &&
  3020. r10_bio->devs[i].repl_bio->bi_end_io)
  3021. rdev_dec_pending(
  3022. conf->mirrors[d].replacement,
  3023. mddev);
  3024. }
  3025. put_buf(r10_bio);
  3026. biolist = NULL;
  3027. goto giveup;
  3028. }
  3029. }
  3030. nr_sectors = 0;
  3031. if (sector_nr + max_sync < max_sector)
  3032. max_sector = sector_nr + max_sync;
  3033. do {
  3034. struct page *page;
  3035. int len = PAGE_SIZE;
  3036. if (sector_nr + (len>>9) > max_sector)
  3037. len = (max_sector - sector_nr) << 9;
  3038. if (len == 0)
  3039. break;
  3040. for (bio= biolist ; bio ; bio=bio->bi_next) {
  3041. struct bio *bio2;
  3042. page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
  3043. if (bio_add_page(bio, page, len, 0))
  3044. continue;
  3045. /* stop here */
  3046. bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
  3047. for (bio2 = biolist;
  3048. bio2 && bio2 != bio;
  3049. bio2 = bio2->bi_next) {
  3050. /* remove last page from this bio */
  3051. bio2->bi_vcnt--;
  3052. bio2->bi_iter.bi_size -= len;
  3053. __clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
  3054. }
  3055. goto bio_full;
  3056. }
  3057. nr_sectors += len>>9;
  3058. sector_nr += len>>9;
  3059. } while (biolist->bi_vcnt < RESYNC_PAGES);
  3060. bio_full:
  3061. r10_bio->sectors = nr_sectors;
  3062. while (biolist) {
  3063. bio = biolist;
  3064. biolist = biolist->bi_next;
  3065. bio->bi_next = NULL;
  3066. r10_bio = bio->bi_private;
  3067. r10_bio->sectors = nr_sectors;
  3068. if (bio->bi_end_io == end_sync_read) {
  3069. md_sync_acct(bio->bi_bdev, nr_sectors);
  3070. set_bit(BIO_UPTODATE, &bio->bi_flags);
  3071. generic_make_request(bio);
  3072. }
  3073. }
  3074. if (sectors_skipped)
  3075. /* pretend they weren't skipped, it makes
  3076. * no important difference in this case
  3077. */
  3078. md_done_sync(mddev, sectors_skipped, 1);
  3079. return sectors_skipped + nr_sectors;
  3080. giveup:
  3081. /* There is nowhere to write, so all non-sync
3082. * drives must be failed or in resync, or all drives
3083. * have a bad block, so try the next chunk...
  3084. */
  3085. if (sector_nr + max_sync < max_sector)
  3086. max_sector = sector_nr + max_sync;
  3087. sectors_skipped += (max_sector - sector_nr);
  3088. chunks_skipped ++;
  3089. sector_nr = max_sector;
  3090. goto skipped;
  3091. }
  3092. static sector_t
  3093. raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  3094. {
  3095. sector_t size;
  3096. struct r10conf *conf = mddev->private;
  3097. if (!raid_disks)
  3098. raid_disks = min(conf->geo.raid_disks,
  3099. conf->prev.raid_disks);
  3100. if (!sectors)
  3101. sectors = conf->dev_sectors;
  3102. size = sectors >> conf->geo.chunk_shift;
  3103. sector_div(size, conf->geo.far_copies);
  3104. size = size * raid_disks;
  3105. sector_div(size, conf->geo.near_copies);
  3106. return size << conf->geo.chunk_shift;
  3107. }
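/*
 * Worked example, assuming a hypothetical 4-device "near 2" array
 * (raid_disks = 4, near_copies = 2, far_copies = 1, 512KiB chunks so
 * chunk_shift = 10) with 2097152 sectors per device:
 *   2097152 >> 10 = 2048 chunks per device
 *   2048 / 1 * 4 / 2 = 4096 chunks in the array
 *   4096 << 10 = 4194304 sectors of array capacity, i.e. the space of
 *   two of the four devices, as expected for two-way mirroring.
 */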
  3108. static void calc_sectors(struct r10conf *conf, sector_t size)
  3109. {
  3110. /* Calculate the number of sectors-per-device that will
  3111. * actually be used, and set conf->dev_sectors and
  3112. * conf->stride
  3113. */
  3114. size = size >> conf->geo.chunk_shift;
  3115. sector_div(size, conf->geo.far_copies);
  3116. size = size * conf->geo.raid_disks;
  3117. sector_div(size, conf->geo.near_copies);
  3118. /* 'size' is now the number of chunks in the array */
  3119. /* calculate "used chunks per device" */
  3120. size = size * conf->copies;
  3121. /* We need to round up when dividing by raid_disks to
  3122. * get the stride size.
  3123. */
  3124. size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
  3125. conf->dev_sectors = size << conf->geo.chunk_shift;
  3126. if (conf->geo.far_offset)
  3127. conf->geo.stride = 1 << conf->geo.chunk_shift;
  3128. else {
  3129. sector_div(size, conf->geo.far_copies);
  3130. conf->geo.stride = size << conf->geo.chunk_shift;
  3131. }
  3132. }
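/*
 * Worked example, same hypothetical geometry as above (4 devices,
 * near_copies = 2, far_copies = 1, copies = 2, chunk_shift = 10) and
 * size = 2097152 sectors on the smallest device:
 *   chunks in the array    : 2048 / 1 * 4 / 2 = 4096
 *   used chunks per device : DIV_ROUND_UP(4096 * 2, 4) = 2048
 *   conf->dev_sectors      : 2048 << 10 = 2097152 (the whole device,
 *                            since the numbers divide evenly)
 *   conf->geo.stride       : (2048 / 1) << 10 = 2097152 (far_offset not set)
 */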
  3133. enum geo_type {geo_new, geo_old, geo_start};
  3134. static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
  3135. {
  3136. int nc, fc, fo;
  3137. int layout, chunk, disks;
  3138. switch (new) {
  3139. case geo_old:
  3140. layout = mddev->layout;
  3141. chunk = mddev->chunk_sectors;
  3142. disks = mddev->raid_disks - mddev->delta_disks;
  3143. break;
  3144. case geo_new:
  3145. layout = mddev->new_layout;
  3146. chunk = mddev->new_chunk_sectors;
  3147. disks = mddev->raid_disks;
  3148. break;
  3149. default: /* avoid 'may be unused' warnings */
  3150. case geo_start: /* new when starting reshape - raid_disks not
  3151. * updated yet. */
  3152. layout = mddev->new_layout;
  3153. chunk = mddev->new_chunk_sectors;
  3154. disks = mddev->raid_disks + mddev->delta_disks;
  3155. break;
  3156. }
  3157. if (layout >> 18)
  3158. return -1;
  3159. if (chunk < (PAGE_SIZE >> 9) ||
  3160. !is_power_of_2(chunk))
  3161. return -2;
  3162. nc = layout & 255;
  3163. fc = (layout >> 8) & 255;
  3164. fo = layout & (1<<16);
  3165. geo->raid_disks = disks;
  3166. geo->near_copies = nc;
  3167. geo->far_copies = fc;
  3168. geo->far_offset = fo;
  3169. geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
  3170. geo->chunk_mask = chunk - 1;
  3171. geo->chunk_shift = ffz(~chunk);
  3172. return nc*fc;
  3173. }
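/*
 * Example decode: layout 0x102 gives nc = 0x102 & 255 = 2,
 * fc = (0x102 >> 8) & 255 = 1 and fo = 0, i.e. a plain two-copy "near"
 * arrangement, and setup_geo() returns nc * fc = 2 copies.  With bit 16
 * set (e.g. 0x10201: nc = 1, fc = 2, fo != 0) the "offset" variant is
 * selected, and bit 17 shrinks far_set_size from 'disks' to disks / fc.
 */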
  3174. static struct r10conf *setup_conf(struct mddev *mddev)
  3175. {
  3176. struct r10conf *conf = NULL;
  3177. int err = -EINVAL;
  3178. struct geom geo;
  3179. int copies;
  3180. copies = setup_geo(&geo, mddev, geo_new);
  3181. if (copies == -2) {
  3182. printk(KERN_ERR "md/raid10:%s: chunk size must be "
  3183. "at least PAGE_SIZE(%ld) and be a power of 2.\n",
  3184. mdname(mddev), PAGE_SIZE);
  3185. goto out;
  3186. }
  3187. if (copies < 2 || copies > mddev->raid_disks) {
  3188. printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
  3189. mdname(mddev), mddev->new_layout);
  3190. goto out;
  3191. }
  3192. err = -ENOMEM;
  3193. conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
  3194. if (!conf)
  3195. goto out;
  3196. /* FIXME calc properly */
  3197. conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
  3198. max(0,-mddev->delta_disks)),
  3199. GFP_KERNEL);
  3200. if (!conf->mirrors)
  3201. goto out;
  3202. conf->tmppage = alloc_page(GFP_KERNEL);
  3203. if (!conf->tmppage)
  3204. goto out;
  3205. conf->geo = geo;
  3206. conf->copies = copies;
  3207. conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
  3208. r10bio_pool_free, conf);
  3209. if (!conf->r10bio_pool)
  3210. goto out;
  3211. calc_sectors(conf, mddev->dev_sectors);
  3212. if (mddev->reshape_position == MaxSector) {
  3213. conf->prev = conf->geo;
  3214. conf->reshape_progress = MaxSector;
  3215. } else {
  3216. if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
  3217. err = -EINVAL;
  3218. goto out;
  3219. }
  3220. conf->reshape_progress = mddev->reshape_position;
  3221. if (conf->prev.far_offset)
  3222. conf->prev.stride = 1 << conf->prev.chunk_shift;
  3223. else
  3224. /* far_copies must be 1 */
  3225. conf->prev.stride = conf->dev_sectors;
  3226. }
  3227. spin_lock_init(&conf->device_lock);
  3228. INIT_LIST_HEAD(&conf->retry_list);
  3229. spin_lock_init(&conf->resync_lock);
  3230. init_waitqueue_head(&conf->wait_barrier);
  3231. conf->thread = md_register_thread(raid10d, mddev, "raid10");
  3232. if (!conf->thread)
  3233. goto out;
  3234. conf->mddev = mddev;
  3235. return conf;
  3236. out:
  3237. if (err == -ENOMEM)
  3238. printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
  3239. mdname(mddev));
  3240. if (conf) {
  3241. if (conf->r10bio_pool)
  3242. mempool_destroy(conf->r10bio_pool);
  3243. kfree(conf->mirrors);
  3244. safe_put_page(conf->tmppage);
  3245. kfree(conf);
  3246. }
  3247. return ERR_PTR(err);
  3248. }
  3249. static int run(struct mddev *mddev)
  3250. {
  3251. struct r10conf *conf;
  3252. int i, disk_idx, chunk_size;
  3253. struct raid10_info *disk;
  3254. struct md_rdev *rdev;
  3255. sector_t size;
  3256. sector_t min_offset_diff = 0;
  3257. int first = 1;
  3258. bool discard_supported = false;
  3259. if (mddev->private == NULL) {
  3260. conf = setup_conf(mddev);
  3261. if (IS_ERR(conf))
  3262. return PTR_ERR(conf);
  3263. mddev->private = conf;
  3264. }
  3265. conf = mddev->private;
  3266. if (!conf)
  3267. goto out;
  3268. mddev->thread = conf->thread;
  3269. conf->thread = NULL;
  3270. chunk_size = mddev->chunk_sectors << 9;
  3271. if (mddev->queue) {
  3272. blk_queue_max_discard_sectors(mddev->queue,
  3273. mddev->chunk_sectors);
  3274. blk_queue_max_write_same_sectors(mddev->queue, 0);
  3275. blk_queue_io_min(mddev->queue, chunk_size);
  3276. if (conf->geo.raid_disks % conf->geo.near_copies)
  3277. blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
  3278. else
  3279. blk_queue_io_opt(mddev->queue, chunk_size *
  3280. (conf->geo.raid_disks / conf->geo.near_copies));
  3281. }
  3282. rdev_for_each(rdev, mddev) {
  3283. long long diff;
  3284. struct request_queue *q;
  3285. disk_idx = rdev->raid_disk;
  3286. if (disk_idx < 0)
  3287. continue;
  3288. if (disk_idx >= conf->geo.raid_disks &&
  3289. disk_idx >= conf->prev.raid_disks)
  3290. continue;
  3291. disk = conf->mirrors + disk_idx;
  3292. if (test_bit(Replacement, &rdev->flags)) {
  3293. if (disk->replacement)
  3294. goto out_free_conf;
  3295. disk->replacement = rdev;
  3296. } else {
  3297. if (disk->rdev)
  3298. goto out_free_conf;
  3299. disk->rdev = rdev;
  3300. }
  3301. q = bdev_get_queue(rdev->bdev);
  3302. if (q->merge_bvec_fn)
  3303. mddev->merge_check_needed = 1;
  3304. diff = (rdev->new_data_offset - rdev->data_offset);
  3305. if (!mddev->reshape_backwards)
  3306. diff = -diff;
  3307. if (diff < 0)
  3308. diff = 0;
  3309. if (first || diff < min_offset_diff)
  3310. min_offset_diff = diff;
  3311. if (mddev->gendisk)
  3312. disk_stack_limits(mddev->gendisk, rdev->bdev,
  3313. rdev->data_offset << 9);
  3314. disk->head_position = 0;
  3315. if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
  3316. discard_supported = true;
  3317. }
  3318. if (mddev->queue) {
  3319. if (discard_supported)
  3320. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  3321. mddev->queue);
  3322. else
  3323. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  3324. mddev->queue);
  3325. }
  3326. /* need to check that every block has at least one working mirror */
  3327. if (!enough(conf, -1)) {
  3328. printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
  3329. mdname(mddev));
  3330. goto out_free_conf;
  3331. }
  3332. if (conf->reshape_progress != MaxSector) {
  3333. /* must ensure that shape change is supported */
  3334. if (conf->geo.far_copies != 1 &&
  3335. conf->geo.far_offset == 0)
  3336. goto out_free_conf;
  3337. if (conf->prev.far_copies != 1 &&
  3338. conf->prev.far_offset == 0)
  3339. goto out_free_conf;
  3340. }
  3341. mddev->degraded = 0;
  3342. for (i = 0;
  3343. i < conf->geo.raid_disks
  3344. || i < conf->prev.raid_disks;
  3345. i++) {
  3346. disk = conf->mirrors + i;
  3347. if (!disk->rdev && disk->replacement) {
  3348. /* The replacement is all we have - use it */
  3349. disk->rdev = disk->replacement;
  3350. disk->replacement = NULL;
  3351. clear_bit(Replacement, &disk->rdev->flags);
  3352. }
  3353. if (!disk->rdev ||
  3354. !test_bit(In_sync, &disk->rdev->flags)) {
  3355. disk->head_position = 0;
  3356. mddev->degraded++;
  3357. if (disk->rdev &&
  3358. disk->rdev->saved_raid_disk < 0)
  3359. conf->fullsync = 1;
  3360. }
  3361. disk->recovery_disabled = mddev->recovery_disabled - 1;
  3362. }
  3363. if (mddev->recovery_cp != MaxSector)
  3364. printk(KERN_NOTICE "md/raid10:%s: not clean"
  3365. " -- starting background reconstruction\n",
  3366. mdname(mddev));
  3367. printk(KERN_INFO
  3368. "md/raid10:%s: active with %d out of %d devices\n",
  3369. mdname(mddev), conf->geo.raid_disks - mddev->degraded,
  3370. conf->geo.raid_disks);
  3371. /*
  3372. * Ok, everything is just fine now
  3373. */
  3374. mddev->dev_sectors = conf->dev_sectors;
  3375. size = raid10_size(mddev, 0, 0);
  3376. md_set_array_sectors(mddev, size);
  3377. mddev->resync_max_sectors = size;
  3378. if (mddev->queue) {
  3379. int stripe = conf->geo.raid_disks *
  3380. ((mddev->chunk_sectors << 9) / PAGE_SIZE);
  3381. /* Calculate max read-ahead size.
  3382. * We need to readahead at least twice a whole stripe....
  3383. * maybe...
  3384. */
  3385. stripe /= conf->geo.near_copies;
  3386. if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  3387. mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  3388. }
  3389. if (md_integrity_register(mddev))
  3390. goto out_free_conf;
  3391. if (conf->reshape_progress != MaxSector) {
  3392. unsigned long before_length, after_length;
  3393. before_length = ((1 << conf->prev.chunk_shift) *
  3394. conf->prev.far_copies);
  3395. after_length = ((1 << conf->geo.chunk_shift) *
  3396. conf->geo.far_copies);
  3397. if (max(before_length, after_length) > min_offset_diff) {
  3398. /* This cannot work */
  3399. printk("md/raid10: offset difference not enough to continue reshape\n");
  3400. goto out_free_conf;
  3401. }
  3402. conf->offset_diff = min_offset_diff;
  3403. conf->reshape_safe = conf->reshape_progress;
  3404. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  3405. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  3406. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  3407. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  3408. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  3409. "reshape");
  3410. }
  3411. return 0;
  3412. out_free_conf:
  3413. md_unregister_thread(&mddev->thread);
  3414. if (conf->r10bio_pool)
  3415. mempool_destroy(conf->r10bio_pool);
  3416. safe_put_page(conf->tmppage);
  3417. kfree(conf->mirrors);
  3418. kfree(conf);
  3419. mddev->private = NULL;
  3420. out:
  3421. return -EIO;
  3422. }
  3423. static void raid10_free(struct mddev *mddev, void *priv)
  3424. {
  3425. struct r10conf *conf = priv;
  3426. if (conf->r10bio_pool)
  3427. mempool_destroy(conf->r10bio_pool);
  3428. safe_put_page(conf->tmppage);
  3429. kfree(conf->mirrors);
  3430. kfree(conf->mirrors_old);
  3431. kfree(conf->mirrors_new);
  3432. kfree(conf);
  3433. }
  3434. static void raid10_quiesce(struct mddev *mddev, int state)
  3435. {
  3436. struct r10conf *conf = mddev->private;
  3437. switch(state) {
  3438. case 1:
  3439. raise_barrier(conf, 0);
  3440. break;
  3441. case 0:
  3442. lower_barrier(conf);
  3443. break;
  3444. }
  3445. }
  3446. static int raid10_resize(struct mddev *mddev, sector_t sectors)
  3447. {
  3448. /* Resize of 'far' arrays is not supported.
  3449. * For 'near' and 'offset' arrays we can set the
  3450. * number of sectors used to be an appropriate multiple
  3451. * of the chunk size.
  3452. * For 'offset', this is far_copies*chunksize.
  3453. * For 'near' the multiplier is the LCM of
  3454. * near_copies and raid_disks.
  3455. * So if far_copies > 1 && !far_offset, fail.
3456. * Else find LCM(raid_disks, near_copies)*far_copies and
  3457. * multiply by chunk_size. Then round to this number.
  3458. * This is mostly done by raid10_size()
  3459. */
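/*
 * Worked example: for a hypothetical 3-device "near 2" array with 512KiB
 * chunks and far_copies = 1, the multiple described above is
 * LCM(3, 2) * 1 = 6 chunks, so the usable size is rounded to a multiple
 * of 6 * 1024 = 6144 sectors (3MiB), which raid10_size() takes care of.
 */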
  3460. struct r10conf *conf = mddev->private;
  3461. sector_t oldsize, size;
  3462. if (mddev->reshape_position != MaxSector)
  3463. return -EBUSY;
  3464. if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
  3465. return -EINVAL;
  3466. oldsize = raid10_size(mddev, 0, 0);
  3467. size = raid10_size(mddev, sectors, 0);
  3468. if (mddev->external_size &&
  3469. mddev->array_sectors > size)
  3470. return -EINVAL;
  3471. if (mddev->bitmap) {
  3472. int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
  3473. if (ret)
  3474. return ret;
  3475. }
  3476. md_set_array_sectors(mddev, size);
  3477. set_capacity(mddev->gendisk, mddev->array_sectors);
  3478. revalidate_disk(mddev->gendisk);
  3479. if (sectors > mddev->dev_sectors &&
  3480. mddev->recovery_cp > oldsize) {
  3481. mddev->recovery_cp = oldsize;
  3482. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  3483. }
  3484. calc_sectors(conf, sectors);
  3485. mddev->dev_sectors = conf->dev_sectors;
  3486. mddev->resync_max_sectors = size;
  3487. return 0;
  3488. }
  3489. static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
  3490. {
  3491. struct md_rdev *rdev;
  3492. struct r10conf *conf;
  3493. if (mddev->degraded > 0) {
  3494. printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
  3495. mdname(mddev));
  3496. return ERR_PTR(-EINVAL);
  3497. }
  3498. sector_div(size, devs);
  3499. /* Set new parameters */
  3500. mddev->new_level = 10;
  3501. /* new layout: far_copies = 1, near_copies = 2 */
  3502. mddev->new_layout = (1<<8) + 2;
  3503. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3504. mddev->delta_disks = mddev->raid_disks;
  3505. mddev->raid_disks *= 2;
3506. /* make sure it will not be marked as dirty */
  3507. mddev->recovery_cp = MaxSector;
  3508. mddev->dev_sectors = size;
  3509. conf = setup_conf(mddev);
  3510. if (!IS_ERR(conf)) {
  3511. rdev_for_each(rdev, mddev)
  3512. if (rdev->raid_disk >= 0) {
  3513. rdev->new_raid_disk = rdev->raid_disk * 2;
  3514. rdev->sectors = size;
  3515. }
  3516. conf->barrier = 1;
  3517. }
  3518. return conf;
  3519. }
  3520. static void *raid10_takeover(struct mddev *mddev)
  3521. {
  3522. struct r0conf *raid0_conf;
  3523. /* raid10 can take over:
3524. * raid0 - providing it has only a single zone
  3525. */
  3526. if (mddev->level == 0) {
  3527. /* for raid0 takeover only one zone is supported */
  3528. raid0_conf = mddev->private;
  3529. if (raid0_conf->nr_strip_zones > 1) {
  3530. printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
  3531. " with more than one zone.\n",
  3532. mdname(mddev));
  3533. return ERR_PTR(-EINVAL);
  3534. }
  3535. return raid10_takeover_raid0(mddev,
  3536. raid0_conf->strip_zone->zone_end,
  3537. raid0_conf->strip_zone->nb_dev);
  3538. }
  3539. return ERR_PTR(-EINVAL);
  3540. }
  3541. static int raid10_check_reshape(struct mddev *mddev)
  3542. {
  3543. /* Called when there is a request to change
  3544. * - layout (to ->new_layout)
  3545. * - chunk size (to ->new_chunk_sectors)
  3546. * - raid_disks (by delta_disks)
  3547. * or when trying to restart a reshape that was ongoing.
  3548. *
  3549. * We need to validate the request and possibly allocate
  3550. * space if that might be an issue later.
  3551. *
  3552. * Currently we reject any reshape of a 'far' mode array,
  3553. * allow chunk size to change if new is generally acceptable,
  3554. * allow raid_disks to increase, and allow
  3555. * a switch between 'near' mode and 'offset' mode.
  3556. */
  3557. struct r10conf *conf = mddev->private;
  3558. struct geom geo;
  3559. if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
  3560. return -EINVAL;
  3561. if (setup_geo(&geo, mddev, geo_start) != conf->copies)
  3562. /* mustn't change number of copies */
  3563. return -EINVAL;
  3564. if (geo.far_copies > 1 && !geo.far_offset)
  3565. /* Cannot switch to 'far' mode */
  3566. return -EINVAL;
  3567. if (mddev->array_sectors & geo.chunk_mask)
  3568. /* not factor of array size */
  3569. return -EINVAL;
  3570. if (!enough(conf, -1))
  3571. return -EINVAL;
  3572. kfree(conf->mirrors_new);
  3573. conf->mirrors_new = NULL;
  3574. if (mddev->delta_disks > 0) {
  3575. /* allocate new 'mirrors' list */
  3576. conf->mirrors_new = kzalloc(
  3577. sizeof(struct raid10_info)
  3578. *(mddev->raid_disks +
  3579. mddev->delta_disks),
  3580. GFP_KERNEL);
  3581. if (!conf->mirrors_new)
  3582. return -ENOMEM;
  3583. }
  3584. return 0;
  3585. }
  3586. /*
  3587. * Need to check if array has failed when deciding whether to:
  3588. * - start an array
  3589. * - remove non-faulty devices
  3590. * - add a spare
  3591. * - allow a reshape
  3592. * This determination is simple when no reshape is happening.
  3593. * However if there is a reshape, we need to carefully check
  3594. * both the before and after sections.
  3595. * This is because some failed devices may only affect one
  3596. * of the two sections, and some non-in_sync devices may
  3597. * be insync in the section most affected by failed devices.
  3598. */
  3599. static int calc_degraded(struct r10conf *conf)
  3600. {
  3601. int degraded, degraded2;
  3602. int i;
  3603. rcu_read_lock();
  3604. degraded = 0;
  3605. /* 'prev' section first */
  3606. for (i = 0; i < conf->prev.raid_disks; i++) {
  3607. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  3608. if (!rdev || test_bit(Faulty, &rdev->flags))
  3609. degraded++;
  3610. else if (!test_bit(In_sync, &rdev->flags))
  3611. /* When we can reduce the number of devices in
  3612. * an array, this might not contribute to
  3613. * 'degraded'. It does now.
  3614. */
  3615. degraded++;
  3616. }
  3617. rcu_read_unlock();
  3618. if (conf->geo.raid_disks == conf->prev.raid_disks)
  3619. return degraded;
  3620. rcu_read_lock();
  3621. degraded2 = 0;
  3622. for (i = 0; i < conf->geo.raid_disks; i++) {
  3623. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  3624. if (!rdev || test_bit(Faulty, &rdev->flags))
  3625. degraded2++;
  3626. else if (!test_bit(In_sync, &rdev->flags)) {
  3627. /* If reshape is increasing the number of devices,
  3628. * this section has already been recovered, so
  3629. * it doesn't contribute to degraded.
  3630. * else it does.
  3631. */
  3632. if (conf->geo.raid_disks <= conf->prev.raid_disks)
  3633. degraded2++;
  3634. }
  3635. }
  3636. rcu_read_unlock();
  3637. if (degraded2 > degraded)
  3638. return degraded2;
  3639. return degraded;
  3640. }
  3641. static int raid10_start_reshape(struct mddev *mddev)
  3642. {
  3643. /* A 'reshape' has been requested. This commits
  3644. * the various 'new' fields and sets MD_RECOVER_RESHAPE
  3645. * This also checks if there are enough spares and adds them
  3646. * to the array.
  3647. * We currently require enough spares to make the final
  3648. * array non-degraded. We also require that the difference
  3649. * between old and new data_offset - on each device - is
  3650. * enough that we never risk over-writing.
  3651. */
  3652. unsigned long before_length, after_length;
  3653. sector_t min_offset_diff = 0;
  3654. int first = 1;
  3655. struct geom new;
  3656. struct r10conf *conf = mddev->private;
  3657. struct md_rdev *rdev;
  3658. int spares = 0;
  3659. int ret;
  3660. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  3661. return -EBUSY;
  3662. if (setup_geo(&new, mddev, geo_start) != conf->copies)
  3663. return -EINVAL;
  3664. before_length = ((1 << conf->prev.chunk_shift) *
  3665. conf->prev.far_copies);
  3666. after_length = ((1 << conf->geo.chunk_shift) *
  3667. conf->geo.far_copies);
  3668. rdev_for_each(rdev, mddev) {
  3669. if (!test_bit(In_sync, &rdev->flags)
  3670. && !test_bit(Faulty, &rdev->flags))
  3671. spares++;
  3672. if (rdev->raid_disk >= 0) {
  3673. long long diff = (rdev->new_data_offset
  3674. - rdev->data_offset);
  3675. if (!mddev->reshape_backwards)
  3676. diff = -diff;
  3677. if (diff < 0)
  3678. diff = 0;
  3679. if (first || diff < min_offset_diff)
  3680. min_offset_diff = diff;
  3681. }
  3682. }
  3683. if (max(before_length, after_length) > min_offset_diff)
  3684. return -EINVAL;
  3685. if (spares < mddev->delta_disks)
  3686. return -EINVAL;
  3687. conf->offset_diff = min_offset_diff;
  3688. spin_lock_irq(&conf->device_lock);
  3689. if (conf->mirrors_new) {
  3690. memcpy(conf->mirrors_new, conf->mirrors,
  3691. sizeof(struct raid10_info)*conf->prev.raid_disks);
  3692. smp_mb();
  3693. kfree(conf->mirrors_old);
  3694. conf->mirrors_old = conf->mirrors;
  3695. conf->mirrors = conf->mirrors_new;
  3696. conf->mirrors_new = NULL;
  3697. }
  3698. setup_geo(&conf->geo, mddev, geo_start);
  3699. smp_mb();
  3700. if (mddev->reshape_backwards) {
  3701. sector_t size = raid10_size(mddev, 0, 0);
  3702. if (size < mddev->array_sectors) {
  3703. spin_unlock_irq(&conf->device_lock);
3704. printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
  3705. mdname(mddev));
  3706. return -EINVAL;
  3707. }
  3708. mddev->resync_max_sectors = size;
  3709. conf->reshape_progress = size;
  3710. } else
  3711. conf->reshape_progress = 0;
  3712. spin_unlock_irq(&conf->device_lock);
  3713. if (mddev->delta_disks && mddev->bitmap) {
  3714. ret = bitmap_resize(mddev->bitmap,
  3715. raid10_size(mddev, 0,
  3716. conf->geo.raid_disks),
  3717. 0, 0);
  3718. if (ret)
  3719. goto abort;
  3720. }
  3721. if (mddev->delta_disks > 0) {
  3722. rdev_for_each(rdev, mddev)
  3723. if (rdev->raid_disk < 0 &&
  3724. !test_bit(Faulty, &rdev->flags)) {
  3725. if (raid10_add_disk(mddev, rdev) == 0) {
  3726. if (rdev->raid_disk >=
  3727. conf->prev.raid_disks)
  3728. set_bit(In_sync, &rdev->flags);
  3729. else
  3730. rdev->recovery_offset = 0;
  3731. if (sysfs_link_rdev(mddev, rdev))
  3732. /* Failure here is OK */;
  3733. }
  3734. } else if (rdev->raid_disk >= conf->prev.raid_disks
  3735. && !test_bit(Faulty, &rdev->flags)) {
  3736. /* This is a spare that was manually added */
  3737. set_bit(In_sync, &rdev->flags);
  3738. }
  3739. }
  3740. /* When a reshape changes the number of devices,
  3741. * ->degraded is measured against the larger of the
  3742. * pre and post numbers.
  3743. */
  3744. spin_lock_irq(&conf->device_lock);
  3745. mddev->degraded = calc_degraded(conf);
  3746. spin_unlock_irq(&conf->device_lock);
  3747. mddev->raid_disks = conf->geo.raid_disks;
  3748. mddev->reshape_position = conf->reshape_progress;
  3749. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  3750. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  3751. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  3752. clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
  3753. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  3754. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  3755. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  3756. "reshape");
  3757. if (!mddev->sync_thread) {
  3758. ret = -EAGAIN;
  3759. goto abort;
  3760. }
  3761. conf->reshape_checkpoint = jiffies;
  3762. md_wakeup_thread(mddev->sync_thread);
  3763. md_new_event(mddev);
  3764. return 0;
  3765. abort:
  3766. mddev->recovery = 0;
  3767. spin_lock_irq(&conf->device_lock);
  3768. conf->geo = conf->prev;
  3769. mddev->raid_disks = conf->geo.raid_disks;
  3770. rdev_for_each(rdev, mddev)
  3771. rdev->new_data_offset = rdev->data_offset;
  3772. smp_wmb();
  3773. conf->reshape_progress = MaxSector;
  3774. mddev->reshape_position = MaxSector;
  3775. spin_unlock_irq(&conf->device_lock);
  3776. return ret;
  3777. }
  3778. /* Calculate the last device-address that could contain
  3779. * any block from the chunk that includes the array-address 's'
  3780. * and report the next address.
  3781. * i.e. the address returned will be chunk-aligned and after
  3782. * any data that is in the chunk containing 's'.
  3783. */
  3784. static sector_t last_dev_address(sector_t s, struct geom *geo)
  3785. {
  3786. s = (s | geo->chunk_mask) + 1;
  3787. s >>= geo->chunk_shift;
  3788. s *= geo->near_copies;
  3789. s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
  3790. s *= geo->far_copies;
  3791. s <<= geo->chunk_shift;
  3792. return s;
  3793. }
  3794. /* Calculate the first device-address that could contain
  3795. * any block from the chunk that includes the array-address 's'.
  3796. * This too will be the start of a chunk
  3797. */
  3798. static sector_t first_dev_address(sector_t s, struct geom *geo)
  3799. {
  3800. s >>= geo->chunk_shift;
  3801. s *= geo->near_copies;
  3802. sector_div(s, geo->raid_disks);
  3803. s *= geo->far_copies;
  3804. s <<= geo->chunk_shift;
  3805. return s;
  3806. }
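/*
 * Worked example, hypothetical geometry: raid_disks = 4, near_copies = 2,
 * far_copies = 1, chunk_mask = 1023, chunk_shift = 10.  For array
 * address s = 5000:
 *   last_dev_address : ((5000 | 1023) + 1) >> 10 = 5; 5 * 2 = 10;
 *                      DIV_ROUND_UP(10, 4) = 3; 3 << 10 = 3072
 *   first_dev_address: 5000 >> 10 = 4; 4 * 2 = 8; 8 / 4 = 2; 2 << 10 = 2048
 * so every block of the chunk containing s lies in device sectors
 * [2048, 3072).
 */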
  3807. static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
  3808. int *skipped)
  3809. {
  3810. /* We simply copy at most one chunk (smallest of old and new)
  3811. * at a time, possibly less if that exceeds RESYNC_PAGES,
  3812. * or we hit a bad block or something.
  3813. * This might mean we pause for normal IO in the middle of
3814. * a chunk, but that is not a problem as mddev->reshape_position
  3815. * can record any location.
  3816. *
  3817. * If we will want to write to a location that isn't
  3818. * yet recorded as 'safe' (i.e. in metadata on disk) then
  3819. * we need to flush all reshape requests and update the metadata.
  3820. *
  3821. * When reshaping forwards (e.g. to more devices), we interpret
  3822. * 'safe' as the earliest block which might not have been copied
  3823. * down yet. We divide this by previous stripe size and multiply
  3824. * by previous stripe length to get lowest device offset that we
  3825. * cannot write to yet.
  3826. * We interpret 'sector_nr' as an address that we want to write to.
3827. * From this we use last_dev_address() to find where we might
3828. * write to, and first_dev_address() on the 'safe' position.
  3829. * If this 'next' write position is after the 'safe' position,
  3830. * we must update the metadata to increase the 'safe' position.
  3831. *
  3832. * When reshaping backwards, we round in the opposite direction
  3833. * and perform the reverse test: next write position must not be
  3834. * less than current safe position.
  3835. *
  3836. * In all this the minimum difference in data offsets
  3837. * (conf->offset_diff - always positive) allows a bit of slack,
3838. * so next can be after 'safe', but not by more than offset_diff.
  3839. *
  3840. * We need to prepare all the bios here before we start any IO
  3841. * to ensure the size we choose is acceptable to all devices.
3842. * That means one for each copy for write-out and an extra one for
  3843. * read-in.
  3844. * We store the read-in bio in ->master_bio and the others in
  3845. * ->devs[x].bio and ->devs[x].repl_bio.
  3846. */
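/*
 * In the forward direction below this boils down to setting need_flush
 * whenever
 *   next > safe + conf->offset_diff
 * i.e. whenever the new-layout chunk we are about to write could reach
 * device addresses that the recorded 'safe' position says may still hold
 * old-layout data we have not read yet, so the metadata must be updated
 * before any of these writes are issued.
 */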
  3847. struct r10conf *conf = mddev->private;
  3848. struct r10bio *r10_bio;
  3849. sector_t next, safe, last;
  3850. int max_sectors;
  3851. int nr_sectors;
  3852. int s;
  3853. struct md_rdev *rdev;
  3854. int need_flush = 0;
  3855. struct bio *blist;
  3856. struct bio *bio, *read_bio;
  3857. int sectors_done = 0;
  3858. if (sector_nr == 0) {
  3859. /* If restarting in the middle, skip the initial sectors */
  3860. if (mddev->reshape_backwards &&
  3861. conf->reshape_progress < raid10_size(mddev, 0, 0)) {
  3862. sector_nr = (raid10_size(mddev, 0, 0)
  3863. - conf->reshape_progress);
  3864. } else if (!mddev->reshape_backwards &&
  3865. conf->reshape_progress > 0)
  3866. sector_nr = conf->reshape_progress;
  3867. if (sector_nr) {
  3868. mddev->curr_resync_completed = sector_nr;
  3869. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  3870. *skipped = 1;
  3871. return sector_nr;
  3872. }
  3873. }
  3874. /* We don't use sector_nr to track where we are up to
  3875. * as that doesn't work well for ->reshape_backwards.
  3876. * So just use ->reshape_progress.
  3877. */
  3878. if (mddev->reshape_backwards) {
  3879. /* 'next' is the earliest device address that we might
  3880. * write to for this chunk in the new layout
  3881. */
  3882. next = first_dev_address(conf->reshape_progress - 1,
  3883. &conf->geo);
  3884. /* 'safe' is the last device address that we might read from
  3885. * in the old layout after a restart
  3886. */
  3887. safe = last_dev_address(conf->reshape_safe - 1,
  3888. &conf->prev);
  3889. if (next + conf->offset_diff < safe)
  3890. need_flush = 1;
  3891. last = conf->reshape_progress - 1;
  3892. sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
  3893. & conf->prev.chunk_mask);
  3894. if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
  3895. sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
  3896. } else {
  3897. /* 'next' is after the last device address that we
  3898. * might write to for this chunk in the new layout
  3899. */
  3900. next = last_dev_address(conf->reshape_progress, &conf->geo);
  3901. /* 'safe' is the earliest device address that we might
  3902. * read from in the old layout after a restart
  3903. */
  3904. safe = first_dev_address(conf->reshape_safe, &conf->prev);
  3905. /* Need to update metadata if 'next' might be beyond 'safe'
  3906. * as that would possibly corrupt data
  3907. */
  3908. if (next > safe + conf->offset_diff)
  3909. need_flush = 1;
  3910. sector_nr = conf->reshape_progress;
  3911. last = sector_nr | (conf->geo.chunk_mask
  3912. & conf->prev.chunk_mask);
  3913. if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
  3914. last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
  3915. }
  3916. if (need_flush ||
  3917. time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
  3918. /* Need to update reshape_position in metadata */
  3919. wait_barrier(conf);
  3920. mddev->reshape_position = conf->reshape_progress;
  3921. if (mddev->reshape_backwards)
  3922. mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
  3923. - conf->reshape_progress;
  3924. else
  3925. mddev->curr_resync_completed = conf->reshape_progress;
  3926. conf->reshape_checkpoint = jiffies;
  3927. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  3928. md_wakeup_thread(mddev->thread);
  3929. wait_event(mddev->sb_wait, mddev->flags == 0 ||
  3930. test_bit(MD_RECOVERY_INTR, &mddev->recovery));
  3931. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
  3932. allow_barrier(conf);
  3933. return sectors_done;
  3934. }
  3935. conf->reshape_safe = mddev->reshape_position;
  3936. allow_barrier(conf);
  3937. }
  3938. read_more:
  3939. /* Now schedule reads for blocks from sector_nr to last */
  3940. r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
  3941. r10_bio->state = 0;
  3942. raise_barrier(conf, sectors_done != 0);
  3943. atomic_set(&r10_bio->remaining, 0);
  3944. r10_bio->mddev = mddev;
  3945. r10_bio->sector = sector_nr;
  3946. set_bit(R10BIO_IsReshape, &r10_bio->state);
  3947. r10_bio->sectors = last - sector_nr + 1;
  3948. rdev = read_balance(conf, r10_bio, &max_sectors);
  3949. BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
  3950. if (!rdev) {
  3951. /* Cannot read from here, so need to record bad blocks
  3952. * on all the target devices.
  3953. */
  3954. // FIXME
  3955. mempool_free(r10_bio, conf->r10buf_pool);
  3956. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  3957. return sectors_done;
  3958. }
  3959. read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
  3960. read_bio->bi_bdev = rdev->bdev;
  3961. read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
  3962. + rdev->data_offset);
  3963. read_bio->bi_private = r10_bio;
  3964. read_bio->bi_end_io = end_sync_read;
  3965. read_bio->bi_rw = READ;
  3966. read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
  3967. __set_bit(BIO_UPTODATE, &read_bio->bi_flags);
  3968. read_bio->bi_vcnt = 0;
  3969. read_bio->bi_iter.bi_size = 0;
  3970. r10_bio->master_bio = read_bio;
  3971. r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
  3972. /* Now find the locations in the new layout */
  3973. __raid10_find_phys(&conf->geo, r10_bio);
  3974. blist = read_bio;
  3975. read_bio->bi_next = NULL;
  3976. for (s = 0; s < conf->copies*2; s++) {
  3977. struct bio *b;
  3978. int d = r10_bio->devs[s/2].devnum;
  3979. struct md_rdev *rdev2;
  3980. if (s&1) {
  3981. rdev2 = conf->mirrors[d].replacement;
  3982. b = r10_bio->devs[s/2].repl_bio;
  3983. } else {
  3984. rdev2 = conf->mirrors[d].rdev;
  3985. b = r10_bio->devs[s/2].bio;
  3986. }
  3987. if (!rdev2 || test_bit(Faulty, &rdev2->flags))
  3988. continue;
  3989. bio_reset(b);
  3990. b->bi_bdev = rdev2->bdev;
  3991. b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
  3992. rdev2->new_data_offset;
  3993. b->bi_private = r10_bio;
  3994. b->bi_end_io = end_reshape_write;
  3995. b->bi_rw = WRITE;
  3996. b->bi_next = blist;
  3997. blist = b;
  3998. }
  3999. /* Now add as many pages as possible to all of these bios. */
  4000. nr_sectors = 0;
  4001. for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
  4002. struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
  4003. int len = (max_sectors - s) << 9;
  4004. if (len > PAGE_SIZE)
  4005. len = PAGE_SIZE;
  4006. for (bio = blist; bio ; bio = bio->bi_next) {
  4007. struct bio *bio2;
  4008. if (bio_add_page(bio, page, len, 0))
  4009. continue;
  4010. /* Didn't fit, must stop */
  4011. for (bio2 = blist;
  4012. bio2 && bio2 != bio;
  4013. bio2 = bio2->bi_next) {
  4014. /* Remove last page from this bio */
  4015. bio2->bi_vcnt--;
  4016. bio2->bi_iter.bi_size -= len;
  4017. __clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
  4018. }
  4019. goto bio_full;
  4020. }
  4021. sector_nr += len >> 9;
  4022. nr_sectors += len >> 9;
  4023. }
  4024. bio_full:
  4025. r10_bio->sectors = nr_sectors;
  4026. /* Now submit the read */
  4027. md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
  4028. atomic_inc(&r10_bio->remaining);
  4029. read_bio->bi_next = NULL;
  4030. generic_make_request(read_bio);
  4031. sector_nr += nr_sectors;
  4032. sectors_done += nr_sectors;
  4033. if (sector_nr <= last)
  4034. goto read_more;
  4035. /* Now that we have done the whole section we can
  4036. * update reshape_progress
  4037. */
  4038. if (mddev->reshape_backwards)
  4039. conf->reshape_progress -= sectors_done;
  4040. else
  4041. conf->reshape_progress += sectors_done;
  4042. return sectors_done;
  4043. }
  4044. static void end_reshape_request(struct r10bio *r10_bio);
  4045. static int handle_reshape_read_error(struct mddev *mddev,
  4046. struct r10bio *r10_bio);
  4047. static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
  4048. {
  4049. /* Reshape read completed. Hopefully we have a block
  4050. * to write out.
  4051. * If we got a read error then we do sync 1-page reads from
  4052. * elsewhere until we find the data - or give up.
  4053. */
  4054. struct r10conf *conf = mddev->private;
  4055. int s;
  4056. if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
  4057. if (handle_reshape_read_error(mddev, r10_bio) < 0) {
  4058. /* Reshape has been aborted */
  4059. md_done_sync(mddev, r10_bio->sectors, 0);
  4060. return;
  4061. }
  4062. /* We definitely have the data in the pages, schedule the
  4063. * writes.
  4064. */
  4065. atomic_set(&r10_bio->remaining, 1);
  4066. for (s = 0; s < conf->copies*2; s++) {
  4067. struct bio *b;
  4068. int d = r10_bio->devs[s/2].devnum;
  4069. struct md_rdev *rdev;
  4070. if (s&1) {
  4071. rdev = conf->mirrors[d].replacement;
  4072. b = r10_bio->devs[s/2].repl_bio;
  4073. } else {
  4074. rdev = conf->mirrors[d].rdev;
  4075. b = r10_bio->devs[s/2].bio;
  4076. }
  4077. if (!rdev || test_bit(Faulty, &rdev->flags))
  4078. continue;
  4079. atomic_inc(&rdev->nr_pending);
  4080. md_sync_acct(b->bi_bdev, r10_bio->sectors);
  4081. atomic_inc(&r10_bio->remaining);
  4082. b->bi_next = NULL;
  4083. generic_make_request(b);
  4084. }
  4085. end_reshape_request(r10_bio);
  4086. }
  4087. static void end_reshape(struct r10conf *conf)
  4088. {
  4089. if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
  4090. return;
  4091. spin_lock_irq(&conf->device_lock);
  4092. conf->prev = conf->geo;
  4093. md_finish_reshape(conf->mddev);
  4094. smp_wmb();
  4095. conf->reshape_progress = MaxSector;
  4096. spin_unlock_irq(&conf->device_lock);
  4097. /* read-ahead size must cover two whole stripes, which is
4098. * 2 * (datadisks) * chunksize, where 'datadisks' is raid_disks / near_copies
  4099. */
  4100. if (conf->mddev->queue) {
  4101. int stripe = conf->geo.raid_disks *
  4102. ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
  4103. stripe /= conf->geo.near_copies;
  4104. if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  4105. conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  4106. }
  4107. conf->fullsync = 0;
  4108. }
  4109. static int handle_reshape_read_error(struct mddev *mddev,
  4110. struct r10bio *r10_bio)
  4111. {
  4112. /* Use sync reads to get the blocks from somewhere else */
  4113. int sectors = r10_bio->sectors;
  4114. struct r10conf *conf = mddev->private;
  4115. struct {
  4116. struct r10bio r10_bio;
  4117. struct r10dev devs[conf->copies];
  4118. } on_stack;
  4119. struct r10bio *r10b = &on_stack.r10_bio;
  4120. int slot = 0;
  4121. int idx = 0;
  4122. struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
  4123. r10b->sector = r10_bio->sector;
  4124. __raid10_find_phys(&conf->prev, r10b);
  4125. while (sectors) {
  4126. int s = sectors;
  4127. int success = 0;
  4128. int first_slot = slot;
  4129. if (s > (PAGE_SIZE >> 9))
  4130. s = PAGE_SIZE >> 9;
  4131. while (!success) {
  4132. int d = r10b->devs[slot].devnum;
  4133. struct md_rdev *rdev = conf->mirrors[d].rdev;
  4134. sector_t addr;
  4135. if (rdev == NULL ||
  4136. test_bit(Faulty, &rdev->flags) ||
  4137. !test_bit(In_sync, &rdev->flags))
  4138. goto failed;
  4139. addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
  4140. success = sync_page_io(rdev,
  4141. addr,
  4142. s << 9,
  4143. bvec[idx].bv_page,
  4144. READ, false);
  4145. if (success)
  4146. break;
  4147. failed:
  4148. slot++;
  4149. if (slot >= conf->copies)
  4150. slot = 0;
  4151. if (slot == first_slot)
  4152. break;
  4153. }
  4154. if (!success) {
  4155. /* couldn't read this block, must give up */
  4156. set_bit(MD_RECOVERY_INTR,
  4157. &mddev->recovery);
  4158. return -EIO;
  4159. }
  4160. sectors -= s;
  4161. idx++;
  4162. }
  4163. return 0;
  4164. }
  4165. static void end_reshape_write(struct bio *bio, int error)
  4166. {
  4167. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  4168. struct r10bio *r10_bio = bio->bi_private;
  4169. struct mddev *mddev = r10_bio->mddev;
  4170. struct r10conf *conf = mddev->private;
  4171. int d;
  4172. int slot;
  4173. int repl;
  4174. struct md_rdev *rdev = NULL;
  4175. d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
  4176. if (repl)
  4177. rdev = conf->mirrors[d].replacement;
  4178. if (!rdev) {
  4179. smp_mb();
  4180. rdev = conf->mirrors[d].rdev;
  4181. }
  4182. if (!uptodate) {
  4183. /* FIXME should record badblock */
  4184. md_error(mddev, rdev);
  4185. }
  4186. rdev_dec_pending(rdev, mddev);
  4187. end_reshape_request(r10_bio);
  4188. }
  4189. static void end_reshape_request(struct r10bio *r10_bio)
  4190. {
  4191. if (!atomic_dec_and_test(&r10_bio->remaining))
  4192. return;
  4193. md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
  4194. bio_put(r10_bio->master_bio);
  4195. put_buf(r10_bio);
  4196. }
  4197. static void raid10_finish_reshape(struct mddev *mddev)
  4198. {
  4199. struct r10conf *conf = mddev->private;
  4200. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  4201. return;
  4202. if (mddev->delta_disks > 0) {
  4203. sector_t size = raid10_size(mddev, 0, 0);
  4204. md_set_array_sectors(mddev, size);
  4205. if (mddev->recovery_cp > mddev->resync_max_sectors) {
  4206. mddev->recovery_cp = mddev->resync_max_sectors;
  4207. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  4208. }
  4209. mddev->resync_max_sectors = size;
  4210. set_capacity(mddev->gendisk, mddev->array_sectors);
  4211. revalidate_disk(mddev->gendisk);
  4212. } else {
  4213. int d;
  4214. for (d = conf->geo.raid_disks ;
  4215. d < conf->geo.raid_disks - mddev->delta_disks;
  4216. d++) {
  4217. struct md_rdev *rdev = conf->mirrors[d].rdev;
  4218. if (rdev)
  4219. clear_bit(In_sync, &rdev->flags);
  4220. rdev = conf->mirrors[d].replacement;
  4221. if (rdev)
  4222. clear_bit(In_sync, &rdev->flags);
  4223. }
  4224. }
  4225. mddev->layout = mddev->new_layout;
  4226. mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
  4227. mddev->reshape_position = MaxSector;
  4228. mddev->delta_disks = 0;
  4229. mddev->reshape_backwards = 0;
  4230. }
  4231. static struct md_personality raid10_personality =
  4232. {
  4233. .name = "raid10",
  4234. .level = 10,
  4235. .owner = THIS_MODULE,
  4236. .make_request = make_request,
  4237. .run = run,
  4238. .free = raid10_free,
  4239. .status = status,
  4240. .error_handler = error,
  4241. .hot_add_disk = raid10_add_disk,
  4242. .hot_remove_disk= raid10_remove_disk,
  4243. .spare_active = raid10_spare_active,
  4244. .sync_request = sync_request,
  4245. .quiesce = raid10_quiesce,
  4246. .size = raid10_size,
  4247. .resize = raid10_resize,
  4248. .takeover = raid10_takeover,
  4249. .check_reshape = raid10_check_reshape,
  4250. .start_reshape = raid10_start_reshape,
  4251. .finish_reshape = raid10_finish_reshape,
  4252. .congested = raid10_congested,
  4253. .mergeable_bvec = raid10_mergeable_bvec,
  4254. };
  4255. static int __init raid_init(void)
  4256. {
  4257. return register_md_personality(&raid10_personality);
  4258. }
  4259. static void raid_exit(void)
  4260. {
  4261. unregister_md_personality(&raid10_personality);
  4262. }
  4263. module_init(raid_init);
  4264. module_exit(raid_exit);
  4265. MODULE_LICENSE("GPL");
  4266. MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
  4267. MODULE_ALIAS("md-personality-9"); /* RAID10 */
  4268. MODULE_ALIAS("md-raid10");
  4269. MODULE_ALIAS("md-level-10");
  4270. module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);