xfs_inode.c 100 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
2944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601360236033604360536063607360836093610361136123613361436153616361736183619362036213622362336243625362636273628362936303631363236333634363536363637363836393640364136423643364436453646364736483649365036513652365336543655365636573658365936603661366236633664366536663667366836693670367136723673367436753676367736783679
  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include <linux/log2.h>
  19. #include "xfs.h"
  20. #include "xfs_fs.h"
  21. #include "xfs_shared.h"
  22. #include "xfs_format.h"
  23. #include "xfs_log_format.h"
  24. #include "xfs_trans_resv.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_mount.h"
  27. #include "xfs_defer.h"
  28. #include "xfs_inode.h"
  29. #include "xfs_da_format.h"
  30. #include "xfs_da_btree.h"
  31. #include "xfs_dir2.h"
  32. #include "xfs_attr_sf.h"
  33. #include "xfs_attr.h"
  34. #include "xfs_trans_space.h"
  35. #include "xfs_trans.h"
  36. #include "xfs_buf_item.h"
  37. #include "xfs_inode_item.h"
  38. #include "xfs_ialloc.h"
  39. #include "xfs_bmap.h"
  40. #include "xfs_bmap_util.h"
  41. #include "xfs_error.h"
  42. #include "xfs_quota.h"
  43. #include "xfs_filestream.h"
  44. #include "xfs_cksum.h"
  45. #include "xfs_trace.h"
  46. #include "xfs_icache.h"
  47. #include "xfs_symlink.h"
  48. #include "xfs_trans_priv.h"
  49. #include "xfs_log.h"
  50. #include "xfs_bmap_btree.h"
  51. #include "xfs_reflink.h"
  52. #include "xfs_dir2_priv.h"
  53. kmem_zone_t *xfs_inode_zone;
  54. /*
  55. * Used in xfs_itruncate_extents(). This is the maximum number of extents
  56. * freed from a file in a single transaction.
  57. */
  58. #define XFS_ITRUNC_MAX_EXTENTS 2
  59. STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
  60. STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
  61. STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
  62. /*
  63. * helper function to extract extent size hint from inode
  64. */
  65. xfs_extlen_t
  66. xfs_get_extsz_hint(
  67. struct xfs_inode *ip)
  68. {
  69. if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
  70. return ip->i_d.di_extsize;
  71. if (XFS_IS_REALTIME_INODE(ip))
  72. return ip->i_mount->m_sb.sb_rextsize;
  73. return 0;
  74. }
  75. /*
  76. * Helper function to extract CoW extent size hint from inode.
  77. * Between the extent size hint and the CoW extent size hint, we
  78. * return the greater of the two. If the value is zero (automatic),
  79. * use the default size.
  80. */
  81. xfs_extlen_t
  82. xfs_get_cowextsz_hint(
  83. struct xfs_inode *ip)
  84. {
  85. xfs_extlen_t a, b;
  86. a = 0;
  87. if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
  88. a = ip->i_d.di_cowextsize;
  89. b = xfs_get_extsz_hint(ip);
  90. a = max(a, b);
  91. if (a == 0)
  92. return XFS_DEFAULT_COWEXTSZ_HINT;
  93. return a;
  94. }
  95. /*
  96. * These two are wrapper routines around the xfs_ilock() routine used to
  97. * centralize some grungy code. They are used in places that wish to lock the
  98. * inode solely for reading the extents. The reason these places can't just
  99. * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
  100. * bringing in of the extents from disk for a file in b-tree format. If the
  101. * inode is in b-tree format, then we need to lock the inode exclusively until
  102. * the extents are read in. Locking it exclusively all the time would limit
  103. * our parallelism unnecessarily, though. What we do instead is check to see
  104. * if the extents have been read in yet, and only lock the inode exclusively
  105. * if they have not.
  106. *
  107. * The functions return a value which should be given to the corresponding
  108. * xfs_iunlock() call.
  109. */
  110. uint
  111. xfs_ilock_data_map_shared(
  112. struct xfs_inode *ip)
  113. {
  114. uint lock_mode = XFS_ILOCK_SHARED;
  115. if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
  116. (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
  117. lock_mode = XFS_ILOCK_EXCL;
  118. xfs_ilock(ip, lock_mode);
  119. return lock_mode;
  120. }
  121. uint
  122. xfs_ilock_attr_map_shared(
  123. struct xfs_inode *ip)
  124. {
  125. uint lock_mode = XFS_ILOCK_SHARED;
  126. if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
  127. (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
  128. lock_mode = XFS_ILOCK_EXCL;
  129. xfs_ilock(ip, lock_mode);
  130. return lock_mode;
  131. }
  132. /*
  133. * The xfs inode contains 3 multi-reader locks: the i_iolock the i_mmap_lock and
  134. * the i_lock. This routine allows various combinations of the locks to be
  135. * obtained.
  136. *
  137. * The 3 locks should always be ordered so that the IO lock is obtained first,
  138. * the mmap lock second and the ilock last in order to prevent deadlock.
  139. *
  140. * Basic locking order:
  141. *
  142. * i_iolock -> i_mmap_lock -> page_lock -> i_ilock
  143. *
  144. * mmap_sem locking order:
  145. *
  146. * i_iolock -> page lock -> mmap_sem
  147. * mmap_sem -> i_mmap_lock -> page_lock
  148. *
  149. * The difference in mmap_sem locking order mean that we cannot hold the
  150. * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
  151. * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
  152. * in get_user_pages() to map the user pages into the kernel address space for
  153. * direct IO. Similarly the i_iolock cannot be taken inside a page fault because
  154. * page faults already hold the mmap_sem.
  155. *
  156. * Hence to serialise fully against both syscall and mmap based IO, we need to
  157. * take both the i_iolock and the i_mmap_lock. These locks should *only* be both
  158. * taken in places where we need to invalidate the page cache in a race
  159. * free manner (e.g. truncate, hole punch and other extent manipulation
  160. * functions).
  161. */
  162. void
  163. xfs_ilock(
  164. xfs_inode_t *ip,
  165. uint lock_flags)
  166. {
  167. trace_xfs_ilock(ip, lock_flags, _RET_IP_);
  168. /*
  169. * You can't set both SHARED and EXCL for the same lock,
  170. * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
  171. * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
  172. */
  173. ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
  174. (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
  175. ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
  176. (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
  177. ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
  178. (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
  179. ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
  180. if (lock_flags & XFS_IOLOCK_EXCL)
  181. mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
  182. else if (lock_flags & XFS_IOLOCK_SHARED)
  183. mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
  184. if (lock_flags & XFS_MMAPLOCK_EXCL)
  185. mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
  186. else if (lock_flags & XFS_MMAPLOCK_SHARED)
  187. mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
  188. if (lock_flags & XFS_ILOCK_EXCL)
  189. mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
  190. else if (lock_flags & XFS_ILOCK_SHARED)
  191. mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
  192. }
  193. /*
  194. * This is just like xfs_ilock(), except that the caller
  195. * is guaranteed not to sleep. It returns 1 if it gets
  196. * the requested locks and 0 otherwise. If the IO lock is
  197. * obtained but the inode lock cannot be, then the IO lock
  198. * is dropped before returning.
  199. *
  200. * ip -- the inode being locked
  201. * lock_flags -- this parameter indicates the inode's locks to be
  202. * to be locked. See the comment for xfs_ilock() for a list
  203. * of valid values.
  204. */
  205. int
  206. xfs_ilock_nowait(
  207. xfs_inode_t *ip,
  208. uint lock_flags)
  209. {
  210. trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
  211. /*
  212. * You can't set both SHARED and EXCL for the same lock,
  213. * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
  214. * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
  215. */
  216. ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
  217. (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
  218. ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
  219. (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
  220. ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
  221. (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
  222. ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
  223. if (lock_flags & XFS_IOLOCK_EXCL) {
  224. if (!mrtryupdate(&ip->i_iolock))
  225. goto out;
  226. } else if (lock_flags & XFS_IOLOCK_SHARED) {
  227. if (!mrtryaccess(&ip->i_iolock))
  228. goto out;
  229. }
  230. if (lock_flags & XFS_MMAPLOCK_EXCL) {
  231. if (!mrtryupdate(&ip->i_mmaplock))
  232. goto out_undo_iolock;
  233. } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
  234. if (!mrtryaccess(&ip->i_mmaplock))
  235. goto out_undo_iolock;
  236. }
  237. if (lock_flags & XFS_ILOCK_EXCL) {
  238. if (!mrtryupdate(&ip->i_lock))
  239. goto out_undo_mmaplock;
  240. } else if (lock_flags & XFS_ILOCK_SHARED) {
  241. if (!mrtryaccess(&ip->i_lock))
  242. goto out_undo_mmaplock;
  243. }
  244. return 1;
  245. out_undo_mmaplock:
  246. if (lock_flags & XFS_MMAPLOCK_EXCL)
  247. mrunlock_excl(&ip->i_mmaplock);
  248. else if (lock_flags & XFS_MMAPLOCK_SHARED)
  249. mrunlock_shared(&ip->i_mmaplock);
  250. out_undo_iolock:
  251. if (lock_flags & XFS_IOLOCK_EXCL)
  252. mrunlock_excl(&ip->i_iolock);
  253. else if (lock_flags & XFS_IOLOCK_SHARED)
  254. mrunlock_shared(&ip->i_iolock);
  255. out:
  256. return 0;
  257. }
  258. /*
  259. * xfs_iunlock() is used to drop the inode locks acquired with
  260. * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
  261. * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
  262. * that we know which locks to drop.
  263. *
  264. * ip -- the inode being unlocked
  265. * lock_flags -- this parameter indicates the inode's locks to be
  266. * to be unlocked. See the comment for xfs_ilock() for a list
  267. * of valid values for this parameter.
  268. *
  269. */
  270. void
  271. xfs_iunlock(
  272. xfs_inode_t *ip,
  273. uint lock_flags)
  274. {
  275. /*
  276. * You can't set both SHARED and EXCL for the same lock,
  277. * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
  278. * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
  279. */
  280. ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
  281. (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
  282. ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
  283. (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
  284. ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
  285. (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
  286. ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
  287. ASSERT(lock_flags != 0);
  288. if (lock_flags & XFS_IOLOCK_EXCL)
  289. mrunlock_excl(&ip->i_iolock);
  290. else if (lock_flags & XFS_IOLOCK_SHARED)
  291. mrunlock_shared(&ip->i_iolock);
  292. if (lock_flags & XFS_MMAPLOCK_EXCL)
  293. mrunlock_excl(&ip->i_mmaplock);
  294. else if (lock_flags & XFS_MMAPLOCK_SHARED)
  295. mrunlock_shared(&ip->i_mmaplock);
  296. if (lock_flags & XFS_ILOCK_EXCL)
  297. mrunlock_excl(&ip->i_lock);
  298. else if (lock_flags & XFS_ILOCK_SHARED)
  299. mrunlock_shared(&ip->i_lock);
  300. trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
  301. }
  302. /*
  303. * give up write locks. the i/o lock cannot be held nested
  304. * if it is being demoted.
  305. */
  306. void
  307. xfs_ilock_demote(
  308. xfs_inode_t *ip,
  309. uint lock_flags)
  310. {
  311. ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
  312. ASSERT((lock_flags &
  313. ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
  314. if (lock_flags & XFS_ILOCK_EXCL)
  315. mrdemote(&ip->i_lock);
  316. if (lock_flags & XFS_MMAPLOCK_EXCL)
  317. mrdemote(&ip->i_mmaplock);
  318. if (lock_flags & XFS_IOLOCK_EXCL)
  319. mrdemote(&ip->i_iolock);
  320. trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
  321. }
  322. #if defined(DEBUG) || defined(XFS_WARN)
  323. int
  324. xfs_isilocked(
  325. xfs_inode_t *ip,
  326. uint lock_flags)
  327. {
  328. if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
  329. if (!(lock_flags & XFS_ILOCK_SHARED))
  330. return !!ip->i_lock.mr_writer;
  331. return rwsem_is_locked(&ip->i_lock.mr_lock);
  332. }
  333. if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
  334. if (!(lock_flags & XFS_MMAPLOCK_SHARED))
  335. return !!ip->i_mmaplock.mr_writer;
  336. return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
  337. }
  338. if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
  339. if (!(lock_flags & XFS_IOLOCK_SHARED))
  340. return !!ip->i_iolock.mr_writer;
  341. return rwsem_is_locked(&ip->i_iolock.mr_lock);
  342. }
  343. ASSERT(0);
  344. return 0;
  345. }
  346. #endif
  347. #ifdef DEBUG
  348. int xfs_locked_n;
  349. int xfs_small_retries;
  350. int xfs_middle_retries;
  351. int xfs_lots_retries;
  352. int xfs_lock_delays;
  353. #endif
  354. /*
  355. * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
  356. * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
  357. * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
  358. * errors and warnings.
  359. */
  360. #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
  361. static bool
  362. xfs_lockdep_subclass_ok(
  363. int subclass)
  364. {
  365. return subclass < MAX_LOCKDEP_SUBCLASSES;
  366. }
  367. #else
  368. #define xfs_lockdep_subclass_ok(subclass) (true)
  369. #endif
  370. /*
  371. * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
  372. * value. This can be called for any type of inode lock combination, including
  373. * parent locking. Care must be taken to ensure we don't overrun the subclass
  374. * storage fields in the class mask we build.
  375. */
  376. static inline int
  377. xfs_lock_inumorder(int lock_mode, int subclass)
  378. {
  379. int class = 0;
  380. ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
  381. XFS_ILOCK_RTSUM)));
  382. ASSERT(xfs_lockdep_subclass_ok(subclass));
  383. if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
  384. ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
  385. ASSERT(xfs_lockdep_subclass_ok(subclass +
  386. XFS_IOLOCK_PARENT_VAL));
  387. class += subclass << XFS_IOLOCK_SHIFT;
  388. if (lock_mode & XFS_IOLOCK_PARENT)
  389. class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
  390. }
  391. if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
  392. ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
  393. class += subclass << XFS_MMAPLOCK_SHIFT;
  394. }
  395. if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
  396. ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
  397. class += subclass << XFS_ILOCK_SHIFT;
  398. }
  399. return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
  400. }
  401. /*
  402. * The following routine will lock n inodes in exclusive mode. We assume the
  403. * caller calls us with the inodes in i_ino order.
  404. *
  405. * We need to detect deadlock where an inode that we lock is in the AIL and we
  406. * start waiting for another inode that is locked by a thread in a long running
  407. * transaction (such as truncate). This can result in deadlock since the long
  408. * running trans might need to wait for the inode we just locked in order to
  409. * push the tail and free space in the log.
  410. *
  411. * xfs_lock_inodes() can only be used to lock one type of lock at a time -
  412. * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
  413. * lock more than one at a time, lockdep will report false positives saying we
  414. * have violated locking orders.
  415. */
  416. static void
  417. xfs_lock_inodes(
  418. xfs_inode_t **ips,
  419. int inodes,
  420. uint lock_mode)
  421. {
  422. int attempts = 0, i, j, try_lock;
  423. xfs_log_item_t *lp;
  424. /*
  425. * Currently supports between 2 and 5 inodes with exclusive locking. We
  426. * support an arbitrary depth of locking here, but absolute limits on
  427. * inodes depend on the the type of locking and the limits placed by
  428. * lockdep annotations in xfs_lock_inumorder. These are all checked by
  429. * the asserts.
  430. */
  431. ASSERT(ips && inodes >= 2 && inodes <= 5);
  432. ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
  433. XFS_ILOCK_EXCL));
  434. ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
  435. XFS_ILOCK_SHARED)));
  436. ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
  437. inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
  438. ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
  439. inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
  440. ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
  441. inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
  442. if (lock_mode & XFS_IOLOCK_EXCL) {
  443. ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
  444. } else if (lock_mode & XFS_MMAPLOCK_EXCL)
  445. ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
  446. try_lock = 0;
  447. i = 0;
  448. again:
  449. for (; i < inodes; i++) {
  450. ASSERT(ips[i]);
  451. if (i && (ips[i] == ips[i - 1])) /* Already locked */
  452. continue;
  453. /*
  454. * If try_lock is not set yet, make sure all locked inodes are
  455. * not in the AIL. If any are, set try_lock to be used later.
  456. */
  457. if (!try_lock) {
  458. for (j = (i - 1); j >= 0 && !try_lock; j--) {
  459. lp = (xfs_log_item_t *)ips[j]->i_itemp;
  460. if (lp && (lp->li_flags & XFS_LI_IN_AIL))
  461. try_lock++;
  462. }
  463. }
  464. /*
  465. * If any of the previous locks we have locked is in the AIL,
  466. * we must TRY to get the second and subsequent locks. If
  467. * we can't get any, we must release all we have
  468. * and try again.
  469. */
  470. if (!try_lock) {
  471. xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
  472. continue;
  473. }
  474. /* try_lock means we have an inode locked that is in the AIL. */
  475. ASSERT(i != 0);
  476. if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
  477. continue;
  478. /*
  479. * Unlock all previous guys and try again. xfs_iunlock will try
  480. * to push the tail if the inode is in the AIL.
  481. */
  482. attempts++;
  483. for (j = i - 1; j >= 0; j--) {
  484. /*
  485. * Check to see if we've already unlocked this one. Not
  486. * the first one going back, and the inode ptr is the
  487. * same.
  488. */
  489. if (j != (i - 1) && ips[j] == ips[j + 1])
  490. continue;
  491. xfs_iunlock(ips[j], lock_mode);
  492. }
  493. if ((attempts % 5) == 0) {
  494. delay(1); /* Don't just spin the CPU */
  495. #ifdef DEBUG
  496. xfs_lock_delays++;
  497. #endif
  498. }
  499. i = 0;
  500. try_lock = 0;
  501. goto again;
  502. }
  503. #ifdef DEBUG
  504. if (attempts) {
  505. if (attempts < 5) xfs_small_retries++;
  506. else if (attempts < 100) xfs_middle_retries++;
  507. else xfs_lots_retries++;
  508. } else {
  509. xfs_locked_n++;
  510. }
  511. #endif
  512. }
  513. /*
  514. * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
  515. * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
  516. * lock more than one at a time, lockdep will report false positives saying we
  517. * have violated locking orders.
  518. */
  519. void
  520. xfs_lock_two_inodes(
  521. xfs_inode_t *ip0,
  522. xfs_inode_t *ip1,
  523. uint lock_mode)
  524. {
  525. xfs_inode_t *temp;
  526. int attempts = 0;
  527. xfs_log_item_t *lp;
  528. if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
  529. ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
  530. ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
  531. } else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
  532. ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
  533. ASSERT(ip0->i_ino != ip1->i_ino);
  534. if (ip0->i_ino > ip1->i_ino) {
  535. temp = ip0;
  536. ip0 = ip1;
  537. ip1 = temp;
  538. }
  539. again:
  540. xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
  541. /*
  542. * If the first lock we have locked is in the AIL, we must TRY to get
  543. * the second lock. If we can't get it, we must release the first one
  544. * and try again.
  545. */
  546. lp = (xfs_log_item_t *)ip0->i_itemp;
  547. if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
  548. if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
  549. xfs_iunlock(ip0, lock_mode);
  550. if ((++attempts % 5) == 0)
  551. delay(1); /* Don't just spin the CPU */
  552. goto again;
  553. }
  554. } else {
  555. xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
  556. }
  557. }
  558. void
  559. __xfs_iflock(
  560. struct xfs_inode *ip)
  561. {
  562. wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
  563. DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
  564. do {
  565. prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
  566. if (xfs_isiflocked(ip))
  567. io_schedule();
  568. } while (!xfs_iflock_nowait(ip));
  569. finish_wait(wq, &wait.wait);
  570. }
  571. STATIC uint
  572. _xfs_dic2xflags(
  573. __uint16_t di_flags,
  574. uint64_t di_flags2,
  575. bool has_attr)
  576. {
  577. uint flags = 0;
  578. if (di_flags & XFS_DIFLAG_ANY) {
  579. if (di_flags & XFS_DIFLAG_REALTIME)
  580. flags |= FS_XFLAG_REALTIME;
  581. if (di_flags & XFS_DIFLAG_PREALLOC)
  582. flags |= FS_XFLAG_PREALLOC;
  583. if (di_flags & XFS_DIFLAG_IMMUTABLE)
  584. flags |= FS_XFLAG_IMMUTABLE;
  585. if (di_flags & XFS_DIFLAG_APPEND)
  586. flags |= FS_XFLAG_APPEND;
  587. if (di_flags & XFS_DIFLAG_SYNC)
  588. flags |= FS_XFLAG_SYNC;
  589. if (di_flags & XFS_DIFLAG_NOATIME)
  590. flags |= FS_XFLAG_NOATIME;
  591. if (di_flags & XFS_DIFLAG_NODUMP)
  592. flags |= FS_XFLAG_NODUMP;
  593. if (di_flags & XFS_DIFLAG_RTINHERIT)
  594. flags |= FS_XFLAG_RTINHERIT;
  595. if (di_flags & XFS_DIFLAG_PROJINHERIT)
  596. flags |= FS_XFLAG_PROJINHERIT;
  597. if (di_flags & XFS_DIFLAG_NOSYMLINKS)
  598. flags |= FS_XFLAG_NOSYMLINKS;
  599. if (di_flags & XFS_DIFLAG_EXTSIZE)
  600. flags |= FS_XFLAG_EXTSIZE;
  601. if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
  602. flags |= FS_XFLAG_EXTSZINHERIT;
  603. if (di_flags & XFS_DIFLAG_NODEFRAG)
  604. flags |= FS_XFLAG_NODEFRAG;
  605. if (di_flags & XFS_DIFLAG_FILESTREAM)
  606. flags |= FS_XFLAG_FILESTREAM;
  607. }
  608. if (di_flags2 & XFS_DIFLAG2_ANY) {
  609. if (di_flags2 & XFS_DIFLAG2_DAX)
  610. flags |= FS_XFLAG_DAX;
  611. if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
  612. flags |= FS_XFLAG_COWEXTSIZE;
  613. }
  614. if (has_attr)
  615. flags |= FS_XFLAG_HASATTR;
  616. return flags;
  617. }
  618. uint
  619. xfs_ip2xflags(
  620. struct xfs_inode *ip)
  621. {
  622. struct xfs_icdinode *dic = &ip->i_d;
  623. return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
  624. }
  625. /*
  626. * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
  627. * is allowed, otherwise it has to be an exact match. If a CI match is found,
  628. * ci_name->name will point to a the actual name (caller must free) or
  629. * will be set to NULL if an exact match is found.
  630. */
  631. int
  632. xfs_lookup(
  633. xfs_inode_t *dp,
  634. struct xfs_name *name,
  635. xfs_inode_t **ipp,
  636. struct xfs_name *ci_name)
  637. {
  638. xfs_ino_t inum;
  639. int error;
  640. trace_xfs_lookup(dp, name);
  641. if (XFS_FORCED_SHUTDOWN(dp->i_mount))
  642. return -EIO;
  643. xfs_ilock(dp, XFS_IOLOCK_SHARED);
  644. error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
  645. if (error)
  646. goto out_unlock;
  647. error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
  648. if (error)
  649. goto out_free_name;
  650. xfs_iunlock(dp, XFS_IOLOCK_SHARED);
  651. return 0;
  652. out_free_name:
  653. if (ci_name)
  654. kmem_free(ci_name->name);
  655. out_unlock:
  656. xfs_iunlock(dp, XFS_IOLOCK_SHARED);
  657. *ipp = NULL;
  658. return error;
  659. }
  660. /*
  661. * Allocate an inode on disk and return a copy of its in-core version.
  662. * The in-core inode is locked exclusively. Set mode, nlink, and rdev
  663. * appropriately within the inode. The uid and gid for the inode are
  664. * set according to the contents of the given cred structure.
  665. *
  666. * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
  667. * has a free inode available, call xfs_iget() to obtain the in-core
  668. * version of the allocated inode. Finally, fill in the inode and
  669. * log its initial contents. In this case, ialloc_context would be
  670. * set to NULL.
  671. *
  672. * If xfs_dialloc() does not have an available inode, it will replenish
  673. * its supply by doing an allocation. Since we can only do one
  674. * allocation within a transaction without deadlocks, we must commit
  675. * the current transaction before returning the inode itself.
  676. * In this case, therefore, we will set ialloc_context and return.
  677. * The caller should then commit the current transaction, start a new
  678. * transaction, and call xfs_ialloc() again to actually get the inode.
  679. *
  680. * To ensure that some other process does not grab the inode that
  681. * was allocated during the first call to xfs_ialloc(), this routine
  682. * also returns the [locked] bp pointing to the head of the freelist
  683. * as ialloc_context. The caller should hold this buffer across
  684. * the commit and pass it back into this routine on the second call.
  685. *
  686. * If we are allocating quota inodes, we do not have a parent inode
  687. * to attach to or associate with (i.e. pip == NULL) because they
  688. * are not linked into the directory structure - they are attached
  689. * directly to the superblock - and so have no parent.
  690. */
  691. static int
  692. xfs_ialloc(
  693. xfs_trans_t *tp,
  694. xfs_inode_t *pip,
  695. umode_t mode,
  696. xfs_nlink_t nlink,
  697. xfs_dev_t rdev,
  698. prid_t prid,
  699. int okalloc,
  700. xfs_buf_t **ialloc_context,
  701. xfs_inode_t **ipp)
  702. {
  703. struct xfs_mount *mp = tp->t_mountp;
  704. xfs_ino_t ino;
  705. xfs_inode_t *ip;
  706. uint flags;
  707. int error;
  708. struct timespec tv;
  709. struct inode *inode;
  710. /*
  711. * Call the space management code to pick
  712. * the on-disk inode to be allocated.
  713. */
  714. error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
  715. ialloc_context, &ino);
  716. if (error)
  717. return error;
  718. if (*ialloc_context || ino == NULLFSINO) {
  719. *ipp = NULL;
  720. return 0;
  721. }
  722. ASSERT(*ialloc_context == NULL);
  723. /*
  724. * Get the in-core inode with the lock held exclusively.
  725. * This is because we're setting fields here we need
  726. * to prevent others from looking at until we're done.
  727. */
  728. error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
  729. XFS_ILOCK_EXCL, &ip);
  730. if (error)
  731. return error;
  732. ASSERT(ip != NULL);
  733. inode = VFS_I(ip);
  734. /*
  735. * We always convert v1 inodes to v2 now - we only support filesystems
  736. * with >= v2 inode capability, so there is no reason for ever leaving
  737. * an inode in v1 format.
  738. */
  739. if (ip->i_d.di_version == 1)
  740. ip->i_d.di_version = 2;
  741. inode->i_mode = mode;
  742. set_nlink(inode, nlink);
  743. ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
  744. ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
  745. xfs_set_projid(ip, prid);
  746. if (pip && XFS_INHERIT_GID(pip)) {
  747. ip->i_d.di_gid = pip->i_d.di_gid;
  748. if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
  749. inode->i_mode |= S_ISGID;
  750. }
  751. /*
  752. * If the group ID of the new file does not match the effective group
  753. * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
  754. * (and only if the irix_sgid_inherit compatibility variable is set).
  755. */
  756. if ((irix_sgid_inherit) &&
  757. (inode->i_mode & S_ISGID) &&
  758. (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
  759. inode->i_mode &= ~S_ISGID;
  760. ip->i_d.di_size = 0;
  761. ip->i_d.di_nextents = 0;
  762. ASSERT(ip->i_d.di_nblocks == 0);
  763. tv = current_time(inode);
  764. inode->i_mtime = tv;
  765. inode->i_atime = tv;
  766. inode->i_ctime = tv;
  767. ip->i_d.di_extsize = 0;
  768. ip->i_d.di_dmevmask = 0;
  769. ip->i_d.di_dmstate = 0;
  770. ip->i_d.di_flags = 0;
  771. if (ip->i_d.di_version == 3) {
  772. inode->i_version = 1;
  773. ip->i_d.di_flags2 = 0;
  774. ip->i_d.di_cowextsize = 0;
  775. ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
  776. ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
  777. }
  778. flags = XFS_ILOG_CORE;
  779. switch (mode & S_IFMT) {
  780. case S_IFIFO:
  781. case S_IFCHR:
  782. case S_IFBLK:
  783. case S_IFSOCK:
  784. ip->i_d.di_format = XFS_DINODE_FMT_DEV;
  785. ip->i_df.if_u2.if_rdev = rdev;
  786. ip->i_df.if_flags = 0;
  787. flags |= XFS_ILOG_DEV;
  788. break;
  789. case S_IFREG:
  790. case S_IFDIR:
  791. if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
  792. uint di_flags = 0;
  793. if (S_ISDIR(mode)) {
  794. if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
  795. di_flags |= XFS_DIFLAG_RTINHERIT;
  796. if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
  797. di_flags |= XFS_DIFLAG_EXTSZINHERIT;
  798. ip->i_d.di_extsize = pip->i_d.di_extsize;
  799. }
  800. if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
  801. di_flags |= XFS_DIFLAG_PROJINHERIT;
  802. } else if (S_ISREG(mode)) {
  803. if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
  804. di_flags |= XFS_DIFLAG_REALTIME;
  805. if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
  806. di_flags |= XFS_DIFLAG_EXTSIZE;
  807. ip->i_d.di_extsize = pip->i_d.di_extsize;
  808. }
  809. }
  810. if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
  811. xfs_inherit_noatime)
  812. di_flags |= XFS_DIFLAG_NOATIME;
  813. if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
  814. xfs_inherit_nodump)
  815. di_flags |= XFS_DIFLAG_NODUMP;
  816. if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
  817. xfs_inherit_sync)
  818. di_flags |= XFS_DIFLAG_SYNC;
  819. if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
  820. xfs_inherit_nosymlinks)
  821. di_flags |= XFS_DIFLAG_NOSYMLINKS;
  822. if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
  823. xfs_inherit_nodefrag)
  824. di_flags |= XFS_DIFLAG_NODEFRAG;
  825. if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
  826. di_flags |= XFS_DIFLAG_FILESTREAM;
  827. ip->i_d.di_flags |= di_flags;
  828. }
  829. if (pip &&
  830. (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
  831. pip->i_d.di_version == 3 &&
  832. ip->i_d.di_version == 3) {
  833. uint64_t di_flags2 = 0;
  834. if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
  835. di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
  836. ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
  837. }
  838. if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
  839. di_flags2 |= XFS_DIFLAG2_DAX;
  840. ip->i_d.di_flags2 |= di_flags2;
  841. }
  842. /* FALLTHROUGH */
  843. case S_IFLNK:
  844. ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
  845. ip->i_df.if_flags = XFS_IFEXTENTS;
  846. ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
  847. ip->i_df.if_u1.if_extents = NULL;
  848. break;
  849. default:
  850. ASSERT(0);
  851. }
  852. /*
  853. * Attribute fork settings for new inode.
  854. */
  855. ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
  856. ip->i_d.di_anextents = 0;
  857. /*
  858. * Log the new values stuffed into the inode.
  859. */
  860. xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  861. xfs_trans_log_inode(tp, ip, flags);
  862. /* now that we have an i_mode we can setup the inode structure */
  863. xfs_setup_inode(ip);
  864. *ipp = ip;
  865. return 0;
  866. }
  867. /*
  868. * Allocates a new inode from disk and return a pointer to the
  869. * incore copy. This routine will internally commit the current
  870. * transaction and allocate a new one if the Space Manager needed
  871. * to do an allocation to replenish the inode free-list.
  872. *
  873. * This routine is designed to be called from xfs_create and
  874. * xfs_create_dir.
  875. *
  876. */
  877. int
  878. xfs_dir_ialloc(
  879. xfs_trans_t **tpp, /* input: current transaction;
  880. output: may be a new transaction. */
  881. xfs_inode_t *dp, /* directory within whose allocate
  882. the inode. */
  883. umode_t mode,
  884. xfs_nlink_t nlink,
  885. xfs_dev_t rdev,
  886. prid_t prid, /* project id */
  887. int okalloc, /* ok to allocate new space */
  888. xfs_inode_t **ipp, /* pointer to inode; it will be
  889. locked. */
  890. int *committed)
  891. {
  892. xfs_trans_t *tp;
  893. xfs_inode_t *ip;
  894. xfs_buf_t *ialloc_context = NULL;
  895. int code;
  896. void *dqinfo;
  897. uint tflags;
  898. tp = *tpp;
  899. ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
  900. /*
  901. * xfs_ialloc will return a pointer to an incore inode if
  902. * the Space Manager has an available inode on the free
  903. * list. Otherwise, it will do an allocation and replenish
  904. * the freelist. Since we can only do one allocation per
  905. * transaction without deadlocks, we will need to commit the
  906. * current transaction and start a new one. We will then
  907. * need to call xfs_ialloc again to get the inode.
  908. *
  909. * If xfs_ialloc did an allocation to replenish the freelist,
  910. * it returns the bp containing the head of the freelist as
  911. * ialloc_context. We will hold a lock on it across the
  912. * transaction commit so that no other process can steal
  913. * the inode(s) that we've just allocated.
  914. */
  915. code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
  916. &ialloc_context, &ip);
  917. /*
  918. * Return an error if we were unable to allocate a new inode.
  919. * This should only happen if we run out of space on disk or
  920. * encounter a disk error.
  921. */
  922. if (code) {
  923. *ipp = NULL;
  924. return code;
  925. }
  926. if (!ialloc_context && !ip) {
  927. *ipp = NULL;
  928. return -ENOSPC;
  929. }
  930. /*
  931. * If the AGI buffer is non-NULL, then we were unable to get an
  932. * inode in one operation. We need to commit the current
  933. * transaction and call xfs_ialloc() again. It is guaranteed
  934. * to succeed the second time.
  935. */
  936. if (ialloc_context) {
  937. /*
  938. * Normally, xfs_trans_commit releases all the locks.
  939. * We call bhold to hang on to the ialloc_context across
  940. * the commit. Holding this buffer prevents any other
  941. * processes from doing any allocations in this
  942. * allocation group.
  943. */
  944. xfs_trans_bhold(tp, ialloc_context);
  945. /*
  946. * We want the quota changes to be associated with the next
  947. * transaction, NOT this one. So, detach the dqinfo from this
  948. * and attach it to the next transaction.
  949. */
  950. dqinfo = NULL;
  951. tflags = 0;
  952. if (tp->t_dqinfo) {
  953. dqinfo = (void *)tp->t_dqinfo;
  954. tp->t_dqinfo = NULL;
  955. tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
  956. tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
  957. }
  958. code = xfs_trans_roll(&tp, NULL);
  959. if (committed != NULL)
  960. *committed = 1;
  961. /*
  962. * Re-attach the quota info that we detached from prev trx.
  963. */
  964. if (dqinfo) {
  965. tp->t_dqinfo = dqinfo;
  966. tp->t_flags |= tflags;
  967. }
  968. if (code) {
  969. xfs_buf_relse(ialloc_context);
  970. *tpp = tp;
  971. *ipp = NULL;
  972. return code;
  973. }
  974. xfs_trans_bjoin(tp, ialloc_context);
  975. /*
  976. * Call ialloc again. Since we've locked out all
  977. * other allocations in this allocation group,
  978. * this call should always succeed.
  979. */
  980. code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
  981. okalloc, &ialloc_context, &ip);
  982. /*
  983. * If we get an error at this point, return to the caller
  984. * so that the current transaction can be aborted.
  985. */
  986. if (code) {
  987. *tpp = tp;
  988. *ipp = NULL;
  989. return code;
  990. }
  991. ASSERT(!ialloc_context && ip);
  992. } else {
  993. if (committed != NULL)
  994. *committed = 0;
  995. }
  996. *ipp = ip;
  997. *tpp = tp;
  998. return 0;
  999. }
  1000. /*
  1001. * Decrement the link count on an inode & log the change. If this causes the
  1002. * link count to go to zero, move the inode to AGI unlinked list so that it can
  1003. * be freed when the last active reference goes away via xfs_inactive().
  1004. */
  1005. static int /* error */
  1006. xfs_droplink(
  1007. xfs_trans_t *tp,
  1008. xfs_inode_t *ip)
  1009. {
  1010. xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
  1011. drop_nlink(VFS_I(ip));
  1012. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1013. if (VFS_I(ip)->i_nlink)
  1014. return 0;
  1015. return xfs_iunlink(tp, ip);
  1016. }
  1017. /*
  1018. * Increment the link count on an inode & log the change.
  1019. */
  1020. static int
  1021. xfs_bumplink(
  1022. xfs_trans_t *tp,
  1023. xfs_inode_t *ip)
  1024. {
  1025. xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
  1026. ASSERT(ip->i_d.di_version > 1);
  1027. inc_nlink(VFS_I(ip));
  1028. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1029. return 0;
  1030. }
  1031. int
  1032. xfs_create(
  1033. xfs_inode_t *dp,
  1034. struct xfs_name *name,
  1035. umode_t mode,
  1036. xfs_dev_t rdev,
  1037. xfs_inode_t **ipp)
  1038. {
  1039. int is_dir = S_ISDIR(mode);
  1040. struct xfs_mount *mp = dp->i_mount;
  1041. struct xfs_inode *ip = NULL;
  1042. struct xfs_trans *tp = NULL;
  1043. int error;
  1044. struct xfs_defer_ops dfops;
  1045. xfs_fsblock_t first_block;
  1046. bool unlock_dp_on_error = false;
  1047. prid_t prid;
  1048. struct xfs_dquot *udqp = NULL;
  1049. struct xfs_dquot *gdqp = NULL;
  1050. struct xfs_dquot *pdqp = NULL;
  1051. struct xfs_trans_res *tres;
  1052. uint resblks;
  1053. trace_xfs_create(dp, name);
  1054. if (XFS_FORCED_SHUTDOWN(mp))
  1055. return -EIO;
  1056. prid = xfs_get_initial_prid(dp);
  1057. /*
  1058. * Make sure that we have allocated dquot(s) on disk.
  1059. */
  1060. error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
  1061. xfs_kgid_to_gid(current_fsgid()), prid,
  1062. XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
  1063. &udqp, &gdqp, &pdqp);
  1064. if (error)
  1065. return error;
  1066. if (is_dir) {
  1067. rdev = 0;
  1068. resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
  1069. tres = &M_RES(mp)->tr_mkdir;
  1070. } else {
  1071. resblks = XFS_CREATE_SPACE_RES(mp, name->len);
  1072. tres = &M_RES(mp)->tr_create;
  1073. }
  1074. /*
  1075. * Initially assume that the file does not exist and
  1076. * reserve the resources for that case. If that is not
  1077. * the case we'll drop the one we have and get a more
  1078. * appropriate transaction later.
  1079. */
  1080. error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
  1081. if (error == -ENOSPC) {
  1082. /* flush outstanding delalloc blocks and retry */
  1083. xfs_flush_inodes(mp);
  1084. error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
  1085. }
  1086. if (error == -ENOSPC) {
  1087. /* No space at all so try a "no-allocation" reservation */
  1088. resblks = 0;
  1089. error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
  1090. }
  1091. if (error)
  1092. goto out_release_inode;
  1093. xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
  1094. XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
  1095. unlock_dp_on_error = true;
  1096. xfs_defer_init(&dfops, &first_block);
  1097. /*
  1098. * Reserve disk quota and the inode.
  1099. */
  1100. error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
  1101. pdqp, resblks, 1, 0);
  1102. if (error)
  1103. goto out_trans_cancel;
  1104. if (!resblks) {
  1105. error = xfs_dir_canenter(tp, dp, name);
  1106. if (error)
  1107. goto out_trans_cancel;
  1108. }
  1109. /*
  1110. * A newly created regular or special file just has one directory
  1111. * entry pointing to them, but a directory also the "." entry
  1112. * pointing to itself.
  1113. */
  1114. error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
  1115. prid, resblks > 0, &ip, NULL);
  1116. if (error)
  1117. goto out_trans_cancel;
  1118. /*
  1119. * Now we join the directory inode to the transaction. We do not do it
  1120. * earlier because xfs_dir_ialloc might commit the previous transaction
  1121. * (and release all the locks). An error from here on will result in
  1122. * the transaction cancel unlocking dp so don't do it explicitly in the
  1123. * error path.
  1124. */
  1125. xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
  1126. unlock_dp_on_error = false;
  1127. error = xfs_dir_createname(tp, dp, name, ip->i_ino,
  1128. &first_block, &dfops, resblks ?
  1129. resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
  1130. if (error) {
  1131. ASSERT(error != -ENOSPC);
  1132. goto out_trans_cancel;
  1133. }
  1134. xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  1135. xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
  1136. if (is_dir) {
  1137. error = xfs_dir_init(tp, ip, dp);
  1138. if (error)
  1139. goto out_bmap_cancel;
  1140. error = xfs_bumplink(tp, dp);
  1141. if (error)
  1142. goto out_bmap_cancel;
  1143. }
  1144. /*
  1145. * If this is a synchronous mount, make sure that the
  1146. * create transaction goes to disk before returning to
  1147. * the user.
  1148. */
  1149. if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  1150. xfs_trans_set_sync(tp);
  1151. /*
  1152. * Attach the dquot(s) to the inodes and modify them incore.
  1153. * These ids of the inode couldn't have changed since the new
  1154. * inode has been locked ever since it was created.
  1155. */
  1156. xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
  1157. error = xfs_defer_finish(&tp, &dfops, NULL);
  1158. if (error)
  1159. goto out_bmap_cancel;
  1160. error = xfs_trans_commit(tp);
  1161. if (error)
  1162. goto out_release_inode;
  1163. xfs_qm_dqrele(udqp);
  1164. xfs_qm_dqrele(gdqp);
  1165. xfs_qm_dqrele(pdqp);
  1166. *ipp = ip;
  1167. return 0;
  1168. out_bmap_cancel:
  1169. xfs_defer_cancel(&dfops);
  1170. out_trans_cancel:
  1171. xfs_trans_cancel(tp);
  1172. out_release_inode:
  1173. /*
  1174. * Wait until after the current transaction is aborted to finish the
  1175. * setup of the inode and release the inode. This prevents recursive
  1176. * transactions and deadlocks from xfs_inactive.
  1177. */
  1178. if (ip) {
  1179. xfs_finish_inode_setup(ip);
  1180. IRELE(ip);
  1181. }
  1182. xfs_qm_dqrele(udqp);
  1183. xfs_qm_dqrele(gdqp);
  1184. xfs_qm_dqrele(pdqp);
  1185. if (unlock_dp_on_error)
  1186. xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
  1187. return error;
  1188. }
  1189. int
  1190. xfs_create_tmpfile(
  1191. struct xfs_inode *dp,
  1192. struct dentry *dentry,
  1193. umode_t mode,
  1194. struct xfs_inode **ipp)
  1195. {
  1196. struct xfs_mount *mp = dp->i_mount;
  1197. struct xfs_inode *ip = NULL;
  1198. struct xfs_trans *tp = NULL;
  1199. int error;
  1200. prid_t prid;
  1201. struct xfs_dquot *udqp = NULL;
  1202. struct xfs_dquot *gdqp = NULL;
  1203. struct xfs_dquot *pdqp = NULL;
  1204. struct xfs_trans_res *tres;
  1205. uint resblks;
  1206. if (XFS_FORCED_SHUTDOWN(mp))
  1207. return -EIO;
  1208. prid = xfs_get_initial_prid(dp);
  1209. /*
  1210. * Make sure that we have allocated dquot(s) on disk.
  1211. */
  1212. error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
  1213. xfs_kgid_to_gid(current_fsgid()), prid,
  1214. XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
  1215. &udqp, &gdqp, &pdqp);
  1216. if (error)
  1217. return error;
  1218. resblks = XFS_IALLOC_SPACE_RES(mp);
  1219. tres = &M_RES(mp)->tr_create_tmpfile;
  1220. error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
  1221. if (error == -ENOSPC) {
  1222. /* No space at all so try a "no-allocation" reservation */
  1223. resblks = 0;
  1224. error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
  1225. }
  1226. if (error)
  1227. goto out_release_inode;
  1228. error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
  1229. pdqp, resblks, 1, 0);
  1230. if (error)
  1231. goto out_trans_cancel;
  1232. error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
  1233. prid, resblks > 0, &ip, NULL);
  1234. if (error)
  1235. goto out_trans_cancel;
  1236. if (mp->m_flags & XFS_MOUNT_WSYNC)
  1237. xfs_trans_set_sync(tp);
  1238. /*
  1239. * Attach the dquot(s) to the inodes and modify them incore.
1240. * The IDs of the inode can't have changed since the new
  1241. * inode has been locked ever since it was created.
  1242. */
  1243. xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
  1244. error = xfs_iunlink(tp, ip);
  1245. if (error)
  1246. goto out_trans_cancel;
  1247. error = xfs_trans_commit(tp);
  1248. if (error)
  1249. goto out_release_inode;
  1250. xfs_qm_dqrele(udqp);
  1251. xfs_qm_dqrele(gdqp);
  1252. xfs_qm_dqrele(pdqp);
  1253. *ipp = ip;
  1254. return 0;
  1255. out_trans_cancel:
  1256. xfs_trans_cancel(tp);
  1257. out_release_inode:
  1258. /*
  1259. * Wait until after the current transaction is aborted to finish the
  1260. * setup of the inode and release the inode. This prevents recursive
  1261. * transactions and deadlocks from xfs_inactive.
  1262. */
  1263. if (ip) {
  1264. xfs_finish_inode_setup(ip);
  1265. IRELE(ip);
  1266. }
  1267. xfs_qm_dqrele(udqp);
  1268. xfs_qm_dqrele(gdqp);
  1269. xfs_qm_dqrele(pdqp);
  1270. return error;
  1271. }
  1272. int
  1273. xfs_link(
  1274. xfs_inode_t *tdp,
  1275. xfs_inode_t *sip,
  1276. struct xfs_name *target_name)
  1277. {
  1278. xfs_mount_t *mp = tdp->i_mount;
  1279. xfs_trans_t *tp;
  1280. int error;
  1281. struct xfs_defer_ops dfops;
  1282. xfs_fsblock_t first_block;
  1283. int resblks;
  1284. trace_xfs_link(tdp, target_name);
  1285. ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
  1286. if (XFS_FORCED_SHUTDOWN(mp))
  1287. return -EIO;
  1288. error = xfs_qm_dqattach(sip, 0);
  1289. if (error)
  1290. goto std_return;
  1291. error = xfs_qm_dqattach(tdp, 0);
  1292. if (error)
  1293. goto std_return;
  1294. resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
  1295. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
  1296. if (error == -ENOSPC) {
  1297. resblks = 0;
  1298. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
  1299. }
  1300. if (error)
  1301. goto std_return;
  1302. xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
  1303. xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
  1304. xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
  1305. xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
  1306. /*
  1307. * If we are using project inheritance, we only allow hard link
  1308. * creation in our tree when the project IDs are the same; else
  1309. * the tree quota mechanism could be circumvented.
  1310. */
  1311. if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
  1312. (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
  1313. error = -EXDEV;
  1314. goto error_return;
  1315. }
  1316. if (!resblks) {
  1317. error = xfs_dir_canenter(tp, tdp, target_name);
  1318. if (error)
  1319. goto error_return;
  1320. }
  1321. xfs_defer_init(&dfops, &first_block);
  1322. /*
  1323. * Handle initial link state of O_TMPFILE inode
  1324. */
  1325. if (VFS_I(sip)->i_nlink == 0) {
  1326. error = xfs_iunlink_remove(tp, sip);
  1327. if (error)
  1328. goto error_return;
  1329. }
  1330. error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
  1331. &first_block, &dfops, resblks);
  1332. if (error)
  1333. goto error_return;
  1334. xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  1335. xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
  1336. error = xfs_bumplink(tp, sip);
  1337. if (error)
  1338. goto error_return;
  1339. /*
  1340. * If this is a synchronous mount, make sure that the
  1341. * link transaction goes to disk before returning to
  1342. * the user.
  1343. */
  1344. if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  1345. xfs_trans_set_sync(tp);
  1346. error = xfs_defer_finish(&tp, &dfops, NULL);
  1347. if (error) {
  1348. xfs_defer_cancel(&dfops);
  1349. goto error_return;
  1350. }
  1351. return xfs_trans_commit(tp);
  1352. error_return:
  1353. xfs_trans_cancel(tp);
  1354. std_return:
  1355. return error;
  1356. }
  1357. /*
  1358. * Free up the underlying blocks past new_size. The new size must be smaller
  1359. * than the current size. This routine can be used both for the attribute and
  1360. * data fork, and does not modify the inode size, which is left to the caller.
  1361. *
  1362. * The transaction passed to this routine must have made a permanent log
  1363. * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
  1364. * given transaction and start new ones, so make sure everything involved in
  1365. * the transaction is tidy before calling here. Some transaction will be
  1366. * returned to the caller to be committed. The incoming transaction must
  1367. * already include the inode, and both inode locks must be held exclusively.
  1368. * The inode must also be "held" within the transaction. On return the inode
  1369. * will be "held" within the returned transaction. This routine does NOT
  1370. * require any disk space to be reserved for it within the transaction.
  1371. *
  1372. * If we get an error, we must return with the inode locked and linked into the
  1373. * current transaction. This keeps things simple for the higher level code,
  1374. * because it always knows that the inode is locked and held in the transaction
  1375. * that returns to it whether errors occur or not. We don't mark the inode
  1376. * dirty on error so that transactions can be easily aborted if possible.
  1377. */
  1378. int
  1379. xfs_itruncate_extents(
  1380. struct xfs_trans **tpp,
  1381. struct xfs_inode *ip,
  1382. int whichfork,
  1383. xfs_fsize_t new_size)
  1384. {
  1385. struct xfs_mount *mp = ip->i_mount;
  1386. struct xfs_trans *tp = *tpp;
  1387. struct xfs_defer_ops dfops;
  1388. xfs_fsblock_t first_block;
  1389. xfs_fileoff_t first_unmap_block;
  1390. xfs_fileoff_t last_block;
  1391. xfs_filblks_t unmap_len;
  1392. int error = 0;
  1393. int done = 0;
  1394. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  1395. ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
  1396. xfs_isilocked(ip, XFS_IOLOCK_EXCL));
  1397. ASSERT(new_size <= XFS_ISIZE(ip));
  1398. ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
  1399. ASSERT(ip->i_itemp != NULL);
  1400. ASSERT(ip->i_itemp->ili_lock_flags == 0);
  1401. ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
  1402. trace_xfs_itruncate_extents_start(ip, new_size);
  1403. /*
  1404. * Since it is possible for space to become allocated beyond
  1405. * the end of the file (in a crash where the space is allocated
  1406. * but the inode size is not yet updated), simply remove any
  1407. * blocks which show up between the new EOF and the maximum
  1408. * possible file size. If the first block to be removed is
  1409. * beyond the maximum file size (ie it is the same as last_block),
  1410. * then there is nothing to do.
  1411. */
  1412. first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
  1413. last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
  1414. if (first_unmap_block == last_block)
  1415. return 0;
  1416. ASSERT(first_unmap_block < last_block);
  1417. unmap_len = last_block - first_unmap_block + 1;
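/*
 * Unmap the range in batches of at most XFS_ITRUNC_MAX_EXTENTS extents,
 * finishing the deferred work and rolling the transaction between
 * batches so the permanent log reservation is never overrun.
 */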
  1418. while (!done) {
  1419. xfs_defer_init(&dfops, &first_block);
  1420. error = xfs_bunmapi(tp, ip,
  1421. first_unmap_block, unmap_len,
  1422. xfs_bmapi_aflag(whichfork),
  1423. XFS_ITRUNC_MAX_EXTENTS,
  1424. &first_block, &dfops,
  1425. &done);
  1426. if (error)
  1427. goto out_bmap_cancel;
  1428. /*
  1429. * Duplicate the transaction that has the permanent
  1430. * reservation and commit the old transaction.
  1431. */
  1432. error = xfs_defer_finish(&tp, &dfops, ip);
  1433. if (error)
  1434. goto out_bmap_cancel;
  1435. error = xfs_trans_roll(&tp, ip);
  1436. if (error)
  1437. goto out;
  1438. }
  1439. /* Remove all pending CoW reservations. */
  1440. error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
  1441. last_block, true);
  1442. if (error)
  1443. goto out;
  1444. /*
  1445. * Clear the reflink flag if there are no data fork blocks and
  1446. * there are no extents staged in the cow fork.
  1447. */
  1448. if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
  1449. if (ip->i_d.di_nblocks == 0)
  1450. ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
  1451. xfs_inode_clear_cowblocks_tag(ip);
  1452. }
  1453. /*
  1454. * Always re-log the inode so that our permanent transaction can keep
  1455. * on rolling it forward in the log.
  1456. */
  1457. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1458. trace_xfs_itruncate_extents_end(ip, new_size);
  1459. out:
  1460. *tpp = tp;
  1461. return error;
  1462. out_bmap_cancel:
  1463. /*
  1464. * If the bunmapi call encounters an error, return to the caller where
  1465. * the transaction can be properly aborted. We just need to make sure
  1466. * we're not holding any resources that we were not when we came in.
  1467. */
  1468. xfs_defer_cancel(&dfops);
  1469. goto out;
  1470. }
  1471. int
  1472. xfs_release(
  1473. xfs_inode_t *ip)
  1474. {
  1475. xfs_mount_t *mp = ip->i_mount;
  1476. int error;
  1477. if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
  1478. return 0;
  1479. /* If this is a read-only mount, don't do this (would generate I/O) */
  1480. if (mp->m_flags & XFS_MOUNT_RDONLY)
  1481. return 0;
  1482. if (!XFS_FORCED_SHUTDOWN(mp)) {
  1483. int truncated;
  1484. /*
  1485. * If we previously truncated this file and removed old data
  1486. * in the process, we want to initiate "early" writeout on
  1487. * the last close. This is an attempt to combat the notorious
  1488. * NULL files problem which is particularly noticeable from a
  1489. * truncate down, buffered (re-)write (delalloc), followed by
  1490. * a crash. What we are effectively doing here is
  1491. * significantly reducing the time window where we'd otherwise
  1492. * be exposed to that problem.
  1493. */
  1494. truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
  1495. if (truncated) {
  1496. xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
  1497. if (ip->i_delayed_blks > 0) {
  1498. error = filemap_flush(VFS_I(ip)->i_mapping);
  1499. if (error)
  1500. return error;
  1501. }
  1502. }
  1503. }
  1504. if (VFS_I(ip)->i_nlink == 0)
  1505. return 0;
  1506. if (xfs_can_free_eofblocks(ip, false)) {
  1507. /*
  1508. * Check if the inode is being opened, written and closed
  1509. * frequently and we have delayed allocation blocks outstanding
  1510. * (e.g. streaming writes from the NFS server), truncating the
  1511. * blocks past EOF will cause fragmentation to occur.
  1512. *
  1513. * In this case don't do the truncation, but we have to be
  1514. * careful how we detect this case. Blocks beyond EOF show up as
  1515. * i_delayed_blks even when the inode is clean, so we need to
  1516. * truncate them away first before checking for a dirty release.
  1517. * Hence on the first dirty close we will still remove the
  1518. * speculative allocation, but after that we will leave it in
  1519. * place.
  1520. */
  1521. if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
  1522. return 0;
  1523. /*
  1524. * If we can't get the iolock just skip truncating the blocks
  1525. * past EOF because we could deadlock with the mmap_sem
  1526. * otherwise. We'll get another chance to drop them once the
  1527. * last reference to the inode is dropped, so we'll never leak
  1528. * blocks permanently.
  1529. */
  1530. if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
  1531. error = xfs_free_eofblocks(ip);
  1532. xfs_iunlock(ip, XFS_IOLOCK_EXCL);
  1533. if (error)
  1534. return error;
  1535. }
  1536. /* delalloc blocks after truncation means it really is dirty */
  1537. if (ip->i_delayed_blks)
  1538. xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
  1539. }
  1540. return 0;
  1541. }
  1542. /*
  1543. * xfs_inactive_truncate
  1544. *
  1545. * Called to perform a truncate when an inode becomes unlinked.
  1546. */
  1547. STATIC int
  1548. xfs_inactive_truncate(
  1549. struct xfs_inode *ip)
  1550. {
  1551. struct xfs_mount *mp = ip->i_mount;
  1552. struct xfs_trans *tp;
  1553. int error;
  1554. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
  1555. if (error) {
  1556. ASSERT(XFS_FORCED_SHUTDOWN(mp));
  1557. return error;
  1558. }
  1559. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1560. xfs_trans_ijoin(tp, ip, 0);
  1561. /*
  1562. * Log the inode size first to prevent stale data exposure in the event
  1563. * of a system crash before the truncate completes. See the related
  1564. * comment in xfs_vn_setattr_size() for details.
  1565. */
  1566. ip->i_d.di_size = 0;
  1567. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1568. error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
  1569. if (error)
  1570. goto error_trans_cancel;
  1571. ASSERT(ip->i_d.di_nextents == 0);
  1572. error = xfs_trans_commit(tp);
  1573. if (error)
  1574. goto error_unlock;
  1575. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1576. return 0;
  1577. error_trans_cancel:
  1578. xfs_trans_cancel(tp);
  1579. error_unlock:
  1580. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1581. return error;
  1582. }
  1583. /*
  1584. * xfs_inactive_ifree()
  1585. *
  1586. * Perform the inode free when an inode is unlinked.
  1587. */
  1588. STATIC int
  1589. xfs_inactive_ifree(
  1590. struct xfs_inode *ip)
  1591. {
  1592. struct xfs_defer_ops dfops;
  1593. xfs_fsblock_t first_block;
  1594. struct xfs_mount *mp = ip->i_mount;
  1595. struct xfs_trans *tp;
  1596. int error;
  1597. /*
  1598. * We try to use a per-AG reservation for any block needed by the finobt
  1599. * tree, but as the finobt feature predates the per-AG reservation
  1600. * support a degraded file system might not have enough space for the
  1601. * reservation at mount time. In that case try to dip into the reserved
  1602. * pool and pray.
  1603. *
  1604. * Send a warning if the reservation does happen to fail, as the inode
  1605. * now remains allocated and sits on the unlinked list until the fs is
  1606. * repaired.
  1607. */
  1608. if (unlikely(mp->m_inotbt_nores)) {
  1609. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
  1610. XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
  1611. &tp);
  1612. } else {
  1613. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
  1614. }
  1615. if (error) {
  1616. if (error == -ENOSPC) {
  1617. xfs_warn_ratelimited(mp,
  1618. "Failed to remove inode(s) from unlinked list. "
  1619. "Please free space, unmount and run xfs_repair.");
  1620. } else {
  1621. ASSERT(XFS_FORCED_SHUTDOWN(mp));
  1622. }
  1623. return error;
  1624. }
  1625. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1626. xfs_trans_ijoin(tp, ip, 0);
  1627. xfs_defer_init(&dfops, &first_block);
  1628. error = xfs_ifree(tp, ip, &dfops);
  1629. if (error) {
  1630. /*
  1631. * If we fail to free the inode, shut down. The cancel
  1632. * might do that, we need to make sure. Otherwise the
  1633. * inode might be lost for a long time or forever.
  1634. */
  1635. if (!XFS_FORCED_SHUTDOWN(mp)) {
  1636. xfs_notice(mp, "%s: xfs_ifree returned error %d",
  1637. __func__, error);
  1638. xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
  1639. }
  1640. xfs_trans_cancel(tp);
  1641. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1642. return error;
  1643. }
  1644. /*
  1645. * Credit the quota account(s). The inode is gone.
  1646. */
  1647. xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
  1648. /*
  1649. * Just ignore errors at this point. There is nothing we can do except
  1650. * to try to keep going. Make sure it's not a silent error.
  1651. */
  1652. error = xfs_defer_finish(&tp, &dfops, NULL);
  1653. if (error) {
  1654. xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
  1655. __func__, error);
  1656. xfs_defer_cancel(&dfops);
  1657. }
  1658. error = xfs_trans_commit(tp);
  1659. if (error)
  1660. xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
  1661. __func__, error);
  1662. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1663. return 0;
  1664. }
  1665. /*
  1666. * xfs_inactive
  1667. *
1668. * This is called when the vnode reference count goes to zero.
1669. * If the file has been unlinked, then it must
  1670. * now be truncated. Also, we clear all of the read-ahead state
  1671. * kept for the inode here since the file is now closed.
  1672. */
  1673. void
  1674. xfs_inactive(
  1675. xfs_inode_t *ip)
  1676. {
  1677. struct xfs_mount *mp;
  1678. int error;
  1679. int truncate = 0;
  1680. /*
  1681. * If the inode is already free, then there can be nothing
  1682. * to clean up here.
  1683. */
  1684. if (VFS_I(ip)->i_mode == 0) {
  1685. ASSERT(ip->i_df.if_real_bytes == 0);
  1686. ASSERT(ip->i_df.if_broot_bytes == 0);
  1687. return;
  1688. }
  1689. mp = ip->i_mount;
  1690. ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
  1691. /* If this is a read-only mount, don't do this (would generate I/O) */
  1692. if (mp->m_flags & XFS_MOUNT_RDONLY)
  1693. return;
  1694. if (VFS_I(ip)->i_nlink != 0) {
  1695. /*
  1696. * force is true because we are evicting an inode from the
  1697. * cache. Post-eof blocks must be freed, lest we end up with
  1698. * broken free space accounting.
  1699. *
  1700. * Note: don't bother with iolock here since lockdep complains
  1701. * about acquiring it in reclaim context. We have the only
  1702. * reference to the inode at this point anyways.
  1703. */
  1704. if (xfs_can_free_eofblocks(ip, true))
  1705. xfs_free_eofblocks(ip);
  1706. return;
  1707. }
  1708. if (S_ISREG(VFS_I(ip)->i_mode) &&
  1709. (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
  1710. ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
  1711. truncate = 1;
  1712. error = xfs_qm_dqattach(ip, 0);
  1713. if (error)
  1714. return;
  1715. if (S_ISLNK(VFS_I(ip)->i_mode))
  1716. error = xfs_inactive_symlink(ip);
  1717. else if (truncate)
  1718. error = xfs_inactive_truncate(ip);
  1719. if (error)
  1720. return;
  1721. /*
  1722. * If there are attributes associated with the file then blow them away
  1723. * now. The code calls a routine that recursively deconstructs the
1724. * attribute fork. It also blows away the in-core attribute fork.
  1725. */
  1726. if (XFS_IFORK_Q(ip)) {
  1727. error = xfs_attr_inactive(ip);
  1728. if (error)
  1729. return;
  1730. }
  1731. ASSERT(!ip->i_afp);
  1732. ASSERT(ip->i_d.di_anextents == 0);
  1733. ASSERT(ip->i_d.di_forkoff == 0);
  1734. /*
  1735. * Free the inode.
  1736. */
  1737. error = xfs_inactive_ifree(ip);
  1738. if (error)
  1739. return;
  1740. /*
  1741. * Release the dquots held by inode, if any.
  1742. */
  1743. xfs_qm_dqdetach(ip);
  1744. }
  1745. /*
  1746. * This is called when the inode's link count goes to 0 or we are creating a
  1747. * tmpfile via O_TMPFILE. In the case of a tmpfile, @ignore_linkcount will be
  1748. * set to true as the link count is dropped to zero by the VFS after we've
  1749. * created the file successfully, so we have to add it to the unlinked list
  1750. * while the link count is non-zero.
  1751. *
  1752. * We place the on-disk inode on a list in the AGI. It will be pulled from this
  1753. * list when the inode is freed.
  1754. */
  1755. STATIC int
  1756. xfs_iunlink(
  1757. struct xfs_trans *tp,
  1758. struct xfs_inode *ip)
  1759. {
  1760. xfs_mount_t *mp = tp->t_mountp;
  1761. xfs_agi_t *agi;
  1762. xfs_dinode_t *dip;
  1763. xfs_buf_t *agibp;
  1764. xfs_buf_t *ibp;
  1765. xfs_agino_t agino;
  1766. short bucket_index;
  1767. int offset;
  1768. int error;
  1769. ASSERT(VFS_I(ip)->i_mode != 0);
  1770. /*
  1771. * Get the agi buffer first. It ensures lock ordering
  1772. * on the list.
  1773. */
  1774. error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
  1775. if (error)
  1776. return error;
  1777. agi = XFS_BUF_TO_AGI(agibp);
  1778. /*
  1779. * Get the index into the agi hash table for the
  1780. * list this inode will go on.
  1781. */
  1782. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  1783. ASSERT(agino != 0);
  1784. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
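/*
 * The AGI holds XFS_AGI_UNLINKED_BUCKETS list heads; hashing the inode
 * number over the buckets keeps each on-disk unlinked list short.
 */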
  1785. ASSERT(agi->agi_unlinked[bucket_index]);
  1786. ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
  1787. if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
  1788. /*
  1789. * There is already another inode in the bucket we need
  1790. * to add ourselves to. Add us at the front of the list.
  1791. * Here we put the head pointer into our next pointer,
  1792. * and then we fall through to point the head at us.
  1793. */
  1794. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1795. 0, 0);
  1796. if (error)
  1797. return error;
  1798. ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
  1799. dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
  1800. offset = ip->i_imap.im_boffset +
  1801. offsetof(xfs_dinode_t, di_next_unlinked);
  1802. /* need to recalc the inode CRC if appropriate */
  1803. xfs_dinode_calc_crc(mp, dip);
  1804. xfs_trans_inode_buf(tp, ibp);
  1805. xfs_trans_log_buf(tp, ibp, offset,
  1806. (offset + sizeof(xfs_agino_t) - 1));
  1807. xfs_inobp_check(mp, ibp);
  1808. }
  1809. /*
  1810. * Point the bucket head pointer at the inode being inserted.
  1811. */
  1812. ASSERT(agino != 0);
  1813. agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
  1814. offset = offsetof(xfs_agi_t, agi_unlinked) +
  1815. (sizeof(xfs_agino_t) * bucket_index);
  1816. xfs_trans_log_buf(tp, agibp, offset,
  1817. (offset + sizeof(xfs_agino_t) - 1));
  1818. return 0;
  1819. }
  1820. /*
  1821. * Pull the on-disk inode from the AGI unlinked list.
  1822. */
  1823. STATIC int
  1824. xfs_iunlink_remove(
  1825. xfs_trans_t *tp,
  1826. xfs_inode_t *ip)
  1827. {
  1828. xfs_ino_t next_ino;
  1829. xfs_mount_t *mp;
  1830. xfs_agi_t *agi;
  1831. xfs_dinode_t *dip;
  1832. xfs_buf_t *agibp;
  1833. xfs_buf_t *ibp;
  1834. xfs_agnumber_t agno;
  1835. xfs_agino_t agino;
  1836. xfs_agino_t next_agino;
  1837. xfs_buf_t *last_ibp;
  1838. xfs_dinode_t *last_dip = NULL;
  1839. short bucket_index;
  1840. int offset, last_offset = 0;
  1841. int error;
  1842. mp = tp->t_mountp;
  1843. agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
  1844. /*
  1845. * Get the agi buffer first. It ensures lock ordering
  1846. * on the list.
  1847. */
  1848. error = xfs_read_agi(mp, tp, agno, &agibp);
  1849. if (error)
  1850. return error;
  1851. agi = XFS_BUF_TO_AGI(agibp);
  1852. /*
  1853. * Get the index into the agi hash table for the
  1854. * list this inode will go on.
  1855. */
  1856. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  1857. ASSERT(agino != 0);
  1858. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
  1859. ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
  1860. ASSERT(agi->agi_unlinked[bucket_index]);
  1861. if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
  1862. /*
  1863. * We're at the head of the list. Get the inode's on-disk
  1864. * buffer to see if there is anyone after us on the list.
  1865. * Only modify our next pointer if it is not already NULLAGINO.
  1866. * This saves us the overhead of dealing with the buffer when
  1867. * there is no need to change it.
  1868. */
  1869. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1870. 0, 0);
  1871. if (error) {
  1872. xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
  1873. __func__, error);
  1874. return error;
  1875. }
  1876. next_agino = be32_to_cpu(dip->di_next_unlinked);
  1877. ASSERT(next_agino != 0);
  1878. if (next_agino != NULLAGINO) {
  1879. dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
  1880. offset = ip->i_imap.im_boffset +
  1881. offsetof(xfs_dinode_t, di_next_unlinked);
  1882. /* need to recalc the inode CRC if appropriate */
  1883. xfs_dinode_calc_crc(mp, dip);
  1884. xfs_trans_inode_buf(tp, ibp);
  1885. xfs_trans_log_buf(tp, ibp, offset,
  1886. (offset + sizeof(xfs_agino_t) - 1));
  1887. xfs_inobp_check(mp, ibp);
  1888. } else {
  1889. xfs_trans_brelse(tp, ibp);
  1890. }
  1891. /*
  1892. * Point the bucket head pointer at the next inode.
  1893. */
  1894. ASSERT(next_agino != 0);
  1895. ASSERT(next_agino != agino);
  1896. agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
  1897. offset = offsetof(xfs_agi_t, agi_unlinked) +
  1898. (sizeof(xfs_agino_t) * bucket_index);
  1899. xfs_trans_log_buf(tp, agibp, offset,
  1900. (offset + sizeof(xfs_agino_t) - 1));
  1901. } else {
  1902. /*
  1903. * We need to search the list for the inode being freed.
  1904. */
  1905. next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
  1906. last_ibp = NULL;
  1907. while (next_agino != agino) {
  1908. struct xfs_imap imap;
  1909. if (last_ibp)
  1910. xfs_trans_brelse(tp, last_ibp);
  1911. imap.im_blkno = 0;
  1912. next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
  1913. error = xfs_imap(mp, tp, next_ino, &imap, 0);
  1914. if (error) {
  1915. xfs_warn(mp,
  1916. "%s: xfs_imap returned error %d.",
  1917. __func__, error);
  1918. return error;
  1919. }
  1920. error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
  1921. &last_ibp, 0, 0);
  1922. if (error) {
  1923. xfs_warn(mp,
  1924. "%s: xfs_imap_to_bp returned error %d.",
  1925. __func__, error);
  1926. return error;
  1927. }
  1928. last_offset = imap.im_boffset;
  1929. next_agino = be32_to_cpu(last_dip->di_next_unlinked);
  1930. ASSERT(next_agino != NULLAGINO);
  1931. ASSERT(next_agino != 0);
  1932. }
  1933. /*
  1934. * Now last_ibp points to the buffer previous to us on the
  1935. * unlinked list. Pull us from the list.
  1936. */
  1937. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1938. 0, 0);
  1939. if (error) {
  1940. xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
  1941. __func__, error);
  1942. return error;
  1943. }
  1944. next_agino = be32_to_cpu(dip->di_next_unlinked);
  1945. ASSERT(next_agino != 0);
  1946. ASSERT(next_agino != agino);
  1947. if (next_agino != NULLAGINO) {
  1948. dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
  1949. offset = ip->i_imap.im_boffset +
  1950. offsetof(xfs_dinode_t, di_next_unlinked);
  1951. /* need to recalc the inode CRC if appropriate */
  1952. xfs_dinode_calc_crc(mp, dip);
  1953. xfs_trans_inode_buf(tp, ibp);
  1954. xfs_trans_log_buf(tp, ibp, offset,
  1955. (offset + sizeof(xfs_agino_t) - 1));
  1956. xfs_inobp_check(mp, ibp);
  1957. } else {
  1958. xfs_trans_brelse(tp, ibp);
  1959. }
  1960. /*
  1961. * Point the previous inode on the list to the next inode.
  1962. */
  1963. last_dip->di_next_unlinked = cpu_to_be32(next_agino);
  1964. ASSERT(next_agino != 0);
  1965. offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
  1966. /* need to recalc the inode CRC if appropriate */
  1967. xfs_dinode_calc_crc(mp, last_dip);
  1968. xfs_trans_inode_buf(tp, last_ibp);
  1969. xfs_trans_log_buf(tp, last_ibp, offset,
  1970. (offset + sizeof(xfs_agino_t) - 1));
  1971. xfs_inobp_check(mp, last_ibp);
  1972. }
  1973. return 0;
  1974. }
  1975. /*
  1976. * A big issue when freeing the inode cluster is that we _cannot_ skip any
  1977. * inodes that are in memory - they all must be marked stale and attached to
  1978. * the cluster buffer.
  1979. */
  1980. STATIC int
  1981. xfs_ifree_cluster(
  1982. xfs_inode_t *free_ip,
  1983. xfs_trans_t *tp,
  1984. struct xfs_icluster *xic)
  1985. {
  1986. xfs_mount_t *mp = free_ip->i_mount;
  1987. int blks_per_cluster;
  1988. int inodes_per_cluster;
  1989. int nbufs;
  1990. int i, j;
  1991. int ioffset;
  1992. xfs_daddr_t blkno;
  1993. xfs_buf_t *bp;
  1994. xfs_inode_t *ip;
  1995. xfs_inode_log_item_t *iip;
  1996. xfs_log_item_t *lip;
  1997. struct xfs_perag *pag;
  1998. xfs_ino_t inum;
  1999. inum = xic->first_ino;
  2000. pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
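/*
 * Work out the cluster geometry: blocks per inode cluster, inodes per
 * cluster buffer, and how many cluster buffers make up the inode chunk
 * being freed.
 */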
  2001. blks_per_cluster = xfs_icluster_size_fsb(mp);
  2002. inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
  2003. nbufs = mp->m_ialloc_blks / blks_per_cluster;
  2004. for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
  2005. /*
  2006. * The allocation bitmap tells us which inodes of the chunk were
  2007. * physically allocated. Skip the cluster if an inode falls into
  2008. * a sparse region.
  2009. */
  2010. ioffset = inum - xic->first_ino;
  2011. if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
  2012. ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
  2013. continue;
  2014. }
  2015. blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
  2016. XFS_INO_TO_AGBNO(mp, inum));
  2017. /*
  2018. * We obtain and lock the backing buffer first in the process
  2019. * here, as we have to ensure that any dirty inode that we
  2020. * can't get the flush lock on is attached to the buffer.
  2021. * If we scan the in-memory inodes first, then buffer IO can
  2022. * complete before we get a lock on it, and hence we may fail
  2023. * to mark all the active inodes on the buffer stale.
  2024. */
  2025. bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
  2026. mp->m_bsize * blks_per_cluster,
  2027. XBF_UNMAPPED);
  2028. if (!bp)
  2029. return -ENOMEM;
  2030. /*
  2031. * This buffer may not have been correctly initialised as we
  2032. * didn't read it from disk. That's not important because we are
2033. * only using it to mark the buffer as stale in the log, and to
  2034. * attach stale cached inodes on it. That means it will never be
  2035. * dispatched for IO. If it is, we want to know about it, and we
2036. * want it to fail. We can achieve this by adding a write
  2037. * verifier to the buffer.
  2038. */
  2039. bp->b_ops = &xfs_inode_buf_ops;
  2040. /*
  2041. * Walk the inodes already attached to the buffer and mark them
  2042. * stale. These will all have the flush locks held, so an
  2043. * in-memory inode walk can't lock them. By marking them all
  2044. * stale first, we will not attempt to lock them in the loop
  2045. * below as the XFS_ISTALE flag will be set.
  2046. */
  2047. lip = bp->b_fspriv;
  2048. while (lip) {
  2049. if (lip->li_type == XFS_LI_INODE) {
  2050. iip = (xfs_inode_log_item_t *)lip;
  2051. ASSERT(iip->ili_logged == 1);
  2052. lip->li_cb = xfs_istale_done;
  2053. xfs_trans_ail_copy_lsn(mp->m_ail,
  2054. &iip->ili_flush_lsn,
  2055. &iip->ili_item.li_lsn);
  2056. xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
  2057. }
  2058. lip = lip->li_bio_list;
  2059. }
  2060. /*
  2061. * For each inode in memory attempt to add it to the inode
  2062. * buffer and set it up for being staled on buffer IO
  2063. * completion. This is safe as we've locked out tail pushing
  2064. * and flushing by locking the buffer.
  2065. *
  2066. * We have already marked every inode that was part of a
  2067. * transaction stale above, which means there is no point in
  2068. * even trying to lock them.
  2069. */
  2070. for (i = 0; i < inodes_per_cluster; i++) {
  2071. retry:
  2072. rcu_read_lock();
  2073. ip = radix_tree_lookup(&pag->pag_ici_root,
  2074. XFS_INO_TO_AGINO(mp, (inum + i)));
  2075. /* Inode not in memory, nothing to do */
  2076. if (!ip) {
  2077. rcu_read_unlock();
  2078. continue;
  2079. }
  2080. /*
  2081. * because this is an RCU protected lookup, we could
  2082. * find a recently freed or even reallocated inode
  2083. * during the lookup. We need to check under the
  2084. * i_flags_lock for a valid inode here. Skip it if it
  2085. * is not valid, the wrong inode or stale.
  2086. */
  2087. spin_lock(&ip->i_flags_lock);
  2088. if (ip->i_ino != inum + i ||
  2089. __xfs_iflags_test(ip, XFS_ISTALE)) {
  2090. spin_unlock(&ip->i_flags_lock);
  2091. rcu_read_unlock();
  2092. continue;
  2093. }
  2094. spin_unlock(&ip->i_flags_lock);
  2095. /*
  2096. * Don't try to lock/unlock the current inode, but we
  2097. * _cannot_ skip the other inodes that we did not find
  2098. * in the list attached to the buffer and are not
  2099. * already marked stale. If we can't lock it, back off
  2100. * and retry.
  2101. */
  2102. if (ip != free_ip) {
  2103. if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
  2104. rcu_read_unlock();
  2105. delay(1);
  2106. goto retry;
  2107. }
  2108. /*
  2109. * Check the inode number again in case we're
  2110. * racing with freeing in xfs_reclaim_inode().
  2111. * See the comments in that function for more
  2112. * information as to why the initial check is
  2113. * not sufficient.
  2114. */
  2115. if (ip->i_ino != inum + i) {
  2116. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  2117. rcu_read_unlock();
  2118. continue;
  2119. }
  2120. }
  2121. rcu_read_unlock();
  2122. xfs_iflock(ip);
  2123. xfs_iflags_set(ip, XFS_ISTALE);
  2124. /*
  2125. * we don't need to attach clean inodes or those only
  2126. * with unlogged changes (which we throw away, anyway).
  2127. */
  2128. iip = ip->i_itemp;
  2129. if (!iip || xfs_inode_clean(ip)) {
  2130. ASSERT(ip != free_ip);
  2131. xfs_ifunlock(ip);
  2132. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  2133. continue;
  2134. }
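/*
 * The inode is dirty; remember what has been logged, copy the flush
 * LSN and attach xfs_istale_done() so the inode log item is cleaned up
 * when the stale cluster buffer is invalidated.
 */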
  2135. iip->ili_last_fields = iip->ili_fields;
  2136. iip->ili_fields = 0;
  2137. iip->ili_fsync_fields = 0;
  2138. iip->ili_logged = 1;
  2139. xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
  2140. &iip->ili_item.li_lsn);
  2141. xfs_buf_attach_iodone(bp, xfs_istale_done,
  2142. &iip->ili_item);
  2143. if (ip != free_ip)
  2144. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  2145. }
  2146. xfs_trans_stale_inode_buf(tp, bp);
  2147. xfs_trans_binval(tp, bp);
  2148. }
  2149. xfs_perag_put(pag);
  2150. return 0;
  2151. }
  2152. /*
  2153. * Free any local-format buffers sitting around before we reset to
  2154. * extents format.
  2155. */
  2156. static inline void
  2157. xfs_ifree_local_data(
  2158. struct xfs_inode *ip,
  2159. int whichfork)
  2160. {
  2161. struct xfs_ifork *ifp;
  2162. if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
  2163. return;
  2164. ifp = XFS_IFORK_PTR(ip, whichfork);
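/* Shrinking the fork by if_bytes frees the inline (local format) data. */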
  2165. xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
  2166. }
  2167. /*
  2168. * This is called to return an inode to the inode free list.
  2169. * The inode should already be truncated to 0 length and have
  2170. * no pages associated with it. This routine also assumes that
  2171. * the inode is already a part of the transaction.
  2172. *
  2173. * The on-disk copy of the inode will have been added to the list
  2174. * of unlinked inodes in the AGI. We need to remove the inode from
  2175. * that list atomically with respect to freeing it here.
  2176. */
  2177. int
  2178. xfs_ifree(
  2179. xfs_trans_t *tp,
  2180. xfs_inode_t *ip,
  2181. struct xfs_defer_ops *dfops)
  2182. {
  2183. int error;
  2184. struct xfs_icluster xic = { 0 };
  2185. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  2186. ASSERT(VFS_I(ip)->i_nlink == 0);
  2187. ASSERT(ip->i_d.di_nextents == 0);
  2188. ASSERT(ip->i_d.di_anextents == 0);
  2189. ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
  2190. ASSERT(ip->i_d.di_nblocks == 0);
  2191. /*
  2192. * Pull the on-disk inode from the AGI unlinked list.
  2193. */
  2194. error = xfs_iunlink_remove(tp, ip);
  2195. if (error)
  2196. return error;
  2197. error = xfs_difree(tp, ip->i_ino, dfops, &xic);
  2198. if (error)
  2199. return error;
  2200. xfs_ifree_local_data(ip, XFS_DATA_FORK);
  2201. xfs_ifree_local_data(ip, XFS_ATTR_FORK);
  2202. VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
  2203. ip->i_d.di_flags = 0;
  2204. ip->i_d.di_dmevmask = 0;
  2205. ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
  2206. ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
  2207. ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
  2208. /*
  2209. * Bump the generation count so no one will be confused
  2210. * by reincarnations of this inode.
  2211. */
  2212. VFS_I(ip)->i_generation++;
  2213. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  2214. if (xic.deleted)
  2215. error = xfs_ifree_cluster(ip, tp, &xic);
  2216. return error;
  2217. }
  2218. /*
  2219. * This is called to unpin an inode. The caller must have the inode locked
  2220. * in at least shared mode so that the buffer cannot be subsequently pinned
  2221. * once someone is waiting for it to be unpinned.
  2222. */
  2223. static void
  2224. xfs_iunpin(
  2225. struct xfs_inode *ip)
  2226. {
  2227. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
  2228. trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
  2229. /* Give the log a push to start the unpinning I/O */
  2230. xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
  2231. }
  2232. static void
  2233. __xfs_iunpin_wait(
  2234. struct xfs_inode *ip)
  2235. {
  2236. wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
  2237. DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
  2238. xfs_iunpin(ip);
  2239. do {
  2240. prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
  2241. if (xfs_ipincount(ip))
  2242. io_schedule();
  2243. } while (xfs_ipincount(ip));
  2244. finish_wait(wq, &wait.wait);
  2245. }
  2246. void
  2247. xfs_iunpin_wait(
  2248. struct xfs_inode *ip)
  2249. {
  2250. if (xfs_ipincount(ip))
  2251. __xfs_iunpin_wait(ip);
  2252. }
  2253. /*
  2254. * Removing an inode from the namespace involves removing the directory entry
  2255. * and dropping the link count on the inode. Removing the directory entry can
  2256. * result in locking an AGF (directory blocks were freed) and removing a link
  2257. * count can result in placing the inode on an unlinked list which results in
  2258. * locking an AGI.
  2259. *
  2260. * The big problem here is that we have an ordering constraint on AGF and AGI
  2261. * locking - inode allocation locks the AGI, then can allocate a new extent for
  2262. * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
  2263. * removes the inode from the unlinked list, requiring that we lock the AGI
  2264. * first, and then freeing the inode can result in an inode chunk being freed
  2265. * and hence freeing disk space requiring that we lock an AGF.
  2266. *
  2267. * Hence the ordering that is imposed by other parts of the code is AGI before
  2268. * AGF. This means we cannot remove the directory entry before we drop the inode
  2269. * reference count and put it on the unlinked list as this results in a lock
  2270. * order of AGF then AGI, and this can deadlock against inode allocation and
  2271. * freeing. Therefore we must drop the link counts before we remove the
  2272. * directory entry.
  2273. *
  2274. * This is still safe from a transactional point of view - it is not until we
  2275. * get to xfs_defer_finish() that we have the possibility of multiple
  2276. * transactions in this operation. Hence as long as we remove the directory
  2277. * entry and drop the link count in the first transaction of the remove
  2278. * operation, there are no transactional constraints on the ordering here.
  2279. */
  2280. int
  2281. xfs_remove(
  2282. xfs_inode_t *dp,
  2283. struct xfs_name *name,
  2284. xfs_inode_t *ip)
  2285. {
  2286. xfs_mount_t *mp = dp->i_mount;
  2287. xfs_trans_t *tp = NULL;
  2288. int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
  2289. int error = 0;
  2290. struct xfs_defer_ops dfops;
  2291. xfs_fsblock_t first_block;
  2292. uint resblks;
  2293. trace_xfs_remove(dp, name);
  2294. if (XFS_FORCED_SHUTDOWN(mp))
  2295. return -EIO;
  2296. error = xfs_qm_dqattach(dp, 0);
  2297. if (error)
  2298. goto std_return;
  2299. error = xfs_qm_dqattach(ip, 0);
  2300. if (error)
  2301. goto std_return;
  2302. /*
  2303. * We try to get the real space reservation first,
  2304. * allowing for directory btree deletion(s) implying
  2305. * possible bmap insert(s). If we can't get the space
  2306. * reservation then we use 0 instead, and avoid the bmap
  2307. * btree insert(s) in the directory code by, if the bmap
  2308. * insert tries to happen, instead trimming the LAST
  2309. * block from the directory.
  2310. */
  2311. resblks = XFS_REMOVE_SPACE_RES(mp);
  2312. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
  2313. if (error == -ENOSPC) {
  2314. resblks = 0;
  2315. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
  2316. &tp);
  2317. }
  2318. if (error) {
  2319. ASSERT(error != -ENOSPC);
  2320. goto std_return;
  2321. }
  2322. xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
  2323. xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
  2324. xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
  2325. xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  2326. /*
  2327. * If we're removing a directory perform some additional validation.
  2328. */
  2329. if (is_dir) {
  2330. ASSERT(VFS_I(ip)->i_nlink >= 2);
  2331. if (VFS_I(ip)->i_nlink != 2) {
  2332. error = -ENOTEMPTY;
  2333. goto out_trans_cancel;
  2334. }
  2335. if (!xfs_dir_isempty(ip)) {
  2336. error = -ENOTEMPTY;
  2337. goto out_trans_cancel;
  2338. }
  2339. /* Drop the link from ip's "..". */
  2340. error = xfs_droplink(tp, dp);
  2341. if (error)
  2342. goto out_trans_cancel;
  2343. /* Drop the "." link from ip to self. */
  2344. error = xfs_droplink(tp, ip);
  2345. if (error)
  2346. goto out_trans_cancel;
  2347. } else {
  2348. /*
  2349. * When removing a non-directory we need to log the parent
  2350. * inode here. For a directory this is done implicitly
  2351. * by the xfs_droplink call for the ".." entry.
  2352. */
  2353. xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
  2354. }
  2355. xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  2356. /* Drop the link from dp to ip. */
  2357. error = xfs_droplink(tp, ip);
  2358. if (error)
  2359. goto out_trans_cancel;
  2360. xfs_defer_init(&dfops, &first_block);
  2361. error = xfs_dir_removename(tp, dp, name, ip->i_ino,
  2362. &first_block, &dfops, resblks);
  2363. if (error) {
  2364. ASSERT(error != -ENOENT);
  2365. goto out_bmap_cancel;
  2366. }
  2367. /*
  2368. * If this is a synchronous mount, make sure that the
  2369. * remove transaction goes to disk before returning to
  2370. * the user.
  2371. */
  2372. if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  2373. xfs_trans_set_sync(tp);
  2374. error = xfs_defer_finish(&tp, &dfops, NULL);
  2375. if (error)
  2376. goto out_bmap_cancel;
  2377. error = xfs_trans_commit(tp);
  2378. if (error)
  2379. goto std_return;
  2380. if (is_dir && xfs_inode_is_filestream(ip))
  2381. xfs_filestream_deassociate(ip);
  2382. return 0;
  2383. out_bmap_cancel:
  2384. xfs_defer_cancel(&dfops);
  2385. out_trans_cancel:
  2386. xfs_trans_cancel(tp);
  2387. std_return:
  2388. return error;
  2389. }
  2390. /*
  2391. * Enter all inodes for a rename transaction into a sorted array.
  2392. */
  2393. #define __XFS_SORT_INODES 5
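/*
 * At most five inodes take part in a rename: the two parent directories,
 * the source inode, an existing target inode and an optional whiteout.
 */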
  2394. STATIC void
  2395. xfs_sort_for_rename(
  2396. struct xfs_inode *dp1, /* in: old (source) directory inode */
  2397. struct xfs_inode *dp2, /* in: new (target) directory inode */
  2398. struct xfs_inode *ip1, /* in: inode of old entry */
  2399. struct xfs_inode *ip2, /* in: inode of new entry */
  2400. struct xfs_inode *wip, /* in: whiteout inode */
  2401. struct xfs_inode **i_tab,/* out: sorted array of inodes */
  2402. int *num_inodes) /* in/out: inodes in array */
  2403. {
  2404. int i, j;
  2405. ASSERT(*num_inodes == __XFS_SORT_INODES);
  2406. memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
  2407. /*
  2408. * i_tab contains a list of pointers to inodes. We initialize
  2409. * the table here & we'll sort it. We will then use it to
  2410. * order the acquisition of the inode locks.
  2411. *
  2412. * Note that the table may contain duplicates. e.g., dp1 == dp2.
  2413. */
  2414. i = 0;
  2415. i_tab[i++] = dp1;
  2416. i_tab[i++] = dp2;
  2417. i_tab[i++] = ip1;
  2418. if (ip2)
  2419. i_tab[i++] = ip2;
  2420. if (wip)
  2421. i_tab[i++] = wip;
  2422. *num_inodes = i;
  2423. /*
  2424. * Sort the elements via bubble sort. (Remember, there are at
  2425. * most 5 elements to sort, so this is adequate.)
  2426. */
  2427. for (i = 0; i < *num_inodes; i++) {
  2428. for (j = 1; j < *num_inodes; j++) {
  2429. if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
  2430. struct xfs_inode *temp = i_tab[j];
  2431. i_tab[j] = i_tab[j-1];
  2432. i_tab[j-1] = temp;
  2433. }
  2434. }
  2435. }
  2436. }
  2437. static int
  2438. xfs_finish_rename(
  2439. struct xfs_trans *tp,
  2440. struct xfs_defer_ops *dfops)
  2441. {
  2442. int error;
  2443. /*
  2444. * If this is a synchronous mount, make sure that the rename transaction
  2445. * goes to disk before returning to the user.
  2446. */
  2447. if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
  2448. xfs_trans_set_sync(tp);
  2449. error = xfs_defer_finish(&tp, dfops, NULL);
  2450. if (error) {
  2451. xfs_defer_cancel(dfops);
  2452. xfs_trans_cancel(tp);
  2453. return error;
  2454. }
  2455. return xfs_trans_commit(tp);
  2456. }
  2457. /*
  2458. * xfs_cross_rename()
  2459. *
2460. * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall
  2461. */
  2462. STATIC int
  2463. xfs_cross_rename(
  2464. struct xfs_trans *tp,
  2465. struct xfs_inode *dp1,
  2466. struct xfs_name *name1,
  2467. struct xfs_inode *ip1,
  2468. struct xfs_inode *dp2,
  2469. struct xfs_name *name2,
  2470. struct xfs_inode *ip2,
  2471. struct xfs_defer_ops *dfops,
  2472. xfs_fsblock_t *first_block,
  2473. int spaceres)
  2474. {
  2475. int error = 0;
  2476. int ip1_flags = 0;
  2477. int ip2_flags = 0;
  2478. int dp2_flags = 0;
  2479. /* Swap inode number for dirent in first parent */
  2480. error = xfs_dir_replace(tp, dp1, name1,
  2481. ip2->i_ino,
  2482. first_block, dfops, spaceres);
  2483. if (error)
  2484. goto out_trans_abort;
  2485. /* Swap inode number for dirent in second parent */
  2486. error = xfs_dir_replace(tp, dp2, name2,
  2487. ip1->i_ino,
  2488. first_block, dfops, spaceres);
  2489. if (error)
  2490. goto out_trans_abort;
  2491. /*
  2492. * If we're renaming one or more directories across different parents,
  2493. * update the respective ".." entries (and link counts) to match the new
  2494. * parents.
  2495. */
  2496. if (dp1 != dp2) {
  2497. dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
  2498. if (S_ISDIR(VFS_I(ip2)->i_mode)) {
  2499. error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
  2500. dp1->i_ino, first_block,
  2501. dfops, spaceres);
  2502. if (error)
  2503. goto out_trans_abort;
  2504. /* transfer ip2 ".." reference to dp1 */
  2505. if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
  2506. error = xfs_droplink(tp, dp2);
  2507. if (error)
  2508. goto out_trans_abort;
  2509. error = xfs_bumplink(tp, dp1);
  2510. if (error)
  2511. goto out_trans_abort;
  2512. }
  2513. /*
  2514. * Although ip1 isn't changed here, userspace needs
  2515. * to be warned about the change, so that applications
2516. * relying on it (like backup ones) are properly
2517. * notified of the change.
  2518. */
  2519. ip1_flags |= XFS_ICHGTIME_CHG;
  2520. ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
  2521. }
  2522. if (S_ISDIR(VFS_I(ip1)->i_mode)) {
  2523. error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
  2524. dp2->i_ino, first_block,
  2525. dfops, spaceres);
  2526. if (error)
  2527. goto out_trans_abort;
  2528. /* transfer ip1 ".." reference to dp2 */
  2529. if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
  2530. error = xfs_droplink(tp, dp1);
  2531. if (error)
  2532. goto out_trans_abort;
  2533. error = xfs_bumplink(tp, dp2);
  2534. if (error)
  2535. goto out_trans_abort;
  2536. }
  2537. /*
  2538. * Although ip2 isn't changed here, userspace needs
  2539. * to be warned about the change, so that applications
2540. * relying on it (like backup ones) are properly
2541. * notified of the change.
  2542. */
  2543. ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
  2544. ip2_flags |= XFS_ICHGTIME_CHG;
  2545. }
  2546. }
  2547. if (ip1_flags) {
  2548. xfs_trans_ichgtime(tp, ip1, ip1_flags);
  2549. xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
  2550. }
  2551. if (ip2_flags) {
  2552. xfs_trans_ichgtime(tp, ip2, ip2_flags);
  2553. xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
  2554. }
  2555. if (dp2_flags) {
  2556. xfs_trans_ichgtime(tp, dp2, dp2_flags);
  2557. xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
  2558. }
  2559. xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  2560. xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
  2561. return xfs_finish_rename(tp, dfops);
  2562. out_trans_abort:
  2563. xfs_defer_cancel(dfops);
  2564. xfs_trans_cancel(tp);
  2565. return error;
  2566. }
  2567. /*
  2568. * xfs_rename_alloc_whiteout()
  2569. *
2570. * Return a referenced, unlinked, unlocked inode that can be used as a
2571. * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2572. * crash between allocating the inode and linking it into the rename transaction,
  2573. * recovery will free the inode and we won't leak it.
  2574. */
  2575. static int
  2576. xfs_rename_alloc_whiteout(
  2577. struct xfs_inode *dp,
  2578. struct xfs_inode **wip)
  2579. {
  2580. struct xfs_inode *tmpfile;
  2581. int error;
  2582. error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
  2583. if (error)
  2584. return error;
  2585. /*
  2586. * Prepare the tmpfile inode as if it were created through the VFS.
  2587. * Otherwise, the link increment paths will complain about nlink 0->1.
  2588. * Drop the link count as done by d_tmpfile(), complete the inode setup
  2589. * and flag it as linkable.
  2590. */
  2591. drop_nlink(VFS_I(tmpfile));
  2592. xfs_setup_iops(tmpfile);
  2593. xfs_finish_inode_setup(tmpfile);
  2594. VFS_I(tmpfile)->i_state |= I_LINKABLE;
  2595. *wip = tmpfile;
  2596. return 0;
  2597. }
  2598. /*
  2599. * xfs_rename
  2600. */
  2601. int
  2602. xfs_rename(
  2603. struct xfs_inode *src_dp,
  2604. struct xfs_name *src_name,
  2605. struct xfs_inode *src_ip,
  2606. struct xfs_inode *target_dp,
  2607. struct xfs_name *target_name,
  2608. struct xfs_inode *target_ip,
  2609. unsigned int flags)
  2610. {
  2611. struct xfs_mount *mp = src_dp->i_mount;
  2612. struct xfs_trans *tp;
  2613. struct xfs_defer_ops dfops;
  2614. xfs_fsblock_t first_block;
  2615. struct xfs_inode *wip = NULL; /* whiteout inode */
  2616. struct xfs_inode *inodes[__XFS_SORT_INODES];
  2617. int num_inodes = __XFS_SORT_INODES;
  2618. bool new_parent = (src_dp != target_dp);
  2619. bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
  2620. int spaceres;
  2621. int error;
  2622. trace_xfs_rename(src_dp, target_dp, src_name, target_name);
  2623. if ((flags & RENAME_EXCHANGE) && !target_ip)
  2624. return -EINVAL;
  2625. /*
  2626. * If we are doing a whiteout operation, allocate the whiteout inode
  2627. * we will be placing at the target and ensure the type is set
  2628. * appropriately.
  2629. */
  2630. if (flags & RENAME_WHITEOUT) {
  2631. ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
  2632. error = xfs_rename_alloc_whiteout(target_dp, &wip);
  2633. if (error)
  2634. return error;
  2635. /* setup target dirent info as whiteout */
  2636. src_name->type = XFS_DIR3_FT_CHRDEV;
  2637. }
  2638. xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
  2639. inodes, &num_inodes);
	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
	if (error == -ENOSPC) {
		spaceres = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_release_wip;

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error)
		goto out_trans_cancel;

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 */
	if (!new_parent)
		xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
	else
		xfs_lock_two_inodes(src_dp, target_dp,
				    XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);

	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
	xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
	if (wip)
		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
		error = -EXDEV;
		goto out_trans_cancel;
	}

	xfs_defer_init(&dfops, &first_block);

	/* RENAME_EXCHANGE is unique from here on. */
	if (flags & RENAME_EXCHANGE)
		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
					target_dp, target_name, target_ip,
					&dfops, &first_block, spaceres);

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		if (!spaceres) {
			error = xfs_dir_canenter(tp, target_dp, target_name);
			if (error)
				goto out_trans_cancel;
		}
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
					   src_ip->i_ino, &first_block,
					   &dfops, spaceres);
		if (error)
			goto out_bmap_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
			error = xfs_bumplink(tp, target_dp);
			if (error)
				goto out_bmap_cancel;
		}
	} else { /* target_ip != NULL */
		/*
		 * If target exists and it's a directory, check that both
		 * target and source are directories and that target can be
		 * destroyed, or that neither is a directory.
		 */
		if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
			/*
			 * Make sure target dir is empty.
			 */
			if (!(xfs_dir_isempty(target_ip)) ||
			    (VFS_I(target_ip)->i_nlink > 2)) {
				error = -EEXIST;
				goto out_trans_cancel;
			}
		}

		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					src_ip->i_ino,
					&first_block, &dfops, spaceres);
		if (error)
			goto out_bmap_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto out_bmap_cancel;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto out_bmap_cancel;
		}
	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
					target_dp->i_ino,
					&first_block, &dfops, spaceres);
		ASSERT(error != -EEXIST);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp. This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {
		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * For whiteouts, we only need to update the source dirent with the
	 * inode number of the whiteout inode rather than removing it
	 * altogether.
	 */
	if (wip) {
		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
					&first_block, &dfops, spaceres);
	} else
		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					   &first_block, &dfops, spaceres);
	if (error)
		goto out_bmap_cancel;

	/*
	 * For whiteouts, we need to bump the link count on the whiteout inode.
	 * This means that failures all the way up to this point leave the inode
	 * on the unlinked list and so cleanup is a simple matter of dropping
	 * the remaining reference to it. If we fail here after bumping the link
	 * count, we're shutting down the filesystem so we'll never see the
	 * intermediate state on disk.
	 */
	if (wip) {
		ASSERT(VFS_I(wip)->i_nlink == 0);
		error = xfs_bumplink(tp, wip);
		if (error)
			goto out_bmap_cancel;
		error = xfs_iunlink_remove(tp, wip);
		if (error)
			goto out_bmap_cancel;
		xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);

		/*
		 * Now we have a real link, clear the "I'm a tmpfile" state
		 * flag from the inode so it doesn't accidentally get misused
		 * in future.
		 */
		VFS_I(wip)->i_state &= ~I_LINKABLE;
	}

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	error = xfs_finish_rename(tp, &dfops);
	if (wip)
		IRELE(wip);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_wip:
	if (wip)
		IRELE(wip);
	return error;
}
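
/*
 * Inode clustering: look up the other in-core inodes that live in the same
 * inode cluster as ip and, where they are dirty, unpinned and their locks
 * can be taken without blocking, flush them into the same backing buffer bp
 * so a single buffer write covers as many inodes as possible.
 */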
STATIC int
xfs_iflush_cluster(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			cilist_size;
	struct xfs_inode	**cilist;
	struct xfs_inode	*cip;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
	cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
	if (!cilist)
		goto out_put;

	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		cip = cilist[i];
		if (cip == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here. Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&cip->i_flags_lock);
		if (!cip->i_ino ||
		    __xfs_iflags_test(cip, XFS_ISTALE)) {
			spin_unlock(&cip->i_flags_lock);
			continue;
		}

		/*
		 * Once we fall off the end of the cluster, no point checking
		 * any more inodes in the list because they will also all be
		 * outside the cluster.
		 */
		if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
			spin_unlock(&cip->i_flags_lock);
			break;
		}
		spin_unlock(&cip->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing. These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
			continue;

		/*
		 * Try to get locks. If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(cip)) {
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(cip)) {
			xfs_ifunlock(cip);
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * Check the inode number again, just to be certain we are not
		 * racing with freeing in xfs_reclaim_inode(). See the comments
		 * in that function for more information as to why the initial
		 * check is not sufficient.
		 */
		if (!cip->i_ino) {
			xfs_ifunlock(cip);
			xfs_iunlock(cip, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed. First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(cip)) {
			int	error;

			error = xfs_iflush_int(cip, bp);
			if (error) {
				xfs_iunlock(cip, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(cip);
		}
		xfs_iunlock(cip, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(mp, xs_icluster_flushcnt);
		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(cilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop. Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer. If it was delwri, just release it --
	 * brelse can handle it with no problems. If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them. Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			bp->b_flags &= ~XBF_DONE;
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, -EIO);
			xfs_buf_ioend(bp);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(cip, false);
	kmem_free(cilist);
	xfs_perag_put(pag);
	return -EFSCORRUPTED;
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held. The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp = NULL;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(mp, xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes below. We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode. We are doing a try-lock
	 * operation here, so we may get an EAGAIN error. In that case, we
	 * simply want to return with the inode still dirty.
	 *
	 * If we get any other error, we effectively have a corruption situation
	 * and we cannot flush the inode, so we treat it the same as failing
	 * xfs_iflush_int().
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error == -EAGAIN) {
		xfs_ifunlock(ip);
		return error;
	}
	if (error)
		goto corrupt_out;

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	if (bp)
		xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = -EFSCORRUPTED;
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}
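
/*
 * Copy the dirty fields of the in-core inode into its on-disk image within
 * the cluster buffer bp, after sanity-checking the inode, and attach
 * xfs_iflush_done() to the buffer so the inode is removed from the AIL and
 * its flush lock released once the buffer write completes.
 */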
STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);
	ASSERT(ip->i_d.di_version > 1);

	/* set *dip = inode's place in the buffer */
	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
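
	/*
	 * Validate the on-disk magic number and the in-core format, extent
	 * counts and fork offset before writing anything back; any mismatch
	 * (or injected failure via XFS_TEST_ERROR) is treated as corruption
	 * and aborts the flush.
	 */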
	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			   mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing. We bump the flush
	 * iteration count so we can detect flushes which postdate a log record
	 * during recovery. This is redundant as we now log every change and
	 * hence this can't happen but we need to still do it to ensure
	 * backwards compatibility with old kernels that predate logging all
	 * inode changes.
	 */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/* Check the inline directory data. */
	if (S_ISDIR(VFS_I(ip)->i_mode) &&
	    ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
	    xfs_dir2_sf_verify(ip))
		goto corrupt_out;

	/*
	 * Copy the dirty parts of the inode into the on-disk inode. We always
	 * copy out the core of the inode, because if the inode is dirty at all
	 * the core must be.
	 */
	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk. If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field. When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk. As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits. Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL. Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer. This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return -EFSCORRUPTED;
}