/*	$OpenBSD: uvm_fault.c,v 1.84 2015/03/14 03:38:53 jsg Exp $	*/
/*	$NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
 */

/*
 * uvm_fault.c: fault handler
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <uvm/uvm.h>

/*
 *
 * a word on page faults:
 *
 * types of page faults we handle:
 *
 * CASE 1: upper layer faults                   CASE 2: lower layer faults
 *
 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
 *    read/write1     write>1                  read/write     +-cow_write/zero
 *         |             |                         |        |        |
 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
 * amap |  V  |       |  ----------->new|          |        | |  ^  |
 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
 *                                                 |        |    |
 *      +-----+       +-----+                   +--|--+     | +--|--+
 * uobj | d/c |       | d/c |                   |  V  |     +----|  |
 *      +-----+       +-----+                   +-----+        +-----+
 *
 * d/c = don't care
 *
 * case [0]: layerless fault
 *	no amap or uobj is present.   this is an error.
 *
 * case [1]: upper layer fault [anon active]
 *     1A: [read] or [write with anon->an_ref == 1]
 *		I/O takes place in top level anon and uobj is not touched.
 *     1B: [write with anon->an_ref > 1]
 *		new anon is alloc'd and data is copied off ["COW"]
 *
 * case [2]: lower layer fault [uobj]
 *     2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
 *		I/O takes place directly in object.
 *     2B: [write to copy_on_write] or [read on NULL uobj]
 *		data is "promoted" from uobj to a new anon.
 *		if uobj is null, then we zero fill.
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault:
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *	 that we could map in (and do map it if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *		fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 * unlock everything, and do the I/O.   when I/O is done we must reverify
 * the state of the world before assuming that our data structures are
 * valid.   [because mappings could change while the map is unlocked]
 *
 * alternative 1: unbusy the page in question and restart the page fault
 *    from the top (ReFault).   this is easy but does not take advantage
 *    of the information that we already have from our previous lookup,
 *    although it is possible that the "hints" in the vm_map will help here.
 *
 * alternative 2: the system already keeps track of a "version" number of
 *    a map.   [i.e. every time you write-lock a map (e.g. to change a
 *    mapping) you bump the version number up by one...]   so, we can save
 *    the version number of the map before we release the lock and start I/O.
 *    then when I/O is done we can relock and check the version numbers
 *    to see if anything changed.   this might save us some over alternative 1
 *    because we don't have to unbusy the page and may be less compares(?).
 *
 * alternative 3: put in backpointers or a way to "hold" part of a map
 *    in place while I/O is in progress.   this could be complex to
 *    implement (especially with structures like amap that can be referenced
 *    by multiple map entries, and figuring out what should wait could be
 *    complex as well...).
 *
 * given that we are not currently multiprocessor or multithreaded we might
 * as well choose alternative 2 now.   maybe alternative 3 would be useful
 * in the future.   XXX keep in mind for future consideration/rechecking.
 */

/*
 * local data structures
 */
struct uvm_advice {
	int nback;
	int nforw;
};

/*
 * page range array: set up in uvmfault_init().
 */
static struct uvm_advice uvmadvice[MADV_MASK + 1];

#define UVM_MAXRANGE 16	/* must be max() of nback+nforw+1 */

/*
 * private prototypes
 */
static void uvmfault_amapcopy(struct uvm_faultinfo *);
static __inline void uvmfault_anonflush(struct vm_anon **, int);
void	uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
void	uvmfault_update_stats(struct uvm_faultinfo *);

/*
 * inline functions
 */
/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
				pmap_page_protect(pg, PROT_NONE);
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
	}
}

/*
 * normal functions
 */
/*
 * uvmfault_init: compute proper values for the uvmadvice[] array.
 */
void
uvmfault_init()
{
	int npages;

	npages = atop(16384);
	if (npages > 0) {
		KASSERT(npages <= UVM_MAXRANGE / 2);
		uvmadvice[MADV_NORMAL].nforw = npages;
		uvmadvice[MADV_NORMAL].nback = npages - 1;
	}

	npages = atop(32768);
	if (npages > 0) {
		KASSERT(npages <= UVM_MAXRANGE / 2);
		uvmadvice[MADV_SEQUENTIAL].nforw = npages - 1;
		uvmadvice[MADV_SEQUENTIAL].nback = npages;
	}
}

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => if we are out of RAM we sleep (waiting for more)
 */
static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
	/* while we haven't done the job */
	while (1) {
		/* no mapping?  give up. */
		if (uvmfault_lookup(ufi, TRUE) == FALSE)
			return;

		/* copy if needed. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
			    ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/* didn't work?  must be out of RAM.  sleep. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, TRUE);
			uvm_wait("fltamapcopy");
			continue;
		}

		/* got it! */
		uvmfault_unlockmaps(ufi, TRUE);
		return;
	}
	/*NOTREACHED*/
}

/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => we don't move the page on the queues [gets moved later]
 * => if we allocate a new page [we_own], it gets put on the queues.
 *    either way, the result is that the page is on the queues at return time
 * => for pages which are on loan from a uvm_object (and thus are not
 *    owned by the anon): if successful, we return with the owning object
 */
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int result;

	result = 0;		/* XXX shut up gcc */
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->an_page)
		curproc->p_ru.ru_minflt++;
	else
		curproc->p_ru.ru_majflt++;

	/* loop until we get it, or fail. */
	while (1) {
		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->an_page;
		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.   call out to uvm_anon_lockloanpg() to
		 * ensure the real owner of the page has been identified.
		 */
		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);
		/* page there?   make sure it is not busy/released. */
		if (pg) {
			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */
			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
				return (VM_PAGER_OK);
			}
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of page
			 */
			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVM_WAIT(pg, FALSE, "anonget1", 0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVM_WAIT(pg, 0, "anonget2", 0);
			}
			/* ready to relock and try again */
		} else {
			/* no page, we must try and bring it in. */
			pg = uvm_pagealloc(NULL, 0, anon, 0);
			if (pg == NULL) {		/* out of RAM.  */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				uvm_wait("flt_noram1");
				/* ready to relock and try again */
			} else {
				/* we set the PG_BUSY bit */
				we_own = TRUE;
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				result = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
				/* ready to relock and try again */
			}
		}

		/* now relock and try again */
		locked = uvmfault_relock(ufi);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O. there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */
		if (we_own) {
			if (pg->pg_flags & PG_WANTED) {
				wakeup(pg);
			}

			/* un-busy! */
			atomic_clearbits_int(&pg->pg_flags,
			    PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);

			/*
			 * if we were RELEASED during I/O, then our anon is
			 * no longer part of an amap.   we need to free the
			 * anon and try again.
			 */
			if (pg->pg_flags & PG_RELEASED) {
				pmap_page_protect(pg, PROT_NONE);
				uvm_anfree(anon);	/* frees page for us */
				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    NULL);
				uvmexp.fltpgrele++;
				return (VM_PAGER_REFAULT);	/* refault! */
			}

			if (result != VM_PAGER_OK) {
				KASSERT(result != VM_PAGER_PEND);

				/* remove page from anon */
				anon->an_page = NULL;

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				return (VM_PAGER_ERROR);
			}

			/*
			 * must be OK, clear modify (already PG_CLEAN)
			 * and activate
			 */
			pmap_clear_modify(pg);
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
		}

		/* we were not able to relock.   restart fault. */
		if (!locked)
			return (VM_PAGER_REFAULT);

		/* verify no one touched the amap and moved the anon on us. */
		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start) != anon) {
			uvmfault_unlockall(ufi, amap, NULL, anon);
			return (VM_PAGER_REFAULT);
		}

		/* try it again! */
		uvmexp.fltanretry++;
		continue;

	} /* while (1) */
	/*NOTREACHED*/
}

/*
 * Update statistics after fault resolution.
 * - maxrss
 */
void
uvmfault_update_stats(struct uvm_faultinfo *ufi)
{
	struct vm_map *map;
	struct proc *p;
	vsize_t res;
#ifndef pmap_resident_count
	struct vmspace *vm;
#endif

	map = ufi->orig_map;

	/* Update the maxrss for the process. */
	if (map->flags & VM_MAP_ISVMSPACE) {
		p = curproc;
		KASSERT(p != NULL && &p->p_vmspace->vm_map == map);

#ifdef pmap_resident_count
		res = pmap_resident_count(map->pmap);
#else
		/*
		 * Rather inaccurate, but this is the current anon size
		 * of the vmspace.  It's basically the resident size
		 * minus the mmapped in files/text.
		 */
		vm = (struct vmspace*)map;
		res = vm->dsize;
#endif

		/* Convert res from pages to kilobytes. */
		res <<= (PAGE_SHIFT - 10);

		if (p->p_ru.ru_maxrss < res)
			p->p_ru.ru_maxrss = res;
	}
}

/*
 *   F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_fault: page fault handler
 *
 * => called from MD code to resolve a page fault
 * => VM data structures usually should be unlocked.   however, it is
 *	possible to call here with the main map locked if the caller
 *	gets a write lock, sets it recursive, and then calls us (c.f.
 *	uvm_map_pageable).   this should be avoided because it keeps
 *	the map locked off during I/O.
 */
#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~PROT_WRITE : PROT_MASK)
int
uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
    vm_prot_t access_type)
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot;
	boolean_t wired, narrow, promote, locked, shadowed;
	int npages, nback, nforw, centeridx, result, lcv, gotpages;
	vaddr_t startva, currva;
	voff_t uoff;
	paddr_t pa;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;

	anon = NULL;
	pg = NULL;

	uvmexp.faults++;	/* XXX: locking? */

	/* init the IN parameters in the ufi */
	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	if (fault_type == VM_FAULT_WIRE)
		narrow = TRUE;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = FALSE;		/* normal fault */

	/* "goto ReFault" means restart the page fault from ground zero. */
ReFault:
	/* lookup and lock the maps */
	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
		return (EFAULT);
	}

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
		    ufi.map, vaddr);
#endif

	/* check protection */
	if ((ufi.entry->protection & access_type) != access_type) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EACCES);
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */
	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
	if (wired)
		access_type = enter_prot;	/* full access for wired */

	/* handle "needs_copy" case. */
	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		if ((access_type & PROT_WRITE) ||
		    (ufi.entry->object.uvm_obj == NULL)) {
			/* need to clear */
			uvmfault_unlockmaps(&ufi, FALSE);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;
		} else {
			/*
			 * ensure that we pmap_enter page R/O since
			 * needs_copy is still true
			 */
			enter_prot &= ~PROT_WRITE;
		}
	}
	/* identify the players */
	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.  if nothing backing the entry then
	 * error now.
	 */
	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EFAULT);
	}

	/*
	 * establish range of interest based on advice from mapper
	 * and then clip to fit map entry.   note that we only want
	 * to do this the first time through the fault.   if we
	 * ReFault we will disable this by setting "narrow" to true.
	 */
	if (narrow == FALSE) {
		/* wide fault (!narrow) */
		nback = min(uvmadvice[ufi.entry->advice].nback,
		    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT);
		nforw = min(uvmadvice[ufi.entry->advice].nforw,
		    ((ufi.entry->end - ufi.orig_rvaddr) >>
		    PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw
		 */
		npages = nback + nforw + 1;
		centeridx = nback;
		narrow = TRUE;	/* ensure only once per-fault */
	} else {
		/* narrow fault! */
		nback = nforw = 0;
		startva = ufi.orig_rvaddr;
		npages = 1;
		centeridx = 0;
	}

	/* if we've got an amap, extract current anons. */
	if (amap) {
		anons = anons_store;
		amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
		    anons, npages);
	} else {
		anons = NULL;	/* to be safe */
	}

	/*
	 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
	 * now and then forget about them (for the rest of the fault).
	 */
	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
		/* flush back-page anons? */
		if (amap)
			uvmfault_anonflush(anons, nback);

		/* flush object? */
		if (uobj) {
			uoff = (startva - ufi.entry->start) + ufi.entry->offset;
			(void) uobj->pgops->pgo_flush(uobj, uoff, uoff +
			    ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE);
		}

		/* now forget about the backpages */
		if (amap)
			anons += nback;
		startva += ((vsize_t)nback << PAGE_SHIFT);
		npages -= nback;
		centeridx = 0;
	}
	/*
	 * map in the backpages and frontpages we found in the amap in hopes
	 * of preventing future faults.    we also init the pages[] array as
	 * we go.
	 */
	currva = startva;
	shadowed = FALSE;
	for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {
		/*
		 * dont play with VAs that are already mapped
		 * (except for center)
		 */
		if (lcv != centeridx &&
		    pmap_extract(ufi.orig_map->pmap, currva, &pa)) {
			pages[lcv] = PGO_DONTCARE;
			continue;
		}

		/* unmapped or center page.   check if any anon at this level. */
		if (amap == NULL || anons[lcv] == NULL) {
			pages[lcv] = NULL;
			continue;
		}

		/* check for present page and map if possible.   re-activate it. */
		pages[lcv] = PGO_DONTCARE;
		if (lcv == centeridx) {		/* save center for later! */
			shadowed = TRUE;
			continue;
		}
		anon = anons[lcv];

		/* ignore loaned pages */
		if (anon->an_page && anon->an_page->loan_count == 0 &&
		    (anon->an_page->pg_flags & (PG_RELEASED|PG_BUSY)) == 0) {
			uvm_lock_pageq();
			uvm_pageactivate(anon->an_page);	/* reactivate */
			uvm_unlock_pageq();
			uvmexp.fltnamap++;

			/*
			 * Since this isn't the page that's actually faulting,
			 * ignore pmap_enter() failures; it's not critical
			 * that we enter these right now.
			 */
			(void) pmap_enter(ufi.orig_map->pmap, currva,
			    VM_PAGE_TO_PHYS(anon->an_page),
			    (anon->an_ref > 1) ? (enter_prot & ~PROT_WRITE) :
			    enter_prot,
			    PMAP_CANFAIL |
			     (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
		}
	}
	if (npages > 1)
		pmap_update(ufi.orig_map->pmap);

	/* (shadowed == TRUE) if there is an anon at the faulting address */
	/*
	 * note that if we are really short of RAM we could sleep in the above
	 * call to pmap_enter.   bad?
	 *
	 * XXX Actually, that is bad; pmap_enter() should just fail in that
	 * XXX case.  --thorpej
	 */
	/*
	 * if the desired page is not shadowed by the amap and we have a
	 * backing object, then we check to see if the backing object would
	 * prefer to handle the fault itself (rather than letting us do it
	 * with the usual pgo_get hook).  the backing object signals this by
	 * providing a pgo_fault routine.
	 */
	if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
		result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
		    centeridx, fault_type, access_type,
		    PGO_LOCKED);

		if (result == VM_PAGER_OK)
			return (0);		/* pgo_fault did pmap enter */
		else if (result == VM_PAGER_REFAULT)
			goto ReFault;		/* try again! */
		else
			return (EACCES);
	}

	/*
	 * now, if the desired page is not shadowed by the amap and we have
	 * a backing object that does not have a special fault routine, then
	 * we ask (with pgo_get) the object for resident pages that we care
	 * about and attempt to map them in.  we do not let pgo_get block
	 * (PGO_LOCKED).
	 *
	 * ("get" has the option of doing a pmap_enter for us)
	 */
	if (uobj && shadowed == FALSE) {
		uvmexp.fltlget++;
		gotpages = npages;
		(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
		    (startva - ufi.entry->start),
		    pages, &gotpages, centeridx,
		    access_type & MASK(ufi.entry),
		    ufi.entry->advice, PGO_LOCKED);

		/* check for pages to map, if we got any */
		uobjpage = NULL;
		if (gotpages) {
			currva = startva;
			for (lcv = 0 ; lcv < npages ;
			    lcv++, currva += PAGE_SIZE) {
				if (pages[lcv] == NULL ||
				    pages[lcv] == PGO_DONTCARE)
					continue;

				KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);

				/*
				 * if center page is resident and not
				 * PG_BUSY, then pgo_get made it PG_BUSY
				 * for us and gave us a handle to it.
				 * remember this page as "uobjpage."
				 * (for later use).
				 */
				if (lcv == centeridx) {
					uobjpage = pages[lcv];
					continue;
				}

				/*
				 * note: calling pgo_get with locked data
				 * structures returns us pages which are
				 * neither busy nor released, so we don't
				 * need to check for this.   we can just
				 * directly enter the page (after moving it
				 * to the head of the active queue [useful?]).
				 */
				uvm_lock_pageq();
				uvm_pageactivate(pages[lcv]);	/* reactivate */
				uvm_unlock_pageq();
				uvmexp.fltnomap++;

				/*
				 * Since this page isn't the page that's
				 * actually faulting, ignore pmap_enter()
				 * failures; it's not critical that we
				 * enter these right now.
				 */
				(void) pmap_enter(ufi.orig_map->pmap, currva,
				    VM_PAGE_TO_PHYS(pages[lcv]),
				    enter_prot & MASK(ufi.entry),
				    PMAP_CANFAIL |
				     (wired ? PMAP_WIRED : 0));

				/*
				 * NOTE: page can't be PG_WANTED because
				 * we've held the lock the whole time
				 * we've had the handle.
				 */
				atomic_clearbits_int(&pages[lcv]->pg_flags,
				    PG_BUSY);
				UVM_PAGE_OWN(pages[lcv], NULL);
			}	/* for "lcv" loop */
			pmap_update(ufi.orig_map->pmap);
		}   /* "gotpages" != 0 */
		/* note: object still _locked_ */
	} else {
		uobjpage = NULL;
	}

	/*
	 * note that at this point we are done with any front or back pages.
	 * we are now going to focus on the center page (i.e. the one we've
	 * faulted on).  if we have faulted on the top (anon) layer
	 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
	 * not touched it yet).  if we have faulted on the bottom (uobj)
	 * layer [i.e. case 2] and the page was both present and available,
	 * then we've got a pointer to it as "uobjpage" and we've already
	 * made it BUSY.
	 */
	/*
	 * there are four possible cases we must address: 1A, 1B, 2A, and 2B
	 */
	/* redirect case 2: if we are not shadowed, go to case 2. */
	if (shadowed == FALSE)
		goto Case2;

	/* handle case 1: fault on an anon in our amap */
	anon = anons[centeridx];

	/*
	 * no matter if we have case 1A or case 1B we are going to need to
	 * have the anon's memory resident.   ensure that now.
	 */
	/*
	 * let uvmfault_anonget do the dirty work.
	 * also, if it is OK, then the anon's page is on the queues.
	 */
	result = uvmfault_anonget(&ufi, amap, anon);
	switch (result) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_REFAULT:
		goto ReFault;

	case VM_PAGER_ERROR:
		/*
		 * An error occurred while trying to bring in the
		 * page -- this is the only error we return right
		 * now.
		 */
		return (EACCES);	/* XXX */

	default:
#ifdef DIAGNOSTIC
		panic("uvm_fault: uvmfault_anonget -> %d", result);
#else
		return (EACCES);
#endif
	}
	/* uobj is non null if the page is on loan from an object (i.e. uobj) */
	uobj = anon->an_page->uobject;

	/* special handling for loaned pages */
	if (anon->an_page->loan_count) {
		if ((access_type & PROT_WRITE) == 0) {
			/*
			 * for read faults on loaned pages we just cap the
			 * protection at read-only.
			 */
			enter_prot = enter_prot & ~PROT_WRITE;
		} else {
			/*
			 * note that we can't allow writes into a loaned page!
			 *
			 * if we have a write fault on a loaned page in an
			 * anon then we need to look at the anon's ref count.
			 * if it is greater than one then we are going to do
			 * a normal copy-on-write fault into a new anon (this
			 * is not a problem).  however, if the reference count
			 * is one (a case where we would normally allow a
			 * write directly to the page) then we need to kill
			 * the loan before we continue.
			 */
			/* >1 case is already ok */
			if (anon->an_ref == 1) {
				/* get new un-owned replacement page */
				pg = uvm_pagealloc(NULL, 0, NULL, 0);
				if (pg == NULL) {
					uvmfault_unlockall(&ufi, amap, uobj,
					    anon);
					uvm_wait("flt_noram2");
					goto ReFault;
				}

				/* copy data, kill loan */
				/* copy old -> new */
				uvm_pagecopy(anon->an_page, pg);
				/* force reload */
				pmap_page_protect(anon->an_page, PROT_NONE);
				uvm_lock_pageq();	/* KILL loan */
				if (uobj)
					/* if we were loaning */
					anon->an_page->loan_count--;
				anon->an_page->uanon = NULL;
				/* in case we owned */
				atomic_clearbits_int(
				    &anon->an_page->pg_flags, PQ_ANON);
				uvm_pageactivate(pg);
				uvm_unlock_pageq();
				if (uobj) {
					uobj = NULL;
				}

				/* install new page in anon */
				anon->an_page = pg;
				pg->uanon = anon;
				atomic_setbits_int(&pg->pg_flags, PQ_ANON);
				atomic_clearbits_int(&pg->pg_flags,
				    PG_BUSY|PG_FAKE);
				UVM_PAGE_OWN(pg, NULL);
			}	/* ref == 1 */
		}		/* write fault */
	}			/* loan count */

	/*
	 * if we are case 1B then we will need to allocate a new blank
	 * anon to transfer the data into.   note that we have a lock
	 * on anon, so no one can busy or release the page until we are done.
	 * also note that the ref count can't drop to zero here because
	 * it is > 1 and we are only dropping one ref.
	 *
	 * in the (hopefully very rare) case that we are out of RAM we
	 * will wait for more RAM, and refault.
	 *
	 * if we are out of anon VM we kill the process (XXX: could wait?).
	 */
	if ((access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
		uvmexp.flt_acow++;
		oanon = anon;		/* oanon = old */
		anon = uvm_analloc();
		if (anon) {
			pg = uvm_pagealloc(NULL, 0, anon, 0);
		}

		/* check for out of RAM */
		if (anon == NULL || pg == NULL) {
			if (anon)
				uvm_anfree(anon);
			uvmfault_unlockall(&ufi, amap, uobj, oanon);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
				uvmexp.fltnoanon++;
				return (ENOMEM);
			}

			uvmexp.fltnoram++;
			uvm_wait("flt_noram3");	/* out of RAM, wait for more */
			goto ReFault;
		}

		/* got all resources, replace anon with nanon */
		uvm_pagecopy(oanon->an_page, pg);	/* pg now !PG_CLEAN */
		/* un-busy! new page */
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
		    anon, 1);

		/* deref: can not drop to zero here by defn! */
		oanon->an_ref--;

		/*
		 * note: anon is _not_ locked, but we have the sole references
		 * to it from amap.
		 * thus, no one can get at it until we are done with it.
		 */
	} else {
		uvmexp.flt_anon++;
		oanon = anon;
		pg = anon->an_page;
		if (anon->an_ref > 1)	/* disallow writes to ref > 1 anons */
			enter_prot = enter_prot & ~PROT_WRITE;
	}
	/*
	 * now map the page in ...
	 * XXX: old fault unlocks object before pmap_enter.  this seems
	 * suspect since some other thread could blast the page out from
	 * under us between the unlock and the pmap_enter.
	 */
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {
		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */
		uvmfault_unlockall(&ufi, amap, uobj, oanon);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			/* XXX instrumentation */
			return (ENOMEM);
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail1");
		goto ReFault;
	}

	/* ... update the page queues. */
	uvm_lock_pageq();

	if (fault_type == VM_FAULT_WIRE) {
		uvm_pagewire(pg);
		/*
		 * since the now-wired page cannot be paged out,
		 * release its swap resources for others to use.
		 * since an anon with no swap cannot be PG_CLEAN,
		 * clear its clean flag now.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_anon_dropswap(anon);
	} else {
		/* activate it */
		uvm_pageactivate(pg);
	}

	uvm_unlock_pageq();

	/* done case 1!  finish up by unlocking everything and returning success */
	uvmfault_unlockall(&ufi, amap, uobj, oanon);
	pmap_update(ufi.orig_map->pmap);
	return (0);
Case2:
	/* handle case 2: faulting on backing object or zero fill */
	/*
	 * note that uobjpage can not be PGO_DONTCARE at this point.  we now
	 * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
	 * have a backing object, check and see if we are going to promote
	 * the data up to an anon during the fault.
	 */
	if (uobj == NULL) {
		uobjpage = PGO_DONTCARE;
		promote = TRUE;		/* always need anon here */
	} else {
		KASSERT(uobjpage != PGO_DONTCARE);
		promote = (access_type & PROT_WRITE) &&
		     UVM_ET_ISCOPYONWRITE(ufi.entry);
	}

	/*
	 * if uobjpage is not null then we do not need to do I/O to get the
	 * uobjpage.
	 *
	 * if uobjpage is null, then we need to ask the pager to
	 * get the data for us.   once we have the data, we need to reverify
	 * the state of the world.   we are currently not holding any
	 * resources.
	 */
	if (uobjpage) {
		/* update rusage counters */
		curproc->p_ru.ru_minflt++;
	} else {
		/* update rusage counters */
		curproc->p_ru.ru_majflt++;

		uvmfault_unlockall(&ufi, amap, NULL, NULL);

		uvmexp.fltget++;
		gotpages = 1;
		uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
		result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
		    0, access_type & MASK(ufi.entry), ufi.entry->advice,
		    PGO_SYNCIO);

		/* recover from I/O */
		if (result != VM_PAGER_OK) {
			KASSERT(result != VM_PAGER_PEND);

			if (result == VM_PAGER_AGAIN) {
				tsleep(&lbolt, PVM, "fltagain2", 0);
				goto ReFault;
			}

			if (!UVM_ET_ISNOFAULT(ufi.entry))
				return (EACCES); /* XXX i/o error */

			uobjpage = PGO_DONTCARE;
			promote = TRUE;
		}

		/* re-verify the state of the world. */
		locked = uvmfault_relock(&ufi);

		/*
		 * Re-verify that amap slot is still free. if there is
		 * a problem, we clean up.
		 */
		if (locked && amap && amap_lookup(&ufi.entry->aref,
		    ufi.orig_rvaddr - ufi.entry->start)) {
			if (locked)
				uvmfault_unlockall(&ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/* didn't get the lock?   release the page and retry. */
		if (locked == FALSE && uobjpage != PGO_DONTCARE) {
			uvm_lock_pageq();
			/* make sure it is in queues */
			uvm_pageactivate(uobjpage);
			uvm_unlock_pageq();

			if (uobjpage->pg_flags & PG_WANTED)
				/* still holding object lock */
				wakeup(uobjpage);
			atomic_clearbits_int(&uobjpage->pg_flags,
			    PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			goto ReFault;
		}

		/*
		 * we have the data in uobjpage which is PG_BUSY
		 */
	}
	/*
	 * notes:
	 *  - at this point uobjpage can not be NULL
	 *  - at this point uobjpage could be PG_WANTED (handle later)
	 */
	if (promote == FALSE) {
		/*
		 * we are not promoting.   if the mapping is COW ensure that we
		 * don't give more access than we should (e.g. when doing a read
		 * fault on a COPYONWRITE mapping we want to map the COW page in
		 * R/O even though the entry protection could be R/W).
		 *
		 * set "pg" to the page we want to map in (uobjpage, usually)
		 */
		uvmexp.flt_obj++;
		if (UVM_ET_ISCOPYONWRITE(ufi.entry))
			enter_prot &= ~PROT_WRITE;
		pg = uobjpage;		/* map in the actual object */

		/* assert(uobjpage != PGO_DONTCARE) */

		/*
		 * we are faulting directly on the page.   be careful
		 * about writing to loaned pages...
		 */
		if (uobjpage->loan_count) {
			if ((access_type & PROT_WRITE) == 0) {
				/* read fault: cap the protection at readonly */
				/* cap! */
				enter_prot = enter_prot & ~PROT_WRITE;
			} else {
				/* write fault: must break the loan here */
				/* alloc new un-owned page */
				pg = uvm_pagealloc(NULL, 0, NULL, 0);
				if (pg == NULL) {
					/*
					 * drop ownership of page, it can't
					 * be released
					 */
					if (uobjpage->pg_flags & PG_WANTED)
						wakeup(uobjpage);
					atomic_clearbits_int(
					    &uobjpage->pg_flags,
					    PG_BUSY|PG_WANTED);
					UVM_PAGE_OWN(uobjpage, NULL);

					uvm_lock_pageq();
					/* activate: we will need it later */
					uvm_pageactivate(uobjpage);
					uvm_unlock_pageq();

					uvmfault_unlockall(&ufi, amap, uobj,
					    NULL);
					uvmexp.fltnoram++;
					uvm_wait("flt_noram4");
					goto ReFault;
				}

				/*
				 * copy the data from the old page to the new
				 * one and clear the fake/clean flags on the
				 * new page (keep it busy).  force a reload
				 * of the old page by clearing it from all
				 * pmaps.  then lock the page queues to
				 * rename the pages.
				 */
				uvm_pagecopy(uobjpage, pg);	/* old -> new */
				atomic_clearbits_int(&pg->pg_flags,
				    PG_FAKE|PG_CLEAN);
				pmap_page_protect(uobjpage, PROT_NONE);
				if (uobjpage->pg_flags & PG_WANTED)
					wakeup(uobjpage);
				atomic_clearbits_int(&uobjpage->pg_flags,
				    PG_BUSY|PG_WANTED);
				UVM_PAGE_OWN(uobjpage, NULL);

				uvm_lock_pageq();
				uoff = uobjpage->offset;
				/* remove old page */
				uvm_pagerealloc(uobjpage, NULL, 0);

				/*
				 * at this point we have absolutely no
				 * control over uobjpage
				 */
				/* install new page */
				uvm_pagerealloc(pg, uobj, uoff);
				uvm_unlock_pageq();

				/*
				 * done!  loan is broken and "pg" is
				 * PG_BUSY.   it can now replace uobjpage.
				 */
				uobjpage = pg;
			}	/* write fault case */
		}	/* if loan_count */
	} else {
		/*
		 * if we are going to promote the data to an anon we
		 * allocate a blank anon here and plug it into our amap.
		 */
#ifdef DIAGNOSTIC
		if (amap == NULL)
			panic("uvm_fault: want to promote data, but no anon");
#endif
		anon = uvm_analloc();
		if (anon) {
			/*
			 * In `Fill in data...' below, if
			 * uobjpage == PGO_DONTCARE, we want
			 * a zero'd, dirty page, so have
			 * uvm_pagealloc() do that for us.
			 */
			pg = uvm_pagealloc(NULL, 0, anon,
			    (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
		}

		/*
		 * out of memory resources?
		 */
		if (anon == NULL || pg == NULL) {
			/* arg!  must unbusy our page and fail or sleep. */
			if (uobjpage != PGO_DONTCARE) {
				uvm_lock_pageq();
				uvm_pageactivate(uobjpage);
				uvm_unlock_pageq();

				if (uobjpage->pg_flags & PG_WANTED)
					wakeup(uobjpage);
				atomic_clearbits_int(&uobjpage->pg_flags,
				    PG_BUSY|PG_WANTED);
				UVM_PAGE_OWN(uobjpage, NULL);
			}

			/* unlock and fail ... */
			uvmfault_unlockall(&ufi, amap, uobj, NULL);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
				uvmexp.fltnoanon++;
				return (ENOMEM);
			}

			uvm_anfree(anon);
			uvmexp.fltnoram++;
			uvm_wait("flt_noram5");
			goto ReFault;
		}

		/* fill in the data */
		if (uobjpage != PGO_DONTCARE) {
			uvmexp.flt_prcopy++;
			/* copy page [pg now dirty] */
			uvm_pagecopy(uobjpage, pg);

			/*
			 * promote to shared amap?  make sure all sharing
			 * procs see it
			 */
			if ((amap_flags(amap) & AMAP_SHARED) != 0) {
				pmap_page_protect(uobjpage, PROT_NONE);
			}

			/* dispose of uobjpage.  drop handle to uobj as well. */
			if (uobjpage->pg_flags & PG_WANTED)
				wakeup(uobjpage);
			atomic_clearbits_int(&uobjpage->pg_flags,
			    PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			uvm_lock_pageq();
			uvm_pageactivate(uobjpage);
			uvm_unlock_pageq();
			uobj = NULL;
		} else {
			uvmexp.flt_przero++;
			/*
			 * Page is zero'd and marked dirty by uvm_pagealloc()
			 * above.
			 */
		}

		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
		    anon, 0);
	}
	/* note: pg is either the uobjpage or the new page in the new anon */
	/*
	 * all resources are present.   we can now map it in and free our
	 * resources.
	 */
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {
		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);

		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(&ufi, amap, uobj, NULL);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			/* XXX instrumentation */
			return (ENOMEM);
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail2");
		goto ReFault;
	}

	uvm_lock_pageq();

	if (fault_type == VM_FAULT_WIRE) {
		uvm_pagewire(pg);
		if (pg->pg_flags & PQ_AOBJ) {
			/*
			 * since the now-wired page cannot be paged out,
			 * release its swap resources for others to use.
			 * since an aobj page with no swap cannot be PG_CLEAN,
			 * clear its clean flag now.
			 */
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
		}
	} else {
		/* activate it */
		uvm_pageactivate(pg);
	}
	uvm_unlock_pageq();

	if (pg->pg_flags & PG_WANTED)
		wakeup(pg);

	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(&ufi, amap, uobj, NULL);
	pmap_update(ufi.orig_map->pmap);
	return (0);
}

/*
 * uvm_fault_wire: wire down a range of virtual addresses in a map.
 *
 * => map may be read-locked by caller, but MUST NOT be write-locked.
 * => if map is read-locked, any operations which may cause map to
 *	be write-locked in uvm_fault() must be taken care of by
 *	the caller.   See uvm_map_pageable().
 */
int
uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
{
	vaddr_t va;
	pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * now fault it in a page at a time.   if the fault fails then we have
	 * to undo what we have done.   note that in uvm_fault PROT_NONE
	 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
	 */
	for (va = start ; va < end ; va += PAGE_SIZE) {
		rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
		if (rv) {
			if (va != start) {
				uvm_fault_unwire(map, start, va);
			}
			return (rv);
		}
	}

	return (0);
}

/*
 * uvm_fault_unwire(): unwire range of virtual space.
 */
void
uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end)
{
	vm_map_lock_read(map);
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
}

/*
 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
 *
 * => map must be at least read-locked.
 */
void
uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
{
	vm_map_entry_t entry, next;
	pmap_t pmap = vm_map_pmap(map);
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * we assume that the area we are unwiring has actually been wired
	 * in the first place.   this means that we should be able to extract
	 * the PAs from the pmap.   we also lock out the page daemon so that
	 * we can call uvm_pageunwire.
	 */
	uvm_lock_pageq();

	/* find the beginning map entry for the region. */
	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
		panic("uvm_fault_unwire_locked: address not in map");

	for (va = start; va < end ; va += PAGE_SIZE) {
		if (pmap_extract(pmap, va, &pa) == FALSE)
			continue;

		/* find the map entry for the current address. */
		KASSERT(va >= entry->start);
		while (va >= entry->end) {
			next = RB_NEXT(uvm_map_addr, &map->addr, entry);
			KASSERT(next != NULL && next->start <= entry->end);
			entry = next;
		}

		/* if the entry is no longer wired, tell the pmap. */
		if (VM_MAPENT_ISWIRED(entry) == 0)
			pmap_unwire(pmap, va);

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg)
			uvm_pageunwire(pg);
	}

	uvm_unlock_pageq();
}

/*
 * uvmfault_unlockmaps: unlock the maps
 */
void
uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */
	if (ufi == NULL) {
		return;
	}

	uvmfault_update_stats(ufi);
	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */
void
uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct uvm_object *uobj, struct vm_anon *anon)
{
	uvmfault_unlockmaps(ufi, FALSE);
}

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */
boolean_t
uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
{
	vm_map_t tmpmap;

	/* init ufi values for lookup. */
	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.   note that there can
	 * only be two levels so we won't loop very long.
	 */
	while (1) {
		if (ufi->orig_rvaddr < ufi->map->min_offset ||
		    ufi->orig_rvaddr >= ufi->map->max_offset)
			return(FALSE);

		/* lock map */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/* lookup */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
		    &ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(FALSE);
		}

		/* reduce size if necessary */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?    replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			uvmfault_unlockmaps(ufi, write_lock);
			ufi->map = tmpmap;
			continue;
		}

		/* got it! */
		ufi->mapv = ufi->map->timestamp;
		return(TRUE);
	}
	/*NOTREACHED*/
}

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if a success (TRUE) maps will be locked after call.
 */
boolean_t
uvmfault_relock(struct uvm_faultinfo *ufi)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */
	if (ufi == NULL) {
		return TRUE;
	}

	uvmexp.fltrelck++;

	/*
	 * relock map.   fail if version mismatch (in which case nothing
	 * gets locked).
	 */
	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}