os_dep.c 130 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347
  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
  5. * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
  6. *
  7. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  8. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  9. *
  10. * Permission is hereby granted to use or copy this program
  11. * for any purpose, provided the above notices are retained on all copies.
  12. * Permission to modify the code and to distribute modified code is granted,
  13. * provided the above notices are retained, and a notice that the code was
  14. * modified is included with the above copyright notice.
  15. */
  16. # include "private/gc_priv.h"
  17. # if defined(LINUX) && !defined(POWERPC)
  18. # include <linux/version.h>
  19. # if (LINUX_VERSION_CODE <= 0x10400)
  20. /* Ugly hack to get struct sigcontext_struct definition. Required */
  21. /* for some early 1.3.X releases. Will hopefully go away soon. */
  22. /* in some later Linux releases, asm/sigcontext.h may have to */
  23. /* be included instead. */
  24. # define __KERNEL__
  25. # include <asm/signal.h>
  26. # undef __KERNEL__
  27. # else
  28. /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
  29. /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
  30. /* prototypes, so we have to include the top-level sigcontext.h to */
  31. /* make sure the former gets defined to be the latter if appropriate. */
  32. # include <features.h>
  33. # if 2 <= __GLIBC__
  34. # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
  35. /* glibc 2.1 no longer has sigcontext.h. But signal.h */
  36. /* has the right declaration for glibc 2.1. */
  37. # include <sigcontext.h>
  38. # endif /* 0 == __GLIBC_MINOR__ */
  39. # else /* not 2 <= __GLIBC__ */
  40. /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
  41. /* one. Check LINUX_VERSION_CODE to see which we should reference. */
  42. # include <asm/sigcontext.h>
  43. # endif /* 2 <= __GLIBC__ */
  44. # endif
  45. # endif
  46. # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
  47. && !defined(MSWINCE)
  48. # include <sys/types.h>
  49. # if !defined(MSWIN32) && !defined(SUNOS4)
  50. # include <unistd.h>
  51. # endif
  52. # endif
  53. # include <stdio.h>
  54. # if defined(MSWINCE)
  55. # define SIGSEGV 0 /* value is irrelevant */
  56. # else
  57. # include <signal.h>
  58. # endif
  59. #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
  60. # include <ctype.h>
  61. #endif
  62. /* Blatantly OS dependent routines, except for those that are related */
  63. /* to dynamic loading. */
  64. # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
  65. # define NEED_FIND_LIMIT
  66. # endif
  67. # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
  68. # define NEED_FIND_LIMIT
  69. # endif
  70. # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
  71. # define NEED_FIND_LIMIT
  72. # endif
  73. # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
  74. || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
  75. # define NEED_FIND_LIMIT
  76. # endif
  77. #if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) || defined(__powerpc__))
  78. # include <machine/trap.h>
  79. # if !defined(PCR)
  80. # define NEED_FIND_LIMIT
  81. # endif
  82. #endif
  83. #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
  84. && !defined(NEED_FIND_LIMIT)
  85. /* Used by GC_init_netbsd_elf() below. */
  86. # define NEED_FIND_LIMIT
  87. #endif
  88. #ifdef NEED_FIND_LIMIT
  89. # include <setjmp.h>
  90. #endif
  91. #ifdef AMIGA
  92. # define GC_AMIGA_DEF
  93. # include "AmigaOS.c"
  94. # undef GC_AMIGA_DEF
  95. #endif
  96. #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
  97. # define WIN32_LEAN_AND_MEAN
  98. # define NOSERVICE
  99. # include <windows.h>
  100. #endif
  101. #ifdef MACOS
  102. # include <Processes.h>
  103. #endif
  104. #ifdef IRIX5
  105. # include <sys/uio.h>
  106. # include <malloc.h> /* for locking */
  107. #endif
  108. #if defined(USE_MMAP) || defined(USE_MUNMAP)
  109. # ifndef USE_MMAP
  110. --> USE_MUNMAP requires USE_MMAP
  111. # endif
  112. # include <sys/types.h>
  113. # include <sys/mman.h>
  114. # include <sys/stat.h>
  115. # include <errno.h>
  116. #endif
  117. #ifdef UNIX_LIKE
  118. # include <fcntl.h>
  119. # if defined(SUNOS5SIGS) && !defined(FREEBSD)
  120. # include <sys/siginfo.h>
  121. # endif
  122. /* Define SETJMP and friends to be the version that restores */
  123. /* the signal mask. */
  124. # define SETJMP(env) sigsetjmp(env, 1)
  125. # define LONGJMP(env, val) siglongjmp(env, val)
  126. # define JMP_BUF sigjmp_buf
  127. #else
  128. # define SETJMP(env) setjmp(env)
  129. # define LONGJMP(env, val) longjmp(env, val)
  130. # define JMP_BUF jmp_buf
  131. #endif
  132. #ifdef DARWIN
  133. /* for get_etext and friends */
  134. #include <mach-o/getsect.h>
  135. #endif
  136. #ifdef DJGPP
  137. /* Apparently necessary for djgpp 2.01. May cause problems with */
  138. /* other versions. */
  139. typedef long unsigned int caddr_t;
  140. #endif
  141. #ifdef PCR
  142. # include "il/PCR_IL.h"
  143. # include "th/PCR_ThCtl.h"
  144. # include "mm/PCR_MM.h"
  145. #endif
  146. #if !defined(NO_EXECUTE_PERMISSION)
  147. # define OPT_PROT_EXEC PROT_EXEC
  148. #else
  149. # define OPT_PROT_EXEC 0
  150. #endif
  151. #if defined(LINUX) && \
  152. (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))
  153. /* We need to parse /proc/self/maps, either to find dynamic libraries, */
  154. /* and/or to find the register backing store base (IA64). Do it once */
  155. /* here. */
  156. #define READ read
  157. /* Repeatedly perform a read call until the buffer is filled or */
  158. /* we encounter EOF. */
  159. ssize_t GC_repeat_read(int fd, char *buf, size_t count)
  160. {
  161. ssize_t num_read = 0;
  162. ssize_t result;
  163. while (num_read < count) {
  164. result = READ(fd, buf + num_read, count - num_read);
  165. if (result < 0) return result;
  166. if (result == 0) break;
  167. num_read += result;
  168. }
  169. return num_read;
  170. }
  171. /*
  172. * Apply fn to a buffer containing the contents of /proc/self/maps.
  173. * Return the result of fn or, if we failed, 0.
  174. * We currently do nothing to /proc/self/maps other than simply read
  175. * it. This code could be simplified if we could determine its size
  176. * ahead of time.
  177. */
  178. word GC_apply_to_maps(word (*fn)(char *))
  179. {
  180. int f;
  181. int result;
  182. size_t maps_size = 4000; /* Initial guess. */
  183. static char init_buf[1];
  184. static char *maps_buf = init_buf;
  185. static size_t maps_buf_sz = 1;
  186. /* Read /proc/self/maps, growing maps_buf as necessary. */
  187. /* Note that we may not allocate conventionally, and */
  188. /* thus can't use stdio. */
  189. do {
  190. if (maps_size >= maps_buf_sz) {
  191. /* Grow only by powers of 2, since we leak "too small" buffers. */
  192. while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
  193. maps_buf = GC_scratch_alloc(maps_buf_sz);
  194. if (maps_buf == 0) return 0;
  195. }
  196. f = open("/proc/self/maps", O_RDONLY);
  197. if (-1 == f) return 0;
  198. maps_size = 0;
  199. do {
  200. result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
  201. if (result <= 0) return 0;
  202. maps_size += result;
  203. } while (result == maps_buf_sz-1);
  204. close(f);
  205. } while (maps_size >= maps_buf_sz);
  206. maps_buf[maps_size] = '\0';
  207. /* Apply fn to result. */
  208. return fn(maps_buf);
  209. }
  210. #endif /* Need GC_apply_to_maps */
  211. #if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))
  212. //
  213. // GC_parse_map_entry parses an entry from /proc/self/maps so we can
  214. // locate all writable data segments that belong to shared libraries.
  215. // The format of one of these entries and the fields we care about
  216. // is as follows:
  217. // XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
  218. // ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
  219. // start end prot maj_dev
  220. //
  221. // Note that since about August 2003 kernels, the columns no longer have
  222. // fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets
  223. // anywhere, which is safer anyway.
  224. //
  225. /*
  226. * Assign various fields of the first line in buf_ptr to *start, *end,
  227. * *prot_buf and *maj_dev. Only *prot_buf may be set for unwritable maps.
  228. */
  229. char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
  230. char *prot_buf, unsigned int *maj_dev)
  231. {
  232. char *start_start, *end_start, *prot_start, *maj_dev_start;
  233. char *p;
  234. char *endp;
  235. if (buf_ptr == NULL || *buf_ptr == '\0') {
  236. return NULL;
  237. }
  238. p = buf_ptr;
  239. while (isspace(*p)) ++p;
  240. start_start = p;
  241. GC_ASSERT(isxdigit(*start_start));
  242. *start = strtoul(start_start, &endp, 16); p = endp;
  243. GC_ASSERT(*p=='-');
  244. ++p;
  245. end_start = p;
  246. GC_ASSERT(isxdigit(*end_start));
  247. *end = strtoul(end_start, &endp, 16); p = endp;
  248. GC_ASSERT(isspace(*p));
  249. while (isspace(*p)) ++p;
  250. prot_start = p;
  251. GC_ASSERT(*prot_start == 'r' || *prot_start == '-');
  252. memcpy(prot_buf, prot_start, 4);
  253. prot_buf[4] = '\0';
  254. if (prot_buf[1] == 'w') {/* we can skip the rest if it's not writable. */
  255. /* Skip past protection field to offset field */
  256. while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
  257. GC_ASSERT(isxdigit(*p));
  258. /* Skip past offset field, which we ignore */
  259. while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
  260. maj_dev_start = p;
  261. GC_ASSERT(isxdigit(*maj_dev_start));
  262. *maj_dev = strtoul(maj_dev_start, NULL, 16);
  263. }
  264. while (*p && *p++ != '\n');
  265. return p;
  266. }
  267. #endif /* Need to parse /proc/self/maps. */
  268. #if defined(SEARCH_FOR_DATA_START)
  269. /* The I386 case can be handled without a search. The Alpha case */
  270. /* used to be handled differently as well, but the rules changed */
  271. /* for recent Linux versions. This seems to be the easiest way to */
  272. /* cover all versions. */
  273. # if defined(LINUX) || defined(HURD)
  274. /* Some Linux distributions arrange to define __data_start. Some */
  275. /* define data_start as a weak symbol. The latter is technically */
  276. /* broken, since the user program may define data_start, in which */
  277. /* case we lose. Nonetheless, we try both, preferring __data_start. */
  278. /* We assume gcc-compatible pragmas. */
  279. # pragma weak __data_start
  280. extern int __data_start[];
  281. # pragma weak data_start
  282. extern int data_start[];
  283. # endif /* LINUX */
  284. extern int _end[];
  285. ptr_t GC_data_start;
  286. void GC_init_linux_data_start()
  287. {
  288. extern ptr_t GC_find_limit();
  289. # if defined(LINUX) || defined(HURD)
  290. /* Try the easy approaches first: */
  291. if ((ptr_t)__data_start != 0) {
  292. GC_data_start = (ptr_t)(__data_start);
  293. return;
  294. }
  295. if ((ptr_t)data_start != 0) {
  296. GC_data_start = (ptr_t)(data_start);
  297. return;
  298. }
  299. # endif /* LINUX */
  300. GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
  301. }
  302. #endif
  303. # ifdef ECOS
  304. # ifndef ECOS_GC_MEMORY_SIZE
  305. # define ECOS_GC_MEMORY_SIZE (448 * 1024)
  306. # endif /* ECOS_GC_MEMORY_SIZE */
  307. // setjmp() function, as described in ANSI para 7.6.1.1
  308. #undef SETJMP
  309. #define SETJMP( __env__ ) hal_setjmp( __env__ )
  310. // FIXME: This is a simple way of allocating memory which is
  311. // compatible with ECOS early releases. Later releases use a more
  312. // sophisticated means of allocating memory than this simple static
  313. // allocator, but this method is at least bound to work.
  314. static char memory[ECOS_GC_MEMORY_SIZE];
  315. static char *brk = memory;
  316. static void *tiny_sbrk(ptrdiff_t increment)
  317. {
  318. void *p = brk;
  319. brk += increment;
  320. if (brk > memory + sizeof memory)
  321. {
  322. brk -= increment;
  323. return NULL;
  324. }
  325. return p;
  326. }
  327. #define sbrk tiny_sbrk
  328. # endif /* ECOS */
  329. #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
  330. ptr_t GC_data_start;
  331. void GC_init_netbsd_elf()
  332. {
  333. extern ptr_t GC_find_limit();
  334. extern char **environ;
  335. /* This may need to be environ, without the underscore, for */
  336. /* some versions. */
  337. GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
  338. }
  339. #endif
  340. # ifdef OS2
  341. # include <stddef.h>
  342. # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
  343. struct exe_hdr {
  344. unsigned short magic_number;
  345. unsigned short padding[29];
  346. long new_exe_offset;
  347. };
  348. #define E_MAGIC(x) (x).magic_number
  349. #define EMAGIC 0x5A4D
  350. #define E_LFANEW(x) (x).new_exe_offset
  351. struct e32_exe {
  352. unsigned char magic_number[2];
  353. unsigned char byte_order;
  354. unsigned char word_order;
  355. unsigned long exe_format_level;
  356. unsigned short cpu;
  357. unsigned short os;
  358. unsigned long padding1[13];
  359. unsigned long object_table_offset;
  360. unsigned long object_count;
  361. unsigned long padding2[31];
  362. };
  363. #define E32_MAGIC1(x) (x).magic_number[0]
  364. #define E32MAGIC1 'L'
  365. #define E32_MAGIC2(x) (x).magic_number[1]
  366. #define E32MAGIC2 'X'
  367. #define E32_BORDER(x) (x).byte_order
  368. #define E32LEBO 0
  369. #define E32_WORDER(x) (x).word_order
  370. #define E32LEWO 0
  371. #define E32_CPU(x) (x).cpu
  372. #define E32CPU286 1
  373. #define E32_OBJTAB(x) (x).object_table_offset
  374. #define E32_OBJCNT(x) (x).object_count
  375. struct o32_obj {
  376. unsigned long size;
  377. unsigned long base;
  378. unsigned long flags;
  379. unsigned long pagemap;
  380. unsigned long mapsize;
  381. unsigned long reserved;
  382. };
  383. #define O32_FLAGS(x) (x).flags
  384. #define OBJREAD 0x0001L
  385. #define OBJWRITE 0x0002L
  386. #define OBJINVALID 0x0080L
  387. #define O32_SIZE(x) (x).size
  388. #define O32_BASE(x) (x).base
  389. # else /* IBM's compiler */
  390. /* A kludge to get around what appears to be a header file bug */
  391. # ifndef WORD
  392. # define WORD unsigned short
  393. # endif
  394. # ifndef DWORD
  395. # define DWORD unsigned long
  396. # endif
  397. # define EXE386 1
  398. # include <newexe.h>
  399. # include <exe386.h>
  400. # endif /* __IBMC__ */
  401. # define INCL_DOSEXCEPTIONS
  402. # define INCL_DOSPROCESS
  403. # define INCL_DOSERRORS
  404. # define INCL_DOSMODULEMGR
  405. # define INCL_DOSMEMMGR
  406. # include <os2.h>
  407. /* Disable and enable signals during nontrivial allocations */
  408. void GC_disable_signals(void)
  409. {
  410. ULONG nest;
  411. DosEnterMustComplete(&nest);
  412. if (nest != 1) ABORT("nested GC_disable_signals");
  413. }
  414. void GC_enable_signals(void)
  415. {
  416. ULONG nest;
  417. DosExitMustComplete(&nest);
  418. if (nest != 0) ABORT("GC_enable_signals");
  419. }
  420. # else
  421. # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
  422. && !defined(MSWINCE) \
  423. && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
  424. && !defined(NOSYS) && !defined(ECOS)
  425. # if defined(SIG_BLOCK)
  426. /* Use POSIX/SYSV interface */
  427. # define SIGSET_T sigset_t
  428. # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
  429. # define SIG_FILL(set) sigfillset(&set)
  430. # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
  431. # elif defined(sigmask) && !defined(UTS4) && !defined(HURD)
  432. /* Use the traditional BSD interface */
  433. # define SIGSET_T int
  434. # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
  435. # define SIG_FILL(set) (set) = 0x7fffffff
  436. /* Setting the leading bit appears to provoke a bug in some */
  437. /* longjmp implementations. Most systems appear not to have */
  438. /* a signal 32. */
  439. # define SIGSETMASK(old, new) (old) = sigsetmask(new)
  440. # else
  441. # error undetectable signal API
  442. # endif
/* State shared by GC_disable_signals/GC_enable_signals.            */
static GC_bool mask_initialized = FALSE;
                        /* TRUE once new_mask has been computed.    */
static SIGSET_T new_mask;
                        /* All blockable signals except fault ones. */
static SIGSET_T old_mask;
                        /* Mask in effect before the last disable.  */
static SIGSET_T dummy;
                        /* Scratch destination for SIGSETMASK.      */
#if defined(PRINTSTATS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
                        /* Debug-only disable nesting counter.      */
#endif
/* Block (almost) all signals, saving the previous mask in old_mask */
/* so the matching GC_enable_signals call can restore it.  Fault    */
/* signals (SIGSEGV, SIGILL, SIGQUIT, plus SIGBUS/SIGIOT/SIGEMT/    */
/* SIGTRAP where the platform defines them) are deliberately left   */
/* unblocked.  The exclusion mask is computed once and cached.      */
void GC_disable_signals()
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);
        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        /* Debug build: nested disables indicate a logic error.     */
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask,new_mask);
}
/* Restore the signal mask saved by the matching                    */
/* GC_disable_signals call.                                         */
void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
        /* Debug build: every enable must match exactly one disable. */
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy,old_mask);
}
  486. # endif /* !PCR */
  487. # endif /*!OS/2 */
  488. /* Ivan Demakov: simplest way (to me) */
  489. #if defined (DOS4GW)
/* DOS4GW: no signal blocking is needed around allocation, so these */
/* are intentionally empty.                                         */
void GC_disable_signals() { }
void GC_enable_signals() { }
  492. #endif
  493. /* Find the page size */
  494. word GC_page_size;
  495. # if defined(MSWIN32) || defined(MSWINCE) || defined (CYGWIN32)
/* Cache the VM page size: query the Windows system info block and  */
/* record its dwPageSize in GC_page_size.                           */
void GC_setpagesize()
{
    GetSystemInfo(&GC_sysinfo);
    GC_page_size = GC_sysinfo.dwPageSize;
}
  501. # else
  502. # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
  503. || defined(USE_MUNMAP)
/* Cache the real VM page size; needed when we manipulate page      */
/* protections or mappings (MPROTECT_VDB/PROC_VDB/USE_MMAP/MUNMAP). */
void GC_setpagesize()
{
    GC_page_size = GETPAGESIZE();
}
  508. # else
  509. /* It's acceptable to fake it. */
/* No page-level operations are performed on this configuration,    */
/* so it's acceptable to fake the page size as the heap block size. */
void GC_setpagesize()
{
    GC_page_size = HBLKSIZE;
}
  514. # endif
  515. # endif
  516. /*
  517. * Find the base of the stack.
  518. * Used only in single-threaded environment.
  519. * With threads, GC_mark_roots needs to know how to do this.
  520. * Called with allocator lock held.
  521. */
  522. # if defined(MSWIN32) || defined(MSWINCE)
  523. # define is_writable(prot) ((prot) == PAGE_READWRITE \
  524. || (prot) == PAGE_WRITECOPY \
  525. || (prot) == PAGE_EXECUTE_READWRITE \
  526. || (prot) == PAGE_EXECUTE_WRITECOPY)
  527. /* Return the number of bytes that are writable starting at p. */
  528. /* The pointer p is assumed to be page aligned. */
  529. /* If base is not 0, *base becomes the beginning of the */
  530. /* allocation region containing p. */
  531. word GC_get_writable_length(ptr_t p, ptr_t *base)
  532. {
  533. MEMORY_BASIC_INFORMATION buf;
  534. word result;
  535. word protect;
  536. result = VirtualQuery(p, &buf, sizeof(buf));
  537. if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
  538. if (base != 0) *base = (ptr_t)(buf.AllocationBase);
  539. protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
  540. if (!is_writable(protect)) {
  541. return(0);
  542. }
  543. if (buf.State != MEM_COMMIT) return(0);
  544. return(buf.RegionSize);
  545. }
  546. ptr_t GC_get_stack_base()
  547. {
  548. int dummy;
  549. ptr_t sp = (ptr_t)(&dummy);
  550. ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
  551. word size = GC_get_writable_length(trunc_sp, 0);
  552. return(trunc_sp + size);
  553. }
  554. # endif /* MS Windows */
  555. # ifdef BEOS
  556. # include <kernel/OS.h>
/* BeOS: the kernel reports the stack end directly in thread_info.  */
ptr_t GC_get_stack_base(){
    thread_info th;
    get_thread_info(find_thread(NULL),&th);
    return th.stack_end;
}
  562. # endif /* BEOS */
  563. # ifdef OS2
/* OS/2: read the stack limit of the current thread from its        */
/* Thread Information Block.                                        */
ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}
  574. # endif /* OS2 */
  575. # ifdef AMIGA
  576. # define GC_AMIGA_SB
  577. # include "AmigaOS.c"
  578. # undef GC_AMIGA_SB
  579. # endif /* AMIGA */
  580. # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
/* Signal handler function type, with and without ANSI prototypes.  */
# ifdef __STDC__
    typedef void (*handler)(int);
# else
    typedef void (*handler)();
# endif
/* Previously installed SIGSEGV (and, where applicable, SIGBUS)     */
/* dispositions, saved by GC_set_and_save_fault_handler and         */
/* restored by GC_reset_fault_handler.  sigaction-based platforms   */
/* save full struct sigaction; others save plain handler pointers.  */
# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
    || defined(HURD) || defined(NETBSD)
    static struct sigaction old_segv_act;
#   if defined(IRIX5) || defined(HPUX) \
    || defined(HURD) || defined(NETBSD)
        static struct sigaction old_bus_act;
#   endif
# else
    static handler old_segv_handler, old_bus_handler;
# endif
/* Install h as the SIGSEGV (and, where needed, SIGBUS) handler,    */
/* saving the previous dispositions so that                         */
/* GC_reset_fault_handler can restore them.                         */
# ifdef __STDC__
    void GC_set_and_save_fault_handler(handler h)
# else
    void GC_set_and_save_fault_handler(h)
    handler h;
# endif
{
#   if defined(SUNOS5SIGS) || defined(IRIX5) \
       || defined(OSF1) || defined(HURD) || defined(NETBSD)
        struct sigaction act;
        act.sa_handler = h;
#       if 0 /* Was necessary for Solaris 2.3 and very temporary    */
             /* NetBSD bugs.                                        */
            act.sa_flags = SA_RESTART | SA_NODEFER;
#       else
            act.sa_flags = SA_RESTART;
#       endif
        (void) sigemptyset(&act.sa_mask);
#       ifdef GC_IRIX_THREADS
            /* Older versions have a bug related to retrieving and  */
            /* and setting a handler at the same time.              */
            (void) sigaction(SIGSEGV, 0, &old_segv_act);
            (void) sigaction(SIGSEGV, &act, 0);
            (void) sigaction(SIGBUS, 0, &old_bus_act);
            (void) sigaction(SIGBUS, &act, 0);
#       else
            (void) sigaction(SIGSEGV, &act, &old_segv_act);
#           if defined(IRIX5) \
               || defined(HPUX) || defined(HURD) || defined(NETBSD)
                /* Under Irix 5.x or HP/UX, we may get SIGBUS.      */
                /* Pthreads doesn't exist under Irix 5.x, so we     */
                /* don't have to worry in the threads case.         */
                (void) sigaction(SIGBUS, &act, &old_bus_act);
#           endif
#       endif /* GC_IRIX_THREADS */
#   else
        /* Fall back to the old signal() interface.                 */
        old_segv_handler = signal(SIGSEGV, h);
#       ifdef SIGBUS
            old_bus_handler = signal(SIGBUS, h);
#       endif
#   endif
}
  638. # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
  639. # ifdef NEED_FIND_LIMIT
  640. /* Some tools to implement HEURISTIC2 */
  641. # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
/* Jump buffer used to escape from a deliberately induced memory    */
/* fault during address-space probing.                              */
/* static */ JMP_BUF GC_jmp_buf;

/* Fault handler installed while probing: unwind to the SETJMP in   */
/* the probing code (e.g. GC_find_limit).                           */
/*ARGSUSED*/
void GC_fault_handler(sig)
int sig;
{
    LONGJMP(GC_jmp_buf, 1);
}
/* Arrange for memory faults to longjmp to GC_jmp_buf, saving the   */
/* old handlers for GC_reset_fault_handler.                         */
void GC_setup_temporary_fault_handler()
{
    GC_set_and_save_fault_handler(GC_fault_handler);
}
/* Undo GC_setup_temporary_fault_handler: reinstall the saved       */
/* SIGSEGV (and SIGBUS, where applicable) dispositions.             */
void GC_reset_fault_handler()
{
#   if defined(SUNOS5SIGS) || defined(IRIX5) \
       || defined(OSF1) || defined(HURD) || defined(NETBSD)
        (void) sigaction(SIGSEGV, &old_segv_act, 0);
#       if defined(IRIX5) \
           || defined(HPUX) || defined(HURD) || defined(NETBSD)
            (void) sigaction(SIGBUS, &old_bus_act, 0);
#       endif
#   else
        (void) signal(SIGSEGV, old_segv_handler);
#       ifdef SIGBUS
            (void) signal(SIGBUS, old_bus_handler);
#       endif
#   endif
}
/* Return the first nonaddressible location > p (up) or             */
/* the smallest location q s.t. [q,p) is addressable (!up).         */
/* We assume that p (up) or p-1 (!up) is addressable.               */
/* Works by probing successive MIN_PAGE_SIZE-aligned addresses      */
/* until one of them faults; the temporary fault handler longjmps   */
/* back here.                                                       */
ptr_t GC_find_limit(p, up)
ptr_t p;
GC_bool up;
{
    static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be */
                /* preserved across the longjmp.  Can safely be      */
                /* static since it's only called once, with the      */
                /* allocation lock held.                             */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        result = (ptr_t)(((word)(p))
                         & ~(MIN_PAGE_SIZE-1));
        for (;;) {
            if (up) {
                result += MIN_PAGE_SIZE;
            } else {
                result -= MIN_PAGE_SIZE;
            }
            /* Touch the page; faults longjmp out of the loop.      */
            GC_noop1((word)(*result));
        }
    }
    GC_reset_fault_handler();
    if (!up) {
        /* We overshot by one probe; step back inside the region.   */
        result += MIN_PAGE_SIZE;
    }
    return(result);
}
  700. # endif
  701. #if defined(ECOS) || defined(NOSYS)
/* eCos/no-OS targets: the stack bottom is configured statically.   */
ptr_t GC_get_stack_base()
{
    return STACKBOTTOM;
}
  706. #endif
  707. #ifdef HPUX_STACKBOTTOM
  708. #include <sys/param.h>
  709. #include <sys/pstat.h>
  710. ptr_t GC_get_register_stack_base(void)
  711. {
  712. struct pst_vm_status vm_status;
  713. int i = 0;
  714. while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
  715. if (vm_status.pst_type == PS_RSESTACK) {
  716. return (ptr_t) vm_status.pst_vaddr;
  717. }
  718. }
  719. /* old way to get the register stackbottom */
  720. return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
  721. & ~(BACKING_STORE_ALIGNMENT - 1));
  722. }
  723. #endif /* HPUX_STACK_BOTTOM */
  724. #ifdef LINUX_STACKBOTTOM
  725. #include <sys/types.h>
  726. #include <sys/stat.h>
  727. # define STAT_SKIP 27 /* Number of fields preceding startstack */
  728. /* field in /proc/self/stat */
  729. #ifdef USE_LIBC_PRIVATES
  730. # pragma weak __libc_stack_end
  731. extern ptr_t __libc_stack_end;
  732. #endif
  733. # ifdef IA64
  734. /* Try to read the backing store base from /proc/self/maps. */
  735. /* We look for the writable mapping with a 0 major device, */
  736. /* which is as close to our frame as possible, but below it.*/
  737. static word backing_store_base_from_maps(char *maps)
  738. {
  739. char prot_buf[5];
  740. char *buf_ptr = maps;
  741. word start, end;
  742. unsigned int maj_dev;
  743. word current_best = 0;
  744. word dummy;
  745. for (;;) {
  746. buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
  747. if (buf_ptr == NULL) return current_best;
  748. if (prot_buf[1] == 'w' && maj_dev == 0) {
  749. if (end < (word)(&dummy) && start > current_best) current_best = start;
  750. }
  751. }
  752. return current_best;
  753. }
/* Run backing_store_base_from_maps over the contents of            */
/* /proc/self/maps.                                                 */
static word backing_store_base_from_proc(void)
{
    return GC_apply_to_maps(backing_store_base_from_maps);
}
  758. # ifdef USE_LIBC_PRIVATES
  759. # pragma weak __libc_ia64_register_backing_store_base
  760. extern ptr_t __libc_ia64_register_backing_store_base;
  761. # endif
  762. ptr_t GC_get_register_stack_base(void)
  763. {
  764. # ifdef USE_LIBC_PRIVATES
  765. if (0 != &__libc_ia64_register_backing_store_base
  766. && 0 != __libc_ia64_register_backing_store_base) {
  767. /* Glibc 2.2.4 has a bug such that for dynamically linked */
  768. /* executables __libc_ia64_register_backing_store_base is */
  769. /* defined but uninitialized during constructor calls. */
  770. /* Hence we check for both nonzero address and value. */
  771. return __libc_ia64_register_backing_store_base;
  772. }
  773. # endif
  774. word result = backing_store_base_from_proc();
  775. if (0 == result) {
  776. /* Use dumb heuristics. Works only for default configuration. */
  777. result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
  778. result += BACKING_STORE_ALIGNMENT - 1;
  779. result &= ~(BACKING_STORE_ALIGNMENT - 1);
  780. /* Verify that it's at least readable. If not, we goofed. */
  781. GC_noop1(*(word *)result);
  782. }
  783. return (ptr_t)result;
  784. }
  785. # endif
/* Return the stack bottom of the primordial thread, either from    */
/* glibc's __libc_stack_end or from the startstack field of         */
/* /proc/self/stat.                                                 */
ptr_t GC_linux_stack_base(void)
{
    /* We read the stack base value from /proc/self/stat.  We do this */
    /* using direct I/O system calls in order to avoid calling malloc */
    /* in case REDIRECT_MALLOC is defined.                            */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
          /* Should probably call the real read, if read is wrapped. */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 0;
    size_t i, buf_offset = 0;

    /* First try the easy way.  This should work for glibc 2.2      */
    /* This fails in a prelinked ("prelink" command) executable     */
    /* since the correct value of __libc_stack_end never            */
    /* becomes visible to us.  The second test works around         */
    /* this.                                                        */
#   ifdef USE_LIBC_PRIVATES
      if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
#       ifdef IA64
          /* Some versions of glibc set the address 16 bytes too    */
          /* low while the initialization code is running.          */
          if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
            return __libc_stack_end + 0x10;
          } /* Otherwise it's not safe to add 16 bytes and we fall  */
            /* back to using /proc.                                 */
#       else
#       ifdef SPARC
          /* Older versions of glibc for 64-bit Sparc do not set
           * this variable correctly, it gets set to either zero
           * or one.
           */
          if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
            return __libc_stack_end;
#       else
          return __libc_stack_end;
#       endif
#       endif
      }
#   endif
    f = open("/proc/self/stat", O_RDONLY);
    if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
        ABORT("Couldn't read /proc/self/stat");
    }
    c = stat_buf[buf_offset++];
    /* Skip the required number of fields.  This number is hopefully */
    /* constant across all Linux implementations.                    */
    /* NOTE(review): buf_offset is not bounds-checked against the    */
    /* number of bytes actually read; presumably /proc/self/stat     */
    /* always fits well inside STAT_BUF_SIZE -- confirm.             */
    for (i = 0; i < STAT_SKIP; ++i) {
        while (isspace(c)) c = stat_buf[buf_offset++];
        while (!isspace(c)) c = stat_buf[buf_offset++];
    }
    /* Parse the startstack field as an unsigned decimal number.    */
    while (isspace(c)) c = stat_buf[buf_offset++];
    while (isdigit(c)) {
        result *= 10;
        result += c - '0';
        c = stat_buf[buf_offset++];
    }
    close(f);
    if (result < 0x10000000) ABORT("Absurd stack bottom value");
    return (ptr_t)result;
}
  848. #endif /* LINUX_STACKBOTTOM */
  849. #ifdef FREEBSD_STACKBOTTOM
  850. /* This uses an undocumented sysctl call, but at least one expert */
  851. /* believes it will stay. */
  852. #include <unistd.h>
  853. #include <sys/types.h>
  854. #include <sys/sysctl.h>
  855. ptr_t GC_freebsd_stack_base(void)
  856. {
  857. int nm[2] = {CTL_KERN, KERN_USRSTACK};
  858. ptr_t base;
  859. size_t len = sizeof(ptr_t);
  860. int r = sysctl(nm, 2, &base, &len, NULL, 0);
  861. if (r) ABORT("Error getting stack base");
  862. return base;
  863. }
  864. #endif /* FREEBSD_STACKBOTTOM */
  865. #ifdef SOLARIS_STACKBOTTOM
  866. # include <thread.h>
  867. # include <signal.h>
  868. # include <pthread.h>
  869. /* These variables are used to cache ss_sp value for the primordial */
  870. /* thread (it's better not to call thr_stksegment() twice for this */
  871. /* thread - see JDK bug #4352906). */
  872. static pthread_t stackbase_main_self = 0;
  873. /* 0 means stackbase_main_ss_sp value is unset. */
  874. static void *stackbase_main_ss_sp = NULL;
/* Return the stack bottom (ss_sp) of the calling thread via        */
/* thr_stksegment(), using the cached value for the primordial      */
/* thread to avoid calling thr_stksegment() twice for it.           */
ptr_t GC_solaris_stack_base(void)
{
    stack_t s;
    pthread_t self = pthread_self();
    if (self == stackbase_main_self)
      {
        /* If the client calls GC_get_stack_base() from the main thread */
        /* then just return the cached value.                           */
        GC_ASSERT(stackbase_main_ss_sp != NULL);
        return stackbase_main_ss_sp;
      }
    if (thr_stksegment(&s)) {
        /* According to the manual, the only failure error code returned */
        /* is EAGAIN meaning "the information is not available due to the */
        /* thread is not yet completely initialized or it is an internal */
        /* thread" - this shouldn't happen here.                         */
        ABORT("thr_stksegment failed");
    }
    /* s.ss_sp holds the pointer to the stack bottom. */
    GC_ASSERT((void *)&s HOTTER_THAN s.ss_sp);
    if (!stackbase_main_self)
      {
        /* Cache the stack base value for the primordial thread (this */
        /* is done during GC_init, so there is no race).              */
        stackbase_main_ss_sp = s.ss_sp;
        stackbase_main_self = self;
      }
    return s.ss_sp;
}
  904. #endif /* GC_SOLARIS_THREADS */
  905. #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
  906. && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)
/* Generic (non-Windows/OS2/BeOS/Amiga) stack base: use the         */
/* compile-time STACKBOTTOM if configured, else whichever of the    */
/* platform probes (HEURISTIC1/2, /proc, sysctl, thr_stksegment)    */
/* this build enables.                                              */
ptr_t GC_get_stack_base()
{
#   if defined(HEURISTIC1) || defined(HEURISTIC2) || \
       defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM) || \
       defined(SOLARIS_STACKBOTTOM)
        word dummy;     /* Its address approximates the current sp. */
        ptr_t result;
#   endif
#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
#   ifdef STACKBOTTOM
        return(STACKBOTTOM);
#   else
#       ifdef HEURISTIC1
            /* Round the current sp to a STACK_GRAN boundary.       */
#           ifdef STACK_GROWS_DOWN
                result = (ptr_t)((((word)(&dummy))
                                  + STACKBOTTOM_ALIGNMENT_M1)
                                 & ~STACKBOTTOM_ALIGNMENT_M1);
#           else
                result = (ptr_t)(((word)(&dummy))
                                 & ~STACKBOTTOM_ALIGNMENT_M1);
#           endif
#       endif /* HEURISTIC1 */
#       ifdef LINUX_STACKBOTTOM
            result = GC_linux_stack_base();
#       endif
#       ifdef FREEBSD_STACKBOTTOM
            result = GC_freebsd_stack_base();
#       endif
#       ifdef SOLARIS_STACKBOTTOM
            result = GC_solaris_stack_base();
#       endif
#       ifdef HEURISTIC2
            /* Probe for the end of the addressable stack region,   */
            /* optionally clamped to HEURISTIC2_LIMIT.              */
#           ifdef STACK_GROWS_DOWN
                result = GC_find_limit((ptr_t)(&dummy), TRUE);
#               ifdef HEURISTIC2_LIMIT
                    if (result > HEURISTIC2_LIMIT
                        && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
                        result = HEURISTIC2_LIMIT;
                    }
#               endif
#           else
                result = GC_find_limit((ptr_t)(&dummy), FALSE);
#               ifdef HEURISTIC2_LIMIT
                    if (result < HEURISTIC2_LIMIT
                        && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
                        result = HEURISTIC2_LIMIT;
                    }
#               endif
#           endif
#       endif /* HEURISTIC2 */
#       ifdef STACK_GROWS_DOWN
            /* 0 would confuse callers; substitute the top of the   */
            /* address space.                                       */
            if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#       endif
        return(result);
#   endif /* STACKBOTTOM */
}
  963. # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS, !NOSYS, !ECOS */
  964. /*
  965. * Register static data segment(s) as roots.
  966. * If more data segments are added later then they need to be registered
  967. * add that point (as we do with SunOS dynamic loading),
  968. * or GC_mark_roots needs to check for them (as we do with PCR).
  969. * Called with allocator lock held.
  970. */
  971. # ifdef OS2
  972. void GC_register_data_segments()
  973. {
  974. PTIB ptib;
  975. PPIB ppib;
  976. HMODULE module_handle;
  977. # define PBUFSIZ 512
  978. UCHAR path[PBUFSIZ];
  979. FILE * myexefile;
  980. struct exe_hdr hdrdos; /* MSDOS header. */
  981. struct e32_exe hdr386; /* Real header for my executable */
  982. struct o32_obj seg; /* Currrent segment */
  983. int nsegs;
  984. if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
  985. GC_err_printf0("DosGetInfoBlocks failed\n");
  986. ABORT("DosGetInfoBlocks failed\n");
  987. }
  988. module_handle = ppib -> pib_hmte;
  989. if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
  990. GC_err_printf0("DosQueryModuleName failed\n");
  991. ABORT("DosGetInfoBlocks failed\n");
  992. }
  993. myexefile = fopen(path, "rb");
  994. if (myexefile == 0) {
  995. GC_err_puts("Couldn't open executable ");
  996. GC_err_puts(path); GC_err_puts("\n");
  997. ABORT("Failed to open executable\n");
  998. }
  999. if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
  1000. GC_err_puts("Couldn't read MSDOS header from ");
  1001. GC_err_puts(path); GC_err_puts("\n");
  1002. ABORT("Couldn't read MSDOS header");
  1003. }
  1004. if (E_MAGIC(hdrdos) != EMAGIC) {
  1005. GC_err_puts("Executable has wrong DOS magic number: ");
  1006. GC_err_puts(path); GC_err_puts("\n");
  1007. ABORT("Bad DOS magic number");
  1008. }
  1009. if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
  1010. GC_err_puts("Seek to new header failed in ");
  1011. GC_err_puts(path); GC_err_puts("\n");
  1012. ABORT("Bad DOS magic number");
  1013. }
  1014. if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
  1015. GC_err_puts("Couldn't read MSDOS header from ");
  1016. GC_err_puts(path); GC_err_puts("\n");
  1017. ABORT("Couldn't read OS/2 header");
  1018. }
  1019. if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
  1020. GC_err_puts("Executable has wrong OS/2 magic number:");
  1021. GC_err_puts(path); GC_err_puts("\n");
  1022. ABORT("Bad OS/2 magic number");
  1023. }
  1024. if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
  1025. GC_err_puts("Executable %s has wrong byte order: ");
  1026. GC_err_puts(path); GC_err_puts("\n");
  1027. ABORT("Bad byte order");
  1028. }
  1029. if ( E32_CPU(hdr386) == E32CPU286) {
  1030. GC_err_puts("GC can't handle 80286 executables: ");
  1031. GC_err_puts(path); GC_err_puts("\n");
  1032. EXIT();
  1033. }
  1034. if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
  1035. SEEK_SET) != 0) {
  1036. GC_err_puts("Seek to object table failed: ");
  1037. GC_err_puts(path); GC_err_puts("\n");
  1038. ABORT("Seek to object table failed");
  1039. }
  1040. for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
  1041. int flags;
  1042. if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
  1043. GC_err_puts("Couldn't read obj table entry from ");
  1044. GC_err_puts(path); GC_err_puts("\n");
  1045. ABORT("Couldn't read obj table entry");
  1046. }
  1047. flags = O32_FLAGS(seg);
  1048. if (!(flags & OBJWRITE)) continue;
  1049. if (!(flags & OBJREAD)) continue;
  1050. if (flags & OBJINVALID) {
  1051. GC_err_printf0("Object with invalid pages?\n");
  1052. continue;
  1053. }
  1054. GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
  1055. }
  1056. }
  1057. # else /* !OS2 */
  1058. # if defined(MSWIN32) || defined(MSWINCE) || defined (CYGWIN32)
  1059. # ifdef CYGWIN32
  1060. # define GC_no_win32_dlls (FALSE)
  1061. # endif
  1062. # ifdef MSWIN32
  1063. /* Unfortunately, we have to handle win32s very differently from NT, */
  1064. /* Since VirtualQuery has very different semantics. In particular, */
  1065. /* under win32s a VirtualQuery call on an unmapped page returns an */
  1066. /* invalid result. Under NT, GC_register_data_segments is a noop and */
  1067. /* all real work is done by GC_register_dynamic_libraries. Under */
  1068. /* win32s, we cannot find the data segments associated with dll's. */
  1069. /* We register the main data segment here. */
  1070. GC_bool GC_no_win32_dlls = FALSE;
  1071. /* This used to be set for gcc, to avoid dealing with */
  1072. /* the structured exception handling issues. But we now have */
  1073. /* assembly code to do that right. */
  1074. GC_bool GC_wnt = FALSE;
  1075. /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */
  1076. void GC_init_win32()
  1077. {
  1078. /* if we're running under win32s, assume that no DLLs will be loaded */
  1079. DWORD v = GetVersion();
  1080. GC_wnt = !(v & 0x80000000);
  1081. GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
  1082. }
  1083. /* Return the smallest address a such that VirtualQuery */
  1084. /* returns correct results for all addresses between a and start. */
  1085. /* Assumes VirtualQuery returns correct information for start. */
/* Return the smallest address a such that VirtualQuery             */
/* returns correct results for all addresses between a and start.   */
/* Assumes VirtualQuery returns correct information for start.      */
/* Walks backwards one page below each region's AllocationBase      */
/* until VirtualQuery fails or the minimum application address is   */
/* reached.                                                         */
ptr_t GC_least_described_address(ptr_t start)
{
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
        q = (LPVOID)(p - GC_page_size);
        if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
        result = VirtualQuery(q, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0) break;
        p = (ptr_t)(buf.AllocationBase);
    }
    return(p);
}
  1104. # endif
  1105. # ifndef REDIRECT_MALLOC
  1106. /* We maintain a linked list of AllocationBase values that we know */
  1107. /* correspond to malloc heap sections. Currently this is only called */
  1108. /* during a GC. But there is some hope that for long running */
  1109. /* programs we will eventually see most heap sections. */
  1110. /* In the long run, it would be more reliable to occasionally walk */
  1111. /* the malloc heap with HeapWalk on the default heap. But that */
  1112. /* apparently works only for NT-based Windows. */
  1113. /* In the long run, a better data structure would also be nice ... */
/* Singly linked list of AllocationBase values known to correspond  */
/* to malloc heap sections; head is GC_malloc_heap_l.               */
struct GC_malloc_heap_list {
    void * allocation_base;             /* Section's AllocationBase */
    struct GC_malloc_heap_list *next;
} *GC_malloc_heap_l = 0;
  1118. /* Is p the base of one of the malloc heap sections we already know */
  1119. /* about? */
  1120. GC_bool GC_is_malloc_heap_base(ptr_t p)
  1121. {
  1122. struct GC_malloc_heap_list *q = GC_malloc_heap_l;
  1123. while (0 != q) {
  1124. if (q -> allocation_base == p) return TRUE;
  1125. q = q -> next;
  1126. }
  1127. return FALSE;
  1128. }
  1129. void *GC_get_allocation_base(void *p)
  1130. {
  1131. MEMORY_BASIC_INFORMATION buf;
  1132. DWORD result = VirtualQuery(p, &buf, sizeof(buf));
  1133. if (result != sizeof(buf)) {
  1134. ABORT("Weird VirtualQuery result");
  1135. }
  1136. return buf.AllocationBase;
  1137. }
  1138. size_t GC_max_root_size = 100000; /* Appr. largest root size. */
  1139. void GC_add_current_malloc_heap()
  1140. {
  1141. struct GC_malloc_heap_list *new_l =
  1142. malloc(sizeof(struct GC_malloc_heap_list));
  1143. void * candidate = GC_get_allocation_base(new_l);
  1144. if (new_l == 0) return;
  1145. if (GC_is_malloc_heap_base(candidate)) {
  1146. /* Try a little harder to find malloc heap. */
  1147. size_t req_size = 10000;
  1148. do {
  1149. void *p = malloc(req_size);
  1150. if (0 == p) { free(new_l); return; }
  1151. candidate = GC_get_allocation_base(p);
  1152. free(p);
  1153. req_size *= 2;
  1154. } while (GC_is_malloc_heap_base(candidate)
  1155. && req_size < GC_max_root_size/10 && req_size < 500000);
  1156. if (GC_is_malloc_heap_base(candidate)) {
  1157. free(new_l); return;
  1158. }
  1159. }
  1160. # ifdef CONDPRINT
  1161. if (GC_print_stats)
  1162. GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
  1163. candidate);
  1164. # endif
  1165. new_l -> allocation_base = candidate;
  1166. new_l -> next = GC_malloc_heap_l;
  1167. GC_malloc_heap_l = new_l;
  1168. }
  1169. # endif /* REDIRECT_MALLOC */
  1170. /* Is p the start of either the malloc heap, or of one of our */
  1171. /* heap sections? */
/* Is p the start of either the malloc heap, or of one of our       */
/* heap sections?  When malloc isn't redirected, this also          */
/* opportunistically refreshes the malloc-heap list once per GC.    */
GC_bool GC_is_heap_base (ptr_t p)
{
    unsigned i;
#   ifndef REDIRECT_MALLOC
      static word last_gc_no = -1;
      /* Re-probe the malloc heap at most once per collection.      */
      if (last_gc_no != GC_gc_no) {
        GC_add_current_malloc_heap();
        last_gc_no = GC_gc_no;
      }
      if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
      if (GC_is_malloc_heap_base(p)) return TRUE;
#   endif
    for (i = 0; i < GC_n_heap_bases; i++) {
        if (GC_heap_bases[i] == p) return TRUE;
    }
    return FALSE ;
}
  1189. # ifdef MSWIN32
/* Walk the address space upward from the least-described address   */
/* containing static_root, coalescing adjacent committed writable   */
/* regions and registering each maximal run as a root segment.      */
/* Only needed when running under win32s (GC_no_win32_dlls).        */
void GC_register_root_section(ptr_t static_root)
{
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

    if (!GC_no_win32_dlls) return;
    p = base = limit = GC_least_described_address(static_root);
    while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
                /* Contiguous with the current run; extend it.      */
                limit = new_limit;
            } else {
                /* Gap: flush the current run, start a new one.     */
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    if (base != limit) GC_add_roots_inner(base, limit, FALSE);
}
  1221. #endif
/* Windows: register the main data segment by probing around a      */
/* static variable's address.  A no-op on NT-derived systems,       */
/* where GC_register_dynamic_libraries does the real work.          */
void GC_register_data_segments()
{
#   ifdef MSWIN32
      static char dummy;
      GC_register_root_section((ptr_t)(&dummy));
#   endif
}
  1229. # else /* !OS2 && !Windows */
  1230. # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
  1231. || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
/* Return the start of the writable data segment on SVR4-like       */
/* systems, given the maximum page size and the address of etext.   */
/* Probes one page past the end of text by attempting a write; if   */
/* that faults, falls back to searching backwards from DATAEND.     */
ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isnt equivalent to just adding            */
    /* max_page_size to &etext if &etext is at a page boundary  */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.     */
        /* This is known to happen under Solaris 2.4 + gcc, which place */
        /* string constants in the text segment, but after etext.       */
        /* Use plan B.  Note that we now know there is a gap between    */
        /* text and data segments, so plan A bought us something.       */
        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
  1261. # endif
  1262. # if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) || defined(__powerpc__)) && !defined(PCR)
  1263. /* Its unclear whether this should be identical to the above, or */
  1264. /* whether it should apply to non-X86 architectures. */
  1265. /* For now we don't assume that there is always an empty page after */
  1266. /* etext. But in some cases there actually seems to be slightly more. */
  1267. /* This also deals with holes between read-only data and writable data. */
/* FreeBSD variant of GC_SysVGetDataStart: probes by READING each   */
/* page from the end of text up to DATAEND (no empty page after     */
/* etext is assumed); falls back to searching backwards from        */
/* DATAEND if a probe faults.                                       */
ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    VOLATILE word next_page = (text_end + (word)max_page_size - 1)
                              & ~((word)max_page_size - 1);
    VOLATILE ptr_t result = (ptr_t)text_end;

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try reading at the address.                          */
        /* This should happen before there is another thread.   */
        for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
            *(VOLATILE char *)next_page;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* As above, we go to plan B    */
        result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
  1292. # endif
  1293. #ifdef AMIGA
  1294. # define GC_AMIGA_DS
  1295. # include "AmigaOS.c"
  1296. # undef GC_AMIGA_DS
  1297. #else /* !OS2 && !Windows && !AMIGA */
/* Register the program's static data segment(s) as GC roots.         */
/* The applicable region is chosen per platform via the DATASTART/    */
/* DATAEND macros, or via platform-specific helpers on MacOS.         */
/* Dynamic library data is NOT handled here (see comment at bottom).  */
void GC_register_data_segments()
{
#   if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
        /* As of Solaris 2.3, the Solaris threads implementation      */
        /* allocates the data structure for the initial thread with   */
        /* sbrk at process startup.  It needs to be scanned, so that  */
        /* we don't lose some malloc allocated data structures        */
        /* hanging from it.  We're on thin ice here ...               */
        extern caddr_t sbrk();

        GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
#     else
        GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
#       if defined(DATASTART2)
          /* Some platforms have a second, discontiguous data region. */
          GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#   if defined(THINK_C)
        extern void* GC_MacGetDataStart(void);
        /* globals begin above stack and end at a5. */
        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                           (ptr_t)LMGetCurrentA5(), FALSE);
#   else
#     if defined(__MWERKS__)
#       if !__POWERPC__
          extern void* GC_MacGetDataStart(void);
          /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#         if __option(far_data)
          extern void* GC_MacGetDataEnd(void);
#         endif
          /* globals begin above stack and end at a5. */
          GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                             (ptr_t)LMGetCurrentA5(), FALSE);
          /* MATTHEW: Handle Far Globals */
#         if __option(far_data)
          /* Far globals follow the QD globals: */
          GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
                             (ptr_t)GC_MacGetDataEnd(), FALSE);
#         endif
#       else
          extern char __data_start__[], __data_end__[];
          GC_add_roots_inner((ptr_t)&__data_start__,
                             (ptr_t)&__data_end__, FALSE);
#       endif /* __POWERPC__ */
#     endif /* __MWERKS__ */
#   endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may */
    /* change.                                                         */
}
  1352. # endif /* ! AMIGA */
  1353. # endif /* ! MSWIN32 && ! MSWINCE*/
  1354. # endif /* ! OS2 */
  1355. /*
  1356. * Auxiliary routines for obtaining memory from OS.
  1357. */
  1358. # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
  1359. && !defined(MSWIN32) && !defined(MSWINCE) \
  1360. && !defined(MACOS) && !defined(DOS4GW)
  1361. # ifdef SUNOS4
  1362. extern caddr_t sbrk();
  1363. # endif
  1364. # ifdef __STDC__
  1365. # define SBRK_ARG_T ptrdiff_t
  1366. # else
  1367. # define SBRK_ARG_T int
  1368. # endif
  1369. # if 0 && defined(RS6000) /* We now use mmap */
  1370. /* The compiler seems to generate speculative reads one past the end of */
  1371. /* an allocated object. Hence we need to make sure that the page */
  1372. /* following the last heap page is also mapped. */
/* sbrk-based allocator for RS6000 (currently compiled out via #if 0; */
/* mmap is used instead).  Keeps one extra page mapped past the       */
/* returned region to tolerate speculative one-past-the-end reads.    */
/* Returns 0 on failure.                                              */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    caddr_t cur_brk = (caddr_t)sbrk(0);
    caddr_t result;
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    static caddr_t my_brk_val = 0;
        /* End of the region (incl. the extra page) we handed out     */
        /* last time; lets us reuse that page on the next call.       */

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
        /* Pad the break up to a page boundary first.                 */
        if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    }
    if (cur_brk == my_brk_val) {
        /* Use the extra block we allocated last time. */
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
        result -= GC_page_size;
    } else {
        /* Allocate the request plus one trailing guard page.         */
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    }
    my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
    return((ptr_t)result);
}
  1396. #else /* Not RS6000 */
  1397. #if defined(USE_MMAP) || defined(USE_MUNMAP)
  1398. #ifdef USE_MMAP_FIXED
  1399. # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
  1400. /* Seems to yield better performance on Solaris 2, but can */
  1401. /* be unreliable if something is already mapped at the address. */
  1402. #else
  1403. # define GC_MMAP_FLAGS MAP_PRIVATE
  1404. #endif
  1405. #ifdef USE_MMAP_ANON
  1406. # define zero_fd -1
  1407. # if defined(MAP_ANONYMOUS)
  1408. # define OPT_MAP_ANON MAP_ANONYMOUS
  1409. # else
  1410. # define OPT_MAP_ANON MAP_ANON
  1411. # endif
  1412. #else
  1413. static int zero_fd;
  1414. # define OPT_MAP_ANON 0
  1415. #endif
  1416. #endif /* defined(USE_MMAP) || defined(USE_MUNMAP) */
  1417. #if defined(USE_MMAP)
  1418. /* Tested only under Linux, IRIX5 and Solaris 2 */
  1419. #ifndef HEAP_START
  1420. # define HEAP_START 0
  1421. #endif
/* Allocate bytes of heap memory from the OS with mmap.  bytes must   */
/* be a multiple of GC_page_size (checked).  Returns 0 on failure.    */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    void *result;
    static ptr_t last_addr = HEAP_START;
        /* Address hint for the next mapping; advanced past each      */
        /* successful result so consecutive mappings tend to be       */
        /* contiguous when the OS honors the hint.                    */

#   ifndef USE_MMAP_ANON
      static GC_bool initialized = FALSE;

      if (!initialized) {
          /* Lazily open /dev/zero to back the mappings, and keep the */
          /* descriptor from leaking across exec.                     */
          zero_fd = open("/dev/zero", O_RDONLY);
          fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
          initialized = TRUE;
      }
#   endif

    if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
    if (result == MAP_FAILED) return(0);
    /* Round the hint up to the first page boundary past the mapping. */
    last_addr = (ptr_t)result + bytes + GC_page_size - 1;
    last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't    */
        /* usable by arbitrary C code, since one-past-end pointers    */
        /* don't work, so we discard it and try again.                */
        munmap(result, (size_t)(-GC_page_size) - (size_t)result);
            /* Leave last page mapped, so we can't repeat. */
        return GC_unix_get_mem(bytes);
      }
#   else
      GC_ASSERT(last_addr != 0);
#   endif
    return((ptr_t)result);
}
  1455. #else /* Not RS6000, not USE_MMAP */
  1456. ptr_t GC_unix_get_mem(bytes)
  1457. word bytes;
  1458. {
  1459. ptr_t result;
  1460. # ifdef IRIX5
  1461. /* Bare sbrk isn't thread safe. Play by malloc rules. */
  1462. /* The equivalent may be needed on other systems as well. */
  1463. __LOCK_MALLOC();
  1464. # endif
  1465. {
  1466. ptr_t cur_brk = (ptr_t)sbrk(0);
  1467. SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
  1468. if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
  1469. if (lsbs != 0) {
  1470. if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
  1471. }
  1472. result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
  1473. if (result == (ptr_t)(-1)) result = 0;
  1474. }
  1475. # ifdef IRIX5
  1476. __UNLOCK_MALLOC();
  1477. # endif
  1478. return(result);
  1479. }
  1480. #endif /* Not USE_MMAP */
  1481. #endif /* Not RS6000 */
  1482. # endif /* UN*X */
  1483. # ifdef OS2
  1484. void * os2_alloc(size_t bytes)
  1485. {
  1486. void * result;
  1487. if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
  1488. PAG_WRITE | PAG_COMMIT)
  1489. != NO_ERROR) {
  1490. return(0);
  1491. }
  1492. if (result == 0) return(os2_alloc(bytes));
  1493. return(result);
  1494. }
  1495. # endif /* OS2 */
  1496. # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
  1497. SYSTEM_INFO GC_sysinfo;
  1498. # endif
  1499. # if defined(MSWIN32) || defined(CYGWIN32)
  1500. word GC_n_heap_bases = 0;
  1501. # ifdef USE_GLOBAL_ALLOC
  1502. # define GLOBAL_ALLOC_TEST 1
  1503. # else
  1504. # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
  1505. # endif
  1506. ptr_t GC_win32_get_mem(bytes)
  1507. word bytes;
  1508. {
  1509. ptr_t result;
  1510. # ifdef CYGWIN32
  1511. result = GC_unix_get_mem (bytes);
  1512. # else
  1513. if (GLOBAL_ALLOC_TEST) {
  1514. /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
  1515. /* There are also unconfirmed rumors of other */
  1516. /* problems, so we dodge the issue. */
  1517. result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
  1518. result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
  1519. } else {
  1520. /* VirtualProtect only works on regions returned by a */
  1521. /* single VirtualAlloc call. Thus we allocate one */
  1522. /* extra page, which will prevent merging of blocks */
  1523. /* in separate regions, and eliminate any temptation */
  1524. /* to call VirtualProtect on a range spanning regions. */
  1525. /* This wastes a small amount of memory, and risks */
  1526. /* increased fragmentation. But better alternatives */
  1527. /* would require effort. */
  1528. result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
  1529. MEM_COMMIT | MEM_RESERVE,
  1530. PAGE_EXECUTE_READWRITE);
  1531. }
  1532. #endif
  1533. if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
  1534. /* If I read the documentation correctly, this can */
  1535. /* only happen if HBLKSIZE > 64k or not a power of 2. */
  1536. if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
  1537. GC_heap_bases[GC_n_heap_bases++] = result;
  1538. return(result);
  1539. }
  1540. void GC_win32_free_heap ()
  1541. {
  1542. if (GC_no_win32_dlls) {
  1543. while (GC_n_heap_bases > 0) {
  1544. # ifdef CYGWIN32
  1545. free (GC_heap_bases[--GC_n_heap_bases]);
  1546. # else
  1547. GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
  1548. # endif
  1549. GC_heap_bases[GC_n_heap_bases] = 0;
  1550. }
  1551. }
  1552. }
  1553. # endif
  1554. #ifdef AMIGA
  1555. # define GC_AMIGA_AM
  1556. # include "AmigaOS.c"
  1557. # undef GC_AMIGA_AM
  1558. #endif
  1559. # ifdef MSWINCE
  1560. word GC_n_heap_bases = 0;
/* Allocate bytes of heap memory on WinCE.  bytes is rounded up to a  */
/* multiple of the page size.  Address space is reserved in units of  */
/* dwAllocationGranularity and committed page by page; leftover       */
/* reserved-but-uncommitted space from earlier calls is reused when   */
/* it is large enough.  Returns 0 on failure.                         */
ptr_t GC_wince_get_mem(bytes)
word bytes;
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
        /* The expression below is the amount of reserved space left  */
        /* in section i before the next allocation-granularity        */
        /* boundary.                                                  */
        if (((word)(-(signed_word)GC_heap_lengths[i])
             & (GC_sysinfo.dwAllocationGranularity-1))
            >= bytes) {
            result = GC_heap_bases[i] + GC_heap_lengths[i];
            break;
        }
    }

    if (i == GC_n_heap_bases) {
        /* Reserve more pages */
        word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
                         & ~(GC_sysinfo.dwAllocationGranularity-1);
        /* If we ever support MPROTECT_VDB here, we will probably need to    */
        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
        /* never spans regions.  It seems to be OK for a VirtualFree         */
        /* argument to span regions, so we should be OK for now.             */
        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
                                      MEM_RESERVE | MEM_TOP_DOWN,
                                      PAGE_EXECUTE_READWRITE);
        /* NOTE(review): a NULL result from this reservation would    */
        /* pass the HBLKDISPL check below and be registered as a heap */
        /* base; the later MEM_COMMIT call would then allocate at an  */
        /* arbitrary address.  Confirm whether reservation failure    */
        /* can occur here in practice.                                */
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
            /* If I read the documentation correctly, this can        */
            /* only happen if HBLKSIZE > 64k or not a power of 2.     */
        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
        GC_heap_bases[GC_n_heap_bases] = result;
        GC_heap_lengths[GC_n_heap_bases] = 0;
        GC_n_heap_bases++;
    }

    /* Commit pages */
    result = (ptr_t) VirtualAlloc(result, bytes,
                                  MEM_COMMIT,
                                  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        GC_heap_lengths[i] += bytes;
    }

    return(result);
}
  1606. # endif
  1607. #ifdef USE_MUNMAP
  1608. /* For now, this only works on Win32/WinCE and some Unix-like */
  1609. /* systems. If you have something else, don't define */
  1610. /* USE_MUNMAP. */
  1611. /* We assume ANSI C to support this feature. */
  1612. #if !defined(MSWIN32) && !defined(MSWINCE)
  1613. #include <unistd.h>
  1614. #include <sys/mman.h>
  1615. #include <sys/stat.h>
  1616. #include <sys/types.h>
  1617. #endif
  1618. /* Compute a page aligned starting address for the unmap */
  1619. /* operation on a block of size bytes starting at start. */
  1620. /* Return 0 if the block is too small to make this feasible. */
  1621. ptr_t GC_unmap_start(ptr_t start, word bytes)
  1622. {
  1623. ptr_t result = start;
  1624. /* Round start to next page boundary. */
  1625. result += GC_page_size - 1;
  1626. result = (ptr_t)((word)result & ~(GC_page_size - 1));
  1627. if (result + GC_page_size > start + bytes) return 0;
  1628. return result;
  1629. }
  1630. /* Compute end address for an unmap operation on the indicated */
  1631. /* block. */
  1632. ptr_t GC_unmap_end(ptr_t start, word bytes)
  1633. {
  1634. ptr_t end_addr = start + bytes;
  1635. end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
  1636. return end_addr;
  1637. }
  1638. /* Under Win32/WinCE we commit (map) and decommit (unmap) */
  1639. /* memory using VirtualAlloc and VirtualFree. These functions */
  1640. /* work on individual allocations of virtual memory, made */
  1641. /* previously using VirtualAlloc with the MEM_RESERVE flag. */
  1642. /* The ranges we need to (de)commit may span several of these */
  1643. /* allocations; therefore we use VirtualQuery to check */
  1644. /* allocation lengths, and split up the range as necessary. */
  1645. /* We assume that GC_remap is called on exactly the same range */
  1646. /* as a previous call to GC_unmap. It is safe to consistently */
  1647. /* round the endpoints in both places. */
/* Decommit the page-aligned interior of [start, start+bytes).        */
/* Under Win32/WinCE the range may span several VirtualAlloc          */
/* regions, so we VirtualQuery and VirtualFree region by region.      */
/* Elsewhere we immediately remap the range PROT_NONE instead of      */
/* truly unmapping, so no intervening mmap can steal the addresses.   */
/* GC_unmapped_bytes is updated by the amount decommitted.            */
void GC_unmap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

    if (0 == start_addr) return;
        /* Block too small to contain a complete page.                */
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          /* Free at most one region's worth at a time.               */
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      /* We immediately remap it to prevent an intervening mmap from  */
      /* accidentally grabbing the same address space.                */
      {
        void * result;

        result = mmap(start_addr, len, PROT_NONE,
                      MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                      zero_fd, 0/* offset */);
        if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed");
      }
      GC_unmapped_bytes += len;
#   endif
}
/* Recommit the interior pages of [start, start+bytes) that were      */
/* decommitted by a previous GC_unmap on exactly the same range       */
/* (same rounding in both places, per the comment above GC_unmap).    */
/* GC_unmapped_bytes is decreased by the amount recommitted.          */
void GC_remap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

#   if defined(MSWIN32) || defined(MSWINCE)
      ptr_t result;

      if (0 == start_addr) return;
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word alloc_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          /* Commit at most one region's worth at a time.             */
          alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          result = VirtualAlloc(start_addr, alloc_len,
                                MEM_COMMIT,
                                PAGE_EXECUTE_READWRITE);
          if (result != start_addr) {
              ABORT("VirtualAlloc remapping failed");
          }
          GC_unmapped_bytes -= alloc_len;
          start_addr += alloc_len;
          len -= alloc_len;
      }
#   else
      /* It was already remapped with PROT_NONE.  Just restore access. */
      int result;

      if (0 == start_addr) return;
      result = mprotect(start_addr, len,
                        PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
      if (result != 0) {
          GC_err_printf3(
                "Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
                start_addr, len, errno);
          ABORT("Mprotect remapping failed");
      }
      GC_unmapped_bytes -= len;
#   endif
}
  1721. /* Two adjacent blocks have already been unmapped and are about to */
  1722. /* be merged. Unmap the whole block. This typically requires */
  1723. /* that we unmap a small section in the middle that was not previously */
  1724. /* unmapped due to alignment constraints. */
/* Unmap the gap between two adjacent, already-unmapped blocks that   */
/* are about to be merged (see the comment above).                    */
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;

    GC_ASSERT(start1 + bytes1 == start2);
    /* If either block was too small to contain an unmapped page,     */
    /* recompute the bounds from the merged block as a whole.         */
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      /* NOTE(review): unlike GC_unmap, which remaps PROT_NONE to     */
      /* hold the address range, this truly munmaps the gap, so an    */
      /* intervening mmap could in principle grab it.  Confirm        */
      /* whether that is acceptable here.                             */
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}
  1758. #endif /* USE_MUNMAP */
  1759. /* Routine for pushing any additional roots. In THREADS */
  1760. /* environment, this is also responsible for marking from */
  1761. /* thread stacks. */
  1762. #ifndef THREADS
  1763. void (*GC_push_other_roots)() = 0;
  1764. #else /* THREADS */
  1765. # ifdef PCR
/* Push the stack of a single PCR thread t.  dummy is unused; it is   */
/* required by the PCR_ThCtl_ApplyToAllOtherThreads callback          */
/* signature.  Returns the result code of PCR_ThCtl_GetInfo.          */
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
    struct PCR_ThCtl_TInfoRep info;
    PCR_ERes result;

    info.ti_stkLow = info.ti_stkHi = 0;
        /* Zero bounds => presumably an empty push if GetInfo fails   */
        /* to fill them in -- TODO confirm against PCR semantics.     */
    result = PCR_ThCtl_GetInfo(t, &info);
    GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
    return(result);
}
/* Push the contents of an old object.  We treat this as stack */
/* data only because that makes it robust against mark stack   */
/* overflow.                                                   */
/* Enumeration callback: push the contents of [p, p+size).  data is   */
/* unused (required by the mmp_enumerate callback signature).         */
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
    GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
    return(PCR_ERes_okay);
}
/* PCR version: push objects still owned by previous memory managers, */
/* then all thread stacks (including the current thread's).           */
void GC_default_push_other_roots GC_PROTO((void))
{
    /* Traverse data allocated by previous memory managers.           */
    {
        extern struct PCR_MM_ProcsRep * GC_old_allocator;

        if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
                                                 GC_push_old_obj, 0)
            != PCR_ERes_okay) {
            ABORT("Old object enumeration failed");
        }
    }
    /* Traverse all thread stacks. */
    if (PCR_ERes_IsErr(
            PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
        || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
        ABORT("Thread stack marking failed\n");
    }
}
  1801. # endif /* PCR */
  1802. # ifdef SRC_M3
  1803. # ifdef ALL_INTERIOR_POINTERS
  1804. --> misconfigured
  1805. # endif
/* SRC_M3 runtime owns the thread structures; there is nothing for    */
/* the collector to push here.                                        */
void GC_push_thread_structures GC_PROTO((void))
{
    /* Not our responsibility. */
}
  1810. extern void ThreadF__ProcessStacks();
/* Push an M3 thread stack given as a [start, stop] word range.       */
/* The extra word apparently makes the stop bound inclusive --        */
/* TODO confirm against ThreadF__ProcessStacks' conventions.          */
void GC_push_thread_stack(start, stop)
word start, stop;
{
    GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}
  1816. /* Push routine with M3 specific calling convention. */
/* Push the single root *p.  Only p is used; the dummy arguments pad  */
/* the parameter list to the shape presumably expected by             */
/* RTMain__GlobalMapProc (see caller below).  Implicit int return     */
/* (pre-ANSI style); the value is never used.                         */
GC_m3_push_root(dummy1, p, dummy2, dummy3)
word *p;
ptr_t dummy1, dummy2;
int dummy3;
{
    word q = *p;

    GC_PUSH_ONE_STACK(q, p);
}
  1825. /* M3 set equivalent to RTHeap.TracedRefTypes */
  1826. typedef struct { int elts[1]; } RefTypeSet;
  1827. RefTypeSet GC_TracedRefTypes = {{0x1}};
/* SRC_M3 version: push M3 global roots via the runtime's map proc,   */
/* and the thread stacks once any allocation has taken place.         */
void GC_default_push_other_roots GC_PROTO((void))
{
    /* Use the M3 provided routine for finding static roots.          */
    /* This is a bit dubious, since it presumes no C roots.           */
    /* We handle the collector roots explicitly in GC_push_roots.     */
    RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
    if (GC_words_allocd > 0) {
        ThreadF__ProcessStacks(GC_push_thread_stack);
    }
    /* Otherwise this isn't absolutely necessary, and we have         */
    /* startup ordering problems.                                     */
}
  1840. # endif /* SRC_M3 */
  1841. # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
  1842. defined(GC_WIN32_THREADS)
  1843. extern void GC_push_all_stacks();
  1844. void GC_default_push_other_roots GC_PROTO((void))
  1845. {
  1846. GC_push_all_stacks();
  1847. }
  1848. # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
  1849. void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
  1850. #endif /* THREADS */
  1851. /*
  1852. * Routines for accessing dirty bits on virtual pages.
  1853. * We plan to eventually implement four strategies for doing so:
  1854. * DEFAULT_VDB: A simple dummy implementation that treats every page
  1855. * as possibly dirty. This makes incremental collection
  1856. * useless, but the implementation is still correct.
* PCR_VDB: Use PPCR's virtual dirty bit facility.
  1858. * PROC_VDB: Use the /proc facility for reading dirty bits. Only
  1859. * works under some SVR4 variants. Even then, it may be
  1860. * too slow to be entirely satisfactory. Requires reading
  1861. * dirty bits for entire address space. Implementations tend
  1862. * to assume that the client is a (slow) debugger.
  1863. * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
  1864. * dirtied pages. The implementation (and implementability)
  1865. * is highly system dependent. This usually fails when system
  1866. * calls write to a protected page. We prevent the read system
* call from doing so.  It is the client's responsibility to
  1868. * make sure that other system calls are similarly protected
  1869. * or write only to the stack.
  1870. */
  1871. GC_bool GC_dirty_maintained = FALSE;
  1872. # ifdef DEFAULT_VDB
  1873. /* All of the following assume the allocation lock is held, and */
  1874. /* signals are disabled. */
  1875. /* The client asserts that unallocated pages in the heap are never */
  1876. /* written. */
  1877. /* Initialize virtual dirty bit implementation. */
/* DEFAULT_VDB has no per-page state to set up; just record that a    */
/* dirty-bit implementation is (nominally) in place.                  */
void GC_dirty_init()
{
#   ifdef PRINTSTATS
      GC_printf0("Initializing DEFAULT_VDB...\n");
#   endif
    GC_dirty_maintained = TRUE;
}
/* Retrieve system dirty bits for heap to a local buffer.  */
/* Restore the system's notion of which pages are dirty.   */
/* No-op: DEFAULT_VDB has no system dirty bits to read or reset.      */
void GC_read_dirty()
{}
  1889. /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
  1890. /* If the actual page size is different, this returns TRUE if any */
  1891. /* of the pages overlapping h are dirty. This routine may err on the */
  1892. /* side of labelling pages as dirty (and this implementation does). */
  1893. /*ARGSUSED*/
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    /* DEFAULT_VDB: conservatively treat every page as dirty.         */
    return(TRUE);
}
  1899. /*
  1900. * The following two routines are typically less crucial. They matter
  1901. * most with large dynamic libraries, or if we can't accurately identify
  1902. * stacks, e.g. under Solaris 2.X. Otherwise the following default
  1903. * versions are adequate.
  1904. */
  1905. /* Could any valid GC heap pointer ever have been written to this page? */
  1906. /*ARGSUSED*/
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    /* Conservative answer: any page may have been written.           */
    return(TRUE);
}
  1912. /* Reset the n pages starting at h to "was never dirty" status. */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
    /* No-op: DEFAULT_VDB keeps no per-page freshness state.          */
}
  1918. /* A call that: */
  1919. /* I) hints that [h, h+nblocks) is about to be written. */
  1920. /* II) guarantees that protection is removed. */
  1921. /* (I) may speed up some dirty bit implementations. */
  1922. /* (II) may be essential if we need to ensure that */
  1923. /* pointer-free system call buffers in the heap are */
  1924. /* not protected. */
  1925. /*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    /* No-op: DEFAULT_VDB never protects pages, so there is nothing   */
    /* to remove.                                                     */
}
  1932. # endif /* DEFAULT_VDB */
  1933. # ifdef MPROTECT_VDB
  1934. /*
  1935. * See DEFAULT_VDB for interface descriptions.
  1936. */
  1937. /*
  1938. * This implementation maintains dirty bits itself by catching write
  1939. * faults and keeping track of them. We assume nobody else catches
  1940. * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
  1941. * This means that clients must ensure that system calls don't write
  1942. * to the write-protected heap. Probably the best way to do this is to
  1943. * ensure that system calls write at most to POINTERFREE objects in the
  1944. * heap, and do even that only if we are on a platform on which those
  1945. * are not protected. Another alternative is to wrap system calls
  1946. * (see example for read below), but the current implementation holds
  1947. * a lock across blocking calls, making it problematic for multithreaded
  1948. * applications.
  1949. * We assume the page size is a multiple of HBLKSIZE.
  1950. * We prefer them to be the same. We avoid protecting POINTERFREE
  1951. * objects only if they are the same.
  1952. */
  1953. # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
  1954. # include <sys/mman.h>
  1955. # include <signal.h>
  1956. # include <sys/syscall.h>
  1957. # define PROTECT(addr, len) \
  1958. if (mprotect((caddr_t)(addr), (size_t)(len), \
  1959. PROT_READ | OPT_PROT_EXEC) < 0) { \
  1960. ABORT("mprotect failed"); \
  1961. }
  1962. # define UNPROTECT(addr, len) \
  1963. if (mprotect((caddr_t)(addr), (size_t)(len), \
  1964. PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
  1965. ABORT("un-mprotect failed"); \
  1966. }
  1967. # else
  1968. # ifdef DARWIN
  1969. /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
  1970. decrease the likelihood of some of the problems described below. */
  1971. #include <mach/vm_map.h>
  1972. static mach_port_t GC_task_self;
  1973. #define PROTECT(addr,len) \
  1974. if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
  1975. FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
  1976. ABORT("vm_portect failed"); \
  1977. }
  1978. #define UNPROTECT(addr,len) \
  1979. if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
  1980. FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
  1981. ABORT("vm_portect failed"); \
  1982. }
  1983. # else
  1984. # ifndef MSWINCE
  1985. # include <signal.h>
  1986. # endif
  1987. static DWORD protect_junk;
  1988. # define PROTECT(addr, len) \
  1989. if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
  1990. &protect_junk)) { \
  1991. DWORD last_error = GetLastError(); \
  1992. GC_printf1("Last error code: %lx\n", last_error); \
  1993. ABORT("VirtualProtect failed"); \
  1994. }
  1995. # define UNPROTECT(addr, len) \
  1996. if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
  1997. &protect_junk)) { \
  1998. ABORT("un-VirtualProtect failed"); \
  1999. }
  2000. # endif /* !DARWIN */
  2001. # endif /* MSWIN32 || MSWINCE || DARWIN */
  2002. #if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
  2003. typedef void (* SIG_PF)();
  2004. #endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
  2005. #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
  2006. || defined(HURD)
  2007. # ifdef __STDC__
  2008. typedef void (* SIG_PF)(int);
  2009. # else
  2010. typedef void (* SIG_PF)();
  2011. # endif
  2012. #endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
  2013. #if defined(MSWIN32)
  2014. typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
  2015. # undef SIG_DFL
  2016. # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
  2017. #endif
  2018. #if defined(MSWINCE)
  2019. typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
  2020. # undef SIG_DFL
  2021. # define SIG_DFL (SIG_PF) (-1)
  2022. #endif
  2023. #if defined(IRIX5) || defined(OSF1) || defined(HURD)
  2024. typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
  2025. #endif /* IRIX5 || OSF1 || HURD */
  2026. #if defined(SUNOS5SIGS)
  2027. # if defined(HPUX) || defined(FREEBSD)
  2028. # define SIGINFO_T siginfo_t
  2029. # else
  2030. # define SIGINFO_T struct siginfo
  2031. # endif
  2032. # ifdef __STDC__
  2033. typedef void (* REAL_SIG_PF)(int, SIGINFO_T *, void *);
  2034. # else
  2035. typedef void (* REAL_SIG_PF)();
  2036. # endif
  2037. #endif /* SUNOS5SIGS */
  2038. #if defined(LINUX)
  2039. # if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
  2040. typedef struct sigcontext s_c;
  2041. # else /* glibc < 2.2 */
  2042. # include <linux/version.h>
  2043. # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
  2044. typedef struct sigcontext s_c;
  2045. # else
  2046. typedef struct sigcontext_struct s_c;
  2047. # endif
  2048. # endif /* glibc < 2.2 */
  2049. # if defined(ALPHA) || defined(M68K)
  2050. typedef void (* REAL_SIG_PF)(int, int, s_c *);
  2051. # else
  2052. # if defined(IA64) || defined(HP_PA) || defined(X86_64)
  2053. typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
  2054. /* FIXME: */
  2055. /* According to SUSV3, the last argument should have type */
  2056. /* void * or ucontext_t * */
  2057. # else
  2058. typedef void (* REAL_SIG_PF)(int, s_c);
  2059. # endif
  2060. # endif
  2061. # ifdef ALPHA
  2062. /* Retrieve fault address from sigcontext structure by decoding */
  2063. /* instruction. */
char * get_fault_addr(s_c *sc) {
    unsigned instr;
    word faultaddr;

    /* sc_pc points at the faulting Alpha memory-format instruction.  */
    instr = *((unsigned *)(sc->sc_pc));
    /* Base register number is in bits 16-20 of the instruction.      */
    faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
    /* Add the sign-extended 16-bit displacement field.               */
    faultaddr += (word) (((int)instr << 16) >> 16);
    return (char *)faultaddr;
}
  2072. # endif /* !ALPHA */
  2073. # endif /* LINUX */
  2074. #ifndef DARWIN
  2075. SIG_PF GC_old_bus_handler;
  2076. SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
  2077. #endif /* !DARWIN */
  2078. #if defined(THREADS)
  2079. /* We need to lock around the bitmap update in the write fault handler */
  2080. /* in order to avoid the risk of losing a bit. We do this with a */
  2081. /* test-and-set spin lock if we know how to do that. Otherwise we */
  2082. /* check whether we are already in the handler and use the dumb but */
  2083. /* safe fallback algorithm of setting all bits in the word. */
  2084. /* Contention should be very rare, so we do the minimum to handle it */
  2085. /* correctly. */
  2086. #ifdef GC_TEST_AND_SET_DEFINED
/* Spin lock guarding dirty-bit updates performed from the write      */
/* fault handler.  Contention is expected to be very rare.            */
static VOLATILE unsigned int fault_handler_lock = 0;

/* Set the page-hash-table entry for index, atomically with respect   */
/* to concurrent callers, by busy-waiting on fault_handler_lock.      */
void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
    while (GC_test_and_set(&fault_handler_lock)) {}
	/* Could also revert to set_pht_entry_from_index_safe if initial */
	/* GC_test_and_set fails.                                        */
    set_pht_entry_from_index(db, index);
    GC_clear(&fault_handler_lock);
}
  2095. #else /* !GC_TEST_AND_SET_DEFINED */
  2096. /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
  2097. /* just before we notice the conflict and correct it. We may end up */
  2098. /* looking at it while it's wrong. But this requires contention */
  2099. /* exactly when a GC is triggered, which seems far less likely to */
  2100. /* fail than the old code, which had no reported failures. Thus we */
  2101. /* leave it this way while we think of something better, or support */
  2102. /* GC_test_and_set on the remaining platforms. */
/* Identity (stack address) of the thread currently updating the      */
/* dirty bits; used below to detect a racing update.                  */
static VOLATILE word currently_updating = 0;

/* Lock-free (but racy -- see the caveat above) update: publish our   */
/* identity, perform the fast update, and if another thread           */
/* overwrote our identity in the meantime, redo the update with the   */
/* conservative safe variant.  Statement order here is load-bearing.  */
void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
    unsigned int update_dummy;  /* Address is unique per live call.   */

    currently_updating = (word)(&update_dummy);
    set_pht_entry_from_index(db, index);
    /* If we get contention in the 10 or so instruction window here,  */
    /* and we get stopped by a GC between the two updates, we lose!   */
    if (currently_updating != (word)(&update_dummy)) {
        set_pht_entry_from_index_safe(db, index);
        /* We claim that if two threads concurrently try to update    */
        /* the dirty bit vector, the first one to execute             */
        /* UPDATE_START will see it changed when UPDATE_END is        */
        /* executed.  (Note that &update_dummy must differ in two     */
        /* distinct threads.)  It will then execute                   */
        /* set_pht_entry_from_index_safe, thus returning us to a      */
        /* safe state, though not soon enough.                        */
    }
}
  2120. #endif /* !GC_TEST_AND_SET_DEFINED */
  2121. #else /* !THREADS */
  2122. # define async_set_pht_entry_from_index(db, index) \
  2123. set_pht_entry_from_index(db, index)
  2124. #endif /* !THREADS */
  2125. /*ARGSUSED*/
#if !defined(DARWIN)

/* The page-fault handler for the mprotect-based virtual dirty bit    */
/* implementation.  Its signature, the SIG_OK/CODE_OK predicates      */
/* that recognize a protection fault, and the way the faulting        */
/* address is recovered all vary per platform, hence the heavy        */
/* conditional compilation.  On a fault inside the heap it            */
/* unprotects the page, records it dirty, and resumes the faulting    */
/* write; other faults are chained to the previous handler or abort.  */
# if defined (SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
    void GC_write_fault_handler(sig, code, scp, addr)
    int sig, code;
    struct sigcontext *scp;
    char * addr;
#   ifdef SUNOS4
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (FC_CODE(code) == FC_PROT \
                    || (FC_CODE(code) == FC_OBJERR \
                       && FC_ERRNO(code) == FC_PROT))
#   endif
#   ifdef FREEBSD
#     define SIG_OK (sig == SIGBUS)
#     define CODE_OK TRUE
#   endif
# endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */

# if defined(IRIX5) || defined(OSF1) || defined(HURD)
#   include <errno.h>
    void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
#   ifdef OSF1
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (code == 2 /* experimentally determined */)
#   endif
#   ifdef IRIX5
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (code == EACCES)
#   endif
#   ifdef HURD
#     define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
#     define CODE_OK TRUE
#   endif
# endif /* IRIX5 || OSF1 || HURD */

# if defined(LINUX)
#   if defined(ALPHA) || defined(M68K)
      void GC_write_fault_handler(int sig, int code, s_c * sc)
#   else
#     if defined(IA64) || defined(HP_PA) || defined(X86_64)
        void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
#     else
#       if defined(ARM32)
          void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
#       else
          void GC_write_fault_handler(int sig, s_c sc)
#       endif
#     endif
#   endif
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK TRUE
    /* Empirically c.trapno == 14, on IA32, but is that useful?     */
    /* Should probably consider alignment issues on other           */
    /* architectures.                                               */
# endif /* LINUX */

# if defined(SUNOS5SIGS)
#   ifdef __STDC__
      void GC_write_fault_handler(int sig, SIGINFO_T *scp, void * context)
#   else
      void GC_write_fault_handler(sig, scp, context)
      int sig;
      SIGINFO_T *scp;
      void * context;
#   endif
#   ifdef HPUX
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (scp -> si_code == SEGV_ACCERR) \
                   || (scp -> si_code == BUS_ADRERR) \
                   || (scp -> si_code == BUS_UNKNOWN) \
                   || (scp -> si_code == SEGV_UNKNOWN) \
                   || (scp -> si_code == BUS_OBJERR)
#   else
#     ifdef FREEBSD
#       define SIG_OK (sig == SIGBUS)
#       define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
#     else
#       define SIG_OK (sig == SIGSEGV)
#       define CODE_OK (scp -> si_code == SEGV_ACCERR)
#     endif
#   endif
# endif /* SUNOS5SIGS */

# if defined(MSWIN32) || defined(MSWINCE)
    LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
#   define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
                        STATUS_ACCESS_VIOLATION)
#   define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
                        /* Write fault */
# endif /* MSWIN32 || MSWINCE */
{
    register unsigned i;
    /* Recover the faulting address from whatever the platform        */
    /* handed us.  Exactly one branch below defines "addr".           */
#   if defined(HURD)
        char *addr = (char *) code;
#   endif
#   ifdef IRIX5
        char * addr = (char *) (size_t) (scp -> sc_badvaddr);
#   endif
#   if defined(OSF1) && defined(ALPHA)
        char * addr = (char *) (scp -> sc_traparg_a0);
#   endif
#   ifdef SUNOS5SIGS
        char * addr = (char *) (scp -> si_addr);
#   endif
#   ifdef LINUX
#     if defined(I386)
        char * addr = (char *) (sc.cr2);
#     else
#       if defined(M68K)
          char * addr = NULL;
          struct sigcontext *scp = (struct sigcontext *)(sc);
          /* Decode the 68k exception stack frame following the       */
          /* sigcontext; the frame format differs per CPU generation. */
          int format = (scp->sc_formatvec >> 12) & 0xf;
          unsigned long *framedata = (unsigned long *)(scp + 1);
          unsigned long ea;

          if (format == 0xa || format == 0xb) {
                /* 68020/030 */
                ea = framedata[2];
          } else if (format == 7) {
                /* 68040 */
                ea = framedata[3];
                if (framedata[1] & 0x08000000) {
                        /* correct addr on misaligned access */
                        ea = (ea+4095)&(~4095);
                }
          } else if (format == 4) {
                /* 68060 */
                ea = framedata[0];
                if (framedata[1] & 0x08000000) {
                        /* correct addr on misaligned access */
                        ea = (ea+4095)&(~4095);
                }
          }
          addr = (char *)ea;
#       else
#         ifdef ALPHA
            char * addr = get_fault_addr(sc);
#         else
#           if defined(IA64) || defined(HP_PA) || defined(X86_64)
              char * addr = si -> si_addr;
              /* I believe this is claimed to work on all platforms   */
              /* for Linux 2.3.47 and later.  Hopefully we don't      */
              /* have to worry about earlier kernels on IA64.         */
#           else
#             if defined(POWERPC)
                char * addr = (char *) (sc.regs->dar);
#             else
#               if defined(ARM32)
                  char * addr = (char *)sc.fault_address;
#               else
#                 if defined(CRIS)
                    char * addr = (char *)sc.regs.csraddr;
#                 else
                    --> architecture not supported
#                 endif
#               endif
#             endif
#           endif
#         endif
#       endif
#     endif
#   endif
#   if defined(MSWIN32) || defined(MSWINCE)
        char * addr = (char *) (exc_info -> ExceptionRecord
                                -> ExceptionInformation[1]);
#       define sig SIGSEGV
#   endif

    if (SIG_OK && CODE_OK) {
        register struct hblk * h =
                        (struct hblk *)((word)addr & ~(GC_page_size-1));
        GC_bool in_allocd_block;

        /* Decide whether the fault lies inside the collected heap.   */
#       ifdef SUNOS5SIGS
            /* Address is only within the correct physical page.      */
            in_allocd_block = FALSE;
            for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
              if (HDR(h+i) != 0) {
                in_allocd_block = TRUE;
              }
            }
#       else
            in_allocd_block = (HDR(addr) != 0);
#       endif
        if (!in_allocd_block) {
            /* Not our fault: chain to the previously installed       */
            /* handler, or die if there was none.                     */
            /* FIXME - We should make sure that we invoke the         */
            /* old handler with the appropriate calling               */
            /* sequence, which often depends on SA_SIGINFO.           */

            /* Heap blocks now begin and end on page boundaries */
            SIG_PF old_handler;

            if (sig == SIGSEGV) {
                old_handler = GC_old_segv_handler;
            } else {
                old_handler = GC_old_bus_handler;
            }
            if (old_handler == SIG_DFL) {
#               if !defined(MSWIN32) && !defined(MSWINCE)
                    GC_err_printf1("Segfault at 0x%lx\n", addr);
                    ABORT("Unexpected bus error or segmentation fault");
#               else
                    return(EXCEPTION_CONTINUE_SEARCH);
#               endif
            } else {
                /* Invoke the old handler using the calling           */
                /* convention matching this platform's handler type.  */
#               if defined (SUNOS4) \
                    || (defined(FREEBSD) && !defined(SUNOS5SIGS))
                    (*old_handler) (sig, code, scp, addr);
                    return;
#               endif
#               if defined (SUNOS5SIGS)
                    /*
                     * FIXME: For FreeBSD, this code should check if the
                     * old signal handler used the traditional BSD style and
                     * if so call it using that style.
                     */
                    (*(REAL_SIG_PF)old_handler) (sig, scp, context);
                    return;
#               endif
#               if defined (LINUX)
#                 if defined(ALPHA) || defined(M68K)
                    (*(REAL_SIG_PF)old_handler) (sig, code, sc);
#                 else
#                   if defined(IA64) || defined(HP_PA) || defined(X86_64)
                      (*(REAL_SIG_PF)old_handler) (sig, si, scp);
#                   else
                      (*(REAL_SIG_PF)old_handler) (sig, sc);
#                   endif
#                 endif
                  return;
#               endif
#               if defined (IRIX5) || defined(OSF1) || defined(HURD)
                    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
                    return;
#               endif
#               ifdef MSWIN32
                    return((*old_handler)(exc_info));
#               endif
            }
        }
        UNPROTECT(h, GC_page_size);
        /* We need to make sure that no collection occurs between     */
        /* the UNPROTECT and the setting of the dirty bit.  Otherwise */
        /* a write by a third thread might go unnoticed.  Reversing   */
        /* the order is just as bad, since we would end up             */
        /* unprotecting a page in a GC cycle during which it's not    */
        /* marked.  Currently we do this by disabling the thread      */
        /* stopping signals while this handler is running.  An        */
        /* alternative might be to record the fact that we're about   */
        /* to unprotect, or have just unprotected a page in the GC's  */
        /* thread structure, and then to have the thread stopping     */
        /* code set the dirty flag, if necessary.                     */
        /* Mark every heap block on the unprotected page dirty.       */
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);

            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
#       if defined(OSF1)
            /* These reset the signal handler each time by default. */
            signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
#       endif
        /* The write may not take place before dirty bits are read.   */
        /* But then we'll fault again ...                             */
#       if defined(MSWIN32) || defined(MSWINCE)
            return(EXCEPTION_CONTINUE_EXECUTION);
#       else
            return;
#       endif
    }
#if defined(MSWIN32) || defined(MSWINCE)
    return EXCEPTION_CONTINUE_SEARCH;
#else
    /* Fault did not satisfy SIG_OK/CODE_OK: report and abort.        */
    GC_err_printf1("Segfault at 0x%lx\n", addr);
    ABORT("Unexpected bus error or segmentation fault");
#endif
}
#endif /* !DARWIN */
  2393. /*
  2394. * We hold the allocation lock. We expect block h to be written
  2395. * shortly. Ensure that all pages containing any part of the n hblks
  2396. * starting at h are no longer protected. If is_ptrfree is false,
  2397. * also ensure that they will subsequently appear to be dirty.
  2398. */
  2399. void GC_remove_protection(h, nblocks, is_ptrfree)
  2400. struct hblk *h;
  2401. word nblocks;
  2402. GC_bool is_ptrfree;
  2403. {
  2404. struct hblk * h_trunc; /* Truncated to page boundary */
  2405. struct hblk * h_end; /* Page boundary following block end */
  2406. struct hblk * current;
  2407. GC_bool found_clean;
  2408. if (!GC_dirty_maintained) return;
  2409. h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
  2410. h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
  2411. & ~(GC_page_size-1));
  2412. found_clean = FALSE;
  2413. for (current = h_trunc; current < h_end; ++current) {
  2414. int index = PHT_HASH(current);
  2415. if (!is_ptrfree || current < h || current >= h + nblocks) {
  2416. async_set_pht_entry_from_index(GC_dirty_pages, index);
  2417. }
  2418. }
  2419. UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
  2420. }
  2421. #if !defined(DARWIN)
  2422. void GC_dirty_init()
  2423. {
  2424. # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
  2425. defined(OSF1) || defined(HURD)
  2426. struct sigaction act, oldact;
  2427. /* We should probably specify SA_SIGINFO for Linux, and handle */
  2428. /* the different architectures more uniformly. */
  2429. # if defined(IRIX5) || defined(LINUX) && !defined(X86_64) \
  2430. || defined(OSF1) || defined(HURD)
  2431. act.sa_flags = SA_RESTART;
  2432. act.sa_handler = (SIG_PF)GC_write_fault_handler;
  2433. # else
  2434. act.sa_flags = SA_RESTART | SA_SIGINFO;
  2435. act.sa_sigaction = GC_write_fault_handler;
  2436. # endif
  2437. (void)sigemptyset(&act.sa_mask);
  2438. # ifdef SIG_SUSPEND
  2439. /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
  2440. /* handler. This effectively makes the handler atomic w.r.t. */
  2441. /* stopping the world for GC. */
  2442. (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
  2443. # endif /* SIG_SUSPEND */
  2444. # endif
  2445. # ifdef PRINTSTATS
  2446. GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
  2447. # endif
  2448. GC_dirty_maintained = TRUE;
  2449. if (GC_page_size % HBLKSIZE != 0) {
  2450. GC_err_printf0("Page size not multiple of HBLKSIZE\n");
  2451. ABORT("Page size not multiple of HBLKSIZE");
  2452. }
  2453. # if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
  2454. GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
  2455. if (GC_old_bus_handler == SIG_IGN) {
  2456. GC_err_printf0("Previously ignored bus error!?");
  2457. GC_old_bus_handler = SIG_DFL;
  2458. }
  2459. if (GC_old_bus_handler != SIG_DFL) {
  2460. # ifdef PRINTSTATS
  2461. GC_err_printf0("Replaced other SIGBUS handler\n");
  2462. # endif
  2463. }
  2464. # endif
  2465. # if defined(SUNOS4)
  2466. GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
  2467. if (GC_old_segv_handler == SIG_IGN) {
  2468. GC_err_printf0("Previously ignored segmentation violation!?");
  2469. GC_old_segv_handler = SIG_DFL;
  2470. }
  2471. if (GC_old_segv_handler != SIG_DFL) {
  2472. # ifdef PRINTSTATS
  2473. GC_err_printf0("Replaced other SIGSEGV handler\n");
  2474. # endif
  2475. }
  2476. # endif
  2477. # if (defined(SUNOS5SIGS) && !defined(FREEBSD)) || defined(IRIX5) \
  2478. || defined(LINUX) || defined(OSF1) || defined(HURD)
  2479. /* SUNOS5SIGS includes HPUX */
  2480. # if defined(GC_IRIX_THREADS)
  2481. sigaction(SIGSEGV, 0, &oldact);
  2482. sigaction(SIGSEGV, &act, 0);
  2483. # else
  2484. {
  2485. int res = sigaction(SIGSEGV, &act, &oldact);
  2486. if (res != 0) ABORT("Sigaction failed");
  2487. }
  2488. # endif
  2489. # if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
  2490. /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
  2491. /* sa_sigaction. */
  2492. GC_old_segv_handler = oldact.sa_handler;
  2493. # else /* Irix 6.x or SUNOS5SIGS or LINUX */
  2494. if (oldact.sa_flags & SA_SIGINFO) {
  2495. GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
  2496. } else {
  2497. GC_old_segv_handler = oldact.sa_handler;
  2498. }
  2499. # endif
  2500. if (GC_old_segv_handler == SIG_IGN) {
  2501. GC_err_printf0("Previously ignored segmentation violation!?");
  2502. GC_old_segv_handler = SIG_DFL;
  2503. }
  2504. if (GC_old_segv_handler != SIG_DFL) {
  2505. # ifdef PRINTSTATS
  2506. GC_err_printf0("Replaced other SIGSEGV handler\n");
  2507. # endif
  2508. }
  2509. # endif /* (SUNOS5SIGS && !FREEBSD) || IRIX5 || LINUX || OSF1 || HURD */
  2510. # if defined(HPUX) || defined(LINUX) || defined(HURD) \
  2511. || (defined(FREEBSD) && defined(SUNOS5SIGS))
  2512. sigaction(SIGBUS, &act, &oldact);
  2513. GC_old_bus_handler = oldact.sa_handler;
  2514. if (GC_old_bus_handler == SIG_IGN) {
  2515. GC_err_printf0("Previously ignored bus error!?");
  2516. GC_old_bus_handler = SIG_DFL;
  2517. }
  2518. if (GC_old_bus_handler != SIG_DFL) {
  2519. # ifdef PRINTSTATS
  2520. GC_err_printf0("Replaced other SIGBUS handler\n");
  2521. # endif
  2522. }
  2523. # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
  2524. # if defined(MSWIN32)
  2525. GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
  2526. if (GC_old_segv_handler != NULL) {
  2527. # ifdef PRINTSTATS
  2528. GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
  2529. # endif
  2530. } else {
  2531. GC_old_segv_handler = SIG_DFL;
  2532. }
  2533. # endif
  2534. }
  2535. #endif /* !DARWIN */
  2536. int GC_incremental_protection_needs()
  2537. {
  2538. if (GC_page_size == HBLKSIZE) {
  2539. return GC_PROTECTS_POINTER_HEAP;
  2540. } else {
  2541. return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
  2542. }
  2543. }
  2544. #define HAVE_INCREMENTAL_PROTECTION_NEEDS
  2545. #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
  2546. #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
/* Write-protect the heap so the next write to any tracked page is    */
/* caught by GC_write_fault_handler.  When pages can hold only one    */
/* heap block, pointer-free blocks are left unprotected to avoid      */
/* useless faults; otherwise the whole heap is protected.             */
void GC_protect_heap()
{
    ptr_t start;
    word len;
    struct hblk * current;
    struct hblk * current_start;  /* Start of block to be protected. */
    struct hblk * limit;
    unsigned i;
    GC_bool protect_all =
          (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));

    for (i = 0; i < GC_n_heap_sects; i++) {
        start = GC_heap_sects[i].hs_start;
        len = GC_heap_sects[i].hs_bytes;
        if (protect_all) {
          PROTECT(start, len);
        } else {
          GC_ASSERT(PAGE_ALIGNED(len))
          GC_ASSERT(PAGE_ALIGNED(start))
          /* Walk the section, protecting maximal runs of blocks that */
          /* may contain pointers; current_start marks the beginning  */
          /* of the pending unprotected run.                          */
          current_start = current = (struct hblk *)start;
          limit = (struct hblk *)(start + len);
          while (current < limit) {
            hdr * hhdr;
            word nhblks;
            GC_bool is_ptrfree;

            GC_ASSERT(PAGE_ALIGNED(current));
            GET_HDR(current, hhdr);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
              /* This can happen only if we're at the beginning of a  */
              /* heap segment, and a block spans heap segments.       */
              /* We will handle that block as part of the preceding   */
              /* segment.                                             */
              GC_ASSERT(current_start == current);
              current_start = ++current;
              continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
              GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
              nhblks = divHBLKSZ(hhdr -> hb_sz);
              is_ptrfree = TRUE;        /* dirty on alloc */
            } else {
              nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
              is_ptrfree = IS_PTRFREE(hhdr);
            }
            if (is_ptrfree) {
              /* Flush the pending run, then skip this block.         */
              if (current_start < current) {
                PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
              }
              current_start = (current += nhblks);
            } else {
              current += nhblks;
            }
          }
          /* Flush the final pending run, if any.                     */
          if (current_start < current) {
            PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
          }
        }
    }
}
/* We assume that either the world is stopped or its OK to lose dirty */
/* bits while this is happenning (as in GC_enable_incremental).       */
/* Harvest the dirty bits accumulated by the fault handler into       */
/* GC_grungy_pages, clear them, and re-protect the heap so the next   */
/* round of writes is tracked.  Order matters: copy, zero, protect.   */
void GC_read_dirty()
{
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
    GC_protect_heap();
}
  2614. GC_bool GC_page_was_dirty(h)
  2615. struct hblk * h;
  2616. {
  2617. register word index = PHT_HASH(h);
  2618. return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
  2619. }
  2620. /*
  2621. * Acquiring the allocation lock here is dangerous, since this
  2622. * can be called from within GC_call_with_alloc_lock, and the cord
  2623. * package does so. On systems that allow nested lock acquisition, this
  2624. * happens to work.
  2625. * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
  2626. */
/* TRUE iff GC_begin_syscall itself acquired the GC lock, in which    */
/* case GC_end_syscall must release it.  Protected by GC lock.        */
static GC_bool syscall_acquired_lock = FALSE;   /* Protected by GC lock. */

/* Enter a wrapped system call: take the allocation lock unless the   */
/* caller already holds it, remembering whether we took it.           */
void GC_begin_syscall()
{
    if (!I_HOLD_LOCK()) {
        LOCK();
        syscall_acquired_lock = TRUE;
    }
}
/* Leave a wrapped system call: release the allocation lock iff       */
/* GC_begin_syscall acquired it.                                      */
void GC_end_syscall()
{
    if (syscall_acquired_lock) {
        syscall_acquired_lock = FALSE;
        UNLOCK();
    }
}
/* Remove write protection from, and mark dirty, every page touched   */
/* by [addr, addr+len).  The range must lie within a single heap      */
/* object; used by system-call wrappers that write into the heap.     */
void GC_unprotect_range(addr, len)
ptr_t addr;
word len;
{
    struct hblk * start_block;
    struct hblk * end_block;
    register struct hblk *h;
    ptr_t obj_start;

    if (!GC_dirty_maintained) return;
    obj_start = GC_base(addr);
    if (obj_start == 0) return;         /* Not a heap pointer.        */
    if (GC_base(addr + len - 1) != obj_start) {
        ABORT("GC_unprotect_range(range bigger than object)");
    }
    /* First page overlapping the range ...                           */
    start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
    end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
    /* ... through the last heap block of the last overlapping page.  */
    end_block += GC_page_size/HBLKSIZE - 1;
    for (h = start_block; h <= end_block; h++) {
        register word index = PHT_HASH(h);

        async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
    UNPROTECT(start_block,
              ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
  2666. #if 0
  2667. /* We no longer wrap read by default, since that was causing too many */
  2668. /* problems. It is preferred that the client instead avoids writing */
  2669. /* to the write-protected heap with a system call. */
  2670. /* This still serves as sample code if you do want to wrap system calls.*/
  2671. #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
  2672. /* Replacement for UNIX system call. */
  2673. /* Other calls that write to the heap should be handled similarly. */
  2674. /* Note that this doesn't work well for blocking reads: It will hold */
  2675. /* the allocation lock for the entire duration of the call. Multithreaded */
  2676. /* clients should really ensure that it won't block, either by setting */
  2677. /* the descriptor nonblocking, or by calling select or poll first, to */
  2678. /* make sure that input is available. */
  2679. /* Another, preferred alternative is to ensure that system calls never */
  2680. /* write to the protected heap (see above). */
/* (Sample code only -- this whole region is under "#if 0".)          */
/* Wrapped read(): unprotect the destination buffer so the kernel's   */
/* write into the heap doesn't fault, then perform the actual read    */
/* while holding the allocation lock.                                 */
# if defined(__STDC__) && !defined(SUNOS4)
#   include <unistd.h>
#   include <sys/uio.h>
    ssize_t read(int fd, void *buf, size_t nbyte)
# else
#   ifndef LINT
      int read(fd, buf, nbyte)
#   else
      int GC_read(fd, buf, nbyte)
#   endif
    int fd;
    char *buf;
    int nbyte;
# endif
{
    int result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
#   if defined(IRIX5) || defined(GC_LINUX_THREADS)
        /* Indirect system call may not always be easily available.   */
        /* We could call _read, but that would interfere with the     */
        /* libpthread interception of read.                           */
        /* On Linux, we have to be careful with the linuxthreads      */
        /* read interception.                                         */
        {
            struct iovec iov;

            iov.iov_base = buf;
            iov.iov_len = nbyte;
            result = readv(fd, &iov, 1);
        }
#   else
#     if defined(HURD)
        result = __read(fd, buf, nbyte);
#     else
        /* The two zero args at the end of this list are because one
           IA-64 syscall() implementation actually requires six args
           to be passed, even though they aren't always used. */
        result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
#     endif /* !HURD */
#   endif
    GC_end_syscall();
    return(result);
}
  2724. #endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */
  2725. #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
  2726. /* We use the GNU ld call wrapping facility. */
  2727. /* This requires that the linker be invoked with "--wrap read". */
  2728. /* This can be done by passing -Wl,"--wrap read" to gcc. */
  2729. /* I'm not sure that this actually wraps whatever version of read */
  2730. /* is called by stdio. That code also mentions __read. */
  2731. # include <unistd.h>
/* GNU ld call wrapper for read (requires linking with "--wrap        */
/* read"): unprotect the destination, then delegate to the real read. */
ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
{
    int result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
    result = __real_read(fd, buf, nbyte);
    GC_end_syscall();
    return(result);
}
  2741. /* We should probably also do this for __read, or whatever stdio */
  2742. /* actually calls. */
  2743. #endif
  2744. #endif /* 0 */
/*ARGSUSED*/
/* The mprotect implementation keeps no "ever dirty" history, so      */
/* conservatively report every page as possibly having been written.  */
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
/* Reset the n pages starting at h to "was never dirty" status.       */
/*ARGSUSED*/
/* Intentionally a no-op: the mprotect implementation keeps no        */
/* per-page freshness cache (contrast the PROC_VDB version below).    */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}
  2758. # endif /* MPROTECT_VDB */
  2759. # ifdef PROC_VDB
  2760. /*
  2761. * See DEFAULT_VDB for interface descriptions.
  2762. */
  2763. /*
  2764. * This implementaion assumes a Solaris 2.X like /proc pseudo-file-system
  2765. * from which we can read page modified bits. This facility is far from
  2766. * optimal (e.g. we would like to get the info for only some of the
  2767. * address space), but it avoids intercepting system calls.
  2768. */
  2769. #include <errno.h>
  2770. #include <sys/types.h>
  2771. #include <sys/signal.h>
  2772. #include <sys/fault.h>
  2773. #include <sys/syscall.h>
  2774. #include <sys/procfs.h>
  2775. #include <sys/stat.h>
  2776. #define INITIAL_BUF_SZ 16384
  2777. word GC_proc_buf_size = INITIAL_BUF_SZ;
  2778. char *GC_proc_buf;
  2779. #ifdef GC_SOLARIS_THREADS
  2780. /* We don't have exact sp values for threads. So we count on */
  2781. /* occasionally declaring stack pages to be fresh. Thus we */
  2782. /* need a real implementation of GC_is_fresh. We can't clear */
  2783. /* entries in GC_written_pages, since that would declare all */
  2784. /* pages with the given hash address to be fresh. */
  2785. # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
  2786. struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
  2787. /* Collisions are dropped. */
  2788. # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
  2789. # define ADD_FRESH_PAGE(h) \
  2790. GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
  2791. # define PAGE_IS_FRESH(h) \
  2792. (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
  2793. #endif
  2794. /* Add all pages in pht2 to pht1 */
  2795. void GC_or_pages(pht1, pht2)
  2796. page_hash_table pht1, pht2;
  2797. {
  2798. register int i;
  2799. for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
  2800. }
  2801. int GC_proc_fd;
  2802. void GC_dirty_init()
  2803. {
  2804. int fd;
  2805. char buf[30];
  2806. GC_dirty_maintained = TRUE;
  2807. if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
  2808. register int i;
  2809. for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
  2810. # ifdef PRINTSTATS
  2811. GC_printf1("Allocated words:%lu:all pages may have been written\n",
  2812. (unsigned long)
  2813. (GC_words_allocd + GC_words_allocd_before_gc));
  2814. # endif
  2815. }
  2816. sprintf(buf, "/proc/%d", getpid());
  2817. fd = open(buf, O_RDONLY);
  2818. if (fd < 0) {
  2819. ABORT("/proc open failed");
  2820. }
  2821. GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
  2822. close(fd);
  2823. syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
  2824. if (GC_proc_fd < 0) {
  2825. ABORT("/proc ioctl failed");
  2826. }
  2827. GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
  2828. # ifdef GC_SOLARIS_THREADS
  2829. GC_fresh_pages = (struct hblk **)
  2830. GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
  2831. if (GC_fresh_pages == 0) {
  2832. GC_err_printf0("No space for fresh pages\n");
  2833. EXIT();
  2834. }
  2835. BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
  2836. # endif
  2837. }
/* Ignore write hints. They don't help us here. */
/*ARGSUSED*/
/* Intentionally empty: the /proc implementation reads modification   */
/* bits from the kernel, so there is no page protection to remove.    */
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
}
  2846. #ifdef GC_SOLARIS_THREADS
  2847. # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
  2848. #else
  2849. # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
  2850. #endif
  2851. void GC_read_dirty()
  2852. {
  2853. unsigned long ps, np;
  2854. int nmaps;
  2855. ptr_t vaddr;
  2856. struct prasmap * map;
  2857. char * bufp;
  2858. ptr_t current_addr, limit;
  2859. int i;
  2860. int dummy;
  2861. BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
  2862. bufp = GC_proc_buf;
  2863. if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
  2864. # ifdef PRINTSTATS
  2865. GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
  2866. GC_proc_buf_size);
  2867. # endif
  2868. {
  2869. /* Retry with larger buffer. */
  2870. word new_size = 2 * GC_proc_buf_size;
  2871. char * new_buf = GC_scratch_alloc(new_size);
  2872. if (new_buf != 0) {
  2873. GC_proc_buf = bufp = new_buf;
  2874. GC_proc_buf_size = new_size;
  2875. }
  2876. if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
  2877. WARN("Insufficient space for /proc read\n", 0);
  2878. /* Punt: */
  2879. memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
  2880. memset(GC_written_pages, 0xff, sizeof(page_hash_table));
  2881. # ifdef GC_SOLARIS_THREADS
  2882. BZERO(GC_fresh_pages,
  2883. MAX_FRESH_PAGES * sizeof (struct hblk *));
  2884. # endif
  2885. return;
  2886. }
  2887. }
  2888. }
  2889. /* Copy dirty bits into GC_grungy_pages */
  2890. nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
  2891. /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
  2892. nmaps, PG_REFERENCED, PG_MODIFIED); */
  2893. bufp = bufp + sizeof(struct prpageheader);
  2894. for (i = 0; i < nmaps; i++) {
  2895. map = (struct prasmap *)bufp;
  2896. vaddr = (ptr_t)(map -> pr_vaddr);
  2897. ps = map -> pr_pagesize;
  2898. np = map -> pr_npage;
  2899. /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
  2900. limit = vaddr + ps * np;
  2901. bufp += sizeof (struct prasmap);
  2902. for (current_addr = vaddr;
  2903. current_addr < limit; current_addr += ps){
  2904. if ((*bufp++) & PG_MODIFIED) {
  2905. register struct hblk * h = (struct hblk *) current_addr;
  2906. while ((ptr_t)h < current_addr + ps) {
  2907. register word index = PHT_HASH(h);
  2908. set_pht_entry_from_index(GC_grungy_pages, index);
  2909. # ifdef GC_SOLARIS_THREADS
  2910. {
  2911. register int slot = FRESH_PAGE_SLOT(h);
  2912. if (GC_fresh_pages[slot] == h) {
  2913. GC_fresh_pages[slot] = 0;
  2914. }
  2915. }
  2916. # endif
  2917. h++;
  2918. }
  2919. }
  2920. }
  2921. bufp += sizeof(long) - 1;
  2922. bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
  2923. }
  2924. /* Update GC_written_pages. */
  2925. GC_or_pages(GC_written_pages, GC_grungy_pages);
  2926. # ifdef GC_SOLARIS_THREADS
  2927. /* Make sure that old stacks are considered completely clean */
  2928. /* unless written again. */
  2929. GC_old_stacks_are_fresh();
  2930. # endif
  2931. }
  2932. #undef READ
  2933. GC_bool GC_page_was_dirty(h)
  2934. struct hblk *h;
  2935. {
  2936. register word index = PHT_HASH(h);
  2937. register GC_bool result;
  2938. result = get_pht_entry_from_index(GC_grungy_pages, index);
  2939. # ifdef GC_SOLARIS_THREADS
  2940. if (result && PAGE_IS_FRESH(h)) result = FALSE;
  2941. /* This happens only if page was declared fresh since */
  2942. /* the read_dirty call, e.g. because it's in an unused */
  2943. /* thread stack. It's OK to treat it as clean, in */
  2944. /* that case. And it's consistent with */
  2945. /* GC_page_was_ever_dirty. */
  2946. # endif
  2947. return(result);
  2948. }
  2949. GC_bool GC_page_was_ever_dirty(h)
  2950. struct hblk *h;
  2951. {
  2952. register word index = PHT_HASH(h);
  2953. register GC_bool result;
  2954. result = get_pht_entry_from_index(GC_written_pages, index);
  2955. # ifdef GC_SOLARIS_THREADS
  2956. if (result && PAGE_IS_FRESH(h)) result = FALSE;
  2957. # endif
  2958. return(result);
  2959. }
  2960. /* Caller holds allocation lock. */
  2961. void GC_is_fresh(h, n)
  2962. struct hblk *h;
  2963. word n;
  2964. {
  2965. register word index;
  2966. # ifdef GC_SOLARIS_THREADS
  2967. register word i;
  2968. if (GC_fresh_pages != 0) {
  2969. for (i = 0; i < n; i++) {
  2970. ADD_FRESH_PAGE(h + i);
  2971. }
  2972. }
  2973. # endif
  2974. }
  2975. # endif /* PROC_VDB */
  2976. # ifdef PCR_VDB
  2977. # include "vd/PCR_VD.h"
  2978. # define NPAGES (32*1024) /* 128 MB */
  2979. PCR_VD_DB GC_grungy_bits[NPAGES];
  2980. ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
  2981. /* HBLKSIZE aligned. */
  2982. void GC_dirty_init()
  2983. {
  2984. GC_dirty_maintained = TRUE;
  2985. /* For the time being, we assume the heap generally grows up */
  2986. GC_vd_base = GC_heap_sects[0].hs_start;
  2987. if (GC_vd_base == 0) {
  2988. ABORT("Bad initial heap segment");
  2989. }
  2990. if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
  2991. != PCR_ERes_okay) {
  2992. ABORT("dirty bit initialization failed");
  2993. }
  2994. }
  2995. void GC_read_dirty()
  2996. {
  2997. /* lazily enable dirty bits on newly added heap sects */
  2998. {
  2999. static int onhs = 0;
  3000. int nhs = GC_n_heap_sects;
  3001. for( ; onhs < nhs; onhs++ ) {
  3002. PCR_VD_WriteProtectEnable(
  3003. GC_heap_sects[onhs].hs_start,
  3004. GC_heap_sects[onhs].hs_bytes );
  3005. }
  3006. }
  3007. if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
  3008. != PCR_ERes_okay) {
  3009. ABORT("dirty bit read failed");
  3010. }
  3011. }
  3012. GC_bool GC_page_was_dirty(h)
  3013. struct hblk *h;
  3014. {
  3015. if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
  3016. return(TRUE);
  3017. }
  3018. return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
  3019. }
/*ARGSUSED*/
/* Allow writes to the nblocks blocks starting at h.  The           */
/* disable/enable pair presumably forces the PCR VD layer to record */
/* the region as dirty while leaving tracking enabled afterwards —  */
/* TODO(review): confirm against PCR_VD documentation.              */
/* is_ptrfree is intentionally unused here (hence ARGSUSED).        */
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
}
  3029. # endif /* PCR_VDB */
  3030. #if defined(MPROTECT_VDB) && defined(DARWIN)
  3031. /* The following sources were used as a *reference* for this exception handling
  3032. code:
  3033. 1. Apple's mach/xnu documentation
  3034. 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
  3035. omnigroup's macosx-dev list.
  3036. www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
  3037. 3. macosx-nat.c from Apple's GDB source code.
  3038. */
  3039. /* The bug that caused all this trouble should now be fixed. This should
  3040. eventually be removed if all goes well. */
  3041. /* define BROKEN_EXCEPTION_HANDLING */
  3042. #include <mach/mach.h>
  3043. #include <mach/mach_error.h>
  3044. #include <mach/thread_status.h>
  3045. #include <mach/exception.h>
  3046. #include <mach/task.h>
  3047. #include <pthread.h>
  3048. /* These are not defined in any header, although they are documented */
  3049. extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
  3050. extern kern_return_t exception_raise(
  3051. mach_port_t,mach_port_t,mach_port_t,
  3052. exception_type_t,exception_data_t,mach_msg_type_number_t);
  3053. extern kern_return_t exception_raise_state(
  3054. mach_port_t,mach_port_t,mach_port_t,
  3055. exception_type_t,exception_data_t,mach_msg_type_number_t,
  3056. thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
  3057. thread_state_t,mach_msg_type_number_t*);
  3058. extern kern_return_t exception_raise_state_identity(
  3059. mach_port_t,mach_port_t,mach_port_t,
  3060. exception_type_t,exception_data_t,mach_msg_type_number_t,
  3061. thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
  3062. thread_state_t,mach_msg_type_number_t*);
#define MAX_EXCEPTION_PORTS 16

/* Exception handlers that were registered before ours (saved by    */
/* GC_dirty_init).  Faults we are not interested in are forwarded   */
/* to these by GC_forward_exception.                                */
static struct {
    mach_msg_type_number_t count;
    exception_mask_t      masks[MAX_EXCEPTION_PORTS];
    exception_handler_t   ports[MAX_EXCEPTION_PORTS];
    exception_behavior_t  behaviors[MAX_EXCEPTION_PORTS];
    thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
} GC_old_exc_ports;

/* Ports owned by the GC: the exception port serviced by            */
/* GC_mprotect_thread and, with THREADS, the reply port on which    */
/* stop/resume requests are acknowledged.                           */
static struct {
    mach_port_t exception;
#if defined(THREADS)
    mach_port_t reply;
#endif
} GC_ports;

/* Minimal mach message (header only) used for control traffic. */
typedef struct {
    mach_msg_header_t head;
} GC_msg_t;

/* State of the mprotect handler thread: NORMAL = handling faults;  */
/* DISCARDING = draining its message queue while the world stops;   */
/* STOPPED = world stopped, fault handling deferred.                */
typedef enum {
    GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
} GC_mprotect_state_t;

/* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
   but it isn't documented. Use the source and see if they
   should be ok. */
#define ID_STOP 1
#define ID_RESUME 2

/* These values are only used on the reply port */
#define ID_ACK 3
  3090. #if defined(THREADS)
  3091. GC_mprotect_state_t GC_mprotect_state;
  3092. /* The following should ONLY be called when the world is stopped */
/* Send a control message (ID_STOP or ID_RESUME) to the mprotect    */
/* handler thread via its exception port, then block until the      */
/* ID_ACK reply arrives on GC_ports.reply.                          */
static void GC_mprotect_thread_notify(mach_msg_id_t id) {
    struct {
        GC_msg_t msg;
        mach_msg_trailer_t trailer; /* room for kernel-appended trailer */
    } buf;

    mach_msg_return_t r;
    /* remote, local */
    buf.msg.head.msgh_bits =
        MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
    buf.msg.head.msgh_size = sizeof(buf.msg);
    buf.msg.head.msgh_remote_port = GC_ports.exception;
    buf.msg.head.msgh_local_port = MACH_PORT_NULL;
    buf.msg.head.msgh_id = id;

    /* Combined send + receive: deliver the request, wait for the ack. */
    r = mach_msg(
        &buf.msg.head,
        MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
        sizeof(buf.msg),
        sizeof(buf),
        GC_ports.reply,
        MACH_MSG_TIMEOUT_NONE,
        MACH_PORT_NULL);
    if(r != MACH_MSG_SUCCESS)
        ABORT("mach_msg failed in GC_mprotect_thread_notify");
    if(buf.msg.head.msgh_id != ID_ACK)
        ABORT("invalid ack in GC_mprotect_thread_notify");
}
/* Should only be called by the mprotect thread */
/* Send the ID_ACK acknowledgement on GC_ports.reply.  Send-only:   */
/* no reply is awaited.                                             */
static void GC_mprotect_thread_reply() {
    GC_msg_t msg;
    mach_msg_return_t r;
    /* remote, local */
    msg.head.msgh_bits =
        MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
    msg.head.msgh_size = sizeof(msg);
    msg.head.msgh_remote_port = GC_ports.reply;
    msg.head.msgh_local_port = MACH_PORT_NULL;
    msg.head.msgh_id = ID_ACK;

    r = mach_msg(
        &msg.head,
        MACH_SEND_MSG,
        sizeof(msg),
        0,
        MACH_PORT_NULL,
        MACH_MSG_TIMEOUT_NONE,
        MACH_PORT_NULL);
    if(r != MACH_MSG_SUCCESS)
        ABORT("mach_msg failed in GC_mprotect_thread_reply");
}
/* Ask the mprotect handler thread to stop (it will discard queued  */
/* exception messages until resumed); blocks until acknowledged.    */
void GC_mprotect_stop() {
    GC_mprotect_thread_notify(ID_STOP);
}
/* Resume normal fault handling in the mprotect handler thread;     */
/* blocks until acknowledged.                                       */
void GC_mprotect_resume() {
    GC_mprotect_thread_notify(ID_RESUME);
}
  3147. #else /* !THREADS */
  3148. /* The compiler should optimize away any GC_mprotect_state computations */
  3149. #define GC_mprotect_state GC_MP_NORMAL
  3150. #endif
/* Body of the dedicated thread servicing the GC's mach exception   */
/* port.  Loops forever receiving messages: protection faults are   */
/* dispatched through exc_server() (which calls                     */
/* catch_exception_raise), while ID_STOP/ID_RESUME control messages */
/* from GC_mprotect_thread_notify drive GC_mprotect_state.          */
static void *GC_mprotect_thread(void *arg) {
    mach_msg_return_t r;
    /* These two structures contain some private kernel data. We don't need to
       access any of it so we don't bother defining a proper struct. The
       correct definitions are in the xnu source code. */
    struct {
        mach_msg_header_t head;
        char data[256];
    } reply;
    struct {
        mach_msg_header_t head;
        mach_msg_body_t msgh_body;
        char data[1024];
    } msg;

    mach_msg_id_t id;

    GC_darwin_register_mach_handler_thread(mach_thread_self());

    for(;;) {
        /* While discarding (world stopping), receive with a zero       */
        /* timeout so an empty queue is detected via MACH_RCV_TIMED_OUT.*/
        r = mach_msg(
            &msg.head,
            MACH_RCV_MSG|MACH_RCV_LARGE|
                (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
            0,
            sizeof(msg),
            GC_ports.exception,
            GC_mprotect_state == GC_MP_DISCARDING ? 0 : MACH_MSG_TIMEOUT_NONE,
            MACH_PORT_NULL);

        id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;

#if defined(THREADS)
        if(GC_mprotect_state == GC_MP_DISCARDING) {
            if(r == MACH_RCV_TIMED_OUT) {
                /* Queue drained: transition to STOPPED and ack the stop. */
                GC_mprotect_state = GC_MP_STOPPED;
                GC_mprotect_thread_reply();
                continue;
            }
            if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
                ABORT("out of order mprotect thread request");
        }
#endif

        if(r != MACH_MSG_SUCCESS) {
            GC_err_printf2("mach_msg failed with %d %s\n",
                (int)r,mach_error_string(r));
            ABORT("mach_msg failed");
        }

        switch(id) {
#if defined(THREADS)
            case ID_STOP:
                if(GC_mprotect_state != GC_MP_NORMAL)
                    ABORT("Called mprotect_stop when state wasn't normal");
                GC_mprotect_state = GC_MP_DISCARDING;
                break;
            case ID_RESUME:
                if(GC_mprotect_state != GC_MP_STOPPED)
                    ABORT("Called mprotect_resume when state wasn't stopped");
                GC_mprotect_state = GC_MP_NORMAL;
                GC_mprotect_thread_reply();
                break;
#endif /* THREADS */
            default:
                /* Handle the message (calls catch_exception_raise) */
                if(!exc_server(&msg.head,&reply.head))
                    ABORT("exc_server failed");
                /* Send the reply */
                r = mach_msg(
                    &reply.head,
                    MACH_SEND_MSG,
                    reply.head.msgh_size,
                    0,
                    MACH_PORT_NULL,
                    MACH_MSG_TIMEOUT_NONE,
                    MACH_PORT_NULL);
                if(r != MACH_MSG_SUCCESS) {
                    /* This will fail if the thread dies, but the thread shouldn't
                       die... */
#ifdef BROKEN_EXCEPTION_HANDLING
                    GC_err_printf2(
                        "mach_msg failed with %d %s while sending exc reply\n",
                        (int)r,mach_error_string(r));
#else
                    ABORT("mach_msg failed while sending exception reply");
#endif
                }
        } /* switch */
    } /* for(;;) */
    /* NOT REACHED */
    return NULL;
}
/* All this SIGBUS code shouldn't be necessary. All protection faults should
   be going through the mach exception handler. However, it seems a SIGBUS is
   occasionally sent for some unknown reason. Even more odd, it seems to be
   meaningless and safe to ignore. */
  3241. #ifdef BROKEN_EXCEPTION_HANDLING
  3242. typedef void (* SIG_PF)();
  3243. static SIG_PF GC_old_bus_handler;
/* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
   Even if this doesn't get updated properly, it isn't really a problem. */
  3246. static int GC_sigbus_count;
/* SIGBUS handler used only with BROKEN_EXCEPTION_HANDLING: ignores */
/* isolated spurious SIGBUSs but aborts after 8 in a row, since a   */
/* real fault would keep re-faulting and hit the limit quickly.     */
static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
    if(num != SIGBUS) ABORT("Got a non-sigbus signal in the sigbus handler");

    /* Ugh... some seem safe to ignore, but too many in a row probably means
       trouble. GC_sigbus_count is reset for each mach exception that is
       handled */
    if(GC_sigbus_count >= 8) {
        ABORT("Got more than 8 SIGBUSs in a row!");
    } else {
        GC_sigbus_count++;
        GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
    }
}
  3259. #endif /* BROKEN_EXCEPTION_HANDLING */
  3260. void GC_dirty_init() {
  3261. kern_return_t r;
  3262. mach_port_t me;
  3263. pthread_t thread;
  3264. pthread_attr_t attr;
  3265. exception_mask_t mask;
  3266. # ifdef PRINTSTATS
  3267. GC_printf0("Inititalizing mach/darwin mprotect virtual dirty bit "
  3268. "implementation\n");
  3269. # endif
  3270. # ifdef BROKEN_EXCEPTION_HANDLING
  3271. GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
  3272. "exception handling bugs.\n");
  3273. # endif
  3274. GC_dirty_maintained = TRUE;
  3275. if (GC_page_size % HBLKSIZE != 0) {
  3276. GC_err_printf0("Page size not multiple of HBLKSIZE\n");
  3277. ABORT("Page size not multiple of HBLKSIZE");
  3278. }
  3279. GC_task_self = me = mach_task_self();
  3280. r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception);
  3281. if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (exception port)");
  3282. r = mach_port_insert_right(me,GC_ports.exception,GC_ports.exception,
  3283. MACH_MSG_TYPE_MAKE_SEND);
  3284. if(r != KERN_SUCCESS)
  3285. ABORT("mach_port_insert_right failed (exception port)");
  3286. #if defined(THREADS)
  3287. r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply);
  3288. if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (reply port)");
  3289. #endif
  3290. /* The exceptions we want to catch */
  3291. mask = EXC_MASK_BAD_ACCESS;
  3292. r = task_get_exception_ports(
  3293. me,
  3294. mask,
  3295. GC_old_exc_ports.masks,
  3296. &GC_old_exc_ports.count,
  3297. GC_old_exc_ports.ports,
  3298. GC_old_exc_ports.behaviors,
  3299. GC_old_exc_ports.flavors
  3300. );
  3301. if(r != KERN_SUCCESS) ABORT("task_get_exception_ports failed");
  3302. r = task_set_exception_ports(
  3303. me,
  3304. mask,
  3305. GC_ports.exception,
  3306. EXCEPTION_DEFAULT,
  3307. GC_MACH_THREAD_STATE
  3308. );
  3309. if(r != KERN_SUCCESS) ABORT("task_set_exception_ports failed");
  3310. if(pthread_attr_init(&attr) != 0) ABORT("pthread_attr_init failed");
  3311. if(pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED) != 0)
  3312. ABORT("pthread_attr_setdetachedstate failed");
  3313. # undef pthread_create
  3314. /* This will call the real pthread function, not our wrapper */
  3315. if(pthread_create(&thread,&attr,GC_mprotect_thread,NULL) != 0)
  3316. ABORT("pthread_create failed");
  3317. pthread_attr_destroy(&attr);
  3318. /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
  3319. #ifdef BROKEN_EXCEPTION_HANDLING
  3320. {
  3321. struct sigaction sa, oldsa;
  3322. sa.sa_handler = (SIG_PF)GC_darwin_sigbus;
  3323. sigemptyset(&sa.sa_mask);
  3324. sa.sa_flags = SA_RESTART|SA_SIGINFO;
  3325. if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
  3326. GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
  3327. if (GC_old_bus_handler != SIG_DFL) {
  3328. # ifdef PRINTSTATS
  3329. GC_err_printf0("Replaced other SIGBUS handler\n");
  3330. # endif
  3331. }
  3332. }
  3333. #endif /* BROKEN_EXCEPTION_HANDLING */
  3334. }
/* The source code for Apple's GDB was used as a reference for the exception
   forwarding code. This code is similar to the GDB code only because there is
   only one way to do it. */
  3338. static kern_return_t GC_forward_exception(
  3339. mach_port_t thread,
  3340. mach_port_t task,
  3341. exception_type_t exception,
  3342. exception_data_t data,
  3343. mach_msg_type_number_t data_count
  3344. ) {
  3345. int i;
  3346. kern_return_t r;
  3347. mach_port_t port;
  3348. exception_behavior_t behavior;
  3349. thread_state_flavor_t flavor;
  3350. thread_state_t thread_state;
  3351. mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
  3352. for(i=0;i<GC_old_exc_ports.count;i++)
  3353. if(GC_old_exc_ports.masks[i] & (1 << exception))
  3354. break;
  3355. if(i==GC_old_exc_ports.count) ABORT("No handler for exception!");
  3356. port = GC_old_exc_ports.ports[i];
  3357. behavior = GC_old_exc_ports.behaviors[i];
  3358. flavor = GC_old_exc_ports.flavors[i];
  3359. if(behavior != EXCEPTION_DEFAULT) {
  3360. r = thread_get_state(thread,flavor,thread_state,&thread_state_count);
  3361. if(r != KERN_SUCCESS)
  3362. ABORT("thread_get_state failed in forward_exception");
  3363. }
  3364. switch(behavior) {
  3365. case EXCEPTION_DEFAULT:
  3366. r = exception_raise(port,thread,task,exception,data,data_count);
  3367. break;
  3368. case EXCEPTION_STATE:
  3369. r = exception_raise_state(port,thread,task,exception,data,
  3370. data_count,&flavor,thread_state,thread_state_count,
  3371. thread_state,&thread_state_count);
  3372. break;
  3373. case EXCEPTION_STATE_IDENTITY:
  3374. r = exception_raise_state_identity(port,thread,task,exception,data,
  3375. data_count,&flavor,thread_state,thread_state_count,
  3376. thread_state,&thread_state_count);
  3377. break;
  3378. default:
  3379. r = KERN_FAILURE; /* make gcc happy */
  3380. ABORT("forward_exception: unknown behavior");
  3381. break;
  3382. }
  3383. if(behavior != EXCEPTION_DEFAULT) {
  3384. r = thread_set_state(thread,flavor,thread_state,thread_state_count);
  3385. if(r != KERN_SUCCESS)
  3386. ABORT("thread_set_state failed in forward_exception");
  3387. }
  3388. return r;
  3389. }
  3390. #define FWD() GC_forward_exception(thread,task,exception,code,code_count)
  3391. /* This violates the namespace rules but there isn't anything that can be done
  3392. about it. The exception handling stuff is hard coded to call this */
/* Mach exception handler invoked (via exc_server) for              */
/* EXC_BAD_ACCESS.  Protection faults on pages with a GC header are */
/* resolved by unprotecting the page and recording its blocks in    */
/* GC_dirty_pages; all other exceptions are forwarded to the        */
/* previously installed handler via FWD().                          */
kern_return_t
catch_exception_raise(
   mach_port_t exception_port,mach_port_t thread,mach_port_t task,
   exception_type_t exception,exception_data_t code,
   mach_msg_type_number_t code_count
) {
    kern_return_t r;
    char *addr;
    struct hblk *h;
    int i;
    /* Select the exception-state flavor for this architecture and  */
    /* word size; that state carries the faulting address.          */
#   if defined(POWERPC)
#     if CPP_WORDSZ == 32
        thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
        mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
        ppc_exception_state_t exc_state;
#     else
        thread_state_flavor_t flavor = PPC_EXCEPTION_STATE64;
        mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE64_COUNT;
        ppc_exception_state64_t exc_state;
#     endif
#   elif defined(I386) || defined(X86_64)
#     if CPP_WORDSZ == 32
        thread_state_flavor_t flavor = x86_EXCEPTION_STATE32;
        mach_msg_type_number_t exc_state_count = x86_EXCEPTION_STATE32_COUNT;
        x86_exception_state32_t exc_state;
#     else
        thread_state_flavor_t flavor = x86_EXCEPTION_STATE64;
        mach_msg_type_number_t exc_state_count = x86_EXCEPTION_STATE64_COUNT;
        x86_exception_state64_t exc_state;
#     endif
#   else
#     error FIXME for non-ppc darwin
#   endif

    if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
#ifdef DEBUG_EXCEPTION_HANDLING
        /* We aren't interested, pass it on to the old handler */
        GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
            exception,
            code_count > 0 ? code[0] : -1,
            code_count > 1 ? code[1] : -1);
#endif
        return FWD();
    }

    r = thread_get_state(thread,flavor,
        (natural_t*)&exc_state,&exc_state_count);
    if(r != KERN_SUCCESS) {
        /* The thread is supposed to be suspended while the exception handler
           is called. This shouldn't fail. */
#ifdef BROKEN_EXCEPTION_HANDLING
        /* With broken handling we may see bogus messages; drop them. */
        GC_err_printf0("thread_get_state failed in "
            "catch_exception_raise\n");
        return KERN_SUCCESS;
#else
        ABORT("thread_get_state failed in catch_exception_raise");
#endif
    }

    /* This is the address that caused the fault */
#if defined(POWERPC)
    addr = (char*) exc_state. THREAD_FLD(dar);
#elif defined (I386) || defined (X86_64)
    addr = (char*) exc_state. THREAD_FLD(faultvaddr);
#else
#   error FIXME for non POWERPC/I386
#endif

    if((HDR(addr)) == 0) {
        /* Ugh... just like the SIGBUS problem above, it seems we get a bogus
           KERN_PROTECTION_FAILURE every once and a while. We wait till we get
           a bunch in a row before doing anything about it. If a "real" fault
           ever occurs it'll just keep faulting over and over and we'll hit
           the limit pretty quickly. */
#ifdef BROKEN_EXCEPTION_HANDLING
        static char *last_fault;
        static int last_fault_count;

        if(addr != last_fault) {
            last_fault = addr;
            last_fault_count = 0;
        }
        if(++last_fault_count < 32) {
            if(last_fault_count == 1)
                GC_err_printf1(
                    "GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
                    addr);
            return KERN_SUCCESS;
        }

        GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
        /* Can't pass it along to the signal handler because that is
           ignoring SIGBUS signals. We also shouldn't call ABORT here as
           signals don't always work too well from the exception handler. */
        GC_err_printf0("Aborting\n");
        exit(EXIT_FAILURE);
#else /* BROKEN_EXCEPTION_HANDLING */
        /* Pass it along to the next exception handler
           (which should call SIGBUS/SIGSEGV) */
        return FWD();
#endif /* !BROKEN_EXCEPTION_HANDLING */
    }

#ifdef BROKEN_EXCEPTION_HANDLING
    /* Reset the number of consecutive SIGBUSs */
    GC_sigbus_count = 0;
#endif

    if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
        /* Unprotect the whole VM page and mark every heap block on */
        /* it dirty.                                                */
        h = (struct hblk*)((word)addr & ~(GC_page_size-1));
        UNPROTECT(h, GC_page_size);
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);
            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
    } else if(GC_mprotect_state == GC_MP_DISCARDING) {
        /* Lie to the thread for now. No sense UNPROTECT()ing the memory
           when we're just going to PROTECT() it again later. The thread
           will just fault again once it resumes */
    } else {
        /* Shouldn't happen, i don't think */
        GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
        return FWD();
    }
    return KERN_SUCCESS;
}
  3511. #undef FWD
  3512. /* These should never be called, but just in case... */
/* Stub for the EXCEPTION_STATE behavior.  The GC registers its     */
/* port with EXCEPTION_DEFAULT, so exc_server should never route    */
/* here; abort if it somehow does.                                  */
kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
    int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
    int flavor, thread_state_t old_state, int old_stateCnt,
    thread_state_t new_state, int new_stateCnt)
{
    ABORT("catch_exception_raise_state");
    return(KERN_INVALID_ARGUMENT);
}
/* Stub for the EXCEPTION_STATE_IDENTITY behavior; like             */
/* catch_exception_raise_state, this should never be reached.       */
kern_return_t catch_exception_raise_state_identity(
    mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
    int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
    int flavor, thread_state_t old_state, int old_stateCnt,
    thread_state_t new_state, int new_stateCnt)
{
    ABORT("catch_exception_raise_state_identity");
    return(KERN_INVALID_ARGUMENT);
}
  3530. #endif /* DARWIN && MPROTECT_VDB */
  3531. # ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
/* Default for configurations without a real implementation: this   */
/* collector build protects nothing incrementally.                  */
int GC_incremental_protection_needs()
{
    return GC_PROTECTS_NONE;
}
  3536. # endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
  3537. /*
  3538. * Call stack save code for debugging.
  3539. * Should probably be in mach_dep.c, but that requires reorganization.
  3540. */
  3541. /* I suspect the following works for most X86 *nix variants, so */
  3542. /* long as the frame pointer is explicitly stored. In the case of gcc, */
  3543. /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
  3544. #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
  3545. # include <features.h>
  3546. struct frame {
  3547. struct frame *fr_savfp;
  3548. long fr_savpc;
  3549. long fr_arg[NARGS]; /* All the arguments go here. */
  3550. };
  3551. #endif
  3552. #if defined(SPARC)
  3553. # if defined(LINUX)
  3554. # include <features.h>
  3555. struct frame {
  3556. long fr_local[8];
  3557. long fr_arg[6];
  3558. struct frame *fr_savfp;
  3559. long fr_savpc;
  3560. # ifndef __arch64__
  3561. char *fr_stret;
  3562. # endif
  3563. long fr_argd[6];
  3564. long fr_argx[0];
  3565. };
  3566. # else
  3567. # if defined(SUNOS4)
  3568. # include <machine/frame.h>
  3569. # else
  3570. # if defined (DRSNX)
  3571. # include <sys/sparc/frame.h>
  3572. # else
  3573. # if defined(OPENBSD)
  3574. # include <frame.h>
  3575. # else
  3576. # if defined(FREEBSD) || defined(NETBSD)
  3577. # include <machine/frame.h>
  3578. # else
  3579. # include <sys/frame.h>
  3580. # endif
  3581. # endif
  3582. # endif
  3583. # endif
  3584. # endif
  3585. # if NARGS > 6
  3586. --> We only know how to to get the first 6 arguments
  3587. # endif
  3588. #endif /* SPARC */
  3589. #ifdef NEED_CALLINFO
  3590. /* Fill in the pc and argument information for up to NFRAMES of my */
  3591. /* callers. Ignore my frame and my callers frame. */
  3592. #ifdef LINUX
  3593. # include <unistd.h>
  3594. #endif
  3595. #endif /* NEED_CALLINFO */
  3596. #if defined(GC_HAVE_BUILTIN_BACKTRACE)
  3597. # include <execinfo.h>
  3598. #endif
  3599. #ifdef SAVE_CALL_CHAIN
  3600. #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
  3601. && defined(GC_HAVE_BUILTIN_BACKTRACE)
  3602. #ifdef REDIRECT_MALLOC
  3603. /* Deal with possible malloc calls in backtrace by omitting */
  3604. /* the infinitely recursing backtrace. */
  3605. # ifdef THREADS
  3606. __thread /* If your compiler doesn't understand this */
  3607. /* you could use something like pthread_getspecific. */
  3608. # endif
  3609. GC_in_save_callers = FALSE;
  3610. #endif
/* Fill info[] with up to NFRAMES caller pc values using the        */
/* compiler-provided backtrace(); the first pc retrieved (our own   */
/* frame) is discarded, and unused trailing slots get a 0 pc.       */
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  void * tmp_info[NFRAMES + 1];
  int npcs, i;
# define IGNORE_FRAMES 1

  /* We retrieve NFRAMES+1 pc values, but discard the first, since it   */
  /* points to our own frame.                                           */
# ifdef REDIRECT_MALLOC
    /* Guard against infinite recursion: backtrace() may call malloc,   */
    /* which is redirected into the GC and could re-enter here.         */
    if (GC_in_save_callers) {
      info[0].ci_pc = (word)(&GC_save_callers);
      for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
      return;
    }
    GC_in_save_callers = TRUE;
# endif
  GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
  npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
  BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
  for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
# ifdef REDIRECT_MALLOC
    GC_in_save_callers = FALSE;
# endif
}
  3635. #else /* No builtin backtrace; do it ourselves */
  3636. #if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
  3637. # define FR_SAVFP fr_fp
  3638. # define FR_SAVPC fr_pc
  3639. #else
  3640. # define FR_SAVFP fr_savfp
  3641. # define FR_SAVPC fr_savpc
  3642. #endif
  3643. #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
  3644. # define BIAS 2047
  3645. #else
  3646. # define BIAS 0
  3647. #endif
/* Fill info[] by walking saved frame pointers directly (used when  */
/* no builtin backtrace is available).  The walk stops after        */
/* NFRAMES frames, when a frame is hotter than the starting frame,  */
/* or when it passes GC_stackbottom.                                */
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    frame = (struct frame *) GC_save_regs_in_stack ();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
#endif

   for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
           && (nframes < NFRAMES));
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;

      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        /* Arguments are stored bitwise-complemented; GC_print_callers */
        /* complements them again before printing.                     */
        for (i = 0; i < NARGS; i++) {
          info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}
  3675. #endif /* No builtin backtrace */
  3676. #endif /* SAVE_CALL_CHAIN */
  3677. #ifdef NEED_CALLINFO
  3678. /* Print info to stderr. We do NOT hold the allocation lock */
  3679. void GC_print_callers (info)
  3680. struct callinfo info[NFRAMES];
  3681. {
  3682. register int i;
  3683. static int reentry_count = 0;
  3684. GC_bool stop = FALSE;
  3685. /* FIXME: This should probably use a different lock, so that we */
  3686. /* become callable with or without the allocation lock. */
  3687. LOCK();
  3688. ++reentry_count;
  3689. UNLOCK();
  3690. # if NFRAMES == 1
  3691. GC_err_printf0("\tCaller at allocation:\n");
  3692. # else
  3693. GC_err_printf0("\tCall chain at allocation:\n");
  3694. # endif
  3695. for (i = 0; i < NFRAMES && !stop ; i++) {
  3696. if (info[i].ci_pc == 0) break;
  3697. # if NARGS > 0
  3698. {
  3699. int j;
  3700. GC_err_printf0("\t\targs: ");
  3701. for (j = 0; j < NARGS; j++) {
  3702. if (j != 0) GC_err_printf0(", ");
  3703. GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
  3704. ~(info[i].ci_arg[j]));
  3705. }
  3706. GC_err_printf0("\n");
  3707. }
  3708. # endif
  3709. if (reentry_count > 1) {
  3710. /* We were called during an allocation during */
  3711. /* a previous GC_print_callers call; punt. */
  3712. GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
  3713. continue;
  3714. }
  3715. {
  3716. # ifdef LINUX
  3717. FILE *pipe;
  3718. # endif
  3719. # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
  3720. && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
  3721. char **sym_name =
  3722. backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
  3723. char *name = sym_name[0];
  3724. # else
  3725. char buf[40];
  3726. char *name = buf;
  3727. sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
  3728. # endif
  3729. # if defined(LINUX) && !defined(SMALL_CONFIG)
  3730. /* Try for a line number. */
  3731. {
  3732. # define EXE_SZ 100
  3733. static char exe_name[EXE_SZ];
  3734. # define CMD_SZ 200
  3735. char cmd_buf[CMD_SZ];
  3736. # define RESULT_SZ 200
  3737. static char result_buf[RESULT_SZ];
  3738. size_t result_len;
  3739. char *old_preload;
  3740. # define PRELOAD_SZ 200
  3741. char preload_buf[PRELOAD_SZ];
  3742. static GC_bool found_exe_name = FALSE;
  3743. static GC_bool will_fail = FALSE;
  3744. int ret_code;
  3745. /* Try to get it via a hairy and expensive scheme. */
  3746. /* First we get the name of the executable: */
  3747. if (will_fail) goto out;
  3748. if (!found_exe_name) {
  3749. ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
  3750. if (ret_code < 0 || ret_code >= EXE_SZ
  3751. || exe_name[0] != '/') {
  3752. will_fail = TRUE; /* Dont try again. */
  3753. goto out;
  3754. }
  3755. exe_name[ret_code] = '\0';
  3756. found_exe_name = TRUE;
  3757. }
  3758. /* Then we use popen to start addr2line -e <exe> <addr> */
  3759. /* There are faster ways to do this, but hopefully this */
  3760. /* isn't time critical. */
  3761. sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
  3762. (unsigned long)info[i].ci_pc);
  3763. old_preload = getenv ("LD_PRELOAD");
  3764. if (0 != old_preload) {
  3765. if (strlen (old_preload) >= PRELOAD_SZ) {
  3766. will_fail = TRUE;
  3767. goto out;
  3768. }
  3769. strcpy (preload_buf, old_preload);
  3770. unsetenv ("LD_PRELOAD");
  3771. }
  3772. pipe = popen(cmd_buf, "r");
  3773. if (0 != old_preload
  3774. && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
  3775. WARN("Failed to reset LD_PRELOAD\n", 0);
  3776. }
  3777. if (pipe == NULL
  3778. || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
  3779. == 0) {
  3780. if (pipe != NULL) pclose(pipe);
  3781. will_fail = TRUE;
  3782. goto out;
  3783. }
  3784. if (result_buf[result_len - 1] == '\n') --result_len;
  3785. result_buf[result_len] = 0;
  3786. if (result_buf[0] == '?'
  3787. || result_buf[result_len-2] == ':'
  3788. && result_buf[result_len-1] == '0') {
  3789. pclose(pipe);
  3790. goto out;
  3791. }
  3792. /* Get rid of embedded newline, if any. Test for "main" */
  3793. {
  3794. char * nl = strchr(result_buf, '\n');
  3795. if (nl != NULL && nl < result_buf + result_len) {
  3796. *nl = ':';
  3797. }
  3798. if (strncmp(result_buf, "main", nl - result_buf) == 0) {
  3799. stop = TRUE;
  3800. }
  3801. }
  3802. if (result_len < RESULT_SZ - 25) {
  3803. /* Add in hex address */
  3804. sprintf(result_buf + result_len, " [0x%lx]",
  3805. (unsigned long)info[i].ci_pc);
  3806. }
  3807. name = result_buf;
  3808. pclose(pipe);
  3809. out:;
  3810. }
  3811. # endif /* LINUX */
  3812. GC_err_printf1("\t\t%s\n", name);
  3813. # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
  3814. && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
  3815. free(sym_name); /* May call GC_free; that's OK */
  3816. # endif
  3817. }
  3818. }
  3819. LOCK();
  3820. --reentry_count;
  3821. UNLOCK();
  3822. }
  3823. #endif /* NEED_CALLINFO */
  3824. #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
  3825. /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
  3826. addresses in FIND_LEAK output. */
  3827. static word dump_maps(char *maps)
  3828. {
  3829. GC_err_write(maps, strlen(maps));
  3830. return 1;
  3831. }
  3832. void GC_print_address_map()
  3833. {
  3834. GC_err_printf0("---------- Begin address map ----------\n");
  3835. GC_apply_to_maps(dump_maps);
  3836. GC_err_printf0("---------- End address map ----------\n");
  3837. }
  3838. #endif