jit.c

/* Copyright 2018-2021, 2023
     Free Software Foundation, Inc.

   This file is part of Guile.

   Guile is free software: you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published
   by the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Guile is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with Guile.  If not, see
   <https://www.gnu.org/licenses/>.  */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

/* This whole file is within an ENABLE_JIT flag.  */
#if ENABLE_JIT

#include <stdio.h>

#include <lightening.h>

#include "frames.h"
#include "gsubr.h"
#include "gc-inline.h"
#include "instructions.h"
#include "intrinsics.h"
#include "simpos.h" /* scm_getenv_int */
#include "threads.h"
#include "vm-builtins.h"
#include "vm-operations.h"

#ifdef __MINGW32__
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#else
#include <sys/mman.h>
#endif

#if defined __APPLE__ && HAVE_PTHREAD_JIT_WRITE_PROTECT_NP
#include <libkern/OSCacheControl.h>
#endif

#include "jit.h"

/* Guile's just-in-time (JIT) compiler is a simple "template JIT".  It
   produces machine code corresponding to each VM instruction,
   substituting in the arguments from the bytecode.  The generated code
   performs the same operations on the Guile program state that the VM
   interpreter would: the same stack reads and writes, the same calls,
   the same control flow: the same thing.  It's a very simple JIT.

   This JIT uses GNU Lightning, a library for generating assembly code.
   It has backends for every architecture you can think of.  Lightning
   exposes a minimum of 3 "volatile" or "scratch" registers, those that
   may be overwritten by called functions, and 3 "non-volatile" or
   "preserved" registers, those whose values will persist over calls.
   Guile's JIT uses two preserved registers for itself, to store the
   current thread and the current stack pointer.  The other four
   registers are available for the JIT.  However, as Guile's JIT is
   really simple and doesn't do register allocation, no other register
   is live between bytecodes; the other four registers are just scratch
   space.

   Machine code emitted by the JIT (mcode) should only ever be entered
   from the interpreter (the VM).  To enter bytecode, the interpreter
   calls an "entry trampoline" that saves the needed non-volatile
   registers, reserves some stack space, loads the thread and stack
   pointer into the reserved registers, then jumps into the mcode.  The
   mcode then does its thing.

   When mcode needs to call out to another function, e.g. via the "call"
   instruction, it makes a new frame in just the same way the VM would,
   with the difference that it also sets the machine return address
   (mRA) in the stack frame, in addition to the virtual (bytecode)
   return address (vRA).  If the callee has mcode, then the caller jumps
   to the callee's mcode.  It's a jump, not a call, as the stack is
   maintained on the side: it's not the stack used by the e.g. x86
   "call" instruction.

   When mcode calls a function that doesn't have mcode, or returns to a
   continuation that doesn't have mcode, the mcode simply returns to the
   VM interpreter, allowing the interpreter to pick up from there.  The
   return actually happens via an exit trampoline, which restores the
   saved register values.

   Every function in Guile's VM begins with an "instrument-entry"
   instruction.  The instruction links to a statically allocated "struct
   scm_jit_function_data" corresponding to that function.  When the
   interpreter sees instrument-entry, first it checks whether the
   function has mcode, by looking in the scm_jit_function_data.  If it
   has mcode, the interpreter enters mcode directly, as described above.

   If a function doesn't have mcode, "instrument-entry" will increment a
   counter in the scm_jit_function_data.  If the counter exceeds a
   threshold, the interpreter will ask the JIT compiler to produce
   mcode.  If the JIT compiler was able to do so (always possible except
   in case of resource exhaustion), then it sets the mcode pointer in
   the scm_jit_function_data, and returns the mcode pointer to the
   interpreter.  At that point the interpreter will enter mcode.  If the
   counter value does not exceed the threshold, the VM keeps
   interpreting the function instead of running compiled code.

   Additionally, Guile puts an "instrument-loop" instruction into the
   body of each loop iteration.  It works similarly, except that the
   returned mcode pointer starts in the middle of the function, at the
   point that corresponds to the program point of the "instrument-loop"
   instruction.  The idea is that some functions have long-running loops
   in them, and it would be a shame to have to wait until the next time
   they're called to enter mcode.  Being able to "tier up" from inside a
   loop reduces overall program latency.

   Think of the JIT as microarchitecture.  The interpreter specifies the
   architecture of the VM, in terms of the stack, stack and frame
   pointers, and a virtual instruction pointer.  Sometimes this
   architectural state is manipulated by the interpreter.  Sometimes
   it's compiled down to native code.  But the existence of native code
   is a detail that's fully encapsulated; systems-oriented Guile Scheme
   can walk stacks, throw errors, reinstate partial continuations, and
   so on without being aware of the existence of the JIT.  */
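
/* As a rough sketch, not the actual interpreter source, the
   instrument-entry protocol described above behaves something like the
   following; the entry points scm_jit_compute_mcode and
   scm_jit_enter_mcode are declared in jit.h, but the shape of the
   counter check is illustrative only:

     data = ...;   // linked from the instrument-entry operands
     if (data->mcode)
       scm_jit_enter_mcode (thread, data->mcode);
     else if (++data->counter >= scm_jit_counter_threshold)
       {
         const uint8_t *mcode = scm_jit_compute_mcode (thread, data);
         if (mcode)   // NULL only on resource exhaustion
           scm_jit_enter_mcode (thread, mcode);
       }
     // otherwise, keep interpreting the bytecode.
*/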

static const uint32_t default_jit_threshold = 1000;

/* Threshold for when to JIT-compile a function.  Set from the
   GUILE_JIT_THRESHOLD environment variable.  */
uint32_t scm_jit_counter_threshold = -1;

/* If positive, stop JIT compilation after the Nth compilation.  Useful
   for hunting down bugs.  */
static int jit_stop_after = -1;

/* If nonzero, pause when stopping JIT compilation after the Nth
   compilation.  For debugging.  */
static int jit_pause_when_stopping = 0;

/* Log level for JIT events.  0 means off.  */
static int jit_log_level = 0;

/* Entry trampoline: saves registers, initializes THREAD and SP
   registers, and jumps into mcode.  */
static void (*enter_mcode) (scm_thread *thread, const uint8_t *mcode);

/* Exit trampoline: restores registers and returns to interpreter.  */
static void *exit_mcode;

/* Handle interrupts trampoline: the slow path of the handle-interrupts
   instruction, compiled as a stub on the side to reduce code size.  */
static void *handle_interrupts_trampoline;

/* Return to interpreter trampoline: trampoline to load IP from the VRA
   and tier down.  */
void *scm_jit_return_to_interpreter_trampoline;

/* Thread-local buffer into which to write code.  */
struct code_arena
{
#ifdef __MINGW32__
  HANDLE handle;
#endif
  uint8_t *base;
  size_t used;
  size_t size;
  struct code_arena *prev;
};

/* Branches between instructions.  */
struct pending_reloc
{
  jit_reloc_t reloc;

  /* Each instruction has two labels: one principal label, for inline
     code, and one auxiliary label for the slow path (if any).  The
     inline label is the vcode offset times two, and the slow label is
     the vcode offset times two plus one.  */
  ptrdiff_t target_label_offset;
};

/* State of the JIT compiler for the current thread.  */
struct scm_jit_state {
  jit_state_t *jit;
  scm_thread *thread;
  const uint32_t *start;
  uint32_t *ip;
  uint32_t *next_ip;
  const uint32_t *end;
  uint32_t *entry;
  uint8_t *op_attrs;
  struct pending_reloc *relocs;
  size_t reloc_idx;
  size_t reloc_count;
  void **labels;
  int32_t frame_size_min;
  int32_t frame_size_max;
  uint32_t register_state;
  jit_gpr_t sp_cache_gpr;
  jit_fpr_t sp_cache_fpr;
  uint32_t sp_cache_gpr_idx;
  uint32_t sp_cache_fpr_idx;
  struct code_arena *code_arena;
};

typedef struct scm_jit_state scm_jit_state;

static const uint32_t program_word_offset_free_variable = 2;

static const uint32_t frame_offset_mra = 0 * sizeof (union scm_vm_stack_element);
static const uint32_t frame_offset_vra = 1 * sizeof (union scm_vm_stack_element);
static const uint32_t frame_offset_prev = 2 * sizeof (union scm_vm_stack_element);
static const uint32_t frame_overhead_slots = 3;
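
/* To illustrate the offsets above: FP points at the mRA slot and the
   stack grows down, so a live frame looks something like this (higher
   addresses at the top; byte offsets assume 8-byte stack elements, and
   scale with sizeof (union scm_vm_stack_element) on other targets):

        +---------------------------------+
   FP+16| previous-FP offset              |   the three
   FP+8 | virtual return address (vRA)    |   frame_overhead_slots
   FP   | machine return address (mRA)    |
        +---------------------------------+
   FP-8 | local 0 (the procedure)         |
   FP-16| local 1                         |
        |   ...                           |
   SP   | local N-1                       |
        +---------------------------------+
*/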

#define DEFINE_THREAD_OFFSET(f)                                         \
  static const uint32_t thread_offset_##f =                             \
    offsetof (struct scm_thread, f)

DEFINE_THREAD_OFFSET (handle);
DEFINE_THREAD_OFFSET (pending_asyncs);
DEFINE_THREAD_OFFSET (block_asyncs);

#define DEFINE_THREAD_VP_OFFSET(f)                                      \
  static const uint32_t thread_offset_##f =                             \
    offsetof (struct scm_thread, vm) + offsetof (struct scm_vm, f)

DEFINE_THREAD_VP_OFFSET (fp);
DEFINE_THREAD_VP_OFFSET (sp);
DEFINE_THREAD_VP_OFFSET (ip);
DEFINE_THREAD_VP_OFFSET (stack_limit);

/* The current scm_thread*.  Preserved across callouts.  */
static const jit_gpr_t THREAD = JIT_V0;

/* The current stack pointer.  Clobbered across callouts.  Can be
   reloaded from the thread.  Note that any callout that might
   recursively enter the VM may move the stack pointer.  */
static const jit_gpr_t SP = JIT_R0;

/* During calls and returns -- the parts of the code that manipulate the
   frame pointer -- the current frame pointer is stored in FP.
   Otherwise this is a temp register.  It can always be reloaded from
   THREAD.  Like SP, it can move.  */
static const jit_gpr_t FP = JIT_R1;

/* When we return to a function that doesn't have mcode, the just-popped
   FP is stored in this register.  The return-to-the-interpreter
   trampoline reads the vRA from the just-popped frame.  */
static const jit_gpr_t OLD_FP_FOR_RETURN_TRAMPOLINE = JIT_V1; /* T0 */

/* Scratch registers.  */
static const jit_gpr_t T0 = JIT_V1;
static const jit_gpr_t T1 = JIT_V2;
static const jit_gpr_t T2 = JIT_R2;
SCM_UNUSED static const jit_gpr_t T3_OR_FP = JIT_R1;
SCM_UNUSED static const jit_gpr_t T4_OR_SP = JIT_R0;

/* Sometimes you want to call out the fact that T0 and T1 are preserved
   across calls.  In that case, use these.  */
SCM_UNUSED static const jit_gpr_t T0_PRESERVED = JIT_V1;
static const jit_gpr_t T1_PRESERVED = JIT_V2;

static const uint32_t SP_IN_REGISTER = 0x1;
static const uint32_t FP_IN_REGISTER = 0x2;
static const uint32_t UNREACHABLE = 0x4;
static const uint32_t SP_CACHE_GPR = 0x8;
static const uint32_t SP_CACHE_FPR = 0x10;

static const uint8_t OP_ATTR_BLOCK = 0x1;
static const uint8_t OP_ATTR_ENTRY = 0x2;

#ifdef WORDS_BIGENDIAN
#define JIT_BIGENDIAN 1
#else
#define JIT_BIGENDIAN 0
#endif

#if SCM_SIZEOF_UINTPTR_T == 4
static const uint32_t log2_sizeof_uintptr_t = 2;
#elif SCM_SIZEOF_UINTPTR_T == 8
static const uint32_t log2_sizeof_uintptr_t = 3;
#else
#error unhandled uintptr_t size
#endif

#define LENGTH_NOP 0
#define LENGTH_OP1(a) 1
#define LENGTH_OP2(a,b) 2
#define LENGTH_OP3(a,b,c) 3
#define LENGTH_OP4(a,b,c,d) 4
#define LENGTH_DOP1(a) 1
#define LENGTH_DOP2(a,b) 2
#define LENGTH_DOP3(a,b,c) 3
#define LENGTH_DOP4(a,b,c,d) 4

static const uint8_t op_lengths[256] = {
#define OP_LENGTH(code, cname, name, arity) LENGTH_##arity,
FOR_EACH_VM_OPERATION(OP_LENGTH)
#undef OP_LENGTH
};

static void die (int line, const char *msg) SCM_NORETURN;
static void
die (int line, const char *msg)
{
  fprintf (stderr, "jit.c:%d: fatal: %s\n", line, msg);
  abort ();
}

#define DIE(msg) die(__LINE__, msg)

#define ASSERT(x)                                                       \
  do { if (SCM_UNLIKELY (!(x))) DIE ("assertion failed"); } while (0)

#define UNREACHABLE()                                                   \
  DIE ("unreachable")

#define _LOG(level, ...)                                                \
  do {                                                                  \
    if (SCM_UNLIKELY (jit_log_level >= level))                          \
      fprintf (stderr, "jit: " __VA_ARGS__);                            \
  } while (0)

enum {
  LOG_LEVEL_NONE,
  LOG_LEVEL_INFO,
  LOG_LEVEL_DEBUG,
  LOG_LEVEL_LOG
};

#define INFO(...)  _LOG(LOG_LEVEL_INFO, __VA_ARGS__)
#define DEBUG(...) _LOG(LOG_LEVEL_DEBUG, __VA_ARGS__)
#define LOG(...)   _LOG(LOG_LEVEL_LOG, __VA_ARGS__)

static void
reset_register_state (scm_jit_state *j, uint32_t state)
{
  j->register_state = state;
}

static void
clear_register_state (scm_jit_state *j, uint32_t state)
{
  j->register_state &= ~state;
}

static void
clear_scratch_register_state (scm_jit_state *j)
{
  reset_register_state (j, 0);
}

static void
set_register_state (scm_jit_state *j, uint32_t state)
{
  j->register_state |= state;
}

static uint32_t
unreachable (scm_jit_state *j)
{
  return j->register_state & UNREACHABLE;
}

static uint32_t
has_register_state (scm_jit_state *j, uint32_t state)
{
  return (j->register_state & state) == state;
}

#define ASSERT_HAS_REGISTER_STATE(state) \
  ASSERT (unreachable (j) || has_register_state (j, state));

static void
record_gpr_clobber (scm_jit_state *j, jit_gpr_t r)
{
  if (jit_same_gprs (j->sp_cache_gpr, r))
    clear_register_state (j, SP_CACHE_GPR);

  if (jit_same_gprs (r, SP))
    clear_register_state (j, SP_IN_REGISTER);
  else if (jit_same_gprs (r, FP))
    clear_register_state (j, FP_IN_REGISTER);
}

static void
record_fpr_clobber (scm_jit_state *j, jit_fpr_t r)
{
  if (jit_same_fprs (j->sp_cache_fpr, r))
    clear_register_state (j, SP_CACHE_FPR);
}

static void
set_sp_cache_gpr (scm_jit_state *j, uint32_t idx, jit_gpr_t r)
{
  set_register_state (j, SP_CACHE_GPR);
  j->sp_cache_gpr_idx = idx;
  if (j->sp_cache_fpr_idx == idx)
    clear_register_state (j, SP_CACHE_FPR);
}

static void
set_sp_cache_fpr (scm_jit_state *j, uint32_t idx, jit_fpr_t r)
{
  set_register_state (j, SP_CACHE_FPR);
  j->sp_cache_fpr_idx = idx;
  if (j->sp_cache_gpr_idx == idx)
    clear_register_state (j, SP_CACHE_GPR);
}

static inline ptrdiff_t
inline_label_offset (uint32_t vcode_offset)
{
  return vcode_offset * 2;
}

static inline ptrdiff_t
slow_label_offset (uint32_t vcode_offset)
{
  return vcode_offset * 2 + 1;
}
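
/* For example, the instruction at vcode offset 7 has its inline code at
   label 14 = inline_label_offset (7), and its slow-path code, if any,
   at label 15 = slow_label_offset (7).  */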

/* Q: When should I use emit_retval instead of jit_retval?  When to use
   emit_movi, emit_ldxi?

   A: Generally you should use the emit_ variants instead of the jit_
   variants.  Guile's JIT compiler has a primitive form of local
   (intrablock) register allocation that records recent stores.  A
   subsequent load might then be able to use a register read instead of
   a memory load.  This simple allocator works for straight-line code,
   and it works as long as register writes are recorded.  The JIT itself
   will clear the register allocator state at control-flow joins, but
   control flow within an instruction needs to be careful.

   It's OK to use the jit_movi, jit_retval etc primitives if you
   manually make corresponding changes to the register_state, perhaps by
   inserting record_gpr_clobber calls.  If the register is later
   clobbered by e.g. emit_sp_set_scm, sometimes those can be omitted
   though.  Also, if your instruction includes a call, that code will
   invalidate any cached register-stack-index associations, so if
   there's a call, maybe you can avoid calling emit_*.

   Note of course that an association between registers and
   stack-indexed locals is also invalidated if the stack frame expands
   via alloc-frame or push, or shrinks via reset-frame, pop, drop,
   etc.  */
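
/* A concrete sketch of the answer above, using the wrappers defined
   just below (the operands are arbitrary examples): prefer

     emit_ldxi (j, T0, SP, 8);      // records that T0 was clobbered

   over the raw

     jit_ldxi (j->jit, T0, SP, 8);
     record_gpr_clobber (j, T0);    // easy to forget

   since forgetting the clobber note leaves a stale register-to-stack
   association in j->register_state.  */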
  375. static void
  376. emit_retval (scm_jit_state *j, jit_gpr_t r)
  377. {
  378. jit_retval (j->jit, r);
  379. record_gpr_clobber (j, r);
  380. }
  381. static void
  382. emit_retval_d (scm_jit_state *j, jit_fpr_t r)
  383. {
  384. jit_retval_d (j->jit, r);
  385. record_fpr_clobber (j, r);
  386. }
  387. static void
  388. emit_movi (scm_jit_state *j, jit_gpr_t r, jit_word_t i)
  389. {
  390. jit_movi (j->jit, r, i);
  391. record_gpr_clobber (j, r);
  392. }
  393. static jit_reloc_t
  394. emit_mov_addr (scm_jit_state *j, jit_gpr_t r)
  395. {
  396. record_gpr_clobber (j, r);
  397. return jit_mov_addr (j->jit, r);
  398. }
  399. static void
  400. emit_ldxi (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t src, jit_word_t offset)
  401. {
  402. if (offset == 0)
  403. jit_ldr (j->jit, dst, src);
  404. else
  405. jit_ldxi (j->jit, dst, src, offset);
  406. record_gpr_clobber (j, dst);
  407. }
  408. #define DEFINE_CLOBBER_RECORDING_EMITTER_R(stem, typ) \
  409. static void \
  410. emit_##stem (scm_jit_state *j, jit_##typ##_t dst, jit_##typ##_t a) \
  411. { \
  412. jit_##stem (j->jit, dst, a); \
  413. record_##typ##_clobber (j, dst); \
  414. }
  415. #define DEFINE_CLOBBER_RECORDING_EMITTER_P(stem, typ) \
  416. static void \
  417. emit_##stem (scm_jit_state *j, jit_##typ##_t dst, jit_pointer_t a) \
  418. { \
  419. jit_##stem (j->jit, dst, a); \
  420. record_##typ##_clobber (j, dst); \
  421. }
  422. #define DEFINE_CLOBBER_RECORDING_EMITTER_R_I(stem, typ) \
  423. static void \
  424. emit_##stem (scm_jit_state *j, jit_##typ##_t dst, \
  425. jit_##typ##_t a, jit_word_t b) \
  426. { \
  427. jit_##stem (j->jit, dst, a, b); \
  428. record_##typ##_clobber (j, dst); \
  429. }
  430. #define DEFINE_CLOBBER_RECORDING_EMITTER_R_R(stem, typ) \
  431. static void \
  432. emit_##stem (scm_jit_state *j, jit_##typ##_t dst, \
  433. jit_##typ##_t a, jit_##typ##_t b) \
  434. { \
  435. jit_##stem (j->jit, dst, a, b); \
  436. record_##typ##_clobber (j, dst); \
  437. }
  438. #define DEFINE_CLOBBER_RECORDING_EMITTER_R_R_2(stem, typ) \
  439. static void \
  440. emit_##stem (scm_jit_state *j, \
  441. jit_##typ##_t dst1, jit_##typ##_t dst2, \
  442. jit_##typ##_t a, jit_##typ##_t b) \
  443. { \
  444. jit_##stem (j->jit, dst1, dst2, a, b); \
  445. record_##typ##_clobber (j, dst1); \
  446. record_##typ##_clobber (j, dst2); \
  447. }
  448. DEFINE_CLOBBER_RECORDING_EMITTER_R(ldr, gpr)
  449. DEFINE_CLOBBER_RECORDING_EMITTER_P(ldi, gpr)
  450. DEFINE_CLOBBER_RECORDING_EMITTER_R(comr, gpr)
  451. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(ldxr, gpr)
  452. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(addi, gpr)
  453. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(addr, gpr)
  454. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(addr_d, fpr)
  455. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(subi, gpr)
  456. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(subr, gpr)
  457. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(subr_d, fpr)
  458. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(muli, gpr)
  459. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(mulr, gpr)
  460. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(mulr_d, fpr)
  461. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(divr_d, fpr)
  462. DEFINE_CLOBBER_RECORDING_EMITTER_R(absr_d, fpr)
  463. DEFINE_CLOBBER_RECORDING_EMITTER_R(sqrtr_d, fpr)
  464. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(andi, gpr)
  465. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(andr, gpr)
  466. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(orr, gpr)
  467. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(xorr, gpr)
  468. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(rshi, gpr)
  469. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(rshi_u, gpr)
  470. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(rshr, gpr)
  471. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(rshr_u, gpr)
  472. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(lshi, gpr)
  473. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(lshr, gpr)
  474. #if SIZEOF_UINTPTR_T < 8
  475. DEFINE_CLOBBER_RECORDING_EMITTER_R(movr, gpr)
  476. DEFINE_CLOBBER_RECORDING_EMITTER_R(negr, gpr)
  477. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(addci, gpr)
  478. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(addcr, gpr)
  479. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(addxi, gpr)
  480. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(addxr, gpr)
  481. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(subci, gpr)
  482. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(subcr, gpr)
  483. DEFINE_CLOBBER_RECORDING_EMITTER_R_I(subxi, gpr)
  484. DEFINE_CLOBBER_RECORDING_EMITTER_R_R(subxr, gpr)
  485. DEFINE_CLOBBER_RECORDING_EMITTER_R_R_2(qmulr_u, gpr)
  486. #endif
  487. static void
  488. emit_reload_sp (scm_jit_state *j)
  489. {
  490. emit_ldxi (j, SP, THREAD, thread_offset_sp);
  491. set_register_state (j, SP_IN_REGISTER);
  492. }
  493. static void
  494. emit_store_sp (scm_jit_state *j)
  495. {
  496. ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  497. jit_stxi (j->jit, thread_offset_sp, THREAD, SP);
  498. }
  499. static void
  500. emit_reload_fp (scm_jit_state *j)
  501. {
  502. emit_ldxi (j, FP, THREAD, thread_offset_fp);
  503. set_register_state (j, FP_IN_REGISTER);
  504. }
  505. static void
  506. emit_store_fp (scm_jit_state *j)
  507. {
  508. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  509. jit_stxi (j->jit, thread_offset_fp, THREAD, FP);
  510. }
  511. static uint32_t
  512. save_reloadable_register_state (scm_jit_state *j)
  513. {
  514. return j->register_state & (SP_IN_REGISTER | FP_IN_REGISTER);
  515. }
  516. static void
  517. restore_reloadable_register_state (scm_jit_state *j, uint32_t state)
  518. {
  519. if ((state & SP_IN_REGISTER) && !has_register_state (j, SP_IN_REGISTER))
  520. emit_reload_sp (j);
  521. if ((state & FP_IN_REGISTER) && !has_register_state (j, FP_IN_REGISTER))
  522. emit_reload_fp (j);
  523. }
  524. static void
  525. emit_subtract_stack_slots (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t src,
  526. uint32_t n)
  527. {
  528. emit_subi (j, dst, src, n * sizeof (union scm_vm_stack_element));
  529. }
  530. static void
  531. emit_load_mra (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t fp)
  532. {
  533. emit_ldxi (j, dst, fp, frame_offset_mra);
  534. }
  535. static void
  536. emit_store_mra (scm_jit_state *j, jit_gpr_t fp, jit_gpr_t mra)
  537. {
  538. ASSERT (frame_offset_mra == 0);
  539. jit_str (j->jit, fp, mra);
  540. }
  541. static void
  542. emit_load_vra (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t fp)
  543. {
  544. emit_ldxi (j, dst, fp, frame_offset_vra);
  545. }
  546. static void
  547. emit_store_vra (scm_jit_state *j, jit_gpr_t fp, jit_gpr_t t, const uint32_t *vra)
  548. {
  549. emit_movi (j, t, (intptr_t) vra);
  550. jit_stxi (j->jit, frame_offset_vra, fp, t);
  551. }
  552. static void
  553. emit_load_prev_fp_offset (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t fp)
  554. {
  555. emit_ldxi (j, dst, fp, frame_offset_prev);
  556. }
  557. static void
  558. emit_store_prev_fp_offset (scm_jit_state *j, jit_gpr_t fp, jit_gpr_t t,
  559. uint32_t n)
  560. {
  561. emit_movi (j, t, n);
  562. jit_stxi (j->jit, frame_offset_prev, fp, t);
  563. }
  564. static void
  565. emit_store_ip (scm_jit_state *j, jit_gpr_t ip)
  566. {
  567. jit_stxi (j->jit, thread_offset_ip, THREAD, ip);
  568. }
  569. static void
  570. emit_store_current_ip (scm_jit_state *j, jit_gpr_t t)
  571. {
  572. emit_movi (j, t, (intptr_t) j->ip);
  573. emit_store_ip (j, t);
  574. }
  575. static void
  576. emit_pop_fp (scm_jit_state *j, jit_gpr_t old_fp)
  577. {
  578. emit_ldxi (j, old_fp, THREAD, thread_offset_fp);
  579. emit_load_prev_fp_offset (j, FP, old_fp);
  580. emit_lshi (j, FP, FP, 3); /* Multiply by sizeof (scm_vm_stack_element) */
  581. emit_addr (j, FP, old_fp, FP);
  582. set_register_state (j, FP_IN_REGISTER);
  583. emit_store_fp (j);
  584. }
  585. static void
  586. emit_reset_frame (scm_jit_state *j, uint32_t nlocals)
  587. {
  588. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  589. emit_subtract_stack_slots (j, SP, FP, nlocals);
  590. set_register_state (j, SP_IN_REGISTER);
  591. emit_store_sp (j);
  592. clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);
  593. }
  594. static jit_operand_t
  595. thread_operand (void)
  596. {
  597. return jit_operand_gpr (JIT_OPERAND_ABI_POINTER, THREAD);
  598. }
  599. static void
  600. emit_call_0 (scm_jit_state *j, void *f)
  601. {
  602. jit_calli_0 (j->jit, f);
  603. clear_scratch_register_state (j);
  604. }
  605. static void
  606. emit_call_1 (scm_jit_state *j, void *f, jit_operand_t a)
  607. {
  608. jit_calli_1 (j->jit, f, a);
  609. clear_scratch_register_state (j);
  610. }
  611. static void
  612. emit_call_2 (scm_jit_state *j, void *f, jit_operand_t a, jit_operand_t b)
  613. {
  614. jit_calli_2 (j->jit, f, a, b);
  615. clear_scratch_register_state (j);
  616. }
  617. static void
  618. emit_call_3 (scm_jit_state *j, void *f, jit_operand_t a, jit_operand_t b,
  619. jit_operand_t c)
  620. {
  621. jit_calli_3 (j->jit, f, a, b, c);
  622. clear_scratch_register_state (j);
  623. }
  624. static jit_reloc_t
  625. emit_alloc_frame_for_sp_fast (scm_jit_state *j, jit_gpr_t t)
  626. {
  627. ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  628. emit_ldxi (j, t, THREAD, thread_offset_stack_limit);
  629. jit_reloc_t slow = jit_bltr (j->jit, SP, t);
  630. emit_store_sp (j);
  631. clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);
  632. return slow;
  633. }
  634. static void
  635. emit_alloc_frame_for_sp_slow (scm_jit_state *j, jit_gpr_t t)
  636. {
  637. /* Slow case: call out to expand stack. */
  638. emit_store_current_ip (j, t);
  639. emit_call_2 (j, scm_vm_intrinsics.expand_stack, thread_operand (),
  640. jit_operand_gpr (JIT_OPERAND_ABI_POINTER, SP));
  641. restore_reloadable_register_state (j, SP_IN_REGISTER | FP_IN_REGISTER);
  642. }
  643. static void
  644. emit_alloc_frame (scm_jit_state *j, jit_gpr_t t, uint32_t nlocals)
  645. {
  646. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  647. emit_subtract_stack_slots (j, SP, FP, nlocals);
  648. set_register_state (j, SP_IN_REGISTER);
  649. jit_reloc_t slow = emit_alloc_frame_for_sp_fast (j, t);
  650. jit_reloc_t k = jit_jmp (j->jit);
  651. jit_patch_here (j->jit, slow);
  652. emit_alloc_frame_for_sp_slow (j, t);
  653. jit_patch_here (j->jit, k);
  654. }
  655. static void
  656. emit_get_callee_vcode (scm_jit_state *j, jit_gpr_t dst)
  657. {
  658. emit_call_1 (j, scm_vm_intrinsics.get_callee_vcode, thread_operand ());
  659. emit_retval (j, dst);
  660. emit_reload_sp (j);
  661. emit_reload_fp (j);
  662. }
  663. static void
  664. emit_get_ip_relative_addr (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t ip,
  665. uint32_t offset)
  666. {
  667. uint32_t byte_offset = offset * sizeof (uint32_t);
  668. jit_ldxi_i (j->jit, dst, ip, byte_offset);
  669. record_gpr_clobber (j, dst);
  670. emit_lshi (j, dst, dst, 2); /* Multiply by sizeof (uint32_t) */
  671. emit_addr (j, dst, dst, ip);
  672. }
  673. static void
  674. emit_exit (scm_jit_state *j)
  675. {
  676. jit_jmpi (j->jit, exit_mcode);
  677. }
  678. static void
  679. emit_push_frame (scm_jit_state *j, uint32_t proc_slot, uint32_t nlocals,
  680. const uint32_t *vra)
  681. {
  682. jit_gpr_t t = T0;
  683. emit_reload_fp (j);
  684. emit_subtract_stack_slots (j, FP, FP, proc_slot);
  685. set_register_state (j, FP_IN_REGISTER);
  686. emit_store_vra (j, FP, t, vra);
  687. emit_store_prev_fp_offset (j, FP, t, proc_slot);
  688. emit_store_fp (j);
  689. emit_reset_frame (j, nlocals);
  690. }
  691. static void
  692. emit_indirect_tail_call (scm_jit_state *j)
  693. {
  694. emit_get_callee_vcode (j, T0);
  695. emit_get_ip_relative_addr (j, T1, T0, 1);
  696. emit_ldxi (j, T1, T1, 0);
  697. jit_reloc_t no_mcode = jit_beqi (j->jit, T1, 0);
  698. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER | SP_IN_REGISTER);
  699. jit_jmpr (j->jit, T1);
  700. jit_patch_here (j->jit, no_mcode);
  701. emit_store_ip (j, T0);
  702. emit_exit (j);
  703. }
  704. static void
  705. emit_direct_tail_call (scm_jit_state *j, const uint32_t *vcode)
  706. {
  707. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER | SP_IN_REGISTER);
  708. ASSERT ((vcode[0] & 0xff) == scm_op_instrument_entry);
  709. if (vcode == j->start)
  710. {
  711. uint8_t *mcode = j->labels[inline_label_offset (0)];
  712. ASSERT (mcode);
  713. jit_jmpi (j->jit, mcode);
  714. }
  715. else
  716. {
  717. struct scm_jit_function_data *data;
  718. data = (struct scm_jit_function_data *) (vcode + (int32_t)(vcode[1]));
  719. if (data->mcode)
  720. {
  721. /* FIXME: Jump indirectly, to allow mcode to be changed
  722. (e.g. to add/remove breakpoints or hooks). */
  723. jit_jmpi (j->jit, data->mcode);
  724. }
  725. else
  726. {
  727. jit_reloc_t no_mcode;
  728. /* No need to track clobbers. */
  729. jit_ldi (j->jit, T0, &data->mcode);
  730. no_mcode = jit_beqi (j->jit, T0, 0);
  731. jit_jmpr (j->jit, T0);
  732. jit_patch_here (j->jit, no_mcode);
  733. jit_movi (j->jit, T0, (intptr_t) vcode);
  734. emit_store_ip (j, T0);
  735. emit_exit (j);
  736. }
  737. }
  738. }
  739. static jit_operand_t
  740. fp_scm_operand (scm_jit_state *j, uint32_t slot) SCM_UNUSED;
  741. static jit_operand_t
  742. fp_scm_operand (scm_jit_state *j, uint32_t slot)
  743. {
  744. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  745. return jit_operand_mem (JIT_OPERAND_ABI_POINTER, FP,
  746. -8 * ((ptrdiff_t) slot + 1));
  747. }
  748. static void
  749. emit_fp_ref_scm (scm_jit_state *j, jit_gpr_t dst, uint32_t slot)
  750. {
  751. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  752. emit_ldxi (j, dst, FP, -8 * ((ptrdiff_t) slot + 1));
  753. }
  754. static void
  755. emit_fp_set_scm (scm_jit_state *j, uint32_t slot, jit_gpr_t val)
  756. {
  757. ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  758. jit_stxi (j->jit, -8 * ((ptrdiff_t) slot + 1), FP, val);
  759. clear_register_state (j, SP_CACHE_GPR);
  760. }
  761. static jit_operand_t
  762. sp_slot_operand (scm_jit_state *j, uint32_t slot) SCM_UNUSED;
  763. static jit_operand_t
  764. sp_slot_operand (scm_jit_state *j, uint32_t slot)
  765. {
  766. ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  767. return jit_operand_addi (jit_operand_gpr (JIT_OPERAND_ABI_POINTER, SP),
  768. 8 * slot);
  769. }
  770. static jit_operand_t
  771. sp_scm_operand (scm_jit_state *j, uint32_t slot)
  772. {
  773. ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  774. return jit_operand_mem (JIT_OPERAND_ABI_POINTER, SP, 8 * slot);
  775. }
  776. static void
  777. emit_sp_ref_scm (scm_jit_state *j, jit_gpr_t dst, uint32_t slot)
  778. {
  779. ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  780. emit_ldxi (j, dst, SP, 8 * slot);
  781. }
  782. static void
  783. emit_sp_set_scm (scm_jit_state *j, uint32_t slot, jit_gpr_t val)
  784. {
  785. ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  786. if (slot == 0)
  787. jit_str (j->jit, SP, val);
  788. else
  789. jit_stxi (j->jit, 8 * slot, SP, val);
  790. set_sp_cache_gpr (j, slot, val);
  791. }
/* Use when you know that the u64 value will be within the size_t range,
   for example when it's ensured by the compiler. */
static jit_operand_t
sp_sz_operand (scm_jit_state *j, uint32_t src)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  enum jit_operand_abi abi =
    sizeof (size_t) == 4 ? JIT_OPERAND_ABI_UINT32 : JIT_OPERAND_ABI_UINT64;

  if (JIT_BIGENDIAN && sizeof (size_t) == 4)
    return jit_operand_mem (abi, SP, src * 8 + 4);
  else
    return jit_operand_mem (abi, SP, src * 8);
}

static void
emit_sp_ref_sz (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  if (JIT_BIGENDIAN && sizeof (size_t) == 4)
    emit_ldxi (j, dst, SP, src * 8 + 4);
  else
    emit_ldxi (j, dst, SP, src * 8);
}

static void
emit_sp_set_sz (scm_jit_state *j, uint32_t dst, jit_gpr_t src)
{
  size_t offset = dst * 8;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  if (sizeof (size_t) == 4)
    {
      size_t lo, hi;
      if (JIT_BIGENDIAN)
        lo = offset + 4, hi = offset;
      else
        lo = offset, hi = offset + 4;

      jit_stxi (j->jit, lo, SP, src);
      /* Set high word to 0.  Clobber src. */
      emit_xorr (j, src, src, src);
      jit_stxi (j->jit, hi, SP, src);
    }
  else
    {
      jit_stxi (j->jit, offset, SP, src);
      set_sp_cache_gpr (j, dst, src);
    }
}
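
/* Worked example of the split-word case above: each stack slot is a
   64-bit union, so on a 32-bit big-endian target the significant (low)
   half of slot N lives at SP[8 * N + 4] and the zeroed high half at
   SP[8 * N]; little-endian targets use the opposite pairing.  On
   32-bit targets the SP cache is not updated here, since src is
   clobbered by the zeroing xor.  */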
static jit_operand_t
sp_u64_operand (scm_jit_state *j, uint32_t slot)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  return jit_operand_mem (JIT_OPERAND_ABI_UINT64, SP, 8 * slot);
}

#if SCM_SIZEOF_UINTPTR_T >= 8
static void
emit_sp_ref_u64 (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  size_t offset = src * 8;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  emit_ldxi (j, dst, SP, offset);
}

static void
emit_sp_set_u64 (scm_jit_state *j, uint32_t dst, jit_gpr_t src)
{
  size_t offset = dst * 8;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  if (dst == 0)
    jit_str (j->jit, SP, src);
  else
    jit_stxi (j->jit, offset, SP, src);

  set_sp_cache_gpr (j, dst, src);
}

static void
emit_sp_ref_s64 (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  emit_sp_ref_u64 (j, dst, src);
}

static void
emit_sp_set_s64 (scm_jit_state *j, uint32_t dst, jit_gpr_t src)
{
  emit_sp_set_u64 (j, dst, src);
}

static void
emit_sp_ref_ptr (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  emit_sp_ref_u64 (j, dst, src);
}

#else /* SCM_SIZEOF_UINTPTR_T >= 8 */

static jit_operand_t
sp_s32_operand (scm_jit_state *j, uint32_t src)
{
  return sp_sz_operand (j, src);
}

static void
emit_sp_ref_s32 (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  emit_sp_ref_sz (j, dst, src);
}

static void
emit_sp_ref_u64 (scm_jit_state *j, jit_gpr_t dst_lo, jit_gpr_t dst_hi,
                 uint32_t src)
{
  size_t offset = src * 8;
  jit_gpr_t first, second;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

#if JIT_BIGENDIAN
  first = dst_hi, second = dst_lo;
#else
  first = dst_lo, second = dst_hi;
#endif

  emit_ldxi (j, first, SP, offset);
  emit_ldxi (j, second, SP, offset + 4);
}

static void
emit_sp_set_u64 (scm_jit_state *j, uint32_t dst, jit_gpr_t lo, jit_gpr_t hi)
{
  size_t offset = dst * 8;
  jit_gpr_t first, second;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

#if JIT_BIGENDIAN
  first = hi, second = lo;
#else
  first = lo, second = hi;
#endif

  if (offset == 0)
    jit_str (j->jit, SP, first);
  else
    jit_stxi (j->jit, offset, SP, first);
  jit_stxi (j->jit, offset + 4, SP, second);

  clear_register_state (j, SP_CACHE_GPR);
}

static void
emit_sp_ref_s64 (scm_jit_state *j, jit_gpr_t dst_lo, jit_gpr_t dst_hi,
                 uint32_t src)
{
  emit_sp_ref_u64 (j, dst_lo, dst_hi, src);
}

static void
emit_sp_set_s64 (scm_jit_state *j, uint32_t dst, jit_gpr_t lo, jit_gpr_t hi)
{
  emit_sp_set_u64 (j, dst, lo, hi);
}

static void
emit_sp_ref_u64_lower_half (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  size_t offset = src * 8;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  emit_ldxi (j, dst, SP, offset);
}

static void
emit_sp_ref_ptr (scm_jit_state *j, jit_gpr_t dst, uint32_t src)
{
  emit_sp_ref_u64_lower_half (j, dst, src);
}
#endif /* SCM_SIZEOF_UINTPTR_T >= 8 */
static jit_operand_t
sp_f64_operand (scm_jit_state *j, uint32_t slot)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  return jit_operand_mem (JIT_OPERAND_ABI_DOUBLE, SP, 8 * slot);
}

static void
emit_sp_ref_f64 (scm_jit_state *j, jit_fpr_t dst, uint32_t src)
{
  size_t offset = src * 8;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  if (offset == 0)
    jit_ldr_d (j->jit, dst, SP);
  else
    jit_ldxi_d (j->jit, dst, SP, offset);

  record_fpr_clobber (j, dst);
}

static void
emit_sp_set_f64 (scm_jit_state *j, uint32_t dst, jit_fpr_t src)
{
  size_t offset = dst * 8;

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);

  if (offset == 0)
    jit_str_d (j->jit, SP, src);
  else
    jit_stxi_d (j->jit, offset, SP, src);

  set_sp_cache_fpr (j, dst, src);
}

static void
emit_mov (scm_jit_state *j, uint32_t dst, uint32_t src, jit_gpr_t t)
{
  emit_sp_ref_scm (j, t, src);
  emit_sp_set_scm (j, dst, t);

  /* FIXME: The compiler currently emits "push", "mov", etc for SCM,
     F64, U64, and S64 variables.  However SCM values are the usual
     case, and on a 32-bit machine it might be cheaper to move a SCM
     than to move a 64-bit number. */
  if (sizeof (void*) < sizeof (union scm_vm_stack_element))
    {
      /* Copy the high word as well. */
      uintptr_t src_offset = src * sizeof (union scm_vm_stack_element);
      uintptr_t dst_offset = dst * sizeof (union scm_vm_stack_element);
      jit_ldxi (j->jit, t, SP, src_offset + sizeof (void*));
      jit_stxi (j->jit, dst_offset + sizeof (void*), SP, t);
      clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);
    }
  else
    /* In any case since we move the register using GPRs, it won't be in
       a cached FPR. */
    clear_register_state (j, SP_CACHE_FPR);
}
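
/* The frame-size branches below exploit the layout described earlier:
   for a frame of N locals, FP - SP == N * sizeof (union
   scm_vm_stack_element).  E.g. on a 64-bit target a frame with 3 live
   locals has FP - SP == 24, so comparing the local count against
   NLOCALS reduces to comparing the byte difference against
   NLOCALS * 8, with no division needed.  */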
static jit_reloc_t
emit_branch_if_frame_locals_count_less_than (scm_jit_state *j, jit_gpr_t t,
                                             uint32_t nlocals)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER | FP_IN_REGISTER);

  emit_subr (j, t, FP, SP);
  return jit_blti (j->jit, t, nlocals * sizeof (union scm_vm_stack_element));
}

static jit_reloc_t
emit_branch_if_frame_locals_count_eq (scm_jit_state *j, jit_gpr_t t,
                                      uint32_t nlocals)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER | FP_IN_REGISTER);

  emit_subr (j, t, FP, SP);
  return jit_beqi (j->jit, t, nlocals * sizeof (union scm_vm_stack_element));
}

static jit_reloc_t
emit_branch_if_frame_locals_count_not_eq (scm_jit_state *j, jit_gpr_t t,
                                          uint32_t nlocals)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER | FP_IN_REGISTER);

  emit_subr (j, t, FP, SP);
  return jit_bnei (j->jit, t, nlocals * sizeof (union scm_vm_stack_element));
}

static jit_reloc_t
emit_branch_if_frame_locals_count_greater_than (scm_jit_state *j, jit_gpr_t t,
                                                uint32_t nlocals)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER | FP_IN_REGISTER);

  emit_subr (j, t, FP, SP);
  return jit_bgti (j->jit, t, nlocals * sizeof (union scm_vm_stack_element));
}

static void
emit_load_fp_slot (scm_jit_state *j, jit_gpr_t dst, uint32_t slot)
{
  ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);

  emit_subi (j, dst, FP, (slot + 1) * sizeof (union scm_vm_stack_element));
}

static jit_reloc_t
emit_branch_if_immediate (scm_jit_state *j, jit_gpr_t r)
{
  return jit_bmsi (j->jit, r, 6);
}
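
/* The immediate test relies on Guile's pointer tagging: heap objects
   are aligned pointers whose low three bits are zero, while immediates
   (fixnums, chars, booleans, etc.) always have bit 1 or bit 2 set.
   jit_bmsi branches if (r & 6) != 0, i.e. if either tag bit is set.  */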
static void
emit_load_heap_object_word (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t r,
                            uint32_t word)
{
  emit_ldxi (j, dst, r, word * sizeof(SCM));
}

static void
emit_load_heap_object_tc (scm_jit_state *j, jit_gpr_t dst, jit_gpr_t r,
                          scm_t_bits mask)
{
  emit_load_heap_object_word (j, dst, r, 0);
  emit_andi (j, dst, dst, mask);
}

static jit_reloc_t
emit_branch_if_heap_object_has_tc (scm_jit_state *j, jit_gpr_t r, jit_gpr_t t,
                                   scm_t_bits mask, scm_t_bits tc)
{
  emit_load_heap_object_tc (j, t, r, mask);
  return jit_beqi (j->jit, t, tc);
}

static jit_reloc_t
emit_branch_if_heap_object_not_tc (scm_jit_state *j, jit_gpr_t r, jit_gpr_t t,
                                   scm_t_bits mask, scm_t_bits tc)
{
  emit_load_heap_object_tc (j, t, r, mask);
  return jit_bnei (j->jit, t, tc);
}

static jit_reloc_t
emit_branch_if_heap_object_has_tc7 (scm_jit_state *j, jit_gpr_t r, jit_gpr_t t,
                                    scm_t_bits tc7)
{
  return emit_branch_if_heap_object_has_tc (j, r, t, 0x7f, tc7);
}

static jit_reloc_t
emit_branch_if_heap_object_not_tc7 (scm_jit_state *j, jit_gpr_t r, jit_gpr_t t,
                                    scm_t_bits tc7)
{
  return emit_branch_if_heap_object_not_tc (j, r, t, 0x7f, tc7);
}
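
/* A heap object's type tag lives in the low bits of its first word;
   the tc7 tag is the low seven bits, hence the 0x7f mask.  So a check
   like "is this a values object" compiles to: load word 0, mask with
   0x7f, compare against scm_tc7_values, as used in compile_subr_call
   below.  */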
static void
emit_entry_trampoline (scm_jit_state *j)
{
  size_t align = jit_enter_jit_abi(j->jit, 3, 0, 0);

  /* Load our reserved registers: THREAD and SP.  Also load IP for the
     mcode jump. */
  jit_load_args_2 (j->jit, thread_operand (),
                   jit_operand_gpr (JIT_OPERAND_ABI_POINTER, T0));
  emit_reload_sp (j);

  /* Load FP, set during call sequences. */
  emit_reload_fp (j);

  /* Jump to the mcode! */
  jit_jmpr (j->jit, T0);

  /* Initialize global exit_mcode to point here. */
  exit_mcode = jit_address (j->jit);

  jit_leave_jit_abi(j->jit, 3, 0, align);

  /* When mcode finishes, interpreter will continue with vp->ip. */
  jit_ret (j->jit);
}
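
/* Seen from C, the trampoline above behaves like a function of two
   arguments -- the thread and the mcode address to enter -- that only
   returns once the generated code jumps to exit_mcode.  A minimal
   sketch of the call site, with a function-pointer typedef invented
   purely for illustration:

     typedef void (*enter_mcode_t) (scm_thread *thread,
                                    const uint8_t *mcode);
     ((enter_mcode_t) entry_trampoline_address) (thread, mcode);

   The mcode itself never returns through the C ABI; it exits by
   jumping to exit_mcode, which restores the ABI frame and returns
   here with the next IP already stored in vp->ip.  */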
static void
emit_handle_interrupts_trampoline (scm_jit_state *j)
{
  /* Precondition: IP synced. */
  jit_pop_link_register (j->jit);
  emit_call_2 (j, scm_vm_intrinsics.push_interrupt_frame,
               thread_operand (),
               jit_operand_gpr (JIT_OPERAND_ABI_POINTER, JIT_LR));
  emit_reload_sp (j);
  emit_reload_fp (j);
  emit_direct_tail_call (j, scm_vm_intrinsics.handle_interrupt_code);
}

/* To limit the number of mmap calls and re-emission of JIT code, use
   256 kB code arenas.  Unused pages won't be resident.  Assume pages
   are power-of-two-sized and this size is a multiple of the page size
   on all architectures. */
static const size_t default_code_arena_size = 0x40000;

static struct code_arena *
allocate_code_arena (size_t size, struct code_arena *prev)
{
  struct code_arena *ret = malloc (sizeof (struct code_arena));

  if (!ret) return NULL;

  memset (ret, 0, sizeof (*ret));
  ret->used = 0;
  ret->size = size;
  ret->prev = prev;
#ifndef __MINGW32__
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined __APPLE__ && HAVE_PTHREAD_JIT_WRITE_PROTECT_NP
  flags |= MAP_JIT;
#endif
  ret->base = mmap (NULL, ret->size,
                    PROT_EXEC | PROT_READ | PROT_WRITE,
                    flags, -1, 0);
  if (ret->base == MAP_FAILED)
    {
      perror ("allocating JIT code buffer failed");
      free (ret);
      return NULL;
    }
#else
  ret->handle = CreateFileMappingA(INVALID_HANDLE_VALUE, NULL,
                                   PAGE_EXECUTE_READWRITE,
                                   size >> 32, size & 0xffffffff, NULL);
  if (ret->handle == NULL)
    {
      fprintf (stderr, "allocating JIT code buffer failed: %lu\n",
               GetLastError());
      free (ret);
      return NULL;
    }
  ret->base = MapViewOfFile (ret->handle,
                             FILE_MAP_WRITE | FILE_MAP_EXECUTE | FILE_MAP_COPY,
                             0, 0, size);
  if (ret->base == NULL)
    {
      CloseHandle (ret->handle);
      fprintf (stderr, "memory mapping JIT code buffer failed: %lu\n",
               GetLastError());
      free (ret);
      return NULL;
    }
#endif

  INFO ("allocated code arena, %p-%p\n", ret->base, ret->base + ret->size);

  return ret;
}
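
/* Emission strategy for emit_code below: try to assemble into the tail
   of the current arena; if the assembler overflows, reset it and
   retry.  If the arena was empty, the code simply doesn't fit, so
   replace the arena with one twice the size; otherwise chain a fresh
   arena of the same size onto the list and try again.  */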
static void *
emit_code (scm_jit_state *j, void (*emit) (scm_jit_state *))
{
  if (!j->code_arena)
    j->code_arena = allocate_code_arena (default_code_arena_size, NULL);

  if (!j->code_arena)
    /* Resource exhaustion; turn off JIT. */
    return NULL;

  while (1)
    {
      struct code_arena *arena = j->code_arena;

      jit_begin(j->jit, arena->base + arena->used, arena->size - arena->used);

      uint8_t *ret = jit_address (j->jit);

#if defined __APPLE__ && HAVE_PTHREAD_JIT_WRITE_PROTECT_NP
      pthread_jit_write_protect_np(0);
#endif

      emit (j);

      size_t size;
      if (!jit_has_overflow (j->jit) && jit_end (j->jit, &size))
        {
#if defined __APPLE__ && HAVE_PTHREAD_JIT_WRITE_PROTECT_NP
          /* Protect the code arena again.  It is left unprotected
             across emit() since jit_end() also writes to the code
             arena. */
          pthread_jit_write_protect_np(1);
          sys_icache_invalidate(arena->base, arena->size);
#endif

          ASSERT (size <= (arena->size - arena->used));
          DEBUG ("mcode: %p,+%zu\n", ret, size);
          arena->used += size;
          /* Align next JIT to 16-byte boundaries to optimize initial
             icache fetch. */
          arena->used = (arena->used + 15) & ~15;
          /* Assertion should not be invalidated as arena size is a
             multiple of 16. */
          ASSERT (arena->used <= arena->size);
          return ret;
        }
      else
        {
#if defined __APPLE__ && HAVE_PTHREAD_JIT_WRITE_PROTECT_NP
          /* Protect the code arena again. */
          pthread_jit_write_protect_np(1);
          sys_icache_invalidate(arena->base, arena->size);
#endif
          jit_reset (j->jit);
          if (arena->used == 0)
            {
              /* Code too big to fit into empty arena; allocate a
                 larger one. */
              INFO ("code didn't fit in empty arena of size %zu\n",
                    arena->size);
              arena = allocate_code_arena (arena->size * 2, arena->prev);
              if (!arena)
                return NULL;
#ifndef __MINGW32__
              munmap (j->code_arena->base, j->code_arena->size);
#else
              UnmapViewOfFile (j->code_arena->base);
              CloseHandle (j->code_arena->handle);
#endif
              free (j->code_arena);
              j->code_arena = arena;
            }
          else
            {
              /* Arena full; allocate another. */
              /* FIXME: If partial code that we wrote crosses a page
                 boundary, we could tell the OS to forget about the
                 tail pages. */
              INFO ("code didn't fit in arena tail %zu\n",
                    arena->size - arena->used);
              arena = allocate_code_arena (arena->size, arena);
              if (!arena)
                return NULL;
              j->code_arena = arena;
            }
        }
    }
}
static jit_operand_t
free_variable_operand (scm_jit_state *j, jit_gpr_t src, size_t n)
{
  ptrdiff_t offset = (n + program_word_offset_free_variable) * sizeof(SCM);
  return jit_operand_mem (JIT_OPERAND_ABI_POINTER, src, offset);
}

static void
add_pending_reloc (scm_jit_state *j, jit_reloc_t reloc, ptrdiff_t offset)
{
  if (j->reloc_idx >= j->reloc_count)
    {
      size_t count = j->reloc_count * 2;
      if (!count) count = 10;
      size_t size = sizeof(*j->relocs) * count;
      ASSERT(size / sizeof(*j->relocs) == count);
      struct pending_reloc *relocs = realloc (j->relocs, size);
      if (relocs)
        {
          j->reloc_count = count;
          j->relocs = relocs;
        }
    }

  ASSERT (j->reloc_idx < j->reloc_count);
  ASSERT (0 <= offset && offset < (j->end - j->start) * 2);
  j->relocs[j->reloc_idx].reloc = reloc;
  j->relocs[j->reloc_idx].target_label_offset = offset;
  j->reloc_idx++;
}
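
/* The labels array holds two entries per VM instruction -- one for the
   inline (fast) code and one for its out-of-line slow path -- which is
   why a valid label offset is bounded by (j->end - j->start) * 2.  The
   inline_label_offset and slow_label_offset helpers, defined earlier
   in this file, map an instruction offset into the corresponding
   half.  */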
static void
add_inter_instruction_patch (scm_jit_state *j, jit_reloc_t reloc,
                             const uint32_t *target)
{
  ASSERT (j->start <= target && target < j->end);
  ptrdiff_t offset = inline_label_offset (target - j->start);

  if (j->labels[offset])
    {
      jit_patch_there (j->jit, reloc, j->labels[offset]);
      return;
    }

  add_pending_reloc (j, reloc, offset);
}

static void
add_slow_path_patch (scm_jit_state *j, jit_reloc_t reloc)
{
  ASSERT (j->start <= j->ip && j->ip < j->end);
  ptrdiff_t offset = slow_label_offset (j->ip - j->start);
  add_pending_reloc (j, reloc, offset);
}

static void
continue_after_slow_path (scm_jit_state *j, const uint32_t *target)
{
  void *label = j->labels[inline_label_offset (target - j->start)];
  ASSERT (label);
  restore_reloadable_register_state (j, SP_IN_REGISTER | FP_IN_REGISTER);
  jit_jmpi (j->jit, label);
}

static void
bad_instruction (scm_jit_state *j)
{
  ASSERT (0);
}
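
/* From here on, each VM instruction is handled by a pair of emitters:
   compile_FOO generates the inline fast path, and compile_FOO_slow
   generates its out-of-line slow path.  Fast paths branch to the slow
   path via add_slow_path_patch; slow paths either call an error
   intrinsic that does not return, or do the heavyweight work and jump
   back with continue_after_slow_path.  An empty _slow body just means
   the instruction has no slow path.  */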
static void
compile_halt (scm_jit_state *j)
{
  bad_instruction (j);
}

static void
compile_halt_slow (scm_jit_state *j)
{
}

static void
compile_call (scm_jit_state *j, uint32_t proc, uint32_t nlocals)
{
  jit_reloc_t push_frame = jit_jmp (j->jit);

  void *trampoline = jit_address (j->jit);
  reset_register_state (j, FP_IN_REGISTER | SP_IN_REGISTER);
  jit_pop_link_register (j->jit);
  emit_store_mra (j, FP, JIT_LR);
  emit_indirect_tail_call (j);

  jit_patch_here (j->jit, push_frame);

  /* 2 = size of call inst */
  emit_push_frame (j, proc, nlocals, j->ip + 2);
  jit_jmpi_with_link (j->jit, trampoline);

  reset_register_state (j, FP_IN_REGISTER | SP_IN_REGISTER);
  j->frame_size_min = proc;
  j->frame_size_max = INT32_MAX;
}

static void
compile_call_slow (scm_jit_state *j, uint32_t proc, uint32_t nlocals)
{
}

static void
compile_call_label (scm_jit_state *j, uint32_t proc, uint32_t nlocals, const uint32_t *vcode)
{
  jit_reloc_t push_frame = jit_jmp (j->jit);

  void *trampoline = jit_address (j->jit);
  reset_register_state (j, FP_IN_REGISTER | SP_IN_REGISTER);
  jit_pop_link_register (j->jit);
  emit_store_mra (j, FP, JIT_LR);
  emit_direct_tail_call (j, vcode);

  jit_patch_here (j->jit, push_frame);

  /* 3 = size of call-label inst */
  emit_push_frame (j, proc, nlocals, j->ip + 3);
  jit_jmpi_with_link (j->jit, trampoline);

  reset_register_state (j, FP_IN_REGISTER | SP_IN_REGISTER);
  j->frame_size_min = proc;
  j->frame_size_max = INT32_MAX;
}

static void
compile_call_label_slow (scm_jit_state *j, uint32_t proc, uint32_t nlocals, const uint32_t *vcode)
{
}
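
/* The call sequences above first jump over a small local trampoline,
   push the new frame, and then enter the trampoline with
   jit_jmpi_with_link, which deposits the address of the following
   instruction in the link register.  The trampoline pops that address,
   records it in the new frame as the machine return address (mra), and
   tail-calls the callee; when the callee returns, control resumes
   immediately after the jit_jmpi_with_link.  */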
static void
compile_tail_call (scm_jit_state *j)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  restore_reloadable_register_state (j, FP_IN_REGISTER);
  emit_indirect_tail_call (j);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}

static void
compile_tail_call_slow (scm_jit_state *j)
{
}

static void
compile_tail_call_label (scm_jit_state *j, const uint32_t *vcode)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
  restore_reloadable_register_state (j, FP_IN_REGISTER);
  emit_direct_tail_call (j, vcode);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}

static void
compile_tail_call_label_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_instrument_entry (scm_jit_state *j, void *data)
{
}

static void
compile_instrument_entry_slow (scm_jit_state *j, void *data)
{
}

static void
compile_instrument_loop (scm_jit_state *j, void *data)
{
  /* Nothing to do. */
}

static void
compile_instrument_loop_slow (scm_jit_state *j, void *data)
{
}

static void
compile_receive (scm_jit_state *j, uint32_t dst, uint32_t proc, uint32_t nlocals)
{
  jit_gpr_t t = T0;

  add_slow_path_patch
    (j, emit_branch_if_frame_locals_count_less_than (j, t, proc + 1));
  emit_fp_ref_scm (j, t, proc);
  emit_fp_set_scm (j, dst, t);
  emit_reset_frame (j, nlocals);

  j->frame_size_min = j->frame_size_max = nlocals;
}

static void
compile_receive_slow (scm_jit_state *j, uint32_t dst, uint32_t proc, uint32_t nlocals)
{
  emit_store_current_ip (j, T0);
  emit_call_0 (j, scm_vm_intrinsics.error_no_values);
}

static void
compile_receive_values (scm_jit_state *j, uint32_t proc, uint8_t allow_extra,
                        uint32_t nvalues)
{
  jit_gpr_t t = T0;

  /* Although most uses of receive-values are after a call returns, the
     baseline compiler will sometimes emit it elsewhere.  In that case
     ensure that FP is in a register for the frame-locals-count
     branches. */
  restore_reloadable_register_state (j, FP_IN_REGISTER);

  if (allow_extra)
    add_slow_path_patch
      (j, emit_branch_if_frame_locals_count_less_than (j, t, proc + nvalues));
  else
    add_slow_path_patch
      (j, emit_branch_if_frame_locals_count_not_eq (j, t, proc + nvalues));

  j->frame_size_min = proc + nvalues;
  j->frame_size_max = allow_extra ? INT32_MAX : j->frame_size_min;
  clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);
}

static void
compile_receive_values_slow (scm_jit_state *j, uint32_t proc, uint8_t allow_extra,
                             uint32_t nvalues)
{
  emit_store_current_ip (j, T0);
  if (allow_extra)
    emit_call_0 (j, scm_vm_intrinsics.error_not_enough_values);
  else
    emit_call_1 (j, scm_vm_intrinsics.error_wrong_number_of_values,
                 jit_operand_imm (JIT_OPERAND_ABI_UINT32, nvalues));
}

static void
compile_shuffle_down (scm_jit_state *j, uint32_t from, uint32_t to)
{
  jit_gpr_t walk = T0, t = T1;
  size_t offset = (from - to) * sizeof (union scm_vm_stack_element);

  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER | FP_IN_REGISTER);

  emit_load_fp_slot (j, walk, from);
  jit_reloc_t done = jit_bltr (j->jit, walk, SP);
  void *head = jit_address (j->jit);
  jit_ldr (j->jit, t, walk);
  jit_stxi (j->jit, offset, walk, t);
  jit_subi (j->jit, walk, walk, sizeof (union scm_vm_stack_element));
  jit_patch_there (j->jit, jit_bger (j->jit, walk, SP), head);
  jit_patch_here (j->jit, done);

  jit_addi (j->jit, SP, SP, offset);
  emit_store_sp (j);
  clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);

  j->frame_size_min -= (from - to);
  if (j->frame_size_max != INT32_MAX)
    j->frame_size_max -= (from - to);
}
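
/* shuffle-down moves the locals from slot FROM onward down to start at
   slot TO.  The loop above walks from fp slot FROM toward SP, copying
   each element up by (FROM - TO) slots in memory (the stack grows
   down, so a higher address means a lower slot number), then pops the
   vacated slots by moving SP up by the same amount.  */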
static void
compile_shuffle_down_slow (scm_jit_state *j, uint32_t from, uint32_t to)
{
}

static void
compile_return_values (scm_jit_state *j)
{
  emit_pop_fp (j, OLD_FP_FOR_RETURN_TRAMPOLINE);
  emit_load_mra (j, JIT_LR, OLD_FP_FOR_RETURN_TRAMPOLINE);
  jit_push_link_register (j->jit);
  jit_ret (j->jit);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}

static void
compile_return_values_slow (scm_jit_state *j)
{
}

static void
emit_return_to_interpreter_trampoline (scm_jit_state *j)
{
  jit_gpr_t ra = T1;

  emit_load_vra (j, ra, OLD_FP_FOR_RETURN_TRAMPOLINE);
  emit_store_ip (j, ra);
  emit_exit (j);
}

static void
compile_subr_call (scm_jit_state *j, uint32_t idx)
{
  jit_gpr_t t = T0, ret = T1;
  void *subr;
  jit_reloc_t immediate;
  jit_operand_t args[SCM_GSUBR_MAX];

  ASSERT (j->frame_size_min == j->frame_size_max);
  size_t argc = j->frame_size_max - 1;
  ASSERT (argc <= SCM_GSUBR_MAX);

  subr = scm_subr_function_by_index (idx);
  emit_store_current_ip (j, t);
  for (size_t i = 2; i <= j->frame_size_max; i++)
    args[i - 2] = sp_scm_operand (j, (j->frame_size_max - i));
  jit_calli (j->jit, subr, argc, args);
  clear_scratch_register_state (j);
  jit_retval (j->jit, ret);

  immediate = emit_branch_if_immediate (j, ret);
  add_slow_path_patch
    (j, emit_branch_if_heap_object_has_tc7 (j, ret, t, scm_tc7_values));

  jit_patch_here (j->jit, immediate);
  emit_reload_fp (j);
  emit_subtract_stack_slots (j, SP, FP, 1);
  set_register_state (j, SP_IN_REGISTER);
  emit_store_sp (j);
  jit_str (j->jit, SP, ret);
  clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}
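
/* A subr call returns a single SCM.  If the result is an immediate it
   cannot be a multiple-values object, so the tc7 check is skipped;
   otherwise a result tagged scm_tc7_values takes the slow path below,
   which asks the unpack_values_object intrinsic to spread the values
   onto the stack before rejoining the fast path.  */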
static void
compile_subr_call_slow (scm_jit_state *j, uint32_t idx)
{
  jit_gpr_t ret = T1;

  emit_call_2 (j, scm_vm_intrinsics.unpack_values_object, thread_operand (),
               jit_operand_gpr (JIT_OPERAND_ABI_POINTER, ret));
  continue_after_slow_path (j, j->next_ip);
}

static void
compile_foreign_call (scm_jit_state *j, uint32_t cif_idx, uint32_t ptr_idx)
{
  uint32_t saved_state;

  ASSERT (j->frame_size_min == j->frame_size_max);

  emit_store_current_ip (j, T0);
  emit_sp_ref_scm (j, T0, j->frame_size_min - 1);

  /* FIXME: Inline the foreign call. */
  saved_state = save_reloadable_register_state (j);
  emit_call_3 (j, scm_vm_intrinsics.foreign_call, thread_operand (),
               free_variable_operand (j, T0, cif_idx),
               free_variable_operand (j, T0, ptr_idx));
  restore_reloadable_register_state (j, saved_state);

  j->frame_size_min = j->frame_size_max = 2; /* Return value and errno. */
}

static void
compile_foreign_call_slow (scm_jit_state *j, uint32_t cif_idx, uint32_t ptr_idx)
{
}

static void
compile_continuation_call (scm_jit_state *j, uint32_t contregs_idx)
{
  emit_reload_fp (j);
  emit_store_current_ip (j, T0);
  emit_fp_ref_scm (j, T0, 0);
  emit_call_2 (j, scm_vm_intrinsics.reinstate_continuation_x,
               thread_operand (), free_variable_operand (j, T0, contregs_idx));
  /* Does not fall through. */

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}

static void
compile_continuation_call_slow (scm_jit_state *j, uint32_t contregs_idx)
{
}

static void
compile_compose_continuation (scm_jit_state *j, uint32_t cont_idx)
{
  ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER | FP_IN_REGISTER);
  emit_store_current_ip (j, T0);
  emit_fp_ref_scm (j, T0, 0);
  emit_call_2 (j, scm_vm_intrinsics.compose_continuation,
               thread_operand (), free_variable_operand (j, T0, cont_idx));
  jit_retval (j->jit, T0);
  add_slow_path_patch (j, jit_beqi (j->jit, T0, 0));

  emit_reload_sp (j);
  emit_reload_fp (j);
  jit_jmpr (j->jit, T0);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}

static void
compile_compose_continuation_slow (scm_jit_state *j, uint32_t cont_idx)
{
  emit_exit (j);
}

static void
compile_capture_continuation (scm_jit_state *j, uint32_t dst)
{
  emit_store_current_ip (j, T0);
  emit_call_1 (j, scm_vm_intrinsics.capture_continuation, thread_operand ());
  jit_retval (j->jit, T0);
  emit_reload_sp (j);
  emit_reload_fp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_capture_continuation_slow (scm_jit_state *j, uint32_t dst)
{
}

static void
compile_abort (scm_jit_state *j)
{
  jit_movi (j->jit, T0, (intptr_t) (j->ip + 1));
  emit_store_ip (j, T0);
  jit_reloc_t k = jit_mov_addr (j->jit, T0);
  emit_call_2 (j, scm_vm_intrinsics.abort_to_prompt, thread_operand (),
               jit_operand_gpr (JIT_OPERAND_ABI_POINTER, T0));
  jit_retval (j->jit, T1_PRESERVED);
  add_slow_path_patch(j, jit_beqi (j->jit, T1_PRESERVED, 0));

  emit_reload_sp (j);
  emit_reload_fp (j);
  jit_jmpr (j->jit, T1_PRESERVED);

  jit_patch_here (j->jit, k);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;
}
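
/* In compile_abort, jit_mov_addr loads T0 with the address of a label
   that is only resolved by the jit_patch_here at the end; that address
   is handed to abort_to_prompt so that execution can later resume at
   exactly that point in this mcode.  If the intrinsic returns mcode
   for the handler, we jump straight there; a null return means the
   handler has no mcode, so the slow path exits to the interpreter.  */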
static void
compile_abort_slow (scm_jit_state *j)
{
  emit_exit (j);
}

static void
compile_builtin_ref (scm_jit_state *j, uint32_t dst, uint16_t idx)
{
  SCM builtin = scm_vm_builtin_ref (idx);

  emit_movi (j, T0, SCM_UNPACK (builtin));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_builtin_ref_slow (scm_jit_state *j, uint32_t dst, uint16_t idx)
{
}

static void
compile_throw (scm_jit_state *j, uint32_t key, uint32_t args)
{
  emit_store_current_ip (j, T0);
  emit_call_2 (j, scm_vm_intrinsics.throw_, sp_scm_operand (j, key),
               sp_scm_operand (j, args));
  /* throw_ does not return. */
  set_register_state (j, UNREACHABLE);
}

static void
compile_throw_slow (scm_jit_state *j, uint32_t key, uint32_t args)
{
}

static void
compile_throw_value (scm_jit_state *j, uint32_t val,
                     const void *key_subr_and_message)
{
  emit_store_current_ip (j, T0);
  emit_call_2 (j, scm_vm_intrinsics.throw_with_value, sp_scm_operand (j, val),
               jit_operand_imm (JIT_OPERAND_ABI_POINTER,
                                (intptr_t) key_subr_and_message));
  /* Like throw_, throw_with_value does not return. */
  set_register_state (j, UNREACHABLE);
}

static void
compile_throw_value_slow (scm_jit_state *j, uint32_t val,
                          const void *key_subr_and_message)
{
}

static void
compile_throw_value_and_data (scm_jit_state *j, uint32_t val,
                              const void *key_subr_and_message)
{
  emit_store_current_ip (j, T0);
  emit_call_2 (j, scm_vm_intrinsics.throw_with_value_and_data,
               sp_scm_operand (j, val),
               jit_operand_imm (JIT_OPERAND_ABI_POINTER,
                                (intptr_t) key_subr_and_message));
  /* Like throw_, throw_with_value_and_data does not return. */
  set_register_state (j, UNREACHABLE);
}

static void
compile_throw_value_and_data_slow (scm_jit_state *j, uint32_t val,
                                   const void *key_subr_and_message)
{
}

static void
compile_unreachable (scm_jit_state *j)
{
  jit_breakpoint (j->jit);
  set_register_state (j, UNREACHABLE);
}

static void
compile_unreachable_slow (scm_jit_state *j)
{
}

static void
compile_assert_nargs_ee (scm_jit_state *j, uint32_t nlocals)
{
  add_slow_path_patch
    (j, emit_branch_if_frame_locals_count_not_eq (j, T0, nlocals));

  j->frame_size_min = j->frame_size_max = nlocals;
}

static void
compile_assert_nargs_ee_slow (scm_jit_state *j, uint32_t nlocals)
{
  emit_store_current_ip (j, T0);
  emit_call_1 (j, scm_vm_intrinsics.error_wrong_num_args,
               thread_operand ());
}

static void
compile_assert_nargs_ge (scm_jit_state *j, uint32_t nlocals)
{
  if (nlocals > 0)
    add_slow_path_patch
      (j, emit_branch_if_frame_locals_count_less_than (j, T0, nlocals));

  j->frame_size_min = nlocals;
}

static void
compile_assert_nargs_ge_slow (scm_jit_state *j, uint32_t nlocals)
{
  emit_store_current_ip (j, T0);
  emit_call_1 (j, scm_vm_intrinsics.error_wrong_num_args,
               thread_operand ());
}

static void
compile_assert_nargs_le (scm_jit_state *j, uint32_t nlocals)
{
  add_slow_path_patch
    (j, emit_branch_if_frame_locals_count_greater_than (j, T0, nlocals));

  j->frame_size_max = nlocals;
}

static void
compile_assert_nargs_le_slow (scm_jit_state *j, uint32_t nlocals)
{
  emit_store_current_ip (j, T0);
  emit_call_1 (j, scm_vm_intrinsics.error_wrong_num_args,
               thread_operand ());
}

static void
compile_alloc_frame (scm_jit_state *j, uint32_t nlocals)
{
  ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER);
  emit_subtract_stack_slots (j, SP, FP, nlocals);
  set_register_state (j, SP_IN_REGISTER);
  add_slow_path_patch (j, emit_alloc_frame_for_sp_fast (j, T0));

  j->frame_size_min = j->frame_size_max = nlocals;
}

static void
compile_alloc_frame_slow (scm_jit_state *j, uint32_t nlocals)
{
  emit_alloc_frame_for_sp_slow (j, T0);
  continue_after_slow_path (j, j->next_ip);
}

static void
compile_reset_frame (scm_jit_state *j, uint32_t nlocals)
{
  restore_reloadable_register_state (j, FP_IN_REGISTER);
  emit_reset_frame (j, nlocals);

  j->frame_size_min = j->frame_size_max = nlocals;
}

static void
compile_reset_frame_slow (scm_jit_state *j, uint32_t nlocals)
{
}

static void
compile_push (scm_jit_state *j, uint32_t src)
{
  UNREACHABLE ();
}

static void
compile_push_slow (scm_jit_state *j, uint32_t src)
{
  UNREACHABLE ();
}

static void
compile_pop (scm_jit_state *j, uint32_t dst)
{
  UNREACHABLE ();
}

static void
compile_pop_slow (scm_jit_state *j, uint32_t dst)
{
  UNREACHABLE ();
}

static void
compile_drop (scm_jit_state *j, uint32_t nvalues)
{
  UNREACHABLE ();
}

static void
compile_drop_slow (scm_jit_state *j, uint32_t nvalues)
{
  UNREACHABLE ();
}

static void
compile_assert_nargs_ee_locals (scm_jit_state *j, uint32_t expected,
                                uint32_t nlocals)
{
  jit_gpr_t t = T0;

  ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER | SP_IN_REGISTER);
  if (nlocals)
    {
      emit_subtract_stack_slots (j, SP, SP, nlocals);
      set_register_state (j, SP_IN_REGISTER);
    }
  add_slow_path_patch
    (j, emit_branch_if_frame_locals_count_not_eq (j, t, expected + nlocals));
  if (nlocals)
    add_slow_path_patch (j, emit_alloc_frame_for_sp_fast (j, t));

  j->frame_size_min = j->frame_size_max = expected + nlocals;
}

static void
compile_assert_nargs_ee_locals_slow (scm_jit_state *j, uint32_t expected,
                                     uint32_t nlocals)
{
  jit_gpr_t t = T0;

  reset_register_state (j, SP_IN_REGISTER | FP_IN_REGISTER);
  jit_reloc_t args_ok =
    emit_branch_if_frame_locals_count_eq (j, t, expected + nlocals);
  emit_store_current_ip (j, t);
  emit_call_1 (j, scm_vm_intrinsics.error_wrong_num_args,
               thread_operand ());
  jit_patch_here (j->jit, args_ok);

  if (nlocals)
    emit_alloc_frame_for_sp_slow (j, t);

  continue_after_slow_path (j, j->next_ip);
}

static void
compile_expand_apply_argument (scm_jit_state *j)
{
  emit_store_current_ip (j, T0);
  emit_call_1 (j, scm_vm_intrinsics.expand_apply_argument, thread_operand ());
  emit_reload_sp (j);
  emit_reload_fp (j);

  j->frame_size_min--;
  j->frame_size_max = INT32_MAX;
}

static void
compile_expand_apply_argument_slow (scm_jit_state *j)
{
}

static void
compile_bind_kwargs (scm_jit_state *j, uint32_t nreq, uint8_t flags,
                     uint32_t nreq_and_opt, uint32_t ntotal, const void *kw)
{
  uint8_t allow_other_keys = flags & 0x1, has_rest = flags & 0x2;
  jit_gpr_t t = T0, npositional = T1;

  emit_store_current_ip (j, t);

  emit_call_3 (j, scm_vm_intrinsics.compute_kwargs_npositional,
               thread_operand (),
               jit_operand_imm (JIT_OPERAND_ABI_UINT32, nreq),
               jit_operand_imm (JIT_OPERAND_ABI_UINT32, nreq_and_opt - nreq));
  jit_retval_i (j->jit, npositional);

  jit_operand_t args[] =
    { jit_operand_gpr (JIT_OPERAND_ABI_POINTER, THREAD),
      jit_operand_gpr (JIT_OPERAND_ABI_UINT32, npositional),
      jit_operand_imm (JIT_OPERAND_ABI_UINT32, ntotal),
      jit_operand_imm (JIT_OPERAND_ABI_POINTER, (intptr_t)kw),
      jit_operand_imm (JIT_OPERAND_ABI_UINT8, !has_rest),
      jit_operand_imm (JIT_OPERAND_ABI_UINT8, allow_other_keys) };
  jit_calli (j->jit, scm_vm_intrinsics.bind_kwargs, 6, args);
  clear_scratch_register_state (j);

  if (has_rest)
    {
      emit_call_2 (j, scm_vm_intrinsics.cons_rest, thread_operand (),
                   jit_operand_imm (JIT_OPERAND_ABI_UINT32, ntotal));
      jit_retval (j->jit, t);
      emit_reload_fp (j);
      emit_fp_set_scm (j, nreq_and_opt, t);
    }
  else
    emit_reload_fp (j);

  emit_reset_frame (j, ntotal);

  j->frame_size_min = j->frame_size_max = ntotal;
}

static void
compile_bind_kwargs_slow (scm_jit_state *j, uint32_t nreq, uint8_t flags,
                          uint32_t nreq_and_opt, uint32_t ntotal, const void *kw)
{
}

static void
compile_bind_rest (scm_jit_state *j, uint32_t dst)
{
  jit_reloc_t k, cons;
  jit_gpr_t t = T1;

  /* As with receive-values, although bind-rest is usually used after a
     call returns, the baseline compiler will sometimes emit it
     elsewhere.  In that case ensure that FP is in a register for the
     frame-locals-count branches. */
  restore_reloadable_register_state (j, FP_IN_REGISTER);

  cons = emit_branch_if_frame_locals_count_greater_than (j, t, dst);

  emit_alloc_frame (j, t, dst + 1);
  emit_movi (j, t, SCM_UNPACK (SCM_EOL));
  emit_sp_set_scm (j, 0, t);
  k = jit_jmp (j->jit);

  jit_patch_here (j->jit, cons);
  emit_store_current_ip (j, t);
  emit_call_2 (j, scm_vm_intrinsics.cons_rest, thread_operand (),
               jit_operand_imm (JIT_OPERAND_ABI_UINT32, dst));
  emit_retval (j, t);
  compile_reset_frame (j, dst + 1);
  emit_sp_set_scm (j, 0, t);

  jit_patch_here (j->jit, k);

  j->frame_size_min = dst + 1;
}
static void
compile_bind_rest_slow (scm_jit_state *j, uint32_t dst)
{
}

static void
compile_bind_optionals (scm_jit_state *j, uint32_t nlocals)
{
  ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER | SP_IN_REGISTER);
  ASSERT(j->frame_size_min < nlocals);
  ASSERT(j->frame_size_min < j->frame_size_max);

  jit_gpr_t saved_frame_size = T1_PRESERVED;
  jit_subr (j->jit, saved_frame_size, FP, SP);

  jit_reloc_t no_optionals = jit_bgei
    (j->jit, saved_frame_size, nlocals * sizeof (union scm_vm_stack_element));

  emit_alloc_frame (j, T0, nlocals);
  j->frame_size_min = nlocals;

  jit_gpr_t walk = saved_frame_size;
  jit_subr (j->jit, walk, FP, saved_frame_size);

  jit_reloc_t done = jit_bler (j->jit, walk, SP);
  jit_movi (j->jit, T0, SCM_UNPACK (SCM_UNDEFINED));

  void *head = jit_address (j->jit);
  jit_subi (j->jit, walk, walk, sizeof (union scm_vm_stack_element));
  jit_str (j->jit, walk, T0);
  jit_patch_there (j->jit, jit_bner (j->jit, walk, SP), head);

  jit_patch_here (j->jit, done);
  jit_patch_here (j->jit, no_optionals);
}
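
/* bind-optionals grows the frame to NLOCALS slots when fewer arguments
   were passed, then fills each newly exposed slot with SCM_UNDEFINED
   by walking from the old SP (recomputed as FP minus the saved frame
   size) down to the new SP.  If the frame already had at least NLOCALS
   slots, the whole sequence is skipped.  */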
static void
compile_bind_optionals_slow (scm_jit_state *j, uint32_t nlocals)
{
}

static void
compile_allocate_words (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
  jit_gpr_t t = T0;

  emit_store_current_ip (j, t);
  emit_call_2 (j, scm_vm_intrinsics.allocate_words, thread_operand (),
               sp_sz_operand (j, nwords));
  emit_retval (j, t);
  record_gpr_clobber (j, t);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, t);
}

static void
compile_allocate_words_slow (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
}

static void
compile_allocate_words_immediate (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
  size_t bytes = nwords * sizeof(SCM);
  size_t idx = scm_inline_gc_bytes_to_freelist_index (bytes);

  if (SCM_UNLIKELY (idx >= SCM_INLINE_GC_FREELIST_COUNT))
    {
      jit_gpr_t t = T0;
      emit_store_current_ip (j, t);
      emit_call_1 (j, GC_malloc, jit_operand_imm (JIT_OPERAND_ABI_WORD, bytes));
      emit_retval (j, t);
      emit_reload_sp (j);
      emit_sp_set_scm (j, dst, t);
    }
  else
    {
      jit_gpr_t res = T0;
      ptrdiff_t offset = offsetof(struct scm_thread, freelists);
      offset += idx * sizeof(void*);

      emit_ldxi (j, res, THREAD, offset);
      add_slow_path_patch (j, jit_beqi (j->jit, res, 0));

      jit_gpr_t new_freelist = T1;
      emit_ldr (j, new_freelist, res);
      jit_stxi (j->jit, offset, THREAD, new_freelist);

      emit_sp_set_scm (j, dst, res);
    }
}
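
/* The fast path above is a freelist pop: the per-thread freelist for
   this size class holds a chain of free blocks whose first word points
   to the next block.  In C terms, roughly:

     void **head = thread->freelists[idx];
     if (!head)
       goto slow;                      /. refill via intrinsic ./
     thread->freelists[idx] = *head;   /. pop ./
     result = head;

   The slow path calls allocate_words_with_freelist, which refills the
   freelist (possibly collecting) and returns a fresh object.  */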
static void
compile_allocate_words_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
  size_t bytes = nwords * sizeof(SCM);
  size_t idx = scm_inline_gc_bytes_to_freelist_index (bytes);

  if (SCM_UNLIKELY (idx >= SCM_INLINE_GC_FREELIST_COUNT))
    {
    }
  else
    {
      jit_gpr_t res = T0;
      emit_store_current_ip (j, res);
      emit_call_2 (j, scm_vm_intrinsics.allocate_words_with_freelist,
                   thread_operand (),
                   jit_operand_imm (JIT_OPERAND_ABI_WORD, idx));
      emit_retval (j, res);
      emit_reload_sp (j);
      emit_sp_set_scm (j, dst, res);

      continue_after_slow_path (j, j->next_ip);
    }
}

static void
compile_allocate_pointerless_words (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
  jit_gpr_t t = T0;

  emit_store_current_ip (j, t);
  emit_call_2 (j, scm_vm_intrinsics.allocate_pointerless_words, thread_operand (),
               sp_sz_operand (j, nwords));
  emit_retval (j, t);
  record_gpr_clobber (j, t);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, t);
}

static void
compile_allocate_pointerless_words_slow (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
}

static void
compile_allocate_pointerless_words_immediate (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
  size_t bytes = nwords * sizeof(SCM);
  size_t idx = scm_inline_gc_bytes_to_freelist_index (bytes);

  if (SCM_UNLIKELY (idx >= SCM_INLINE_GC_FREELIST_COUNT))
    {
      jit_gpr_t t = T0;
      emit_store_current_ip (j, t);
      emit_call_1 (j, GC_malloc_atomic, jit_operand_imm (JIT_OPERAND_ABI_WORD, bytes));
      emit_retval (j, t);
      emit_reload_sp (j);
      emit_sp_set_scm (j, dst, t);
    }
  else
    {
      jit_gpr_t res = T0;
      ptrdiff_t offset = offsetof(struct scm_thread, pointerless_freelists);
      offset += idx * sizeof(void*);

      emit_ldxi (j, res, THREAD, offset);
      add_slow_path_patch (j, jit_beqi (j->jit, res, 0));

      jit_gpr_t new_freelist = T1;
      emit_ldr (j, new_freelist, res);
      jit_stxi (j->jit, offset, THREAD, new_freelist);

      emit_sp_set_scm (j, dst, res);
    }
}

static void
compile_allocate_pointerless_words_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t nwords)
{
  size_t bytes = nwords * sizeof(SCM);
  size_t idx = scm_inline_gc_bytes_to_freelist_index (bytes);

  if (SCM_UNLIKELY (idx >= SCM_INLINE_GC_FREELIST_COUNT))
    {
    }
  else
    {
      jit_gpr_t res = T0;
      emit_store_current_ip (j, res);
      emit_call_2 (j, scm_vm_intrinsics.allocate_pointerless_words_with_freelist,
                   thread_operand (),
                   jit_operand_imm (JIT_OPERAND_ABI_WORD, idx));
      emit_retval (j, res);
      emit_reload_sp (j);
      emit_sp_set_scm (j, dst, res);

      continue_after_slow_path (j, j->next_ip);
    }
}

static void
compile_scm_ref (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_sz (j, T1, idx);
  emit_lshi (j, T1, T1, log2_sizeof_uintptr_t);
  emit_ldxr (j, T0, T0, T1);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_scm_ref_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
}

static void
compile_scm_set (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_sz (j, T1, idx);
  emit_sp_ref_scm (j, T2, val);
  emit_lshi (j, T1, T1, log2_sizeof_uintptr_t);
  jit_stxr (j->jit, T0, T1, T2);
}

static void
compile_scm_set_slow (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
}

static void
compile_scm_ref_tag (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t tag)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_ldr (j, T0, T0);
  emit_subi (j, T0, T0, tag);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_scm_ref_tag_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t tag)
{
}

static void
compile_scm_set_tag (scm_jit_state *j, uint32_t obj, uint32_t tag, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_scm (j, T1, val);
  emit_addi (j, T1, T1, tag);
  jit_str (j->jit, T0, T1);
}

static void
compile_scm_set_tag_slow (scm_jit_state *j, uint32_t obj, uint32_t tag, uint32_t val)
{
}

static void
compile_scm_ref_immediate (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_ldxi (j, T0, T0, idx * sizeof (SCM));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_scm_ref_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
}

static void
compile_scm_set_immediate (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_scm (j, T1, val);
  jit_stxi (j->jit, idx * sizeof (SCM), T0, T1);
}

static void
compile_scm_set_immediate_slow (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
}

static void
compile_word_ref (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_sz (j, T1, idx);
  emit_lshi (j, T1, T1, log2_sizeof_uintptr_t);
  emit_ldxr (j, T0, T0, T1);
  emit_sp_set_sz (j, dst, T0);
}

static void
compile_word_ref_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
}

static void
compile_word_set (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_sz (j, T1, idx);
  emit_sp_ref_sz (j, T2, val);
  emit_lshi (j, T1, T1, log2_sizeof_uintptr_t);
  jit_stxr (j->jit, T0, T1, T2);
}

static void
compile_word_set_slow (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
}

static void
compile_word_ref_immediate (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_ldxi (j, T0, T0, idx * sizeof (SCM));
  emit_sp_set_sz (j, dst, T0);
}

static void
compile_word_ref_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
}

static void
compile_word_set_immediate (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_sz (j, T1, val);
  jit_stxi (j->jit, idx * sizeof (SCM), T0, T1);
}

static void
compile_word_set_immediate_slow (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
}

static void
compile_pointer_ref_immediate (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_ldxi (j, T0, T0, idx * sizeof (SCM));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_pointer_ref_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
}

static void
compile_pointer_set_immediate (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_scm (j, T1, val);
  jit_stxi (j->jit, idx * sizeof (SCM), T0, T1);
}

static void
compile_pointer_set_immediate_slow (scm_jit_state *j, uint32_t obj, uint32_t idx, uint32_t val)
{
}

static void
compile_tail_pointer_ref_immediate (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_addi (j, T0, T0, idx * sizeof (SCM));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_tail_pointer_ref_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t idx)
{
}

static void
compile_mov (scm_jit_state *j, uint32_t dst, uint32_t src)
{
  emit_mov (j, dst, src, T0);
}

static void
compile_mov_slow (scm_jit_state *j, uint32_t dst, uint32_t src)
{
}

static void
compile_long_mov (scm_jit_state *j, uint32_t dst, uint32_t src)
{
  emit_mov (j, dst, src, T0);
}

static void
compile_long_mov_slow (scm_jit_state *j, uint32_t dst, uint32_t src)
{
}

static void
compile_long_fmov (scm_jit_state *j, uint32_t dst, uint32_t src)
{
  jit_gpr_t t = T0;

  restore_reloadable_register_state (j, FP_IN_REGISTER);
  emit_fp_ref_scm (j, t, src);
  emit_fp_set_scm (j, dst, t);
}

static void
compile_long_fmov_slow (scm_jit_state *j, uint32_t dst, uint32_t src)
{
}

static void
compile_call_scm_from_scm_scm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
  switch ((enum scm_vm_intrinsic) idx)
    {
    case SCM_VM_INTRINSIC_ADD:
      {
        emit_sp_ref_scm (j, T0, a);
        emit_sp_ref_scm (j, T1, b);
        add_slow_path_patch (j, jit_bmci (j->jit, T0, scm_tc2_int));
        add_slow_path_patch (j, jit_bmci (j->jit, T1, scm_tc2_int));
        jit_subi (j->jit, T0, T0, scm_tc2_int);
        add_slow_path_patch (j, jit_boaddr (j->jit, T0, T1));
        break;
      }
    case SCM_VM_INTRINSIC_SUB:
      {
        emit_sp_ref_scm (j, T0, a);
        emit_sp_ref_scm (j, T1, b);
        add_slow_path_patch (j, jit_bmci (j->jit, T0, scm_tc2_int));
        add_slow_path_patch (j, jit_bmci (j->jit, T1, scm_tc2_int));
        jit_subi (j->jit, T1, T1, scm_tc2_int);
        add_slow_path_patch (j, jit_bosubr (j->jit, T0, T1));
        break;
      }
    default:
      {
        void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
        jit_operand_t op_a = sp_scm_operand (j, a);
        jit_operand_t op_b = sp_scm_operand (j, b);
        emit_store_current_ip (j, T2);
        emit_call_2 (j, intrinsic, op_a, op_b);
        emit_retval (j, T0);
        emit_reload_sp (j);
      }
    }

  emit_sp_set_scm (j, dst, T0);
}
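
/* The add/sub fast paths above work directly on fixnum tags.  A fixnum
   N is represented as (N << 2) | scm_tc2_int, so with both operands
   confirmed as fixnums (jit_bmci tests the tag bit), untagging one
   operand and adding the other yields the correctly tagged sum:

     (a*4 + 2) - 2 + (b*4 + 2) == (a+b)*4 + 2

   The overflow-checking branches (jit_boaddr/jit_bosubr) divert to the
   slow path, which calls the generic intrinsic instead.  */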
  2278. static void
  2279. compile_call_scm_from_scm_scm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
  2280. {
  2281. switch ((enum scm_vm_intrinsic) idx)
  2282. {
  2283. case SCM_VM_INTRINSIC_ADD:
  2284. case SCM_VM_INTRINSIC_SUB:
  2285. {
  2286. void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  2287. jit_operand_t op_a = sp_scm_operand (j, a);
  2288. jit_operand_t op_b = sp_scm_operand (j, b);
  2289. emit_store_current_ip (j, T1);
  2290. emit_call_2 (j, intrinsic, op_a, op_b);
  2291. emit_retval (j, T0);
  2292. emit_reload_sp (j);
  2293. emit_sp_set_scm (j, dst, T0);
  2294. continue_after_slow_path (j, j->next_ip);
  2295. break;
  2296. }
  2297. default:
  2298. break;
  2299. }
  2300. }
static void
compile_call_scm_from_scm_uimm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
  switch ((enum scm_vm_intrinsic) idx)
    {
    case SCM_VM_INTRINSIC_ADD_IMMEDIATE:
      {
        emit_sp_ref_scm (j, T0, a);
        scm_t_bits addend = b << 2;
        add_slow_path_patch (j, jit_bmci (j->jit, T0, 2));
        add_slow_path_patch (j, jit_boaddi (j->jit, T0, addend));
        break;
      }
    case SCM_VM_INTRINSIC_SUB_IMMEDIATE:
      {
        emit_sp_ref_scm (j, T0, a);
        scm_t_bits subtrahend = b << 2;
        add_slow_path_patch (j, jit_bmci (j->jit, T0, 2));
        add_slow_path_patch (j, jit_bosubi (j->jit, T0, subtrahend));
        break;
      }
    default:
      {
        void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
        jit_operand_t op_a = sp_scm_operand (j, a);
        jit_operand_t op_b = jit_operand_imm (JIT_OPERAND_ABI_UINT8, b);
        emit_store_current_ip (j, T1);
        emit_call_2 (j, intrinsic, op_a, op_b);
        emit_retval (j, T0);
        emit_reload_sp (j);
        break;
      }
    }
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_scm_uimm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
  switch ((enum scm_vm_intrinsic) idx)
    {
    case SCM_VM_INTRINSIC_ADD_IMMEDIATE:
    case SCM_VM_INTRINSIC_SUB_IMMEDIATE:
      {
        void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
        jit_operand_t op_a = sp_scm_operand (j, a);
        jit_operand_t op_b = jit_operand_imm (JIT_OPERAND_ABI_UINT8, b);
        emit_store_current_ip (j, T1);
        emit_call_2 (j, intrinsic, op_a, op_b);
        emit_retval (j, T0);
        emit_reload_sp (j);
        emit_sp_set_scm (j, dst, T0);
        continue_after_slow_path (j, j->next_ip);
        break;
      }
    default:
      break;
    }
}

static void
compile_call_scm_sz_u32 (scm_jit_state *j, uint32_t a, uint32_t b, uint32_t c, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_3 (j, intrinsic, sp_scm_operand (j, a), sp_sz_operand (j, b),
               sp_sz_operand (j, c));
  emit_reload_sp (j);
}

static void
compile_call_scm_sz_u32_slow (scm_jit_state *j, uint32_t a, uint32_t b, uint32_t c, uint32_t idx)
{
}

static void
compile_call_scm_from_scm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_1 (j, intrinsic, sp_scm_operand (j, a));
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_scm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
}

static void
compile_call_f64_from_scm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_1 (j, intrinsic, sp_scm_operand (j, a));
  emit_retval_d (j, JIT_F0);
  emit_reload_sp (j);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_call_f64_from_scm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
}

static void
compile_call_f64_from_f64 (scm_jit_state *j, uint32_t dst, uint32_t src, uint32_t idx)
{
  switch ((enum scm_vm_intrinsic) idx)
    {
    case SCM_VM_INTRINSIC_FABS:
      {
        emit_sp_ref_f64 (j, JIT_F0, src);
        emit_absr_d (j, JIT_F0, JIT_F0);
        emit_sp_set_f64 (j, dst, JIT_F0);
        break;
      }
    case SCM_VM_INTRINSIC_FSQRT:
      {
        emit_sp_ref_f64 (j, JIT_F0, src);
        emit_sqrtr_d (j, JIT_F0, JIT_F0);
        emit_sp_set_f64 (j, dst, JIT_F0);
        break;
      }
    default:
      {
        void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
        emit_call_1 (j, intrinsic, sp_f64_operand (j, src));
        emit_retval_d (j, JIT_F0);
        emit_reload_sp (j);
        emit_sp_set_f64 (j, dst, JIT_F0);
        break;
      }
    }
}

static void
compile_call_f64_from_f64_slow (scm_jit_state *j, uint32_t dst, uint32_t src, uint32_t idx)
{
}

static void
compile_call_f64_from_f64_f64 (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_call_2 (j, intrinsic, sp_f64_operand (j, a), sp_f64_operand (j, b));
  emit_retval_d (j, JIT_F0);
  emit_reload_sp (j);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_call_f64_from_f64_f64_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
}
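
/* On targets where uintptr_t is narrower than 64 bits, intrinsics
   that take or return 64-bit integers are not called with the value
   in registers: the INDIRECT_INT64_INTRINSICS paths below instead
   pass a pointer to the u64/s64 stack slot, and the intrinsic reads
   or writes the slot through that pointer; hence sp_slot_operand and
   the absence of emit_retval on those paths.  */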
static void
compile_call_u64_from_scm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
#if INDIRECT_INT64_INTRINSICS
  emit_call_2 (j, intrinsic, sp_slot_operand (j, dst), sp_scm_operand (j, a));
  emit_reload_sp (j);
#else
  emit_call_1 (j, intrinsic, sp_scm_operand (j, a));
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_u64 (j, dst, T0);
#endif
}

static void
compile_call_u64_from_scm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
}

static void
compile_make_immediate (scm_jit_state *j, uint32_t dst, SCM a)
{
  emit_movi (j, T0, SCM_UNPACK (a));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_make_immediate_slow (scm_jit_state *j, uint32_t dst, SCM a)
{
}

static void
compile_make_short_immediate (scm_jit_state *j, uint32_t dst, SCM a)
{
  emit_movi (j, T0, SCM_UNPACK (a));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_make_short_immediate_slow (scm_jit_state *j, uint32_t dst, SCM a)
{
}

static void
compile_make_long_immediate (scm_jit_state *j, uint32_t dst, SCM a)
{
  emit_movi (j, T0, SCM_UNPACK (a));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_make_long_immediate_slow (scm_jit_state *j, uint32_t dst, SCM a)
{
}

static void
compile_make_long_long_immediate (scm_jit_state *j, uint32_t dst, SCM a)
{
  emit_movi (j, T0, SCM_UNPACK (a));
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_make_long_long_immediate_slow (scm_jit_state *j, uint32_t dst, SCM a)
{
}

static void
compile_make_non_immediate (scm_jit_state *j, uint32_t dst, const void *data)
{
  emit_movi (j, T0, (uintptr_t)data);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_make_non_immediate_slow (scm_jit_state *j, uint32_t dst, const void *data)
{
}

static void
compile_static_ref (scm_jit_state *j, uint32_t dst, void *loc)
{
  emit_ldi (j, T0, loc);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_static_ref_slow (scm_jit_state *j, uint32_t dst, void *loc)
{
}

static void
compile_static_set (scm_jit_state *j, uint32_t obj, void *loc)
{
  emit_sp_ref_scm (j, T0, obj);
  jit_sti (j->jit, loc, T0);
}

static void
compile_static_set_slow (scm_jit_state *j, uint32_t obj, void *loc)
{
}

static void
compile_static_patch (scm_jit_state *j, void *dst, const void *src)
{
  emit_movi (j, T0, (uintptr_t) src);
  jit_sti (j->jit, dst, T0);
}

static void
compile_static_patch_slow (scm_jit_state *j, void *dst, const void *src)
{
}
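
/* A sketch of the protocol, inferred from the call below: push_prompt
   records the prompt tag, a stack pointer proc_slot slots below the
   frame pointer, the handler's virtual code address (vcode), and a
   machine return address (the address emit_mov_addr loads into T2,
   patched afterwards to vcode's compiled code), so that an abort to
   this prompt can resume directly in machine code.  */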
static void
compile_prompt (scm_jit_state *j, uint32_t tag, uint8_t escape_only_p,
                uint32_t proc_slot, const uint32_t *vcode)
{
  emit_store_current_ip (j, T0);
  emit_reload_fp (j);
  jit_subi (j->jit, FP, FP, proc_slot * sizeof (union scm_vm_stack_element));
  jit_reloc_t mra = emit_mov_addr (j, T2);
  jit_operand_t args[] =
    { thread_operand (),
      jit_operand_imm (JIT_OPERAND_ABI_UINT8, escape_only_p),
      sp_scm_operand (j, tag),
      jit_operand_gpr (JIT_OPERAND_ABI_POINTER, FP),
      jit_operand_imm (JIT_OPERAND_ABI_POINTER, (uintptr_t)vcode),
      jit_operand_gpr (JIT_OPERAND_ABI_POINTER, T2) };
  jit_calli (j->jit, scm_vm_intrinsics.push_prompt, 6, args);
  clear_scratch_register_state (j);
  emit_reload_sp (j);
  emit_reload_fp (j);
  add_inter_instruction_patch (j, mra, vcode);
}

static void
compile_prompt_slow (scm_jit_state *j, uint32_t tag, uint8_t escape_only_p,
                     uint32_t proc_slot, const uint32_t *vcode)
{
}

static void
compile_load_label (scm_jit_state *j, uint32_t dst, const uint32_t *vcode)
{
  emit_movi (j, T0, (uintptr_t) vcode);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_movi (j, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_load_label_slow (scm_jit_state *j, uint32_t dst, const uint32_t *vcode)
{
}

static void
compile_call_s64_from_scm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
  compile_call_u64_from_scm (j, dst, a, idx);
}

static void
compile_call_s64_from_scm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
}

static void
compile_call_scm_from_u64 (scm_jit_state *j, uint32_t dst, uint32_t src, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
#if INDIRECT_INT64_INTRINSICS
  emit_call_1 (j, intrinsic, sp_slot_operand (j, src));
#else
  emit_call_1 (j, intrinsic, sp_u64_operand (j, src));
#endif
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_u64_slow (scm_jit_state *j, uint32_t dst, uint32_t src, uint32_t idx)
{
}

static void
compile_call_scm_from_s64 (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  compile_call_scm_from_u64 (j, dst, a, b);
}

static void
compile_call_scm_from_s64_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_tag_char (scm_jit_state *j, uint32_t dst, uint32_t src)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, src);
#else
  emit_sp_ref_u64_lower_half (j, T0, src);
#endif
  emit_lshi (j, T0, T0, 8);
  emit_addi (j, T0, T0, scm_tc8_char);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_tag_char_slow (scm_jit_state *j, uint32_t dst, uint32_t src)
{
}

static void
compile_untag_char (scm_jit_state *j, uint32_t dst, uint32_t src)
{
  emit_sp_ref_scm (j, T0, src);
  emit_rshi (j, T0, T0, 8);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_movi (j, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_untag_char_slow (scm_jit_state *j, uint32_t dst, uint32_t src)
{
}

static void
compile_atomic_scm_ref_immediate (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t offset)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_addi (j, T0, T0, offset * sizeof (SCM));
  jit_ldr_atomic (j->jit, T0, T0);
  record_gpr_clobber (j, T0);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_atomic_scm_ref_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t offset)
{
}

static void
compile_atomic_scm_set_immediate (scm_jit_state *j, uint32_t obj, uint32_t offset, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_scm (j, T1, val);
  emit_addi (j, T0, T0, offset * sizeof (SCM));
  jit_str_atomic (j->jit, T0, T1);
}

static void
compile_atomic_scm_set_immediate_slow (scm_jit_state *j, uint32_t obj, uint32_t offset, uint32_t val)
{
}

static void
compile_atomic_scm_swap_immediate (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t offset, uint32_t val)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_scm (j, T1, val);
  emit_addi (j, T0, T0, offset * sizeof (SCM));
  jit_swap_atomic (j->jit, T1, T0, T1);
  record_gpr_clobber (j, T1);
  emit_sp_set_scm (j, dst, T1);
}

static void
compile_atomic_scm_swap_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t obj, uint32_t offset, uint32_t val)
{
}

static void
compile_atomic_scm_compare_and_swap_immediate (scm_jit_state *j, uint32_t dst,
                                               uint32_t obj, uint32_t offset,
                                               uint32_t expected, uint32_t desired)
{
  emit_sp_ref_scm (j, T0, obj);
  emit_sp_ref_scm (j, T1, expected);
  emit_sp_ref_scm (j, T2, desired);
  emit_addi (j, T0, T0, offset * sizeof (SCM));
  jit_cas_atomic (j->jit, T1, T0, T1, T2);
  record_gpr_clobber (j, T1);
  emit_sp_set_scm (j, dst, T1);
}

static void
compile_atomic_scm_compare_and_swap_immediate_slow (scm_jit_state *j, uint32_t dst,
                                                    uint32_t obj, uint32_t offset,
                                                    uint32_t expected, uint32_t desired)
{
}

static void
compile_call_thread_scm_scm (scm_jit_state *j, uint32_t a, uint32_t b, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_3 (j, intrinsic, thread_operand (), sp_scm_operand (j, a),
               sp_scm_operand (j, b));
  emit_reload_sp (j);
}

static void
compile_call_thread_scm_scm_slow (scm_jit_state *j, uint32_t a, uint32_t b, uint32_t idx)
{
}

static void
compile_call_thread (scm_jit_state *j, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_1 (j, intrinsic, thread_operand ());
  emit_reload_sp (j);
}

static void
compile_call_thread_slow (scm_jit_state *j, uint32_t idx)
{
}

static void
compile_call_scm_from_thread_scm (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_2 (j, intrinsic, thread_operand (), sp_scm_operand (j, a));
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_thread_scm_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t idx)
{
}

static void
compile_call_thread_scm (scm_jit_state *j, uint32_t a, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_2 (j, intrinsic, thread_operand (), sp_scm_operand (j, a));
  emit_reload_sp (j);
}

static void
compile_call_thread_scm_slow (scm_jit_state *j, uint32_t a, uint32_t idx)
{
}

static void
compile_call_scm_from_scm_u64 (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
#if INDIRECT_INT64_INTRINSICS
  emit_call_2 (j, intrinsic, sp_scm_operand (j, a), sp_slot_operand (j, b));
#else
  emit_call_2 (j, intrinsic, sp_scm_operand (j, a), sp_u64_operand (j, b));
#endif
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_scm_u64_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b, uint32_t idx)
{
}

static void
compile_call_scm_from_thread (scm_jit_state *j, uint32_t dst, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_1 (j, intrinsic, thread_operand ());
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_thread_slow (scm_jit_state *j, uint32_t dst, uint32_t idx)
{
}

static void
compile_call_scm_scm (scm_jit_state *j, uint32_t a, uint32_t b, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_2 (j, intrinsic, sp_scm_operand (j, a), sp_scm_operand (j, b));
  emit_reload_sp (j);
}

static void
compile_call_scm_scm_slow (scm_jit_state *j, uint32_t a, uint32_t b,
                           uint32_t idx)
{
}

static void
compile_call_scm_scm_scm (scm_jit_state *j, uint32_t a, uint32_t b, uint32_t c,
                          uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_3 (j, intrinsic, sp_scm_operand (j, a), sp_scm_operand (j, b),
               sp_scm_operand (j, c));
  emit_reload_sp (j);
}

static void
compile_call_scm_scm_scm_slow (scm_jit_state *j, uint32_t a, uint32_t b,
                               uint32_t c, uint32_t idx)
{
}

static void
compile_call_scm_uimm_scm (scm_jit_state *j, uint32_t a, uint8_t b, uint32_t c,
                           uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  emit_store_current_ip (j, T0);
  emit_call_3 (j, intrinsic, sp_scm_operand (j, a),
               jit_operand_imm (JIT_OPERAND_ABI_UINT8, b),
               sp_scm_operand (j, c));
  emit_reload_sp (j);
}

static void
compile_call_scm_uimm_scm_slow (scm_jit_state *j, uint32_t a, uint8_t b,
                                uint32_t c, uint32_t idx)
{
}

static void
compile_fadd (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  emit_sp_ref_f64 (j, JIT_F0, a);
  emit_sp_ref_f64 (j, JIT_F1, b);
  emit_addr_d (j, JIT_F0, JIT_F0, JIT_F1);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_fadd_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_fsub (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  emit_sp_ref_f64 (j, JIT_F0, a);
  emit_sp_ref_f64 (j, JIT_F1, b);
  emit_subr_d (j, JIT_F0, JIT_F0, JIT_F1);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_fsub_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_fmul (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  emit_sp_ref_f64 (j, JIT_F0, a);
  emit_sp_ref_f64 (j, JIT_F1, b);
  emit_mulr_d (j, JIT_F0, JIT_F0, JIT_F1);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_fmul_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_fdiv (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  emit_sp_ref_f64 (j, JIT_F0, a);
  emit_sp_ref_f64 (j, JIT_F1, b);
  emit_divr_d (j, JIT_F0, JIT_F0, JIT_F1);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_fdiv_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_uadd (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_addr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_addcr (j, T0, T0, T2);
  emit_addxr (j, T1, T1, T3_OR_FP);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_uadd_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_usub (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_subr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_subcr (j, T0, T0, T2);
  emit_subxr (j, T1, T1, T3_OR_FP);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_usub_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}
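
/* Worked decomposition for the 32-bit path below: writing
   A = hiA * 2^32 + loA and B = hiB * 2^32 + loB, the low 64 bits of
   A * B are (hiA * loB + hiB * loA) * 2^32 + loA * loB.  The two
   cross products are taken modulo 2^32 (anything they carry out lands
   at bit 64 or above), while loA * loB needs a full 32x32->64
   multiply, which is what emit_qmulr_u provides.  */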
static void
compile_umul (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_mulr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  /* FIXME: This is untested! */
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_mulr (j, T1, T1, T2);             /* High A times low B */
  emit_mulr (j, T3_OR_FP, T3_OR_FP, T0); /* High B times low A */
  emit_addr (j, T1, T1, T3_OR_FP);       /* Add high results, throw away overflow */
  emit_qmulr_u (j, T0, T2, T0, T2);      /* Low A times low B */
  emit_addr (j, T1, T1, T2);             /* Add high result of low product */
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_umul_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_uadd_immediate (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_addi (j, T0, T0, b);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_addci (j, T0, T0, b);
  emit_addxi (j, T1, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_uadd_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_usub_immediate (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_subi (j, T0, T0, b);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_subci (j, T0, T0, b);
  emit_subxi (j, T1, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_usub_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_umul_immediate (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_muli (j, T0, T0, b);
  emit_sp_set_u64 (j, dst, T0);
#else
  /* FIXME: This is untested! */
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_muli (j, T1, T1, b);         /* High A times low B */
  /* High B times low A is 0. */
  emit_movi (j, T2, b);
  emit_qmulr_u (j, T0, T2, T0, T2); /* Low A times low B */
  emit_addr (j, T1, T1, T2);        /* Add high result of low product */
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_umul_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_load_f64 (scm_jit_state *j, uint32_t dst, double a)
{
  jit_movi_d (j->jit, JIT_F0, a);
  record_fpr_clobber (j, JIT_F0);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_load_f64_slow (scm_jit_state *j, uint32_t dst, double a)
{
}

static void
compile_load_u64 (scm_jit_state *j, uint32_t dst, uint64_t a)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_movi (j, T0, a);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_movi (j, T0, a & 0xffffffff);
  emit_movi (j, T1, a >> 32);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_load_u64_slow (scm_jit_state *j, uint32_t dst, uint64_t a)
{
}

static void
compile_load_s64 (scm_jit_state *j, uint32_t dst, int64_t a)
{
  compile_load_u64 (j, dst, a);
}

static void
compile_load_s64_slow (scm_jit_state *j, uint32_t dst, int64_t a)
{
}

static void
compile_current_thread (scm_jit_state *j, uint32_t dst)
{
  emit_ldxi (j, T0, THREAD, thread_offset_handle);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_current_thread_slow (scm_jit_state *j, uint32_t dst)
{
}

static void
compile_ulogand (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_andr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_andr (j, T0, T0, T2);
  emit_andr (j, T1, T1, T3_OR_FP);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ulogand_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_ulogior (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_orr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_orr (j, T0, T0, T2);
  emit_orr (j, T1, T1, T3_OR_FP);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ulogior_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_ulogsub (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_comr (j, T1, T1);
  emit_andr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_comr (j, T2, T2);
  emit_comr (j, T3_OR_FP, T3_OR_FP);
  emit_andr (j, T0, T0, T2);
  emit_andr (j, T1, T1, T3_OR_FP);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ulogsub_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}
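
/* The 32-bit variants of the shifts below keep a u64 split across two
   registers, low word and high word.  Worked examples for the right
   shift: for s = 40, the result is lo' = hi >> 8 and hi' = 0; for
   s = 8, lo' = (lo >> 8) + (hi << 24) and hi' = hi >> 8.  The left
   shifts in compile_ulsh, and the signed compile_srsh further down,
   mirror the same decomposition.  */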
static void
compile_ursh (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_andi (j, T1, T1, 63);
  emit_rshr_u (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  /* FIXME: Not tested. */
  jit_reloc_t zero, both, done;
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_andi (j, T2, T2, 63);
  zero = jit_beqi (j->jit, T2, 0);
  both = jit_blti (j->jit, T2, 32);
  /* 32 <= s < 64: hi = 0, lo = hi >> (s-32) */
  emit_subi (j, T2, T2, 32);
  emit_rshr_u (j, T0, T1, T2);
  emit_movi (j, T1, 0);
  done = jit_jmp (j->jit);
  jit_patch_here (j->jit, both);
  /* 0 < s < 32: hi = hi >> s, lo = lo >> s + hi << (32-s) */
  emit_negr (j, T3_OR_FP, T2);
  emit_addi (j, T3_OR_FP, T3_OR_FP, 32);
  emit_lshr (j, T3_OR_FP, T1, T3_OR_FP);
  emit_rshr_u (j, T1, T1, T2);
  emit_rshr_u (j, T0, T0, T2);
  emit_addr (j, T0, T0, T3_OR_FP);
  jit_patch_here (j->jit, done);
  jit_patch_here (j->jit, zero);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ursh_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_ulsh (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_andi (j, T1, T1, 63);
  emit_lshr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  /* FIXME: Not tested. */
  jit_reloc_t zero, both, done;
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_andi (j, T2, T2, 63);
  zero = jit_beqi (j->jit, T2, 0);
  both = jit_blti (j->jit, T2, 32);
  /* 32 <= s < 64: hi = lo << (s-32), lo = 0 */
  emit_subi (j, T2, T2, 32);
  emit_lshr (j, T1, T0, T2);
  emit_movi (j, T0, 0);
  done = jit_jmp (j->jit);
  jit_patch_here (j->jit, both);
  /* 0 < s < 32: hi = hi << s + lo >> (32-s), lo = lo << s */
  emit_negr (j, T3_OR_FP, T2);
  emit_addi (j, T3_OR_FP, T3_OR_FP, 32);
  emit_rshr_u (j, T3_OR_FP, T0, T3_OR_FP);
  emit_lshr (j, T1, T1, T2);
  emit_lshr (j, T0, T0, T2);
  emit_addr (j, T1, T1, T3_OR_FP);
  jit_patch_here (j->jit, done);
  jit_patch_here (j->jit, zero);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ulsh_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_ursh_immediate (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  b &= 63;
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_rshi_u (j, T0, T0, b);
  emit_sp_set_u64 (j, dst, T0);
#else
  /* FIXME: Not tested. */
  emit_sp_ref_u64 (j, T0, T1, a);
  if (b == 0)
    {
      /* Nothing to do. */
    }
  else if (b < 32)
    {
      /* 0 < s < 32: hi = hi >> s, lo = lo >> s + hi << (32-s) */
      emit_lshi (j, T2, T1, 32 - b);
      emit_rshi_u (j, T1, T1, b);
      emit_rshi_u (j, T0, T0, b);
      emit_addr (j, T0, T0, T2);
    }
  else if (b == 32)
    {
      /* hi = 0, lo = hi */
      emit_movr (j, T0, T1);
      emit_movi (j, T1, 0);
    }
  else /* b > 32 */
    {
      /* hi = 0, lo = hi >> (s-32) */
      emit_rshi_u (j, T0, T1, b - 32);
      emit_movi (j, T1, 0);
    }
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ursh_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_ulsh_immediate (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
  b &= 63;
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_lshi (j, T0, T0, b);
  emit_sp_set_u64 (j, dst, T0);
#else
  /* FIXME: Not tested. */
  emit_sp_ref_u64 (j, T0, T1, a);
  if (b == 0)
    {
      /* Nothing to do. */
    }
  else if (b < 32)
    {
      /* hi = hi << s + lo >> (32-s), lo = lo << s */
      emit_rshi_u (j, T2, T0, 32 - b);
      emit_lshi (j, T1, T1, b);
      emit_lshi (j, T0, T0, b);
      emit_addr (j, T1, T1, T2);
    }
  else if (b == 32)
    {
      /* hi = lo, lo = 0 */
      emit_movr (j, T1, T0);
      emit_movi (j, T0, 0);
    }
  else /* b > 32 */
    {
      /* hi = lo << (s-32), lo = 0 */
      emit_lshi (j, T1, T0, b - 32);
      emit_movi (j, T0, 0);
    }
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ulsh_immediate_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}

static void
compile_ulogxor (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  emit_xorr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  emit_xorr (j, T0, T0, T2);
  emit_xorr (j, T1, T1, T3_OR_FP);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_ulogxor_slow (scm_jit_state *j, uint32_t dst, uint32_t a, uint32_t b)
{
}
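
/* handle-interrupts polls the thread's pending-asyncs list.  The fast
   path loads the list atomically and falls through when it is empty;
   otherwise the slow path first skips delivery when asyncs are
   blocked, and else saves the current IP, calls out to the
   handle-interrupts trampoline, and retries this same instruction on
   return.  */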
static void
compile_handle_interrupts (scm_jit_state *j)
{
  jit_addi (j->jit, T0, THREAD, thread_offset_pending_asyncs);
  jit_ldr_atomic (j->jit, T0, T0);
  add_slow_path_patch (j, jit_bnei (j->jit, T0, SCM_UNPACK (SCM_EOL)));
}

static void
compile_handle_interrupts_slow (scm_jit_state *j)
{
  jit_ldxi_i (j->jit, T0, THREAD, thread_offset_block_asyncs);
  add_inter_instruction_patch (j,
                               jit_bnei (j->jit, T0, 0),
                               j->next_ip);
  emit_store_current_ip (j, T0);
  jit_jmpi_with_link (j->jit, handle_interrupts_trampoline);
  continue_after_slow_path (j, j->ip);
}
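
/* Frames record both a virtual return address (a bytecode pointer)
   and a machine return address.  Returning from an interrupt prefers
   the machine address: nonzero means we can jump straight back into
   compiled code, while zero means there is no machine continuation,
   so we store the virtual address into the IP and exit to the
   interpreter.  */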
static void
compile_return_from_interrupt (scm_jit_state *j)
{
  jit_gpr_t old_fp = T0, ra = T1;
  jit_reloc_t interp;
  emit_pop_fp (j, old_fp);
  emit_load_mra (j, ra, old_fp);
  interp = jit_beqi (j->jit, ra, 0);
  jit_addi (j->jit, SP, old_fp, frame_overhead_slots * sizeof (union scm_vm_stack_element));
  set_register_state (j, SP_IN_REGISTER);
  emit_store_sp (j);
  jit_jmpr (j->jit, ra);
  jit_patch_here (j->jit, interp);
  emit_load_vra (j, ra, old_fp);
  emit_store_ip (j, ra);
  jit_addi (j->jit, SP, old_fp, frame_overhead_slots * sizeof (union scm_vm_stack_element));
  set_register_state (j, SP_IN_REGISTER);
  emit_store_sp (j);
  emit_exit (j);
  clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);
}

static void
compile_return_from_interrupt_slow (scm_jit_state *j)
{
}
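
/* In the instruction stream, every comparison test is immediately
   followed by a conditional branch (jl, je, jnl, jne, jge, jnge).
   Rather than materializing a boolean, the compiler for a test peeks
   at that branch and emits one fused compare-and-branch.  A branch
   word packs an 8-bit opcode in its low byte and a 24-bit signed
   offset above it, so (((int32_t) word) >> 8) sign-extends the
   offset, counted in 32-bit units.  */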
static enum scm_opcode
fuse_conditional_branch (scm_jit_state *j, uint32_t **target)
{
  uint8_t next = j->next_ip[0] & 0xff;
  switch (next)
    {
    case scm_op_jl:
    case scm_op_je:
    case scm_op_jnl:
    case scm_op_jne:
    case scm_op_jge:
    case scm_op_jnge:
      *target = j->next_ip + (((int32_t) j->next_ip[0]) >> 8);
      j->next_ip += op_lengths[next];
      return next;
    default:
      ASSERT (0);
    }
}

static void
compile_u64_numerically_equal (scm_jit_state *j, uint32_t a, uint32_t b)
{
  uint32_t *target;
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqr (j->jit, T0, T1);
      break;
    case scm_op_jne:
      k = jit_bner (j->jit, T0, T1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2;
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k1 = jit_bner (j->jit, T0, T2);
      k2 = jit_beqr (j->jit, T1, T3_OR_FP);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      break;
    case scm_op_jne:
      k1 = jit_bner (j->jit, T0, T2);
      k2 = jit_bner (j->jit, T1, T3_OR_FP);
      add_inter_instruction_patch (j, k1, target);
      add_inter_instruction_patch (j, k2, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_u64_numerically_equal_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}
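
/* The 32-bit double-word comparisons below are lexicographic: the
   high words decide unless they are equal, in which case the low
   words decide.  For jl, for example: branch to the target when
   hi_a < hi_b, skip past the test when the high words differ (then
   necessarily hi_a > hi_b), and otherwise branch on an unsigned
   compare of the low words.  */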
static void
compile_u64_less (scm_jit_state *j, uint32_t a, uint32_t b)
{
  uint32_t *target;
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  emit_sp_ref_u64 (j, T0, a);
  emit_sp_ref_u64 (j, T1, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_bltr_u (j->jit, T0, T1);
      break;
    case scm_op_jnl:
      k = jit_bger_u (j->jit, T0, T1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2, k3;
  emit_sp_ref_u64 (j, T0, T1, a);
  emit_sp_ref_u64 (j, T2, T3_OR_FP, b);
  k1 = jit_bltr_u (j->jit, T1, T3_OR_FP);
  k2 = jit_bner (j->jit, T1, T3_OR_FP);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k3 = jit_bltr_u (j->jit, T0, T2);
      jit_patch_here (j->jit, k2);
      add_inter_instruction_patch (j, k1, target);
      add_inter_instruction_patch (j, k3, target);
      break;
    case scm_op_jnl:
      k3 = jit_bger_u (j->jit, T0, T2);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      add_inter_instruction_patch (j, k3, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_u64_less_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}

static void
compile_s64_less (scm_jit_state *j, uint32_t a, uint32_t b)
{
  uint32_t *target;
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  emit_sp_ref_s64 (j, T0, a);
  emit_sp_ref_s64 (j, T1, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_bltr (j->jit, T0, T1);
      break;
    case scm_op_jnl:
      k = jit_bger (j->jit, T0, T1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2, k3;
  emit_sp_ref_s64 (j, T0, T1, a);
  emit_sp_ref_s64 (j, T2, T3_OR_FP, b);
  k1 = jit_bltr (j->jit, T1, T3_OR_FP);
  k2 = jit_bner (j->jit, T1, T3_OR_FP);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k3 = jit_bltr (j->jit, T0, T2);
      jit_patch_here (j->jit, k2);
      add_inter_instruction_patch (j, k1, target);
      add_inter_instruction_patch (j, k3, target);
      break;
    case scm_op_jnl:
      k3 = jit_bger (j->jit, T0, T2);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      add_inter_instruction_patch (j, k3, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_s64_less_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}

static void
compile_f64_numerically_equal (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_f64 (j, JIT_F0, a);
  emit_sp_ref_f64 (j, JIT_F1, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqr_d (j->jit, JIT_F0, JIT_F1);
      break;
    case scm_op_jne:
      k = jit_bner_d (j->jit, JIT_F0, JIT_F1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_f64_numerically_equal_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}

static void
compile_f64_less (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_f64 (j, JIT_F0, a);
  emit_sp_ref_f64 (j, JIT_F1, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_bltr_d (j->jit, JIT_F0, JIT_F1);
      break;
    case scm_op_jnl:
      k = jit_bunger_d (j->jit, JIT_F0, JIT_F1);
      break;
    case scm_op_jge:
      k = jit_bger_d (j->jit, JIT_F0, JIT_F1);
      break;
    case scm_op_jnge:
      k = jit_bunltr_d (j->jit, JIT_F0, JIT_F1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_f64_less_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}
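
/* Generic = and < first try an all-fixnum fast path: AND the two
   operands together and test the fixnum tag.  Bit 1 (the 0b10 of
   scm_tc2_int) survives the AND only if it is set in both operands,
   so jit_bmci on that mask sends any non-fixnum pair to the slow
   path.  Two fixnums then compare directly as tagged words, tagging
   being monotonic.  */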
static void
compile_numerically_equal (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_scm (j, T0, a);
  emit_sp_ref_scm (j, T1, b);
  emit_andr (j, T2, T0, T1);
  add_slow_path_patch (j, jit_bmci (j->jit, T2, scm_tc2_int));
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqr (j->jit, T0, T1);
      break;
    case scm_op_jne:
      k = jit_bner (j->jit, T0, T1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_numerically_equal_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_store_current_ip (j, T2);
  emit_call_2 (j, scm_vm_intrinsics.numerically_equal_p,
               jit_operand_gpr (JIT_OPERAND_ABI_WORD, T0),
               jit_operand_gpr (JIT_OPERAND_ABI_WORD, T1));
  emit_retval (j, T0);
  emit_reload_sp (j);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_bnei (j->jit, T0, 0);
      break;
    case scm_op_jne:
      k = jit_beqi (j->jit, T0, 0);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
  continue_after_slow_path (j, j->next_ip);
}

static void
compile_less (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_scm (j, T0, a);
  emit_sp_ref_scm (j, T1, b);
  emit_andr (j, T2, T0, T1);
  add_slow_path_patch (j, jit_bmci (j->jit, T2, scm_tc2_int));
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
    case scm_op_jnge:
      k = jit_bltr (j->jit, T0, T1);
      break;
    case scm_op_jnl:
    case scm_op_jge:
      k = jit_bger (j->jit, T0, T1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_less_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_store_current_ip (j, T2);
  emit_call_2 (j, scm_vm_intrinsics.less_p,
               jit_operand_gpr (JIT_OPERAND_ABI_POINTER, T0),
               jit_operand_gpr (JIT_OPERAND_ABI_POINTER, T1));
  emit_retval (j, T0);
  emit_reload_sp (j);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_beqi (j->jit, T0, SCM_F_COMPARE_LESS_THAN);
      break;
    case scm_op_jnl:
      k = jit_bnei (j->jit, T0, SCM_F_COMPARE_LESS_THAN);
      break;
    case scm_op_jge:
      k = jit_beqi (j->jit, T0, SCM_F_COMPARE_NONE);
      break;
    case scm_op_jnge:
      k = jit_bnei (j->jit, T0, SCM_F_COMPARE_NONE);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
  continue_after_slow_path (j, j->next_ip);
}

static void
compile_check_arguments (scm_jit_state *j, uint32_t expected)
{
  jit_reloc_t k;
  uint32_t *target;
  jit_gpr_t t = T0;
  emit_reload_fp (j);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jne:
      k = emit_branch_if_frame_locals_count_not_eq (j, t, expected);
      break;
    case scm_op_jl:
      k = emit_branch_if_frame_locals_count_less_than (j, t, expected);
      break;
    case scm_op_jge:
      /* The arguments<=? instruction sets NONE to indicate
         greater-than, whereas for <, NONE usually indicates
         greater-than-or-equal, hence the name jge.  So we need to fuse
         to greater-than, not greater-than-or-equal.  Perhaps we just
         need to rename jge to br-if-none.  */
      k = emit_branch_if_frame_locals_count_greater_than (j, t, expected);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_check_arguments_slow (scm_jit_state *j, uint32_t expected)
{
}
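
/* A sketch of the loop that follows: the positional arguments are the
   slots from fp - nreq down to the first keyword.  WALK scans down
   from fp - nreq while MIN marks fp - expected; reaching past MIN
   with only non-keywords seen means npos > expected, so we branch to
   the target, whereas hitting SP or a keyword first means the count
   is acceptable and we fall through.  */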
static void
compile_check_positional_arguments (scm_jit_state *j, uint32_t nreq, uint32_t expected)
{
  uint32_t *target;
  jit_reloc_t lt, gt;
  jit_gpr_t walk = T0, min = T1, obj = T2;
  ASSERT_HAS_REGISTER_STATE (FP_IN_REGISTER | SP_IN_REGISTER);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jge:
      /* Like arguments<=?, this instruction sets NONE to indicate
         greater-than, whereas for <, NONE usually indicates
         greater-than-or-equal, hence the name jge.  So we need to fuse
         to greater-than, not greater-than-or-equal.  Perhaps we just
         need to rename jge to br-if-none.  */
      /* Break to target if npos > expected. */
      break;
    default:
      UNREACHABLE ();
    }
  emit_subtract_stack_slots (j, min, FP, expected);
  emit_subtract_stack_slots (j, walk, FP, nreq);
  void *head = jit_address (j->jit);
  /* npos > expected if walk < min. */
  gt = jit_bltr (j->jit, walk, min);
  emit_subtract_stack_slots (j, walk, walk, 1);
  lt = jit_bltr (j->jit, walk, SP);
  emit_ldr (j, obj, walk);
  jit_patch_there
    (j->jit,
     emit_branch_if_immediate (j, obj),
     head);
  jit_patch_there
    (j->jit,
     emit_branch_if_heap_object_not_tc7 (j, obj, obj, scm_tc7_keyword),
     head);
  jit_patch_here (j->jit, lt);
  add_inter_instruction_patch (j, gt, target);
}

static void
compile_check_positional_arguments_slow (scm_jit_state *j, uint32_t nreq, uint32_t expected)
{
}

static void
compile_immediate_tag_equals (scm_jit_state *j, uint32_t a, uint32_t mask,
                              uint32_t expected)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_scm (j, T0, a);
  emit_andi (j, T0, T0, mask);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqi (j->jit, T0, expected);
      break;
    case scm_op_jne:
      k = jit_bnei (j->jit, T0, expected);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_immediate_tag_equals_slow (scm_jit_state *j, uint32_t a, uint32_t mask,
                                   uint32_t expected)
{
}

static void
compile_heap_tag_equals (scm_jit_state *j, uint32_t obj,
                         uint32_t mask, uint32_t expected)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_scm (j, T0, obj);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = emit_branch_if_heap_object_has_tc (j, T0, T0, mask, expected);
      break;
    case scm_op_jne:
      k = emit_branch_if_heap_object_not_tc (j, T0, T0, mask, expected);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_heap_tag_equals_slow (scm_jit_state *j, uint32_t obj,
                              uint32_t mask, uint32_t expected)
{
}

static void
compile_eq (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_scm (j, T0, a);
  emit_sp_ref_scm (j, T1, b);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqr (j->jit, T0, T1);
      break;
    case scm_op_jne:
      k = jit_bner (j->jit, T0, T1);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_eq_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}

static void
compile_eq_immediate (scm_jit_state *j, uint32_t a, SCM b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_sp_ref_scm (j, T0, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqi (j->jit, T0, SCM_UNPACK (b));
      break;
    case scm_op_jne:
      k = jit_bnei (j->jit, T0, SCM_UNPACK (b));
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_eq_immediate_slow (scm_jit_state *j, uint32_t a, SCM b)
{
}

static void
compile_j (scm_jit_state *j, const uint32_t *vcode)
{
  jit_reloc_t jmp;
  jmp = jit_jmp (j->jit);
  add_inter_instruction_patch (j, jmp, vcode);
}

static void
compile_j_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_jl (scm_jit_state *j, const uint32_t *vcode)
{
  UNREACHABLE (); /* All tests should fuse their following branches. */
}

static void
compile_jl_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_je (scm_jit_state *j, const uint32_t *vcode)
{
  UNREACHABLE (); /* All tests should fuse their following branches. */
}

static void
compile_je_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_jnl (scm_jit_state *j, const uint32_t *vcode)
{
  UNREACHABLE (); /* All tests should fuse their following branches. */
}

static void
compile_jnl_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_jne (scm_jit_state *j, const uint32_t *vcode)
{
  UNREACHABLE (); /* All tests should fuse their following branches. */
}

static void
compile_jne_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_jge (scm_jit_state *j, const uint32_t *vcode)
{
  UNREACHABLE (); /* All tests should fuse their following branches. */
}

static void
compile_jge_slow (scm_jit_state *j, const uint32_t *vcode)
{
}

static void
compile_jnge (scm_jit_state *j, const uint32_t *vcode)
{
  UNREACHABLE (); /* All tests should fuse their following branches. */
}

static void
compile_jnge_slow (scm_jit_state *j, const uint32_t *vcode)
{
}
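
/* jtable operands carry, like branch words, a 24-bit signed word
   offset in their high bits; the last entry of OFFSETS is the default
   target.  The emitted code bounds-checks the index against the
   default, then jumps through an inline table of absolute code
   addresses, emitted as data right after the indirect jump and
   patched as the targets are compiled.  */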
static void
compile_jtable (scm_jit_state *j, uint32_t idx, uint32_t len,
                const uint32_t *offsets)
{
  ASSERT (len > 0);
  int32_t default_offset = offsets[len - 1];
  default_offset >>= 8; /* Sign-extending shift. */
  uint32_t *default_target = j->ip + default_offset;
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T0, idx);
#else
  emit_sp_ref_u64 (j, T0, T1, idx);
  jit_reloc_t high_word_nonzero = jit_bnei (j->jit, T1, 0);
  add_inter_instruction_patch (j, high_word_nonzero, default_target);
#endif
  jit_reloc_t out_of_range = jit_bgei_u (j->jit, T0, len - 1);
  add_inter_instruction_patch (j, out_of_range, default_target);
  /* Now that we know that the u64 at IDX is in the table, load the
     table address, look up the target, and branch. */
  emit_lshi (j, T0, T0, log2_sizeof_uintptr_t);
  jit_reloc_t table = emit_mov_addr (j, T1);
  jit_ldxr (j->jit, T0, T1, T0);
  jit_jmpr (j->jit, T0);
  /* Here's the table itself. */
  jit_begin_data (j->jit, sizeof(intptr_t) * len);
  jit_align (j->jit, sizeof(intptr_t));
  jit_patch_here (j->jit, table);
  for (size_t i = 0; i + 1 < len; i++)
    {
      int32_t offset = offsets[i];
      offset >>= 8; /* Sign-extending shift. */
      uint32_t *target = j->ip + offset;
      jit_reloc_t addr = jit_emit_addr (j->jit);
      add_inter_instruction_patch (j, addr, target);
    }
  jit_end_data (j->jit);
}

static void
compile_jtable_slow (scm_jit_state *j, uint32_t idx, uint32_t len,
                     const uint32_t *offsets)
{
}

static void
compile_heap_numbers_equal (scm_jit_state *j, uint32_t a, uint32_t b)
{
  jit_reloc_t k;
  uint32_t *target;
  emit_store_current_ip (j, T0);
  emit_call_2 (j, scm_vm_intrinsics.heap_numbers_equal_p, sp_scm_operand (j, a),
               sp_scm_operand (j, b));
  emit_retval (j, T0);
  emit_reload_sp (j);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_bnei (j->jit, T0, 0);
      break;
    case scm_op_jne:
      k = jit_beqi (j->jit, T0, 0);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
}

static void
compile_heap_numbers_equal_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}
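
/* For reference in the tag/untag pairs below: a fixnum n is the
   tagged word (n << 2) | scm_tc2_int, e.g. 5 is stored as 22 and -1
   as -2.  Untagging is an arithmetic shift right by two, preserving
   the sign; tagging shifts left by two and adds the tag.  */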
  3970. static void
  3971. compile_untag_fixnum (scm_jit_state *j, uint32_t dst, uint32_t a)
  3972. {
  3973. emit_sp_ref_scm (j, T0, a);
  3974. emit_rshi (j, T0, T0, 2);
  3975. #if SIZEOF_UINTPTR_T >= 8
  3976. emit_sp_set_s64 (j, dst, T0);
  3977. #else
  3978. /* FIXME: Untested! */
  3979. emit_rshi (j, T1, T0, 31);
  3980. emit_sp_set_s64 (j, dst, T0, T1);
  3981. #endif
  3982. }
  3983. static void
  3984. compile_untag_fixnum_slow (scm_jit_state *j, uint32_t dst, uint32_t a)
  3985. {
  3986. }
  3987. static void
  3988. compile_tag_fixnum (scm_jit_state *j, uint32_t dst, uint32_t a)
  3989. {
  3990. #if SIZEOF_UINTPTR_T >= 8
  3991. emit_sp_ref_s64 (j, T0, a);
  3992. #else
  3993. emit_sp_ref_s32 (j, T0, a);
  3994. #endif
  3995. emit_lshi (j, T0, T0, 2);
  3996. emit_addi (j, T0, T0, scm_tc2_int);
  3997. emit_sp_set_scm (j, dst, T0);
  3998. }
  3999. static void
  4000. compile_tag_fixnum_slow (scm_jit_state *j, uint32_t dst, uint32_t a)
  4001. {
  4002. }
static void
compile_srsh (scm_jit_state *j, uint8_t dst, uint8_t a, uint8_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_s64 (j, T0, a);
  emit_sp_ref_s64 (j, T1, b);
  emit_andi (j, T1, T1, 63);
  emit_rshr (j, T0, T0, T1);
  emit_sp_set_s64 (j, dst, T0);
#else
  /* FIXME: Not tested.  */
  jit_reloc_t zero, both, done;

  emit_sp_ref_s64 (j, T0, T1, a);
  emit_sp_ref_s64 (j, T2, T3_OR_FP, b);
  emit_andi (j, T2, T2, 63);
  zero = jit_beqi (j->jit, T2, 0);
  both = jit_blti (j->jit, T2, 32);

  /* 32 <= s < 64: hi = hi >> 31, lo = hi >> (s-32) */
  emit_subi (j, T2, T2, 32);
  emit_rshr (j, T0, T1, T2);
  emit_rshi (j, T1, T1, 31);
  done = jit_jmp (j->jit);

  jit_patch_here (j->jit, both);

  /* 0 < s < 32: hi = hi >> s, lo = lo >> s + hi << (32-s) */
  emit_negr (j, T3_OR_FP, T2);
  emit_addi (j, T3_OR_FP, T3_OR_FP, 32);
  emit_lshr (j, T3_OR_FP, T1, T3_OR_FP);
  emit_rshr (j, T1, T1, T2);
  emit_rshr_u (j, T0, T0, T2);
  emit_addr (j, T0, T0, T3_OR_FP);

  jit_patch_here (j->jit, done);
  jit_patch_here (j->jit, zero);

  emit_sp_set_s64 (j, dst, T0, T1);
#endif
}

static void
compile_srsh_slow (scm_jit_state *j, uint8_t dst, uint8_t a, uint8_t b)
{
}

static void
compile_srsh_immediate (scm_jit_state *j, uint8_t dst, uint8_t a, uint8_t b)
{
  b &= 63;
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_s64 (j, T0, a);
  emit_rshi (j, T0, T0, b);
  emit_sp_set_s64 (j, dst, T0);
#else
  /* FIXME: Not tested.  */
  emit_sp_ref_s64 (j, T0, T1, a);
  if (b == 0)
    {
      /* Nothing to do.  */
    }
  else if (b < 32)
    {
      /* 0 < s < 32: hi = hi >> s, lo = lo >> s + hi << (32-s) */
      emit_lshi (j, T2, T1, 32 - b);
      emit_rshi (j, T1, T1, b);
      emit_rshi_u (j, T0, T0, b);
      emit_addr (j, T0, T0, T2);
    }
  else if (b == 32)
    {
      /* hi = sign-ext, lo = hi */
      emit_movr (j, T0, T1);
      emit_rshi (j, T1, T1, 31);
    }
  else /* b > 32 */
    {
      /* hi = sign-ext, lo = hi >> (s-32) */
      emit_rshi (j, T0, T1, b - 32);
      emit_rshi (j, T1, T1, 31);
    }
  emit_sp_set_s64 (j, dst, T0, T1);
#endif
}

static void
compile_srsh_immediate_slow (scm_jit_state *j, uint8_t dst, uint8_t a, uint8_t b)
{
}
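
/* The immediate comparisons below cooperate with
   fuse_conditional_branch: each one pairs with the following je/jne (or
   jl/jnl) instruction and branches directly to that instruction's
   target.  On 32-bit targets a 64-bit operand spans two words, so the
   high word is tested against the sign/zero extension of the immediate
   first; only when the high words agree does the low word decide.  */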
static void
compile_s64_imm_numerically_equal (scm_jit_state *j, uint32_t a, int16_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  uint32_t *target;

  emit_sp_ref_s64 (j, T0, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k = jit_beqi (j->jit, T0, b);
      break;
    case scm_op_jne:
      k = jit_bnei (j->jit, T0, b);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2;
  uint32_t *target;

  emit_sp_ref_s64 (j, T0, T1, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_je:
      k1 = jit_bnei (j->jit, T0, b);
      k2 = jit_beqi (j->jit, T1, b < 0 ? -1 : 0);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      break;
    case scm_op_jne:
      k1 = jit_bnei (j->jit, T0, b);
      k2 = jit_bnei (j->jit, T1, b < 0 ? -1 : 0);
      add_inter_instruction_patch (j, k1, target);
      add_inter_instruction_patch (j, k2, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_s64_imm_numerically_equal_slow (scm_jit_state *j, uint32_t a, int16_t b)
{
}

static void
compile_u64_imm_less (scm_jit_state *j, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  uint32_t *target;

  emit_sp_ref_u64 (j, T0, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_blti_u (j->jit, T0, b);
      break;
    case scm_op_jnl:
      k = jit_bgei_u (j->jit, T0, b);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2;
  uint32_t *target;

  emit_sp_ref_u64 (j, T0, T1, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k1 = jit_bnei (j->jit, T1, 0);
      k2 = jit_blti_u (j->jit, T0, b);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      break;
    case scm_op_jnl:
      k1 = jit_bnei (j->jit, T1, 0);
      k2 = jit_bgei_u (j->jit, T0, b);
      add_inter_instruction_patch (j, k1, target);
      add_inter_instruction_patch (j, k2, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_u64_imm_less_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}

static void
compile_imm_u64_less (scm_jit_state *j, uint32_t a, uint32_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  uint32_t *target;

  emit_sp_ref_u64 (j, T0, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_bgti_u (j->jit, T0, b);
      break;
    case scm_op_jnl:
      k = jit_blei_u (j->jit, T0, b);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2;
  uint32_t *target;

  emit_sp_ref_u64 (j, T0, T1, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k1 = jit_bnei (j->jit, T1, 0);
      k2 = jit_bgti_u (j->jit, T0, b);
      add_inter_instruction_patch (j, k1, target);
      add_inter_instruction_patch (j, k2, target);
      break;
    case scm_op_jnl:
      k1 = jit_bnei (j->jit, T1, 0);
      k2 = jit_blei_u (j->jit, T0, b);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_imm_u64_less_slow (scm_jit_state *j, uint32_t a, uint32_t b)
{
}

static void
compile_s64_imm_less (scm_jit_state *j, uint32_t a, int16_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  uint32_t *target;

  emit_sp_ref_s64 (j, T0, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_blti (j->jit, T0, b);
      break;
    case scm_op_jnl:
      k = jit_bgei (j->jit, T0, b);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2, k3;
  int32_t sign = b < 0 ? -1 : 0;
  uint32_t *target;

  emit_sp_ref_s64 (j, T0, T1, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k1 = jit_blti (j->jit, T1, sign);
      k2 = jit_bnei (j->jit, T1, sign);
      /* High words equal: the low words compare unsigned.  */
      k3 = jit_blti_u (j->jit, T0, b);
      add_inter_instruction_patch (j, k1, target);
      jit_patch_here (j->jit, k2);
      add_inter_instruction_patch (j, k3, target);
      break;
    case scm_op_jnl:
      k1 = jit_blti (j->jit, T1, sign);
      k2 = jit_bnei (j->jit, T1, sign);
      k3 = jit_bgei_u (j->jit, T0, b);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      add_inter_instruction_patch (j, k3, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_s64_imm_less_slow (scm_jit_state *j, uint32_t a, int16_t b)
{
}

static void
compile_imm_s64_less (scm_jit_state *j, uint32_t a, int16_t b)
{
#if SIZEOF_UINTPTR_T >= 8
  jit_reloc_t k;
  uint32_t *target;

  emit_sp_ref_s64 (j, T0, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k = jit_bgti (j->jit, T0, b);
      break;
    case scm_op_jnl:
      k = jit_blei (j->jit, T0, b);
      break;
    default:
      UNREACHABLE ();
    }
  add_inter_instruction_patch (j, k, target);
#else
  jit_reloc_t k1, k2, k3;
  int32_t sign = b < 0 ? -1 : 0;
  uint32_t *target;

  emit_sp_ref_s64 (j, T0, T1, a);
  switch (fuse_conditional_branch (j, &target))
    {
    case scm_op_jl:
      k1 = jit_blti (j->jit, T1, sign);
      k2 = jit_bnei (j->jit, T1, sign);
      /* High words equal: the low words compare unsigned.  */
      k3 = jit_bgti_u (j->jit, T0, b);
      jit_patch_here (j->jit, k1);
      add_inter_instruction_patch (j, k2, target);
      add_inter_instruction_patch (j, k3, target);
      break;
    case scm_op_jnl:
      k1 = jit_blti (j->jit, T1, sign);
      k2 = jit_bnei (j->jit, T1, sign);
      k3 = jit_blei_u (j->jit, T0, b);
      add_inter_instruction_patch (j, k1, target);
      jit_patch_here (j->jit, k2);
      add_inter_instruction_patch (j, k3, target);
      break;
    default:
      UNREACHABLE ();
    }
#endif
}

static void
compile_imm_s64_less_slow (scm_jit_state *j, uint32_t a, int16_t b)
{
}
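
/* Raw memory accesses.  `ptr' names an sp slot holding a raw pointer
   and `idx' a slot holding a byte offset; loads widen the loaded value
   into a u64/s64 stack slot.  On 32-bit targets the high word is
   materialized separately: zero for the unsigned loads below, a sign
   word for the signed ones further down.  */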
static void
compile_u8_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_uc (j->jit, T0, T0, T1);
  record_gpr_clobber (j, T0);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_movi (j, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_u8_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_u16_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_us (j->jit, T0, T0, T1);
  record_gpr_clobber (j, T0);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_movi (j, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_u16_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_u32_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
#if SIZEOF_UINTPTR_T >= 8
  jit_ldxr_ui (j->jit, T0, T0, T1);
  record_gpr_clobber (j, T0);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_ldxr (j, T0, T0, T1);
  emit_movi (j, T1, 0);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_u32_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_u64_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
#if SIZEOF_UINTPTR_T >= 8
  emit_ldxr (j, T0, T0, T1);
  emit_sp_set_u64 (j, dst, T0);
#else
  emit_addr (j, T0, T0, T1);
  if (JIT_BIGENDIAN)
    {
      emit_ldr (j, T1, T0);
      emit_ldxi (j, T0, T0, 4);
    }
  else
    {
      emit_ldxi (j, T1, T0, 4);
      emit_ldr (j, T0, T0);
    }
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_u64_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_u8_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T2, v);
#else
  emit_sp_ref_u64_lower_half (j, T2, v);
#endif
  jit_stxr_c (j->jit, T0, T1, T2);
}

static void
compile_u8_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_u16_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T2, v);
#else
  emit_sp_ref_u64_lower_half (j, T2, v);
#endif
  jit_stxr_s (j->jit, T0, T1, T2);
}

static void
compile_u16_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_u32_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T2, v);
  jit_stxr_i (j->jit, T0, T1, T2);
#else
  emit_sp_ref_u64_lower_half (j, T2, v);
  jit_stxr (j->jit, T0, T1, T2);
#endif
}

static void
compile_u32_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_u64_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_u64 (j, T2, v);
  jit_stxr (j->jit, T0, T1, T2);
#else
  jit_addr (j->jit, T0, T0, T1);
  emit_sp_ref_u64 (j, T1, T2, v);
  if (JIT_BIGENDIAN)
    {
      jit_str (j->jit, T0, T2);
      jit_stxi (j->jit, 4, T0, T1);
    }
  else
    {
      jit_str (j->jit, T0, T1);
      jit_stxi (j->jit, 4, T0, T2);
    }
#endif
}

static void
compile_u64_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}
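
/* Signed loads sign-extend into the register at load time (ldxr_c, _s,
   _i).  On 32-bit targets the high word is then recovered with an
   arithmetic shift that smears the sign bit: because the loaded value
   is already sign-extended, `lo >> 7' (bytes), `>> 15' (shorts) or
   `>> 31' (words) yields 0 or -1 as appropriate.  Example
   (illustrative): a loaded byte 0xff arrives as lo = -1, and
   -1 >> 7 == -1, the correct high word.  */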
static void
compile_s8_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_c (j->jit, T0, T0, T1);
  record_gpr_clobber (j, T0);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_s64 (j, dst, T0);
#else
  emit_rshi (j, T1, T0, 7);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_s8_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_s16_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_s (j->jit, T0, T0, T1);
  record_gpr_clobber (j, T0);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_s64 (j, dst, T0);
#else
  emit_rshi (j, T1, T0, 15);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_s16_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_s32_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_i (j->jit, T0, T0, T1);
  record_gpr_clobber (j, T0);
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_set_s64 (j, dst, T0);
#else
  emit_rshi (j, T1, T0, 31);
  emit_sp_set_u64 (j, dst, T0, T1);
#endif
}

static void
compile_s32_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_s64_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  compile_u64_ref (j, dst, ptr, idx);
}

static void
compile_s64_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_s8_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  compile_u8_set (j, ptr, idx, v);
}

static void
compile_s8_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_s16_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  compile_u16_set (j, ptr, idx, v);
}

static void
compile_s16_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_s32_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  compile_u32_set (j, ptr, idx, v);
}

static void
compile_s32_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_s64_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  compile_u64_set (j, ptr, idx, v);
}

static void
compile_s64_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}
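
/* Float accesses convert at the boundary: f32 loads widen to double
   (jit_extr_f_d), since the VM's f64 slots always hold doubles, and f32
   stores narrow back (jit_extr_d_f) before writing.  f64 accesses are
   direct.  */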
static void
compile_f32_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_f (j->jit, JIT_F0, T0, T1);
  record_fpr_clobber (j, JIT_F0);
  jit_extr_f_d (j->jit, JIT_F0, JIT_F0);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_f32_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_f64_ref (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  jit_ldxr_d (j->jit, JIT_F0, T0, T1);
  record_fpr_clobber (j, JIT_F0);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_f64_ref_slow (scm_jit_state *j, uint8_t dst, uint8_t ptr, uint8_t idx)
{
}

static void
compile_f32_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  emit_sp_ref_f64 (j, JIT_F0, v);
  jit_extr_d_f (j->jit, JIT_F0, JIT_F0);
  record_fpr_clobber (j, JIT_F0);
  jit_stxr_f (j->jit, T0, T1, JIT_F0);
}

static void
compile_f32_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_f64_set (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
  emit_sp_ref_ptr (j, T0, ptr);
  emit_sp_ref_sz (j, T1, idx);
  emit_sp_ref_f64 (j, JIT_F0, v);
  jit_stxr_d (j->jit, T0, T1, JIT_F0);
}

static void
compile_f64_set_slow (scm_jit_state *j, uint8_t ptr, uint8_t idx, uint8_t v)
{
}

static void
compile_s64_to_f64 (scm_jit_state *j, uint32_t dst, uint32_t src)
{
#if SIZEOF_UINTPTR_T >= 8
  emit_sp_ref_s64 (j, T0, src);
  jit_extr_d (j->jit, JIT_F0, T0);
#else
  emit_call_1 (j, scm_vm_intrinsics.s64_to_f64, sp_slot_operand (j, src));
  jit_retval_d (j->jit, JIT_F0);
  emit_reload_sp (j);
#endif
  record_fpr_clobber (j, JIT_F0);
  emit_sp_set_f64 (j, dst, JIT_F0);
}

static void
compile_s64_to_f64_slow (scm_jit_state *j, uint32_t dst, uint32_t src)
{
}
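
/* Call an intrinsic chosen by index: scm_vm_intrinsics is a struct of
   function pointers, indexed here as a flat array of pointers.  The
   operands `a' and `b' are pointers to non-immediate SCM data, passed
   to the intrinsic as immediates.  */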
static void
compile_call_scm_from_scmn_scmn (scm_jit_state *j, uint32_t dst,
                                 void *a, void *b, uint32_t idx)
{
  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
  jit_operand_t op_a = jit_operand_imm (JIT_OPERAND_ABI_POINTER, (uintptr_t) a);
  jit_operand_t op_b = jit_operand_imm (JIT_OPERAND_ABI_POINTER, (uintptr_t) b);

  emit_store_current_ip (j, T2);
  emit_call_2 (j, intrinsic, op_a, op_b);
  emit_retval (j, T0);
  emit_reload_sp (j);
  emit_sp_set_scm (j, dst, T0);
}

static void
compile_call_scm_from_scmn_scmn_slow (scm_jit_state *j, uint32_t dst,
                                      void *a, void *b, uint32_t idx)
{
}
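
/* VM instructions are sequences of 32-bit words; the first word carries
   the 8-bit opcode in its low byte with operands packed above it, and
   subsequent words carry further operands.  These helpers extract the
   fields.  Illustrative example: given op = 0xCCBBAA00 | opcode,
   UNPACK_8_8_8 yields a = 0xAA, b = 0xBB, c = 0xCC.  */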
#define UNPACK_8_8_8(op,a,b,c)          \
  do                                    \
    {                                   \
      a = (op >> 8) & 0xff;             \
      b = (op >> 16) & 0xff;            \
      c = op >> 24;                     \
    }                                   \
  while (0)

#define UNPACK_8_16(op,a,b)             \
  do                                    \
    {                                   \
      a = (op >> 8) & 0xff;             \
      b = op >> 16;                     \
    }                                   \
  while (0)

#define UNPACK_12_12(op,a,b)            \
  do                                    \
    {                                   \
      a = (op >> 8) & 0xfff;            \
      b = op >> 20;                     \
    }                                   \
  while (0)

#define UNPACK_24(op,a)                 \
  do                                    \
    {                                   \
      a = op >> 8;                      \
    }                                   \
  while (0)

#define UNPACK_8_24(op,a,b)             \
  do                                    \
    {                                   \
      a = op & 0xff;                    \
      b = op >> 8;                      \
    }                                   \
  while (0)

#define UNPACK_16_16(op,a,b)            \
  do                                    \
    {                                   \
      a = op & 0xffff;                  \
      b = op >> 16;                     \
    }                                   \
  while (0)
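
/* Token-pasting glue for the dispatch switches below: COMPILE_OPn turns
   an operand-signature tuple such as (X8_S12_S12) into the name of the
   corresponding decoder macro, COMPILE_X8_S12_S12.  The DOPn variants
   cover instructions with a destination operand and expand
   identically.  */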
#define COMPILE_OP1(t0)                 \
  COMPILE_##t0
#define COMPILE_OP2(t0, t1)             \
  COMPILE_##t0##__##t1
#define COMPILE_OP3(t0, t1, t2)         \
  COMPILE_##t0##__##t1##__##t2
#define COMPILE_OP4(t0, t1, t2, t3)     \
  COMPILE_##t0##__##t1##__##t2##__##t3
#define COMPILE_OP5(t0, t1, t2, t3, t4) \
  COMPILE_##t0##__##t1##__##t2##__##t3##__##t4

#define COMPILE_DOP1(t0) COMPILE_OP1(t0)
#define COMPILE_DOP2(t0, t1) COMPILE_OP2(t0, t1)
#define COMPILE_DOP3(t0, t1, t2) COMPILE_OP3(t0, t1, t2)
#define COMPILE_DOP4(t0, t1, t2, t3) COMPILE_OP4(t0, t1, t2, t3)
#define COMPILE_DOP5(t0, t1, t2, t3, t4) COMPILE_OP5(t0, t1, t2, t3, t4)

#define COMPILE_WIDE_OP1(t0)                 \
  COMPILE_WIDE_##t0
#define COMPILE_WIDE_OP2(t0, t1)             \
  COMPILE_WIDE_##t0##__##t1
#define COMPILE_WIDE_OP3(t0, t1, t2)         \
  COMPILE_WIDE_##t0##__##t1##__##t2
#define COMPILE_WIDE_OP4(t0, t1, t2, t3)     \
  COMPILE_WIDE_##t0##__##t1##__##t2##__##t3
#define COMPILE_WIDE_OP5(t0, t1, t2, t3, t4) \
  COMPILE_WIDE_##t0##__##t1##__##t2##__##t3##__##t4

#define COMPILE_WIDE_DOP1(t0) COMPILE_WIDE_OP1(t0)
#define COMPILE_WIDE_DOP2(t0, t1) COMPILE_WIDE_OP2(t0, t1)
#define COMPILE_WIDE_DOP3(t0, t1, t2) COMPILE_WIDE_OP3(t0, t1, t2)
#define COMPILE_WIDE_DOP4(t0, t1, t2, t3) COMPILE_WIDE_OP4(t0, t1, t2, t3)
#define COMPILE_WIDE_DOP5(t0, t1, t2, t3, t4) COMPILE_WIDE_OP5(t0, t1, t2, t3, t4)
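
/* One decoder per operand signature: each COMPILE_<signature> macro
   unpacks that encoding's operands from j->ip and invokes the
   per-instruction compiler `comp' with them.  The COMPILE_WIDE_*
   counterparts substitute operand indices gathered by
   parse_wide_operands (below) for the sp-relative fields; signatures
   that never appear in wide form expand to UNREACHABLE.  */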
#define COMPILE_NOP(j, comp)                    \
  {                                             \
    bad_instruction (j);                        \
  }
#define COMPILE_WIDE_NOP(j, comp) UNREACHABLE()

#define COMPILE_X32(j, comp)                    \
  {                                             \
    comp (j);                                   \
  }
#define COMPILE_WIDE_X32(j, comp) UNREACHABLE()

#define COMPILE_X8_C24(j, comp)                 \
  {                                             \
    uint32_t a;                                 \
    UNPACK_24 (j->ip[0], a);                    \
    comp (j, a);                                \
  }
#define COMPILE_WIDE_X8_C24(j, comp) UNREACHABLE()

#define COMPILE_X8_F24(j, comp)                 \
  COMPILE_X8_C24 (j, comp)
#define COMPILE_WIDE_X8_F24(j, comp) UNREACHABLE()

#define COMPILE_X8_S24(j, comp)                 \
  COMPILE_X8_C24 (j, comp)
#define COMPILE_WIDE_X8_S24(j, comp) UNREACHABLE()

#define COMPILE_X8_L24(j, comp)                 \
  {                                             \
    int32_t a = j->ip[0];                       \
    a >>= 8; /* Sign extension.  */             \
    comp (j, j->ip + a);                        \
  }
#define COMPILE_WIDE_X8_L24(j, comp) UNREACHABLE()

#define COMPILE_X8_C12_C12(j, comp)             \
  {                                             \
    uint16_t a, b;                              \
    UNPACK_12_12 (j->ip[0], a, b);              \
    comp (j, a, b);                             \
  }
#define COMPILE_WIDE_X8_C12_C12(j, comp) UNREACHABLE()

#define COMPILE_X8_S12_C12(j, comp)             \
  COMPILE_X8_C12_C12 (j, comp)
#define COMPILE_WIDE_X8_S12_C12(j, comp)        \
  {                                             \
    SCM_UNUSED uint16_t a;                      \
    uint16_t b;                                 \
    UNPACK_12_12 (j->ip[0], a, b);              \
    comp (j, wide_operands[0], b);              \
  }

#define COMPILE_X8_S12_S12(j, comp)             \
  COMPILE_X8_C12_C12 (j, comp)
#define COMPILE_WIDE_X8_S12_S12(j, comp)        \
  {                                             \
    comp (j, wide_operands[0], wide_operands[1]); \
  }

#define COMPILE_X8_F12_F12(j, comp)             \
  COMPILE_X8_C12_C12 (j, comp)
#define COMPILE_WIDE_X8_F12_F12(j, comp) UNREACHABLE()

#define COMPILE_X8_S12_Z12(j, comp)             \
  {                                             \
    uint16_t a = (j->ip[0] >> 8) & 0xfff;       \
    int16_t b = ((int32_t) j->ip[0]) >> 20; /* Sign extension.  */ \
    comp (j, a, b);                             \
  }
#define COMPILE_WIDE_X8_S12_Z12(j, comp)        \
  {                                             \
    int16_t b = ((int32_t) j->ip[0]) >> 20; /* Sign extension.  */ \
    comp (j, wide_operands[0], b);              \
  }

#define COMPILE_X8_S8_C8_S8(j, comp)            \
  {                                             \
    uint8_t a, b, c;                            \
    UNPACK_8_8_8 (j->ip[0], a, b, c);           \
    comp (j, a, b, c);                          \
  }
#define COMPILE_WIDE_X8_S8_C8_S8(j, comp)       \
  {                                             \
    SCM_UNUSED uint8_t a, c;                    \
    uint8_t b;                                  \
    UNPACK_8_8_8 (j->ip[0], a, b, c);           \
    comp (j, wide_operands[0], b, wide_operands[1]); \
  }

#define COMPILE_X8_S8_S8_C8(j, comp)            \
  COMPILE_X8_S8_C8_S8 (j, comp)
#define COMPILE_WIDE_X8_S8_S8_C8(j, comp)       \
  {                                             \
    SCM_UNUSED uint8_t a, b;                    \
    uint8_t c;                                  \
    UNPACK_8_8_8 (j->ip[0], a, b, c);           \
    comp (j, wide_operands[0], wide_operands[1], c); \
  }

#define COMPILE_X8_S8_S8_S8(j, comp)            \
  COMPILE_X8_S8_C8_S8 (j, comp)
#define COMPILE_WIDE_X8_S8_S8_S8(j, comp)       \
  {                                             \
    comp (j, wide_operands[0], wide_operands[1], wide_operands[2]); \
  }

#define COMPILE_X8_S8_I16(j, comp)              \
  {                                             \
    uint8_t a;                                  \
    scm_t_bits b;                               \
    UNPACK_8_16 (j->ip[0], a, b);               \
    comp (j, a, SCM_PACK (b));                  \
  }
#define COMPILE_WIDE_X8_S8_I16(j, comp)         \
  {                                             \
    SCM_UNUSED uint8_t a;                       \
    scm_t_bits b;                               \
    UNPACK_8_16 (j->ip[0], a, b);               \
    comp (j, wide_operands[0], SCM_PACK (b));   \
  }

#define COMPILE_X8_S8_ZI16(j, comp)             \
  {                                             \
    uint8_t a;                                  \
    int16_t b;                                  \
    UNPACK_8_16 (j->ip[0], a, b);               \
    comp (j, a, SCM_PACK ((scm_t_signed_bits) b)); \
  }
#define COMPILE_WIDE_X8_S8_ZI16(j, comp)        \
  {                                             \
    SCM_UNUSED uint8_t a;                       \
    int16_t b;                                  \
    UNPACK_8_16 (j->ip[0], a, b);               \
    comp (j, wide_operands[0], SCM_PACK ((scm_t_signed_bits) b)); \
  }

#define COMPILE_X32__C32(j, comp)               \
  {                                             \
    comp (j, j->ip[1]);                         \
  }
#define COMPILE_WIDE_X32__C32(j, comp) UNREACHABLE()

#define COMPILE_X32__L32(j, comp)               \
  {                                             \
    int32_t a = j->ip[1];                       \
    comp (j, j->ip + a);                        \
  }
#define COMPILE_WIDE_X32__L32(j, comp) UNREACHABLE()

#define COMPILE_X32__N32(j, comp)               \
  COMPILE_X32__L32 (j, comp)
#define COMPILE_WIDE_X32__N32(j, comp) UNREACHABLE()

#define COMPILE_X8_C24__L32(j, comp)            \
  {                                             \
    uint32_t a;                                 \
    int32_t b;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    b = j->ip[1];                               \
    comp (j, a, j->ip + b);                     \
  }
#define COMPILE_WIDE_X8_C24__L32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__L32(j, comp)            \
  COMPILE_X8_C24__L32 (j, comp)
#define COMPILE_WIDE_X8_S24__L32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__LO32(j, comp)           \
  COMPILE_X8_C24__L32 (j, comp)
#define COMPILE_WIDE_X8_S24__LO32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__N32(j, comp)            \
  COMPILE_X8_C24__L32 (j, comp)
#define COMPILE_WIDE_X8_S24__N32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__R32(j, comp)            \
  COMPILE_X8_C24__L32 (j, comp)
#define COMPILE_WIDE_X8_S24__R32(j, comp) UNREACHABLE()

#define COMPILE_X8_C24__X8_C24(j, comp)         \
  {                                             \
    uint32_t a, b;                              \
    UNPACK_24 (j->ip[0], a);                    \
    UNPACK_24 (j->ip[1], b);                    \
    comp (j, a, b);                             \
  }
#define COMPILE_WIDE_X8_C24__X8_C24(j, comp) UNREACHABLE()

#define COMPILE_X8_F24__X8_C24(j, comp)         \
  COMPILE_X8_C24__X8_C24 (j, comp)
#define COMPILE_WIDE_X8_F24__X8_C24(j, comp) UNREACHABLE()

#define COMPILE_X8_F24__X8_F24(j, comp)         \
  COMPILE_X8_C24__X8_C24 (j, comp)
#define COMPILE_WIDE_X8_F24__X8_F24(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__X8_S24(j, comp)         \
  COMPILE_X8_C24__X8_C24 (j, comp)
#define COMPILE_WIDE_X8_S24__X8_S24(j, comp) UNREACHABLE()

#define COMPILE_X8_F12_F12__X8_C24(j, comp)     \
  {                                             \
    uint16_t a, b;                              \
    uint32_t c;                                 \
    UNPACK_12_12 (j->ip[0], a, b);              \
    UNPACK_24 (j->ip[1], c);                    \
    comp (j, a, b, c);                          \
  }
#define COMPILE_WIDE_X8_F12_F12__X8_C24(j, comp) UNREACHABLE()

#define COMPILE_X8_F24__B1_X7_C24(j, comp)      \
  {                                             \
    uint32_t a, c;                              \
    uint8_t b;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    b = j->ip[1] & 0x1;                         \
    UNPACK_24 (j->ip[1], c);                    \
    comp (j, a, b, c);                          \
  }
#define COMPILE_WIDE_X8_F24__B1_X7_C24(j, comp) UNREACHABLE()

#define COMPILE_X8_S12_S12__C32(j, comp)        \
  {                                             \
    uint16_t a, b;                              \
    uint32_t c;                                 \
    UNPACK_12_12 (j->ip[0], a, b);              \
    c = j->ip[1];                               \
    comp (j, a, b, c);                          \
  }
#define COMPILE_WIDE_X8_S12_S12__C32(j, comp)   \
  {                                             \
    uint32_t c = j->ip[1];                      \
    comp (j, wide_operands[0], wide_operands[1], c); \
  }

#define COMPILE_X8_S24__C16_C16(j, comp)        \
  {                                             \
    uint32_t a;                                 \
    uint16_t b, c;                              \
    UNPACK_24 (j->ip[0], a);                    \
    UNPACK_16_16 (j->ip[1], b, c);              \
    comp (j, a, b, c);                          \
  }
#define COMPILE_WIDE_X8_S24__C16_C16(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__C32(j, comp)            \
  {                                             \
    uint32_t a, b;                              \
    UNPACK_24 (j->ip[0], a);                    \
    b = j->ip[1];                               \
    comp (j, a, b);                             \
  }
#define COMPILE_WIDE_X8_S24__C32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__I32(j, comp)            \
  {                                             \
    uint32_t a;                                 \
    scm_t_bits b;                               \
    UNPACK_24 (j->ip[0], a);                    \
    b = j->ip[1];                               \
    comp (j, a, SCM_PACK (b));                  \
  }
#define COMPILE_WIDE_X8_S24__I32(j, comp) UNREACHABLE()

#define COMPILE_X8_S8_S8_C8__C32(j, comp)       \
  {                                             \
    uint8_t a, b, c;                            \
    UNPACK_8_8_8 (j->ip[0], a, b, c);           \
    uint32_t d = j->ip[1];                      \
    comp (j, a, b, c, d);                       \
  }
#define COMPILE_WIDE_X8_S8_S8_C8__C32(j, comp)  \
  {                                             \
    SCM_UNUSED uint8_t a, b;                    \
    uint8_t c;                                  \
    UNPACK_8_8_8 (j->ip[0], a, b, c);           \
    uint32_t d = j->ip[1];                      \
    comp (j, wide_operands[0], wide_operands[1], c, d); \
  }

#define COMPILE_X8_S8_S8_S8__C32(j, comp)       \
  COMPILE_X8_S8_S8_C8__C32 (j, comp)
#define COMPILE_WIDE_X8_S8_S8_S8__C32(j, comp)  \
  {                                             \
    uint32_t d = j->ip[1];                      \
    comp (j, wide_operands[0], wide_operands[1], wide_operands[2], d); \
  }

#define COMPILE_X8_S8_C8_S8__C32(j, comp)       \
  COMPILE_X8_S8_S8_C8__C32 (j, comp)
#define COMPILE_WIDE_X8_S8_C8_S8__C32(j, comp)  \
  {                                             \
    SCM_UNUSED uint8_t a, c;                    \
    uint8_t b;                                  \
    UNPACK_8_8_8 (j->ip[0], a, b, c);           \
    uint32_t d = j->ip[1];                      \
    comp (j, wide_operands[0], b, wide_operands[1], d); \
  }

#define COMPILE_X8_S24__V32_X8_L24(j, comp)     \
  {                                             \
    uint32_t a, len;                            \
    UNPACK_24 (j->ip[0], a);                    \
    len = j->ip[1];                             \
    j->next_ip += len;                          \
    comp (j, a, len, j->ip + 2);                \
  }
#define COMPILE_WIDE_X8_S24__V32_X8_L24(j, comp) UNREACHABLE()

#define COMPILE_X32__LO32__L32(j, comp)         \
  {                                             \
    int32_t a = j->ip[1], b = j->ip[2];         \
    comp (j, j->ip + a, j->ip + b);             \
  }
#define COMPILE_WIDE_X32__LO32__L32(j, comp) UNREACHABLE()

#define COMPILE_X8_F24__X8_C24__L32(j, comp)    \
  {                                             \
    uint32_t a, b;                              \
    int32_t c;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    UNPACK_24 (j->ip[1], b);                    \
    c = j->ip[2];                               \
    comp (j, a, b, j->ip + c);                  \
  }
#define COMPILE_WIDE_X8_F24__X8_C24__L32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__A32__B32(j, comp)       \
  {                                             \
    uint32_t a;                                 \
    uint64_t b;                                 \
    UNPACK_24 (j->ip[0], a);                    \
    b = (((uint64_t) j->ip[1]) << 32) | ((uint64_t) j->ip[2]); \
    ASSERT (b <= (uint64_t) UINTPTR_MAX);       \
    comp (j, a, SCM_PACK ((uintptr_t) b));      \
  }
#define COMPILE_WIDE_X8_S24__A32__B32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__AF32__BF32(j, comp)     \
  {                                             \
    uint32_t a;                                 \
    union { uint64_t u; double d; } b;          \
    UNPACK_24 (j->ip[0], a);                    \
    b.u = (((uint64_t) j->ip[1]) << 32) | ((uint64_t) j->ip[2]); \
    comp (j, a, b.d);                           \
  }
#define COMPILE_WIDE_X8_S24__AF32__BF32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__AS32__BS32(j, comp)     \
  {                                             \
    uint32_t a;                                 \
    uint64_t b;                                 \
    UNPACK_24 (j->ip[0], a);                    \
    b = (((uint64_t) j->ip[1]) << 32) | ((uint64_t) j->ip[2]); \
    comp (j, a, (int64_t) b);                   \
  }
#define COMPILE_WIDE_X8_S24__AS32__BS32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__AU32__BU32(j, comp)     \
  {                                             \
    uint32_t a;                                 \
    uint64_t b;                                 \
    UNPACK_24 (j->ip[0], a);                    \
    b = (((uint64_t) j->ip[1]) << 32) | ((uint64_t) j->ip[2]); \
    comp (j, a, b);                             \
  }
#define COMPILE_WIDE_X8_S24__AU32__BU32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__B1_X7_F24__X8_L24(j, comp) \
  {                                             \
    uint32_t a, c;                              \
    uint8_t b;                                  \
    int32_t d;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    b = j->ip[1] & 0x1;                         \
    UNPACK_24 (j->ip[1], c);                    \
    d = j->ip[2]; d >>= 8; /* Sign extension.  */ \
    comp (j, a, b, c, j->ip + d);               \
  }
#define COMPILE_WIDE_X8_S24__B1_X7_F24__X8_L24(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__X8_S24__C8_S24(j, comp) \
  {                                             \
    uint32_t a, b, d;                           \
    uint8_t c;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    UNPACK_24 (j->ip[1], b);                    \
    UNPACK_8_24 (j->ip[2], c, d);               \
    comp (j, a, b, c, d);                       \
  }
#define COMPILE_WIDE_X8_S24__X8_S24__C8_S24(j, comp) UNREACHABLE()

#define COMPILE_X8_C24__C8_C24__X8_C24__N32(j, comp) \
  {                                             \
    uint32_t a, c, d;                           \
    uint8_t b;                                  \
    int32_t e;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    UNPACK_8_24 (j->ip[1], b, c);               \
    UNPACK_24 (j->ip[2], d);                    \
    e = j->ip[3];                               \
    comp (j, a, b, c, d, j->ip + e);            \
  }
#define COMPILE_WIDE_X8_C24__C8_C24__X8_C24__N32(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__X8_S24__C8_S24__X8_S24(j, comp) \
  {                                             \
    uint32_t a, b, d, e;                        \
    uint8_t c;                                  \
    UNPACK_24 (j->ip[0], a);                    \
    UNPACK_24 (j->ip[1], b);                    \
    UNPACK_8_24 (j->ip[2], c, d);               \
    UNPACK_24 (j->ip[3], e);                    \
    comp (j, a, b, c, d, e);                    \
  }
#define COMPILE_WIDE_X8_S24__X8_S24__C8_S24__X8_S24(j, comp) UNREACHABLE()

#define COMPILE_X8_S24__N32__N32__C32(j, comp)  \
  {                                             \
    uint32_t a;                                 \
    UNPACK_24 (j->ip[0], a);                    \
    int32_t b = j->ip[1];                       \
    int32_t c = j->ip[2];                       \
    uint32_t d = j->ip[3];                      \
    comp (j, a, j->ip + b, j->ip + c, d);       \
  }
#define COMPILE_WIDE_X8_S24__N32__N32__C32(j, comp) UNREACHABLE()
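
/* Operand fields in the encodings above are 8, 12 or 24 bits wide, so
   the assembler refers to out-of-range stack slots by bracketing an
   instruction with pushes and a drop and/or pop, for example:

     push a; push b; OP sp[1], sp[0]; drop 2
     push a; OP sp[0], ...; pop dst

   parse_wide_operands walks such a sequence: it records each pushed
   slot index, adjusting by the number of preceding pushes so the
   indices refer to the frame before any pushing, consumes the trailing
   drop, and for a pop shifts the collected operands up so that
   wide_operands[0] names the destination.  It returns the opcode of the
   bracketed instruction.  */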
static uint8_t
parse_wide_operands (scm_jit_state *j, uint32_t wide_operands[3])
{
  uint8_t opcode = j->ip[0] & 0xff;
  uint32_t push_count = 0;

  while (opcode == scm_op_push)
    {
      ASSERT (push_count < 3);
      UNPACK_24 (j->ip[0], wide_operands[push_count]);
      wide_operands[push_count] -= push_count;
      push_count++;
      j->ip = j->next_ip;
      opcode = j->ip[0] & 0xff;
      j->next_ip = j->ip + op_lengths[opcode];
    }
  ASSERT (push_count > 0);

  uint8_t finish_opcode = j->next_ip[0] & 0xff;
  uint32_t pop_count = 0;
  if (finish_opcode == scm_op_drop)
    {
      uint32_t count;
      UNPACK_24 (j->next_ip[0], count);
      pop_count += count;
      ASSERT (pop_count <= push_count);
      j->next_ip = j->next_ip + op_lengths[finish_opcode];
      finish_opcode = j->next_ip[0] & 0xff;
    }
  if (finish_opcode == scm_op_pop)
    {
      ASSERT (push_count < 3);
      ASSERT (push_count - pop_count == 1);
      switch (push_count)
        {
        case 2: wide_operands[2] = wide_operands[1]; /* Fall through.  */
        case 1: wide_operands[1] = wide_operands[0]; break;
        default: UNREACHABLE ();
        }
      UNPACK_24 (j->next_ip[0], wide_operands[0]);
      pop_count++;
      j->next_ip = j->next_ip + op_lengths[finish_opcode];
      finish_opcode = j->next_ip[0] & 0xff;
    }
  ASSERT (pop_count == push_count);

  return opcode;
}
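
/* One bit per VM opcode, used by the debug logging in compile1 to
   report the first time each kind of instruction is JIT-compiled in
   this process.  */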
static uintptr_t opcodes_seen[256 / (SCM_SIZEOF_UINTPTR_T * 8)];

static uintptr_t
bitvector_ref (const uintptr_t *bv, size_t n)
{
  uintptr_t word = bv[n / (SCM_SIZEOF_UINTPTR_T * 8)];
  return word & (((uintptr_t) 1) << (n & (SCM_SIZEOF_UINTPTR_T * 8 - 1)));
}

static void
bitvector_set (uintptr_t *bv, size_t n)
{
  uintptr_t *word_loc = &bv[n / (SCM_SIZEOF_UINTPTR_T * 8)];
  *word_loc |= ((uintptr_t) 1) << (n & (SCM_SIZEOF_UINTPTR_T * 8 - 1));
}
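
/* Compile the single instruction at j->ip.  A leading scm_op_push means
   the assembler spilled wide operands, so we first parse the
   push/OP/drop/pop cluster and dispatch through the COMPILE_WIDE_*
   decoders; otherwise we dispatch directly on the opcode.  Both
   switches are generated from FOR_EACH_VM_OPERATION, one case per VM
   operation.  */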
static void
compile1 (scm_jit_state *j)
{
  uint8_t opcode = j->ip[0] & 0xff;

  if (jit_log_level >= LOG_LEVEL_DEBUG)
    {
      const char *n;
      switch (opcode)
        {
#define NAME(code, cname, name, arity) case code: n = name; break;
          FOR_EACH_VM_OPERATION(NAME)
#undef NAME
        default:
          UNREACHABLE ();
        }

      if (!bitvector_ref (opcodes_seen, opcode))
        {
          bitvector_set (opcodes_seen, opcode);
          DEBUG ("Instruction first seen at vcode %p: %s\n", j->ip, n);
        }

      LOG ("Instruction at vcode %p: %s\n", j->ip, n);
    }

  j->next_ip = j->ip + op_lengths[opcode];

  if (opcode == scm_op_push)
    {
      uint32_t wide_operands[3];
      opcode = parse_wide_operands (j, wide_operands);
      switch (opcode)
        {
#define COMPILE1(code, cname, name, arity) \
          case code: COMPILE_WIDE_##arity(j, compile_##cname); break;
          FOR_EACH_VM_OPERATION(COMPILE1)
#undef COMPILE1
        default:
          UNREACHABLE ();
        }
    }
  else
    switch (opcode)
      {
#define COMPILE1(code, cname, name, arity) \
        case code: COMPILE_##arity(j, compile_##cname); break;
        FOR_EACH_VM_OPERATION(COMPILE1)
#undef COMPILE1
      default:
        UNREACHABLE ();
      }

  j->ip = j->next_ip;
}
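
/* Second pass over the same instruction: emit its out-of-line slow
   path, recording where it starts in the slow-label table so the fast
   path emitted by compile1 can branch to it.  For most instructions the
   compile_*_slow emitter is empty.  */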
static void
compile_slow_path (scm_jit_state *j)
{
  uint8_t opcode = j->ip[0] & 0xff;
  j->next_ip = j->ip + op_lengths[opcode];

  if (opcode == scm_op_push)
    {
      uint32_t wide_operands[3];
      opcode = parse_wide_operands (j, wide_operands);
      ptrdiff_t offset = j->ip - j->start;
      j->labels[slow_label_offset (offset)] = jit_address (j->jit);
      switch (opcode)
        {
#define COMPILE_SLOW(code, cname, name, arity) \
          case code: COMPILE_WIDE_##arity(j, compile_##cname##_slow); break;
          FOR_EACH_VM_OPERATION(COMPILE_SLOW)
#undef COMPILE_SLOW
        default:
          UNREACHABLE ();
        }
    }
  else
    {
      ptrdiff_t offset = j->ip - j->start;
      j->labels[slow_label_offset (offset)] = jit_address (j->jit);
      switch (opcode)
        {
#define COMPILE_SLOW(code, cname, name, arity) \
          case code: COMPILE_##arity(j, compile_##cname##_slow); break;
          FOR_EACH_VM_OPERATION(COMPILE_SLOW)
#undef COMPILE_SLOW
        default:
          UNREACHABLE ();
        }
    }

  j->ip = j->next_ip;
}
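
/* Static pre-pass over the function's bytecode: mark which offsets
   begin basic blocks (branch targets) and which are entries where the
   frame pointer must be considered reloaded (function entry, return
   points of calls, prompt handlers).  Fused conditional branches are
   scanned together with their trailing drop/pop so that the targets
   computed here agree with what fuse_conditional_branch sees during
   code generation.  */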
static void
analyze (scm_jit_state *j)
{
  memset (j->op_attrs, 0, j->end - j->start);

  j->op_attrs[0] = OP_ATTR_BLOCK | OP_ATTR_ENTRY;

  for (j->ip = (uint32_t *) j->start; j->ip < j->end; j->ip = j->next_ip)
    {
      uint8_t opcode = j->ip[0] & 0xff;
      uint8_t attrs = 0;
      uint32_t *target;

      j->next_ip = j->ip + op_lengths[opcode];

      switch (opcode)
        {
        case scm_op_check_arguments:
        case scm_op_check_positional_arguments:
          attrs |= OP_ATTR_ENTRY;
          /* Fall through.  */
        case scm_op_u64_numerically_equal:
        case scm_op_u64_less:
        case scm_op_s64_less:
        case scm_op_f64_numerically_equal:
        case scm_op_f64_less:
        case scm_op_numerically_equal:
        case scm_op_less:
        case scm_op_immediate_tag_equals:
        case scm_op_heap_tag_equals:
        case scm_op_eq:
        case scm_op_eq_immediate:
        case scm_op_heap_numbers_equal:
        case scm_op_s64_imm_numerically_equal:
        case scm_op_u64_imm_less:
        case scm_op_imm_u64_less:
        case scm_op_s64_imm_less:
        case scm_op_imm_s64_less:
          {
            uint8_t next = j->next_ip[0] & 0xff;
            if (next == scm_op_drop)
              {
                j->next_ip += op_lengths[next];
                next = j->next_ip[0] & 0xff;
              }
            if (next == scm_op_pop)
              {
                j->next_ip += op_lengths[next];
                next = j->next_ip[0] & 0xff;
              }
          }
          attrs |= OP_ATTR_BLOCK;
          fuse_conditional_branch (j, &target);
          j->op_attrs[target - j->start] |= attrs;
          break;

        case scm_op_j:
          target = j->ip + (((int32_t) j->ip[0]) >> 8);
          j->op_attrs[target - j->start] |= OP_ATTR_BLOCK;
          break;

        case scm_op_jtable:
          {
            uint32_t len = j->ip[1];
            const uint32_t *offsets = j->ip + 2;
            for (uint32_t i = 0; i < len; i++)
              {
                int32_t offset = offsets[i];
                offset >>= 8; /* Sign-extending shift.  */
                target = j->ip + offset;
                ASSERT (j->start <= target && target < j->end);
                j->op_attrs[target - j->start] |= OP_ATTR_BLOCK;
              }
            j->next_ip += len;
            break;
          }

        case scm_op_call:
        case scm_op_call_label:
          attrs = OP_ATTR_BLOCK;
          target = j->next_ip;
          j->op_attrs[target - j->start] |= OP_ATTR_BLOCK | OP_ATTR_ENTRY;
          break;

        case scm_op_prompt:
          target = j->ip + (((int32_t) j->ip[2]) >> 8);
          j->op_attrs[target - j->start] |= OP_ATTR_BLOCK | OP_ATTR_ENTRY;
          break;

        default:
          break;
        }
    }

  /* Even in loops, the entry should be a jump target.  */
  ASSERT (j->op_attrs[j->entry - j->start] & OP_ATTR_BLOCK);
}
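
/* Top-level code generation: a first pass emits the inline fast path
   for every instruction, a second pass emits all the slow paths, and a
   final loop resolves the recorded relocations against the labels
   captured by both passes.  Register state (whether SP and FP are live
   in their registers) is re-established at block boundaries from the
   attributes computed by analyze.  */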
static void
compile (scm_jit_state *j)
{
  j->ip = (uint32_t *) j->start;
  set_register_state (j, SP_IN_REGISTER | FP_IN_REGISTER);
  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;

  for (ptrdiff_t offset = 0; j->ip + offset < j->end; offset++)
    {
      j->labels[inline_label_offset (offset)] = NULL;
      j->labels[slow_label_offset (offset)] = NULL;
    }

  j->reloc_idx = 0;

  while (j->ip < j->end)
    {
      ptrdiff_t offset = j->ip - j->start;
      uint8_t attrs = j->op_attrs[offset];
      j->labels[inline_label_offset (offset)] = jit_address (j->jit);
      if (attrs & OP_ATTR_BLOCK)
        {
          uint32_t state = SP_IN_REGISTER;
          if (attrs & OP_ATTR_ENTRY)
            state |= FP_IN_REGISTER;
          j->register_state = state;
        }
      compile1 (j);
      if (jit_has_overflow (j->jit))
        return;
    }

  jit_breakpoint (j->jit);

  j->ip = (uint32_t *) j->start;
  while (j->ip < j->end)
    {
      // set register state from j->register_states[offset] ?
      reset_register_state (j, SP_IN_REGISTER);
      compile_slow_path (j);
      if (jit_has_overflow (j->jit))
        return;
    }

  jit_breakpoint (j->jit);

  for (size_t i = 0; i < j->reloc_idx; i++)
    {
      void *target = j->labels[j->relocs[i].target_label_offset];
      ASSERT (target);
      jit_patch_there (j->jit, j->relocs[i].reloc, target);
    }
}

static scm_i_pthread_once_t initialize_jit_once = SCM_I_PTHREAD_ONCE_INIT;

static void *
jit_alloc_fn (size_t size)
{
  return scm_gc_malloc (size, "jit state");
}

static void
jit_free_fn (void *unused)
{
}

static scm_jit_state *
initialize_thread_jit_state (scm_thread *thread)
{
  scm_jit_state *j;

  ASSERT (!thread->jit_state);

  j = scm_gc_malloc (sizeof (*j), "jit state");
  memset (j, 0, sizeof (*j));
  thread->jit_state = j;
  j->jit = jit_new_state (jit_alloc_fn, jit_free_fn);

  return j;
}

static void
initialize_jit (void)
{
  scm_jit_state *j;

  if (!init_jit ())
    {
      scm_jit_counter_threshold = -1;
      fprintf (stderr, "JIT failed to initialize\n");
      fprintf (stderr, "disabling automatic JIT compilation\n");
      return;
    }

  /* Init the thread's jit state so we can emit the entry trampoline
     and the handle-interrupts trampoline.  */
  j = initialize_thread_jit_state (SCM_I_CURRENT_THREAD);

  jit_pointer_t enter_mcode_addr = emit_code (j, emit_entry_trampoline);
  ASSERT (enter_mcode_addr);
  enter_mcode = jit_address_to_function_pointer (enter_mcode_addr);

  handle_interrupts_trampoline =
    emit_code (j, emit_handle_interrupts_trampoline);
  ASSERT (handle_interrupts_trampoline);

  scm_jit_return_to_interpreter_trampoline =
    emit_code (j, emit_return_to_interpreter_trampoline);
  ASSERT (scm_jit_return_to_interpreter_trampoline);
  scm_jit_return_to_interpreter_trampoline = jit_address_to_function_pointer
    (scm_jit_return_to_interpreter_trampoline);
}
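
/* Linux perf resolves symbols in anonymous JIT regions via
   /tmp/perf-<pid>.map, one "<start> <size> <name>" line per region; we
   write one line per compiled function when logging at INFO or
   above.  */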
static scm_i_pthread_once_t create_perf_map_once = SCM_I_PTHREAD_ONCE_INIT;
static FILE *perf_map = NULL;

static void
create_perf_map (void)
{
  unsigned long pid = getpid ();
  char *file_name;

  if (asprintf (&file_name, "/tmp/perf-%lu.map", pid) < 0)
    return;

  perf_map = fopen (file_name, "w");
  if (perf_map)
    DEBUG ("created %s\n", file_name);
  free (file_name);
}

static uint8_t *
compute_mcode (scm_thread *thread, uint32_t *entry_ip,
               struct scm_jit_function_data *data)
{
  scm_jit_state *j = thread->jit_state;
  uint8_t *entry_mcode;

  if (!j)
    {
      scm_i_pthread_once (&initialize_jit_once, initialize_jit);
      if (scm_jit_counter_threshold == -1)
        {
          /* Initialization failed!  */
          return NULL;
        }

      j = thread->jit_state;
      /* It's possible that initialize_jit_once inits this thread's jit
         state.  */
      if (!j)
        j = initialize_thread_jit_state (thread);
    }

  j->thread = thread;
  j->start = (const uint32_t *) (((char *) data) + data->start);
  j->end = (const uint32_t *) (((char *) data) + data->end);
  j->entry = entry_ip;

  ASSERT (j->start < j->end);
  ASSERT (j->start <= j->entry);
  ASSERT (j->entry < j->end);

  j->op_attrs = calloc ((j->end - j->start), sizeof (*j->op_attrs));
  ASSERT (j->op_attrs);
  j->labels = calloc ((j->end - j->start) * 2, sizeof (*j->labels));
  ASSERT (j->labels);

  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;

  INFO ("vcode: start=%p,+%zu entry=+%zu\n", j->start, j->end - j->start,
        j->entry - j->start);

  analyze (j);

  uint8_t *mcode = emit_code (j, compile);
  if (mcode)
    {
      entry_mcode = j->labels[inline_label_offset (j->entry - j->start)];
      data->mcode = mcode;

      if (jit_log_level >= LOG_LEVEL_INFO)
        {
          scm_i_pthread_once (&create_perf_map_once, create_perf_map);
          if (perf_map)
            {
              uint8_t *end = j->code_arena->base + j->code_arena->used;
              fprintf (perf_map, "%lx %zx %p,+%zu\n", (long) mcode,
                       end - mcode, j->start, j->end - j->start);
              fflush (perf_map);
            }
        }
    }
  else
    {
      entry_mcode = NULL;
    }

  free (j->op_attrs);
  j->op_attrs = NULL;
  free (j->labels);
  j->labels = NULL;
  free (j->relocs);
  j->relocs = NULL;
  j->reloc_idx = 0;
  j->reloc_count = 0;
  j->start = j->end = j->ip = j->entry = NULL;
  j->frame_size_min = 0;
  j->frame_size_max = INT32_MAX;

  return entry_mcode;
}

const uint8_t *
scm_jit_compute_mcode (scm_thread *thread, struct scm_jit_function_data *data)
{
  const uint32_t *vcode_start = (const uint32_t *) (((char *) data) + data->start);

  if (data->mcode)
    {
      if (vcode_start == thread->vm.ip)
        return data->mcode;

      /* The function has mcode, compiled via some other activation
         (possibly in another thread), but right now we're in an
         interpreted loop (not at the beginning of the function).  It
         would be nice if we could jump into the already-compiled
         function, but we don't know the offset.  You'd think we could
         just compile again without writing bytes to find out the
         offset into the old code, but we're not guaranteed to get the
         same compiled code, for example due to variations on whether
         direct callees have mcode at the time of the compile, or
         different encodings for relative references.  So oh well:
         we're just going to compile another copy and update the mcode
         pointer, hoping this is a rare occurrence.  */
    }

  uint8_t *mcode = compute_mcode (thread, thread->vm.ip, data);

  if (!mcode)
    {
      scm_jit_counter_threshold = -1;
      fprintf (stderr, "JIT failed due to resource exhaustion\n");
      fprintf (stderr, "disabling automatic JIT compilation\n");
    }
  else if (--jit_stop_after == 0)
    {
      scm_jit_counter_threshold = -1;
      fprintf (stderr, "stopping automatic JIT compilation, as requested\n");
      if (jit_pause_when_stopping)
        {
          fprintf (stderr, "sleeping for 30s; to debug:\n");
          fprintf (stderr, "  gdb -p %d\n\n", getpid ());
          sleep (30);
        }
    }

  return mcode;
}

void
scm_jit_enter_mcode (scm_thread *thread, const uint8_t *mcode)
{
  LOG ("entering mcode: %p\n", mcode);

  if (!SCM_FRAME_MACHINE_RETURN_ADDRESS (thread->vm.fp))
    SCM_FRAME_SET_MACHINE_RETURN_ADDRESS
      (thread->vm.fp, scm_jit_return_to_interpreter_trampoline);

  enter_mcode (thread, mcode);

  LOG ("exited mcode\n");
}

/* Call to force a thread to go back to the interpreter, for example
   when single-stepping is enabled.  */
void
scm_jit_clear_mcode_return_addresses (scm_thread *thread)
{
  union scm_vm_stack_element *fp;
  struct scm_vm *vp = &thread->vm;

  for (fp = vp->fp; fp < vp->stack_top; fp = SCM_FRAME_DYNAMIC_LINK (fp))
    SCM_FRAME_SET_MACHINE_RETURN_ADDRESS
      (fp, scm_jit_return_to_interpreter_trampoline);
}

void
scm_jit_state_free (scm_jit_state *j)
{
  /* Nothing to do; we leave j->jit NULL between compilations.  */
}

void
scm_init_jit (void)
{
  scm_jit_counter_threshold = scm_getenv_int ("GUILE_JIT_THRESHOLD",
                                              default_jit_threshold);
  jit_stop_after = scm_getenv_int ("GUILE_JIT_STOP_AFTER", -1);
  jit_pause_when_stopping = scm_getenv_int ("GUILE_JIT_PAUSE_WHEN_STOPPING", 0);
  jit_log_level = scm_getenv_int ("GUILE_JIT_LOG", 0);
}

#endif /* ENABLE_JIT */