/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"

/*
 * Operand types
 */
#define OpNone       0ull
#define OpImplicit   1ull  /* No generic decode */
#define OpReg        2ull  /* Register */
#define OpMem        3ull  /* Memory */
#define OpAcc        4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI         5ull  /* ES:DI/EDI/RDI */
#define OpMem64      6ull  /* Memory, 64-bit */
#define OpImmUByte   7ull  /* Zero-extended 8-bit immediate */
#define OpDX         8ull  /* DX register */
#define OpCL         9ull  /* CL register (for shifts) */
#define OpImmByte   10ull  /* 8-bit sign extended immediate */
#define OpOne       11ull  /* Implied 1 */
#define OpImm       12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16     13ull  /* Memory operand (16-bit). */
#define OpMem32     14ull  /* Memory operand (32-bit). */
#define OpImmU      15ull  /* Immediate operand, zero extended */
#define OpSI        16ull  /* SI/ESI/RSI */
#define OpImmFAddr  17ull  /* Immediate far address */
#define OpMemFAddr  18ull  /* Far address in memory */
#define OpImmU16    19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES        20ull  /* ES */
#define OpCS        21ull  /* CS */
#define OpSS        22ull  /* SS */
#define OpDS        23ull  /* DS */
#define OpFS        24ull  /* FS */
#define OpGS        25ull  /* GS */
#define OpMem8      26ull  /* 8-bit zero extended memory operand */
#define OpImm64     27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat      28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo     29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi     30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits      5  /* Width of operand field */
#define OpMask      ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)  /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)  /* Memory operand is absolute displacement */
#define String      (1<<13)  /* String instruction (rep capable) */
#define Stack       (1<<14)  /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)  /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)  /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)  /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)  /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)  /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)  /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)  /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)  /* Different instruction for 32/64 bit */
#define Sse         (1<<18)  /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)  /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)  /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)  /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)  /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)  /* No Such Instruction */
#define Lock        (1<<26)  /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)  /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)  /* instruction used to write page table */
#define NotImpl     (1 << 30)  /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
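
/*
 * Illustrative example (not an actual table entry from this section): an
 * opcode such as 0x00 (ADD r/m8, r8) would be described by OR-ing the
 * decode flags above, e.g. ByteOp | DstMem | SrcReg | ModRM | Lock. The
 * decoder later recovers each operand type with a shift and OpMask,
 * e.g. (ctxt->d >> DstShift) & OpMask yields OpMem here.
 */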
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
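
/*
 * A sketch of the size-based dispatch this enables (the computation
 * itself lives in fastop(), declared below but defined elsewhere in this
 * file): because each size variant is FASTOP_SIZE bytes long and emitted
 * in 1/2/4/8-byte order, the entry point for an operand of 'bytes' size
 * is roughly
 *
 *        entry = (u8 *)em_op + ilog2(bytes) * FASTOP_SIZE;
 */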

struct fastop;

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                const struct instr_dual *idual;
                const struct mode_dual *mdual;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

struct instr_dual {
        struct opcode mod012;
        struct opcode mod3;
};

struct mode_dual {
        struct opcode mode32;
        struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
                     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
        ".align " __stringify(FASTOP_SIZE) " \n\t" \
        ".type " name ", @function \n\t" \
        name ":\n\t"

#define FOP_RET "ret \n\t"

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            FOP_FUNC("em_" #op)

#define FOP_END \
            ".popsection")

#define FOPNOP() \
        FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
        FOP_RET

#define FOP1E(op, dst) \
        FOP_FUNC(#op "_" #dst) \
        "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
        FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END
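
/*
 * For reference, FASTOP1(not) above emits roughly the following (a
 * sketch, eliding the .align/.type directives and the exception label):
 *
 *	em_not:
 *		notb %al;  ret		# 8-bit,  em_not + 0
 *		notw %ax;  ret		# 16-bit, em_not + FASTOP_SIZE
 *		notl %eax; ret		# 32-bit, em_not + 2*FASTOP_SIZE
 *		notq %rax; ret		# 64-bit, CONFIG_X86_64 only
 */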

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END

#define FOP2E(op, dst, src) \
        FOP_FUNC(#op "_" #dst "_" #src) \
        #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
        FOP_START(name) \
        FOP2E(op##b, dl, al) \
        FOP2E(op##w, dx, ax) \
        FOP2E(op##l, edx, eax) \
        ON64(FOP2E(op##q, rdx, rax)) \
        FOP_END

#define FOP3E(op, dst, src, src2) \
        FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
        #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
        ".align 4 \n\t" \
        ".type " #op ", @function \n\t" \
        #op ": \n\t" \
        #op " %al \n\t" \
        FOP_RET

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
        int _fault = 0; \
 \
        asm volatile("1:" insn "\n" \
                     "2:\n" \
                     ".pushsection .fixup, \"ax\"\n" \
                     "3: movl $1, %[_fault]\n" \
                     "   jmp  2b\n" \
                     ".popsection\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : [_fault] "+qm"(_fault) inoutclob ); \
 \
        _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
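
/*
 * Typical use of asm_safe() (a sketch; the guarded instruction is up to
 * the caller): run one FPU instruction and turn a faulting execution
 * into an emulation error code rather than a host oops:
 *
 *	rc = asm_safe("fwait");
 *	if (rc != X86EMUL_CONTINUE)
 *		return rc;
 */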

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
        switch (bytes) {
        case 1:
                *(u8 *)reg = (u8)val;
                break;
        case 2:
                *(u16 *)reg = (u16)val;
                break;
        case 4:
                *reg = (u32)val;        /* 64b: zero-extend */
                break;
        case 8:
                *reg = val;
                break;
        }
}
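
/*
 * Worked example of the rule above: starting from
 * *reg == 0xffffffffffffffff, assign_register(reg, 0x12345678, 4)
 * leaves 0x0000000012345678 (the full register is replaced, matching
 * 32-bit writes in long mode), while assign_register(reg, 0x5678, 2)
 * leaves 0xffffffffffff5678 (only the low word changes).
 */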

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
        return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
        ulong *preg = reg_rmw(ctxt, reg);

        assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        u64 alignment = ctxt->d & AlignMask;

        if (likely(size < 16))
                return 1;

        switch (alignment) {
        case Unaligned:
        case Avx:
                return 1;
        case Aligned16:
                return 16;
        case Aligned:
        default:
                return size;
        }
}
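
/*
 * For example, tying this to the flag definitions above: MOVDQU
 * (Unaligned) gets alignment 1 for its 16-byte access; MOVDQA (Aligned)
 * must be 16-byte aligned since alignment == size; FXSAVE (Aligned16)
 * needs only 16-byte alignment even for its 512-byte area. Accesses
 * under 16 bytes are never alignment-checked here.
 */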

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       bool write, bool fetch,
                                       enum x86emul_mode mode, ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        u8 va_bits;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
                *linear = la;
                va_bits = ctxt_virt_addr_bits(ctxt);
                if (get_canonical(la, va_bits) != la)
                        goto bad;

                *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                                        || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                }
                if (addr.ea > lim)
                        goto bad;
                if (lim == 0xffffffff)
                        *max_size = ~0u;
                else {
                        *max_size = (u64)lim + 1 - addr.ea;
                        if (size > *max_size)
                                goto bad;
                }
                break;
        }
        if (la & (insn_alignment(ctxt, size) - 1))
                return emulate_gp(ctxt, 0);
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;

        return __linearize(ctxt, addr, &max_size, size, write, false,
                           ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
                             enum x86emul_mode mode)
{
        ulong linear;
        int rc;
        unsigned max_size;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = dst };

        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
        rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                          const struct desc_struct *cs_desc)
{
        enum x86emul_mode mode = ctxt->mode;
        int rc;

#ifdef CONFIG_X86_64
        if (ctxt->mode >= X86EMUL_MODE_PROT16) {
                if (cs_desc->l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                mode = X86EMUL_MODE_PROT64;
                } else
                        mode = X86EMUL_MODE_PROT32; /* temporary value */
        }
#endif
        if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
                mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
        rc = assign_eip(ctxt, dst, mode);
        if (rc == X86EMUL_CONTINUE)
                ctxt->mode = mode;
        return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                              void *data, unsigned size)
{
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
                               ulong linear, void *data,
                               unsigned int size)
{
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
                               struct segmented_address addr,
                               void *data,
                               unsigned int size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };

        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible. We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself. Instead, we use max_size to check
         * against op_size.
         */
        rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
                         &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
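
        /*
         * Note on the expression below: cur_size never exceeds 15, so
         * 15UL ^ cur_size is a branch-free spelling of 15 - cur_size
         * (XOR against 0b1111 subtracts exactly within the low 4 bits).
         */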
        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages,
         * and one has been loaded at the beginning of
         * x86_decode_insn. So, if not enough bytes
         * still, we must have hit the 15-byte boundary.
         */
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({      _type _x; \
 \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += sizeof(_type); \
        memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
        ctxt->fetch.ptr += sizeof(_type); \
        _x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
        rc = do_insn_fetch_bytes(_ctxt, _size); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += (_size); \
        memcpy(_arr, ctxt->fetch.ptr, _size); \
        ctxt->fetch.ptr += (_size); \
})
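
/*
 * Usage sketch (how the decode path typically consumes these macros;
 * they rely on a local 'int rc' and a 'done:' label in the caller).
 * Fetching the next opcode byte, then a 32-bit displacement, looks like:
 *
 *	u8 b = insn_fetch(u8, ctxt);
 *	u32 disp = insn_fetch(u32, ctxt);
 */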

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}
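
/*
 * Example of the high-byte case above: with no REX prefix and a byte
 * operand, modrm_reg == 4 selects AH, which lives at byte 1 of the RAX
 * slot -- hence reg_rmw(ctxt, 4 & 3) plus one. With any REX prefix the
 * same encoding selects SPL instead and takes the plain branch.
 */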

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; " CALL_NOSPEC
            : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
        return rc;
}
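
/*
 * The '4 * (condition & 0xf)' arithmetic above relies on FOP_SETCC
 * padding each stub to 4 bytes (".align 4": setcc %al is three bytes
 * plus a one-byte ret) and on FOP_START(setcc) listing the sixteen
 * stubs in x86 condition-code order, so e.g. condition 0x4 (ZF set)
 * lands on the setz stub at em_setcc + 16.
 */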
  916. static void fetch_register_operand(struct operand *op)
  917. {
  918. switch (op->bytes) {
  919. case 1:
  920. op->val = *(u8 *)op->addr.reg;
  921. break;
  922. case 2:
  923. op->val = *(u16 *)op->addr.reg;
  924. break;
  925. case 4:
  926. op->val = *(u32 *)op->addr.reg;
  927. break;
  928. case 8:
  929. op->val = *(u64 *)op->addr.reg;
  930. break;
  931. }
  932. }

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;
	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;
	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
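
/*
 * Decode the ModR/M byte and any SIB byte/displacement that follows it.
 * ModR/M is laid out as mod[7:6] reg[5:3] rm[2:0]; the REX prefix bits
 * R/X/B (extracted below) extend reg, the SIB index and the base/rm
 * fields to four bits each in 64-bit mode.  mod == 3 selects a register
 * operand; everything else yields a memory operand whose effective
 * address is accumulated in modrm_ea.
 */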

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
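
/*
 * BT/BTS/BTR/BTC with a memory operand and a register bit index can
 * address bits outside the operand itself.  Fold the aligned part of the
 * sign-extended bit index into the effective address (sv >> 3 bytes) and
 * keep only the sub-word remainder as the bit offset.
 */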

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
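
/*
 * Memory reads go through a small per-instruction cache (mem_read) so
 * that an instruction which is re-entered (e.g. after an exit that
 * satisfied an MMIO read) replays the same data instead of re-issuing
 * the access.
 */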

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
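
/*
 * String IN with a REP prefix is batched: up to one page's worth of data
 * (bounded by RCX and by the size of the io_read cache) is fetched from
 * the port in a single call, then drained from the cache on subsequent
 * iterations.
 */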

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}
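
/*
 * Core of segment loading: fetch the descriptor for @selector, apply the
 * protected-mode permission and type checks appropriate for @seg (stack,
 * code, TR, LDT or data), mark it busy/accessed as required, and install
 * it.  Real mode and VM86 short-circuit the checks, as does a NULL
 * selector where the architecture allows one.
 */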

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}

load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* IRET from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}
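
/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination.  On a match,
 * ZF is set and ECX:EBX is written back; on a mismatch, ZF is cleared
 * and the old value is loaded into EDX:EAX.  CMPXCHG16B (dst.bytes == 16)
 * is not handled here.
 */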

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
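
/*
 * CMPXCHG is emulated as a CMP of RAX against the destination followed
 * by a forced write cycle: on success the source value is written back,
 * on failure the old destination value is rewritten unchanged and also
 * copied to RAX, matching the architectural behaviour of always driving
 * a write to the destination.
 */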

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return edx & bit(X86_FEATURE_LM);
}

#define GET_SMSTATE(type, smbase, offset) \
	({ \
		type __val; \
		int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
					     sizeof(__val)); \
		if (r != X86EMUL_CONTINUE) \
			return X86EMUL_UNHANDLEABLE; \
		__val; \
	})

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}

static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smbase, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA. */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA. */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode. */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}

	smbase = ctxt->ops->get_smbase(ctxt);
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4KB granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4KB granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32-bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * SYSCALL is always enabled in long mode, so the check only needs
	 * to become vendor-specific (via CPUID) if other modes are active.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 32-bit compat application running under a 64-bit guest will
	 * therefore #UD.  While this behaviour could be fixed (by
	 * emulating the AMD response), AMD CPUs cannot be made to behave
	 * like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in
	 * legacy mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;

	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
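
/*
 * Consult the I/O permission bitmap in the current TSS: the bitmap
 * offset lives at byte 102 of the TSS, and an access of @len bytes at
 * @port is allowed only if all of the corresponding bits are clear and
 * the bitmap bytes lie within the TSS limit.
 */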

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite a strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
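
/*
 * 16-bit task switch: write the outgoing state into the old TSS, read the
 * new TSS, chain the back link if this is a CALL/gate switch, and only
 * then load the incoming state.
 */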
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof tss_seg.prev_task_link);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors. This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof tss_seg.prev_task_link);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in EFLAGS; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
			    struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}
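
/*
 * DAS adjusts AL after a packed-BCD subtraction. Example: 0x53 - 0x04
 * leaves AL = 0x4f; the low nibble 0xf > 9, so 6 is subtracted to give
 * AL = 0x49 with AF set, the correct BCD result of 53 - 4.
 */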
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
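
/*
 * AAM splits AL by the immediate base (10 for the plain opcode):
 * e.g. AL = 63 with base 10 yields AH = 6, AL = 3, i.e. AX = 0x0603.
 */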
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
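
/*
 * AAD folds AH back into AL before a BCD divide: AL = AL + AH * base and
 * AH is cleared, e.g. AX = 0x0604 with base 10 becomes AX = 0x0040 (64).
 */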
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore cs.
	 */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}
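
/*
 * em_cwd below broadcasts the sign bit of the source into RDX without a
 * branch: the shift leaves 0 or 1, subtracting 1 gives all-ones or zero,
 * and the final complement flips that to zero or all-ones.
 */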
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
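
/* RDPID returns IA32_TSC_AUX; #UD if the MSR cannot be read. */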
static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
		return emulate_ud(ctxt);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules, so we have to do the operation almost by hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
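
/*
 * LOOP/LOOPE/LOOPNE share a handler: 0xe2 always loops, and for 0xe0/0xe1
 * the XOR with 0x5 maps the opcode onto the matching condition code
 * (0xe0 -> NE, 0xe1 -> E) so test_cc() can check ZF as required.
 */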
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}
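
/*
 * BSWAP of a 16-bit register is undefined on real CPUs; operand sizes
 * other than 8 therefore fall through to the 32-bit swap below.
 */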
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflushopt regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 1, ebx, ecx = 0, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	if (!(edx & FFL(FXSR)))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode (no REX.W prefix)
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode with REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result. (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->ops->get_fpu(ctxt);

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	ctxt->ops->put_fpu(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->ops->get_fpu(ctxt);

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	ctxt->ops->put_fpu(ctxt);

	return rc;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA) {
			u64 maxphyaddr;
			u32 eax, ebx, ecx, edx;

			eax = 0x80000008;
			ecx = 0;
			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
						 &edx, false))
				maxphyaddr = eax & 0xff;
			else
				maxphyaddr = 36;
			rsvd = rsvd_bits(maxphyaddr, 63);
			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
				rsvd &= ~CR3_PCID_INVD;
		}

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
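
/*
 * Decode-table building blocks: D() declares an opcode by flags alone,
 * I() binds an emulation routine, F() a fastop, DI()/II()/IIP() add an
 * intercept (and optional permission check), G()/GD() dispatch through a
 * group table indexed by ModRM.reg, EXT() through a ModRM.rm extension,
 * E() through an FPU escape table, GP() by mandatory prefix, and
 * ID()/MD() by memory-vs-register form and CPU mode; N marks NotImpl.
 */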
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};
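
/*
 * ModRM /6 in the shift group is not architecturally defined; real CPUs
 * treat it as SHL, which is presumably why em_shl appears twice below.
 */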
static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps, em_call_far),
	I(SrcMem | NearBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
	I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot | DstMem, sldt),
	DI(Prot | DstMem, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
};

static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };
  3873. static const struct instr_dual instr_dual_0f_c3 = {
  3874. I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
  3875. };
  3876. static const struct mode_dual mode_dual_63 = {
  3877. N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
  3878. };
  3879. static const struct opcode opcode_table[256] = {
  3880. /* 0x00 - 0x07 */
  3881. F6ALU(Lock, em_add),
  3882. I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
  3883. I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
  3884. /* 0x08 - 0x0F */
  3885. F6ALU(Lock | PageTable, em_or),
  3886. I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
  3887. N,
  3888. /* 0x10 - 0x17 */
  3889. F6ALU(Lock, em_adc),
  3890. I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
  3891. I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
  3892. /* 0x18 - 0x1F */
  3893. F6ALU(Lock, em_sbb),
  3894. I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
  3895. I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
  3896. /* 0x20 - 0x27 */
  3897. F6ALU(Lock | PageTable, em_and), N, N,
  3898. /* 0x28 - 0x2F */
  3899. F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
  3900. /* 0x30 - 0x37 */
  3901. F6ALU(Lock, em_xor), N, N,
  3902. /* 0x38 - 0x3F */
  3903. F6ALU(NoWrite, em_cmp), N, N,
  3904. /* 0x40 - 0x4F */
  3905. X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
  3906. /* 0x50 - 0x57 */
  3907. X8(I(SrcReg | Stack, em_push)),
  3908. /* 0x58 - 0x5F */
  3909. X8(I(DstReg | Stack, em_pop)),
  3910. /* 0x60 - 0x67 */
  3911. I(ImplicitOps | Stack | No64, em_pusha),
  3912. I(ImplicitOps | Stack | No64, em_popa),
  3913. N, MD(ModRM, &mode_dual_63),
  3914. N, N, N, N,
  3915. /* 0x68 - 0x6F */
  3916. I(SrcImm | Mov | Stack, em_push),
  3917. I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
  3918. I(SrcImmByte | Mov | Stack, em_push),
  3919. I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
  3920. I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
  3921. I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
  3922. /* 0x70 - 0x7F */
  3923. X16(D(SrcImmByte | NearBranch)),
  3924. /* 0x80 - 0x87 */
  3925. G(ByteOp | DstMem | SrcImm, group1),
  3926. G(DstMem | SrcImm, group1),
  3927. G(ByteOp | DstMem | SrcImm | No64, group1),
  3928. G(DstMem | SrcImmByte, group1),
  3929. F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
  3930. I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
  3931. /* 0x88 - 0x8F */
  3932. I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
  3933. I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
  3934. I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
  3935. D(ModRM | SrcMem | NoAccess | DstReg),
  3936. I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
  3937. G(0, group1A),
  3938. /* 0x90 - 0x97 */
  3939. DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
  3940. /* 0x98 - 0x9F */
  3941. D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
  3942. I(SrcImmFAddr | No64, em_call_far), N,
  3943. II(ImplicitOps | Stack, em_pushf, pushf),
  3944. II(ImplicitOps | Stack, em_popf, popf),
  3945. I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
  3946. /* 0xA0 - 0xA7 */
  3947. I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
  3948. I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
  3949. I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
  3950. F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
  3951. /* 0xA8 - 0xAF */
  3952. F2bv(DstAcc | SrcImm | NoWrite, em_test),
  3953. I2bv(SrcAcc | DstDI | Mov | String, em_mov),
  3954. I2bv(SrcSI | DstAcc | Mov | String, em_mov),
  3955. F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
  3956. /* 0xB0 - 0xB7 */
  3957. X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
  3958. /* 0xB8 - 0xBF */
  3959. X8(I(DstReg | SrcImm64 | Mov, em_mov)),
  3960. /* 0xC0 - 0xC7 */
  3961. G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
  3962. I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
  3963. I(ImplicitOps | NearBranch, em_ret),
  3964. I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
  3965. I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
  3966. G(ByteOp, group11), G(0, group11),
  3967. /* 0xC8 - 0xCF */
  3968. I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
  3969. I(ImplicitOps | SrcImmU16, em_ret_far_imm),
  3970. I(ImplicitOps, em_ret_far),
  3971. D(ImplicitOps), DI(SrcImmByte, intn),
  3972. D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
  3973. /* 0xD0 - 0xD7 */
  3974. G(Src2One | ByteOp, group2), G(Src2One, group2),
  3975. G(Src2CL | ByteOp, group2), G(Src2CL, group2),
  3976. I(DstAcc | SrcImmUByte | No64, em_aam),
  3977. I(DstAcc | SrcImmUByte | No64, em_aad),
  3978. F(DstAcc | ByteOp | No64, em_salc),
  3979. I(DstAcc | SrcXLat | ByteOp, em_mov),
  3980. /* 0xD8 - 0xDF */
  3981. N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
  3982. /* 0xE0 - 0xE7 */
  3983. X3(I(SrcImmByte | NearBranch, em_loop)),
  3984. I(SrcImmByte | NearBranch, em_jcxz),
  3985. I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
  3986. I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
  3987. /* 0xE8 - 0xEF */
  3988. I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
  3989. I(SrcImmFAddr | No64, em_jmp_far),
  3990. D(SrcImmByte | ImplicitOps | NearBranch),
  3991. I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
  3992. I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
  3993. /* 0xF0 - 0xF7 */
  3994. N, DI(ImplicitOps, icebp), N, N,
  3995. DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
  3996. G(ByteOp, group3), G(0, group3),
  3997. /* 0xF8 - 0xFF */
  3998. D(ImplicitOps), D(ImplicitOps),
  3999. I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
  4000. D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
  4001. };

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * The instructions below are selected by the mandatory prefix; the table
 * itself is indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU
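
/*
 * Note: immediates wider than 32 bits cannot be encoded in the instruction
 * stream (MOV reg, imm64 is the exception and is decoded via OpImm64
 * instead), so a 64-bit operand size still yields a 4-byte, sign-extended
 * immediate.  For example, "add rax, imm32" carries only 4 immediate bytes.
 */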
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
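
/*
 * Fetch an immediate operand from the instruction stream at the current
 * decode position (ctxt->_eip), sign-extending it into op->val.  Callers
 * that want zero extension pass sign_extension == false and the value is
 * masked back down to its natural width below.
 */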
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
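
/*
 * Decode one operand according to the OpXXX selector extracted from
 * ctxt->d.  Memory-like operands share the mem_common tail: ctxt->memop
 * (filled in earlier by decode_modrm()/decode_abs()) is copied into *op and
 * remembered via ctxt->memopp so a rIP-relative displacement can be fixed
 * up once the instruction's total length is known.
 */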
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}
done:
	return rc;
}
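
/*
 * Decode one instruction into ctxt: consume legacy and REX prefixes, look
 * the opcode up in the one-, two- or three-byte tables, resolve any
 * group/prefix/escape indirection via the ModRM byte, then decode the
 * source, second-source and destination operands.  Returns EMULATION_OK on
 * success and EMULATION_FAILED if the instruction cannot be handled.
 */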
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	ctxt->intercept = x86_intercept_none;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_ES;
			break;
		case 0x2e:	/* CS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_CS;
			break;
		case 0x36:	/* SS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_SS;
			break;
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_DS;
			break;
		case 0x64:	/* FS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_FS;
			break;
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_GS;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
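
	/*
	 * Some table entries are only indirections (Group, GroupDual,
	 * Prefix, Escape, ...); iterate until a concrete opcode is reached,
	 * accumulating the flags picked up along the way into ctxt->d.  For
	 * example, for "neg %eax" (f7 d8), opcode_table[0xf7] is
	 * G(0, group3) and the reg field (3) of the ModRM byte selects the
	 * NEG entry within group3.
	 */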
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf) {
				size_t size = ARRAY_SIZE(opcode.u.esc->high);
				u32 index = array_index_nospec(
					ctxt->modrm - 0xc0, size);

				opcode = opcode.u.esc->high[index];
			} else {
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			}
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);

	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE and REPNE:
	 * if the repeat prefix is REPE/REPZ or REPNE/REPNZ and the opcode
	 * is CMPS or SCAS, test the corresponding termination condition:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
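
/*
 * fwait raises any x87 exception left pending by earlier guest FPU use;
 * running it under asm_safe() lets the emulator turn such a fault into a
 * #MF injected into the guest rather than taking it on the host.
 */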
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
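
/*
 * Dispatch to a flag-mutating fastop stub.  The stubs for the different
 * operand sizes of one operation are laid out FASTOP_SIZE bytes apart, so
 * the size-specific entry point is found by offsetting the function
 * pointer.  The inline asm below establishes the fastop calling
 * convention: dst in rax, src in rdx, src2 in rcx, with the guest's
 * arithmetic flags loaded into and saved from EFLAGS around the call.  A
 * stub signals a divide error (#DE) by returning a NULL continuation in
 * the fop register.
 */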
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
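
/*
 * Clear the per-instruction decode state.  The memset relies on the decode
 * fields being laid out contiguously in struct x86_emulate_ctxt between
 * rip_relative and modrm; the read caches keep their buffers and only have
 * their positions reset.
 */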
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
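
/*
 * Execute a previously decoded instruction: run the permission and
 * intercept checks, read the memory operands, execute (via ctxt->execute,
 * a fastop stub, or the open-coded switches below), then write back the
 * results and advance rIP.  String instructions may return
 * EMULATION_RESTART so the caller re-enters the emulator for the next
 * iteration.
 */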
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore the dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
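
/*
 * Whether it is safe to cache and reuse a single guest physical address
 * for this instruction's memory access: a REP string instruction touches a
 * new address on every iteration, and a TwoMemOp instruction (e.g. MOVS)
 * accesses two different locations, so neither can reuse one GPA.
 */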
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}