- /*
- * Copyright © 2006-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * Authors: David Woodhouse <dwmw2@infradead.org>,
- * Ashok Raj <ashok.raj@intel.com>,
- * Shaohua Li <shaohua.li@intel.com>,
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
- * Fenghua Yu <fenghua.yu@intel.com>
- * Joerg Roedel <jroedel@suse.de>
- */
- #define pr_fmt(fmt) "DMAR: " fmt
- #include <linux/init.h>
- #include <linux/bitmap.h>
- #include <linux/debugfs.h>
- #include <linux/export.h>
- #include <linux/slab.h>
- #include <linux/irq.h>
- #include <linux/interrupt.h>
- #include <linux/spinlock.h>
- #include <linux/pci.h>
- #include <linux/dmar.h>
- #include <linux/dma-mapping.h>
- #include <linux/mempool.h>
- #include <linux/memory.h>
- #include <linux/cpu.h>
- #include <linux/timer.h>
- #include <linux/io.h>
- #include <linux/iova.h>
- #include <linux/iommu.h>
- #include <linux/intel-iommu.h>
- #include <linux/syscore_ops.h>
- #include <linux/tboot.h>
- #include <linux/dmi.h>
- #include <linux/pci-ats.h>
- #include <linux/memblock.h>
- #include <linux/dma-contiguous.h>
- #include <linux/crash_dump.h>
- #include <asm/irq_remapping.h>
- #include <asm/cacheflush.h>
- #include <asm/iommu.h>
- #include "irq_remapping.h"
- #define ROOT_SIZE VTD_PAGE_SIZE
- #define CONTEXT_SIZE VTD_PAGE_SIZE
- #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
- #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
- #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
- #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
- #define IOAPIC_RANGE_START (0xfee00000)
- #define IOAPIC_RANGE_END (0xfeefffff)
- #define IOVA_START_ADDR (0x1000)
- #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
- #define MAX_AGAW_WIDTH 64
- #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
- #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
- #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
- /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
- to match. That way, we can use 'unsigned long' for PFNs with impunity. */
- #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
- __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
- #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
- /* IO virtual address start page frame number */
- #define IOVA_START_PFN (1)
- #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
- #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
- #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
- /* page table handling */
- #define LEVEL_STRIDE (9)
- #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
- /*
- * This bitmap is used to advertise the page sizes our hardware supports
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is a power-of-two multiple of 4KiB and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are a power-of-two multiple of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
- */
- #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
- static inline int agaw_to_level(int agaw)
- {
- return agaw + 2;
- }
- static inline int agaw_to_width(int agaw)
- {
- return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
- }
- static inline int width_to_agaw(int width)
- {
- return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
- }
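- /*
-  * Worked example: a 48-bit address width gives agaw =
-  * DIV_ROUND_UP(48 - 30, 9) = 2, which agaw_to_level() maps to
-  * 4-level paging; agaw 0 is the minimal 30-bit, 2-level table.
-  */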
- static inline unsigned int level_to_offset_bits(int level)
- {
- return (level - 1) * LEVEL_STRIDE;
- }
- static inline int pfn_level_offset(unsigned long pfn, int level)
- {
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
- }
- static inline unsigned long level_mask(int level)
- {
- return -1UL << level_to_offset_bits(level);
- }
- static inline unsigned long level_size(int level)
- {
- return 1UL << level_to_offset_bits(level);
- }
- static inline unsigned long align_to_level(unsigned long pfn, int level)
- {
- return (pfn + level_size(level) - 1) & level_mask(level);
- }
- static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
- {
- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
- }
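- /*
-  * Example: lvl 1 covers a single 4KiB page, lvl 2 covers
-  * 1 << 9 = 512 pages (a 2MiB superpage), lvl 3 covers 1GiB.
-  */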
- /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
- static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
- {
- return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
- }
- static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
- {
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
- }
- static inline unsigned long page_to_dma_pfn(struct page *pg)
- {
- return mm_to_dma_pfn(page_to_pfn(pg));
- }
- static inline unsigned long virt_to_dma_pfn(void *p)
- {
- return page_to_dma_pfn(virt_to_page(p));
- }
- /* global iommu list, set NULL for ignored DMAR units */
- static struct intel_iommu **g_iommus;
- static void __init check_tylersburg_isoch(void);
- static int rwbf_quirk;
- /*
- * set to 1 to panic the kernel if VT-d cannot be successfully enabled
- * (used when the kernel is launched with TXT)
- */
- static int force_on = 0;
- /*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
- struct root_entry {
- u64 lo;
- u64 hi;
- };
- #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
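- /* 4KiB root table / 16-byte entries = 256 entries, one per PCI bus */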
- /*
- * Take a root_entry and return the Lower Context Table Pointer (LCTP)
- * if marked present.
- */
- static phys_addr_t root_entry_lctp(struct root_entry *re)
- {
- if (!(re->lo & 1))
- return 0;
- return re->lo & VTD_PAGE_MASK;
- }
- /*
- * Take a root_entry and return the Upper Context Table Pointer (UCTP)
- * if marked present.
- */
- static phys_addr_t root_entry_uctp(struct root_entry *re)
- {
- if (!(re->hi & 1))
- return 0;
- return re->hi & VTD_PAGE_MASK;
- }
- /*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: available
- * 8-23: domain id
- */
- struct context_entry {
- u64 lo;
- u64 hi;
- };
- static inline void context_clear_pasid_enable(struct context_entry *context)
- {
- context->lo &= ~(1ULL << 11);
- }
- static inline bool context_pasid_enabled(struct context_entry *context)
- {
- return !!(context->lo & (1ULL << 11));
- }
- static inline void context_set_copied(struct context_entry *context)
- {
- context->hi |= (1ull << 3);
- }
- static inline bool context_copied(struct context_entry *context)
- {
- return !!(context->hi & (1ULL << 3));
- }
- static inline bool __context_present(struct context_entry *context)
- {
- return (context->lo & 1);
- }
- static inline bool context_present(struct context_entry *context)
- {
- return context_pasid_enabled(context) ?
- __context_present(context) :
- __context_present(context) && !context_copied(context);
- }
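- /*
-  * Entries copied from a previous kernel (kdump) deliberately read as
-  * non-present here so they get rewritten before first use. The copied
-  * flag lives in a software-available bit of the high word, which is
-  * presumably only usable for legacy (non-PASID) entries.
-  */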
- static inline void context_set_present(struct context_entry *context)
- {
- context->lo |= 1;
- }
- static inline void context_set_fault_enable(struct context_entry *context)
- {
- context->lo &= (((u64)-1) << 2) | 1;
- }
- static inline void context_set_translation_type(struct context_entry *context,
- unsigned long value)
- {
- context->lo &= (((u64)-1) << 4) | 3;
- context->lo |= (value & 3) << 2;
- }
- static inline void context_set_address_root(struct context_entry *context,
- unsigned long value)
- {
- context->lo &= ~VTD_PAGE_MASK;
- context->lo |= value & VTD_PAGE_MASK;
- }
- static inline void context_set_address_width(struct context_entry *context,
- unsigned long value)
- {
- context->hi |= value & 7;
- }
- static inline void context_set_domain_id(struct context_entry *context,
- unsigned long value)
- {
- context->hi |= (value & ((1 << 16) - 1)) << 8;
- }
- static inline int context_domain_id(struct context_entry *c)
- {
- return (c->hi >> 8) & 0xffff;
- }
- static inline void context_clear_entry(struct context_entry *context)
- {
- context->lo = 0;
- context->hi = 0;
- }
- /*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
- * 12-63: Host physical address
- */
- struct dma_pte {
- u64 val;
- };
- static inline void dma_clear_pte(struct dma_pte *pte)
- {
- pte->val = 0;
- }
- static inline u64 dma_pte_addr(struct dma_pte *pte)
- {
- #ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK;
- #else
- /* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
- #endif
- }
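- /*
-  * Note the 32-bit path above uses __cmpxchg64(&pte->val, 0, 0) purely
-  * as an atomic 64-bit load: it only stores when the old value is 0,
-  * and then it stores 0 again, so the PTE is never modified.
-  */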
- static inline bool dma_pte_present(struct dma_pte *pte)
- {
- return (pte->val & 3) != 0;
- }
- static inline bool dma_pte_superpage(struct dma_pte *pte)
- {
- return (pte->val & DMA_PTE_LARGE_PAGE);
- }
- static inline int first_pte_in_page(struct dma_pte *pte)
- {
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
- }
- /*
- * This domain is a static identity mapping domain.
- * 1. This domain creates a static 1:1 mapping of all usable memory.
- * 2. It maps to each iommu if successful.
- * 3. Each iommu maps to this domain if successful.
- */
- static struct dmar_domain *si_domain;
- static int hw_pass_through = 1;
- /*
- * Domain represents a virtual machine; more than one device
- * across iommus may be owned by one domain, e.g. a kvm guest.
- */
- #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
- /* si_domain contains multiple devices */
- #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
- #define for_each_domain_iommu(idx, domain) \
- for (idx = 0; idx < g_num_of_iommus; idx++) \
- if (domain->iommu_refcnt[idx])
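- /*
-  * Usage sketch: for_each_domain_iommu(i, domain) visits only the
-  * iommu indexes on which this domain holds a device refcount, e.g.
-  * to look up g_iommus[i] or domain->iommu_did[i].
-  */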
- struct dmar_domain {
- int nid; /* node id */
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
- int flags; /* flags to find out type of domain */
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
- };
- /* PCI domain-device relationship */
- struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
- };
- struct dmar_rmrr_unit {
- struct list_head list; /* list of rmrr units */
- struct acpi_dmar_header *hdr; /* ACPI header */
- u64 base_address; /* reserved base address*/
- u64 end_address; /* reserved end address */
- struct dmar_dev_scope *devices; /* target devices */
- int devices_cnt; /* target device count */
- };
- struct dmar_atsr_unit {
- struct list_head list; /* list of ATSR units */
- struct acpi_dmar_header *hdr; /* ACPI header */
- struct dmar_dev_scope *devices; /* target devices */
- int devices_cnt; /* target device count */
- u8 include_all:1; /* include all ports */
- };
- static LIST_HEAD(dmar_atsr_units);
- static LIST_HEAD(dmar_rmrr_units);
- #define for_each_rmrr_units(rmrr) \
- list_for_each_entry(rmrr, &dmar_rmrr_units, list)
- static void flush_unmaps_timeout(unsigned long data);
- struct deferred_flush_entry {
- unsigned long iova_pfn;
- unsigned long nrpages;
- struct dmar_domain *domain;
- struct page *freelist;
- };
- #define HIGH_WATER_MARK 250
- struct deferred_flush_table {
- int next;
- struct deferred_flush_entry entries[HIGH_WATER_MARK];
- };
- struct deferred_flush_data {
- spinlock_t lock;
- int timer_on;
- struct timer_list timer;
- long size;
- struct deferred_flush_table *tables;
- };
- DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
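- /*
-  * Per-cpu deferred-flush state: unmapped IOVA ranges are batched into
-  * a table per iommu and released from flush_unmaps_timeout(), either
-  * when a table reaches HIGH_WATER_MARK entries or when the timer fires.
-  */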
- /* bitmap for indexing intel_iommus */
- static int g_num_of_iommus;
- static void domain_exit(struct dmar_domain *domain);
- static void domain_remove_dev_info(struct dmar_domain *domain);
- static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev);
- static void __dmar_remove_one_dev_info(struct device_domain_info *info);
- static void domain_context_clear(struct intel_iommu *iommu,
- struct device *dev);
- static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu);
- #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
- int dmar_disabled = 0;
- #else
- int dmar_disabled = 1;
- #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
- int intel_iommu_enabled = 0;
- EXPORT_SYMBOL_GPL(intel_iommu_enabled);
- static int dmar_map_gfx = 1;
- static int dmar_forcedac;
- static int intel_iommu_strict;
- static int intel_iommu_superpage = 1;
- static int intel_iommu_ecs = 1;
- static int intel_iommu_pasid28;
- static int iommu_identity_mapping;
- #define IDENTMAP_ALL 1
- #define IDENTMAP_GFX 2
- #define IDENTMAP_AZALIA 4
- /* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capability bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
- #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
- (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
- /* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits is set. */
- #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
- (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
- int intel_iommu_gfx_mapped;
- EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
- #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
- static DEFINE_SPINLOCK(device_domain_lock);
- static LIST_HEAD(device_domain_list);
- static const struct iommu_ops intel_iommu_ops;
- static bool translation_pre_enabled(struct intel_iommu *iommu)
- {
- return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
- }
- static void clear_translation_pre_enabled(struct intel_iommu *iommu)
- {
- iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
- }
- static void init_translation_status(struct intel_iommu *iommu)
- {
- u32 gsts;
- gsts = readl(iommu->reg + DMAR_GSTS_REG);
- if (gsts & DMA_GSTS_TES)
- iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
- }
- /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
- static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
- {
- return container_of(dom, struct dmar_domain, domain);
- }
- static int __init intel_iommu_setup(char *str)
- {
- if (!str)
- return -EINVAL;
- while (*str) {
- if (!strncmp(str, "on", 2)) {
- dmar_disabled = 0;
- pr_info("IOMMU enabled\n");
- } else if (!strncmp(str, "off", 3)) {
- dmar_disabled = 1;
- pr_info("IOMMU disabled\n");
- } else if (!strncmp(str, "igfx_off", 8)) {
- dmar_map_gfx = 0;
- pr_info("Disable GFX device mapping\n");
- } else if (!strncmp(str, "forcedac", 8)) {
- pr_info("Forcing DAC for PCI devices\n");
- dmar_forcedac = 1;
- } else if (!strncmp(str, "strict", 6)) {
- pr_info("Disable batched IOTLB flush\n");
- intel_iommu_strict = 1;
- } else if (!strncmp(str, "sp_off", 6)) {
- pr_info("Disable supported super page\n");
- intel_iommu_superpage = 0;
- } else if (!strncmp(str, "ecs_off", 7)) {
- printk(KERN_INFO
- "Intel-IOMMU: disable extended context table support\n");
- intel_iommu_ecs = 0;
- } else if (!strncmp(str, "pasid28", 7)) {
- printk(KERN_INFO
- "Intel-IOMMU: enable pre-production PASID support\n");
- intel_iommu_pasid28 = 1;
- iommu_identity_mapping |= IDENTMAP_GFX;
- }
- str += strcspn(str, ",");
- while (*str == ',')
- str++;
- }
- return 0;
- }
- __setup("intel_iommu=", intel_iommu_setup);
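- /*
-  * Example: booting with "intel_iommu=on,sp_off" enables the IOMMU
-  * with superpage use disabled; options are comma-separated.
-  */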
- static struct kmem_cache *iommu_domain_cache;
- static struct kmem_cache *iommu_devinfo_cache;
- static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
- {
- struct dmar_domain **domains;
- int idx = did >> 8;
- domains = iommu->domains[idx];
- if (!domains)
- return NULL;
- return domains[did & 0xff];
- }
- static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
- struct dmar_domain *domain)
- {
- struct dmar_domain **domains;
- int idx = did >> 8;
- if (!iommu->domains[idx]) {
- size_t size = 256 * sizeof(struct dmar_domain *);
- iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
- }
- domains = iommu->domains[idx];
- if (WARN_ON(!domains))
- return;
- domains[did & 0xff] = domain;
- }
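- /*
-  * Domain-id lookup is a lazily-allocated two-level table: the high
-  * 8 bits of the DID select a 256-pointer page, the low 8 bits select
-  * the domain within it, so allocation grows only with the ids in use.
-  */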
- static inline void *alloc_pgtable_page(int node)
- {
- struct page *page;
- void *vaddr = NULL;
- page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
- if (page)
- vaddr = page_address(page);
- return vaddr;
- }
- static inline void free_pgtable_page(void *vaddr)
- {
- free_page((unsigned long)vaddr);
- }
- static inline void *alloc_domain_mem(void)
- {
- return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
- }
- static void free_domain_mem(void *vaddr)
- {
- kmem_cache_free(iommu_domain_cache, vaddr);
- }
- static inline void *alloc_devinfo_mem(void)
- {
- return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
- }
- static inline void free_devinfo_mem(void *vaddr)
- {
- kmem_cache_free(iommu_devinfo_cache, vaddr);
- }
- static inline int domain_type_is_vm(struct dmar_domain *domain)
- {
- return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
- }
- static inline int domain_type_is_si(struct dmar_domain *domain)
- {
- return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
- }
- static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
- {
- return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
- DOMAIN_FLAG_STATIC_IDENTITY);
- }
- static inline int domain_pfn_supported(struct dmar_domain *domain,
- unsigned long pfn)
- {
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
- }
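- /*
-  * Example: with agaw 2 (48-bit width) addr_width is 48 - 12 = 36,
-  * so any pfn at or above 1UL << 36 is rejected.
-  */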
- static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
- {
- unsigned long sagaw;
- int agaw = -1;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
- if (test_bit(agaw, &sagaw))
- break;
- }
- return agaw;
- }
- /*
- * Calculate max SAGAW for each iommu.
- */
- int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
- {
- return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
- }
- /*
- * Calculate the agaw for each iommu.
- * "SAGAW" may be different across iommus; use a default agaw, and
- * fall back to a smaller supported agaw for iommus that don't
- * support the default agaw.
- */
- int iommu_calculate_agaw(struct intel_iommu *iommu)
- {
- return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- }
- /* This function can only return a single iommu in a domain */
- static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
- {
- int iommu_id;
- /* si_domain and vm domain should not get here. */
- BUG_ON(domain_type_is_vm_or_si(domain));
- for_each_domain_iommu(iommu_id, domain)
- break;
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
- return g_iommus[iommu_id];
- }
- static void domain_update_iommu_coherency(struct dmar_domain *domain)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool found = false;
- int i;
- domain->iommu_coherency = 1;
- for_each_domain_iommu(i, domain) {
- found = true;
- if (!ecap_coherent(g_iommus[i]->ecap)) {
- domain->iommu_coherency = 0;
- break;
- }
- }
- if (found)
- return;
- /* No hardware attached; use lowest common denominator */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!ecap_coherent(iommu->ecap)) {
- domain->iommu_coherency = 0;
- break;
- }
- }
- rcu_read_unlock();
- }
- static int domain_update_iommu_snooping(struct intel_iommu *skip)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- int ret = 1;
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (iommu != skip) {
- if (!ecap_sc_support(iommu->ecap)) {
- ret = 0;
- break;
- }
- }
- }
- rcu_read_unlock();
- return ret;
- }
- static int domain_update_iommu_superpage(struct intel_iommu *skip)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- int mask = 0xf;
- if (!intel_iommu_superpage)
- return 0;
- /* set iommu_superpage to the smallest common denominator */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (iommu != skip) {
- mask &= cap_super_page_val(iommu->cap);
- if (!mask)
- break;
- }
- }
- rcu_read_unlock();
- return fls(mask);
- }
- /* Some capabilities may be different across iommus */
- static void domain_update_iommu_cap(struct dmar_domain *domain)
- {
- domain_update_iommu_coherency(domain);
- domain->iommu_snooping = domain_update_iommu_snooping(NULL);
- domain->iommu_superpage = domain_update_iommu_superpage(NULL);
- }
- static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
- u8 bus, u8 devfn, int alloc)
- {
- struct root_entry *root = &iommu->root_entry[bus];
- struct context_entry *context;
- u64 *entry;
- entry = &root->lo;
- if (ecs_enabled(iommu)) {
- if (devfn >= 0x80) {
- devfn -= 0x80;
- entry = &root->hi;
- }
- devfn *= 2;
- }
- if (*entry & 1)
- context = phys_to_virt(*entry & VTD_PAGE_MASK);
- else {
- unsigned long phy_addr;
- if (!alloc)
- return NULL;
- context = alloc_pgtable_page(iommu->node);
- if (!context)
- return NULL;
- __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
- phy_addr = virt_to_phys((void *)context);
- *entry = phy_addr | 1;
- __iommu_flush_cache(iommu, entry, sizeof(*entry));
- }
- return &context[devfn];
- }
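- /*
-  * With ECS, extended context entries are twice as wide, so each bus
-  * needs two context tables: root->lo covers devfn 0x00-0x7f and
-  * root->hi covers devfn 0x80-0xff, with devfn doubled to index the
-  * 128 double-size entries in each table.
-  */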
- static int iommu_dummy(struct device *dev)
- {
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
- }
- static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
- {
- struct dmar_drhd_unit *drhd = NULL;
- struct intel_iommu *iommu;
- struct device *tmp;
- struct pci_dev *ptmp, *pdev = NULL;
- u16 segment = 0;
- int i;
- if (iommu_dummy(dev))
- return NULL;
- if (dev_is_pci(dev)) {
- struct pci_dev *pf_pdev;
- pdev = to_pci_dev(dev);
- /* VFs aren't listed in scope tables; we need to look up
- * the PF instead to find the IOMMU. */
- pf_pdev = pci_physfn(pdev);
- dev = &pf_pdev->dev;
- segment = pci_domain_nr(pdev->bus);
- } else if (has_acpi_companion(dev))
- dev = &ACPI_COMPANION(dev)->dev;
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (pdev && segment != drhd->segment)
- continue;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, tmp) {
- if (tmp == dev) {
- /* For a VF use its original BDF# not that of the PF
- * which we used for the IOMMU lookup. Strictly speaking
- * we could do this for all PCI devices; we only need to
- * get the BDF# from the scope table for ACPI matches. */
- if (pdev && pdev->is_virtfn)
- goto got_pdev;
- *bus = drhd->devices[i].bus;
- *devfn = drhd->devices[i].devfn;
- goto out;
- }
- if (!pdev || !dev_is_pci(tmp))
- continue;
- ptmp = to_pci_dev(tmp);
- if (ptmp->subordinate &&
- ptmp->subordinate->number <= pdev->bus->number &&
- ptmp->subordinate->busn_res.end >= pdev->bus->number)
- goto got_pdev;
- }
- if (pdev && drhd->include_all) {
- got_pdev:
- *bus = pdev->bus->number;
- *devfn = pdev->devfn;
- goto out;
- }
- }
- iommu = NULL;
- out:
- rcu_read_unlock();
- return iommu;
- }
- static void domain_flush_cache(struct dmar_domain *domain,
- void *addr, int size)
- {
- if (!domain->iommu_coherency)
- clflush_cache_range(addr, size);
- }
- static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
- {
- struct context_entry *context;
- int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
- context = iommu_context_addr(iommu, bus, devfn, 0);
- if (context)
- ret = context_present(context);
- spin_unlock_irqrestore(&iommu->lock, flags);
- return ret;
- }
- static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
- {
- struct context_entry *context;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
- context = iommu_context_addr(iommu, bus, devfn, 0);
- if (context) {
- context_clear_entry(context);
- __iommu_flush_cache(iommu, context, sizeof(*context));
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
- static void free_context_table(struct intel_iommu *iommu)
- {
- int i;
- unsigned long flags;
- struct context_entry *context;
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry)
- goto out;
- for (i = 0; i < ROOT_ENTRY_NR; i++) {
- context = iommu_context_addr(iommu, i, 0, 0);
- if (context)
- free_pgtable_page(context);
- if (!ecs_enabled(iommu))
- continue;
- context = iommu_context_addr(iommu, i, 0x80, 0);
- if (context)
- free_pgtable_page(context);
- }
- free_pgtable_page(iommu->root_entry);
- iommu->root_entry = NULL;
- out:
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
- static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int *target_level)
- {
- struct dma_pte *parent, *pte = NULL;
- int level = agaw_to_level(domain->agaw);
- int offset;
- BUG_ON(!domain->pgd);
- if (!domain_pfn_supported(domain, pfn))
- /* Address beyond IOMMU's addressing capabilities. */
- return NULL;
- parent = domain->pgd;
- while (1) {
- void *tmp_page;
- offset = pfn_level_offset(pfn, level);
- pte = &parent[offset];
- if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
- break;
- if (level == *target_level)
- break;
- if (!dma_pte_present(pte)) {
- uint64_t pteval;
- tmp_page = alloc_pgtable_page(domain->nid);
- if (!tmp_page)
- return NULL;
- domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (cmpxchg64(&pte->val, 0ULL, pteval))
- /* Someone else set it while we were thinking; use theirs. */
- free_pgtable_page(tmp_page);
- else
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
- if (level == 1)
- break;
- parent = phys_to_virt(dma_pte_addr(pte));
- level--;
- }
- if (!*target_level)
- *target_level = level;
- return pte;
- }
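- /*
-  * Passing *target_level == 0 asks for whatever mapping exists: the
-  * walk stops at the first superpage or non-present entry, and the
-  * level actually reached is reported back through target_level.
-  */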
- /* return the pte for an address at a specific level */
- static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
- unsigned long pfn,
- int level, int *large_page)
- {
- struct dma_pte *parent, *pte = NULL;
- int total = agaw_to_level(domain->agaw);
- int offset;
- parent = domain->pgd;
- while (level <= total) {
- offset = pfn_level_offset(pfn, total);
- pte = &parent[offset];
- if (level == total)
- return pte;
- if (!dma_pte_present(pte)) {
- *large_page = total;
- break;
- }
- if (dma_pte_superpage(pte)) {
- *large_page = total;
- return pte;
- }
- parent = phys_to_virt(dma_pte_addr(pte));
- total--;
- }
- return NULL;
- }
- /* clear last-level ptes; a tlb flush should follow */
- static void dma_pte_clear_range(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
- {
- unsigned int large_page = 1;
- struct dma_pte *first_pte, *pte;
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
- /* we don't need lock here; nobody else touches the iova range */
- do {
- large_page = 1;
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
- if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, large_page + 1);
- continue;
- }
- do {
- dma_clear_pte(pte);
- start_pfn += lvl_to_nr_pages(large_page);
- pte++;
- } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- } while (start_pfn && start_pfn <= last_pfn);
- }
- static void dma_pte_free_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn, unsigned long last_pfn)
- {
- pfn = max(start_pfn, pfn);
- pte = &pte[pfn_level_offset(pfn, level)];
- do {
- unsigned long level_pfn;
- struct dma_pte *level_pte;
- if (!dma_pte_present(pte) || dma_pte_superpage(pte))
- goto next;
- level_pfn = pfn & level_mask(level);
- level_pte = phys_to_virt(dma_pte_addr(pte));
- if (level > 2)
- dma_pte_free_level(domain, level - 1, level_pte,
- level_pfn, start_pfn, last_pfn);
- /* If range covers entire pagetable, free it */
- if (!(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level) - 1)) {
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- free_pgtable_page(level_pte);
- }
- next:
- pfn += level_size(level);
- } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
- }
- /* clear last level (leaf) ptes and free page table pages. */
- static void dma_pte_free_pagetable(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
- {
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
- dma_pte_clear_range(domain, start_pfn, last_pfn);
- /* We don't need lock here; nobody else touches the iova range */
- dma_pte_free_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn);
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- free_pgtable_page(domain->pgd);
- domain->pgd = NULL;
- }
- }
- /* When a page at a given level is being unlinked from its parent, we don't
- need to *modify* it at all. All we need to do is make a list of all the
- pages which can be freed just as soon as we've flushed the IOTLB and we
- know the hardware page-walk will no longer touch them.
- The 'pte' argument is the *parent* PTE, pointing to the page that is to
- be freed. */
- static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct page *freelist)
- {
- struct page *pg;
- pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- pg->freelist = freelist;
- freelist = pg;
- if (level == 1)
- return freelist;
- pte = page_address(pg);
- do {
- if (dma_pte_present(pte) && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1,
- pte, freelist);
- pte++;
- } while (!first_pte_in_page(pte));
- return freelist;
- }
- static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn,
- unsigned long last_pfn,
- struct page *freelist)
- {
- struct dma_pte *first_pte = NULL, *last_pte = NULL;
- pfn = max(start_pfn, pfn);
- pte = &pte[pfn_level_offset(pfn, level)];
- do {
- unsigned long level_pfn;
- if (!dma_pte_present(pte))
- goto next;
- level_pfn = pfn & level_mask(level);
- /* If range covers entire pagetable, free it */
- if (start_pfn <= level_pfn &&
- last_pfn >= level_pfn + level_size(level) - 1) {
- /* These subordinate page tables are going away entirely. Don't
- bother to clear them; we're just going to *free* them. */
- if (level > 1 && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
- dma_clear_pte(pte);
- if (!first_pte)
- first_pte = pte;
- last_pte = pte;
- } else if (level > 1) {
- /* Recurse down into a level that isn't *entirely* obsolete */
- freelist = dma_pte_clear_level(domain, level - 1,
- phys_to_virt(dma_pte_addr(pte)),
- level_pfn, start_pfn, last_pfn,
- freelist);
- }
- next:
- pfn += level_size(level);
- } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
- if (first_pte)
- domain_flush_cache(domain, first_pte,
- (void *)++last_pte - (void *)first_pte);
- return freelist;
- }
- /* We can't just free the pages because the IOMMU may still be walking
- the page tables, and may have cached the intermediate levels. The
- pages can only be freed after the IOTLB flush has been done. */
- static struct page *domain_unmap(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
- {
- struct page *freelist = NULL;
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
- /* we don't need lock here; nobody else touches the iova range */
- freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn, NULL);
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- struct page *pgd_page = virt_to_page(domain->pgd);
- pgd_page->freelist = freelist;
- freelist = pgd_page;
- domain->pgd = NULL;
- }
- return freelist;
- }
- static void dma_free_pagelist(struct page *freelist)
- {
- struct page *pg;
- while ((pg = freelist)) {
- freelist = pg->freelist;
- free_pgtable_page(page_address(pg));
- }
- }
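- /*
-  * The freelist is chained through page->freelist, so no memory needs
-  * to be allocated in order to remember which page-table pages to
-  * release once the IOTLB flush has completed.
-  */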
- /* iommu handling */
- static int iommu_alloc_root_entry(struct intel_iommu *iommu)
- {
- struct root_entry *root;
- unsigned long flags;
- root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root) {
- pr_err("Allocating root entry for %s failed\n",
- iommu->name);
- return -ENOMEM;
- }
- __iommu_flush_cache(iommu, root, ROOT_SIZE);
- spin_lock_irqsave(&iommu->lock, flags);
- iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
- }
- static void iommu_set_root_entry(struct intel_iommu *iommu)
- {
- u64 addr;
- u32 sts;
- unsigned long flag;
- addr = virt_to_phys(iommu->root_entry);
- if (ecs_enabled(iommu))
- addr |= DMA_RTADDR_RTT;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
- writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- static void iommu_flush_write_buffer(struct intel_iommu *iommu)
- {
- u32 val;
- unsigned long flag;
- if (!rwbf_quirk && !cap_rwbf(iommu->cap))
- return;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- /* return value determines if we need a write buffer flush */
- static void __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask,
- u64 type)
- {
- u64 val = 0;
- unsigned long flag;
- switch (type) {
- case DMA_CCMD_GLOBAL_INVL:
- val = DMA_CCMD_GLOBAL_INVL;
- break;
- case DMA_CCMD_DOMAIN_INVL:
- val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
- break;
- case DMA_CCMD_DEVICE_INVL:
- val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
- | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
- break;
- default:
- BUG();
- }
- val |= DMA_CCMD_ICC;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
- dmar_readq, (!(val & DMA_CCMD_ICC)), val);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- /* The return value determines if we need a write buffer flush */
- static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type)
- {
- int tlb_offset = ecap_iotlb_offset(iommu->ecap);
- u64 val = 0, val_iva = 0;
- unsigned long flag;
- switch (type) {
- case DMA_TLB_GLOBAL_FLUSH:
- /* global flush doesn't need to set IVA_REG */
- val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
- break;
- case DMA_TLB_DSI_FLUSH:
- val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- break;
- case DMA_TLB_PSI_FLUSH:
- val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- /* IH bit is passed in as part of address */
- val_iva = size_order | addr;
- break;
- default:
- BUG();
- }
- /* Note: set drain read/write */
- #if 0
- /*
- * This is probably meant to be extra-safe. It looks like we can
- * ignore it without any impact.
- */
- if (cap_read_drain(iommu->cap))
- val |= DMA_TLB_READ_DRAIN;
- #endif
- if (cap_write_drain(iommu->cap))
- val |= DMA_TLB_WRITE_DRAIN;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- /* Note: Only uses first TLB reg currently */
- if (val_iva)
- dmar_writeq(iommu->reg + tlb_offset, val_iva);
- dmar_writeq(iommu->reg + tlb_offset + 8, val);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, tlb_offset + 8,
- dmar_readq, (!(val & DMA_TLB_IVT)), val);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- /* check IOTLB invalidation granularity */
- if (DMA_TLB_IAIG(val) == 0)
- pr_err("Flush IOTLB failed\n");
- if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
- pr_debug("TLB flush request %Lx, actual %Lx\n",
- (unsigned long long)DMA_TLB_IIRG(type),
- (unsigned long long)DMA_TLB_IAIG(val));
- }
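- /* Look up the device_domain_info for (iommu, bus, devfn), returning
- it only when the device supports ATS and queued invalidation is
- available; otherwise NULL. Caller holds device_domain_lock. */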
- static struct device_domain_info *
- iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
- {
- struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
- if (!iommu->qi)
- return NULL;
- list_for_each_entry(info, &domain->devices, link)
- if (info->iommu == iommu && info->bus == bus &&
- info->devfn == devfn) {
- if (info->ats_supported && info->dev)
- return info;
- break;
- }
- return NULL;
- }
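- /* Recompute domain->has_iotlb_device: true iff at least one PCI
- device attached to this domain currently has ATS enabled. */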
- static void domain_update_iotlb(struct dmar_domain *domain)
- {
- struct device_domain_info *info;
- bool has_iotlb_device = false;
- assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
- has_iotlb_device = true;
- break;
- }
- }
- domain->has_iotlb_device = has_iotlb_device;
- }
- static void iommu_enable_dev_iotlb(struct device_domain_info *info)
- {
- struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
- if (!info || !dev_is_pci(info->dev))
- return;
- pdev = to_pci_dev(info->dev);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- /* The PCIe spec, in its wisdom, declares that the behaviour of
- the device if you enable PASID support after ATS support is
- undefined. So always enable PASID support on devices which
- have it, even if we can't yet know if we're ever going to
- use it. */
- if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
- info->pasid_enabled = 1;
- if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
- info->pri_enabled = 1;
- #endif
- if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
- info->ats_enabled = 1;
- domain_update_iotlb(info->domain);
- info->ats_qdep = pci_ats_queue_depth(pdev);
- }
- }
- static void iommu_disable_dev_iotlb(struct device_domain_info *info)
- {
- struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
- if (!dev_is_pci(info->dev))
- return;
- pdev = to_pci_dev(info->dev);
- if (info->ats_enabled) {
- pci_disable_ats(pdev);
- info->ats_enabled = 0;
- domain_update_iotlb(info->domain);
- }
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (info->pri_enabled) {
- pci_disable_pri(pdev);
- info->pri_enabled = 0;
- }
- if (info->pasid_enabled) {
- pci_disable_pasid(pdev);
- info->pasid_enabled = 0;
- }
- #endif
- }
- static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
- u64 addr, unsigned mask)
- {
- u16 sid, qdep;
- unsigned long flags;
- struct device_domain_info *info;
- if (!domain->has_iotlb_device)
- return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
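- /* Page-selective IOTLB invalidation for 'pages' pages starting at
- 'pfn', with the fallback to a domain-selective flush described in
- the comment below; also flushes the device IOTLBs when needed. */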
- static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages,
- int ih, int map)
- {
- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
- uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain->iommu_did[iommu->seq_id];
- BUG_ON(pages == 0);
- if (ih)
- ih = 1 << 6;
- /*
- * Fallback to domain selective flush if no PSI support or the size is
- * too big.
- * PSI requires the page size to be 2^x, and the base address to be
- * naturally aligned to that size.
- */
- if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- /*
- * In caching mode, changes of pages from non-present to present require
- * a flush. However, the device IOTLB doesn't need to be flushed in this case.
- */
- if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(domain, addr, mask);
- }
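- /* Clear the Enable Protected Memory bit so DMA to the protected
- memory regions is no longer blocked, and wait for the protected
- region status bit to confirm it. */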
- static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
- {
- u32 pmen;
- unsigned long flags;
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- pmen = readl(iommu->reg + DMAR_PMEN_REG);
- pmen &= ~DMA_PMEN_EPM;
- writel(pmen, iommu->reg + DMAR_PMEN_REG);
- /* wait for the protected region status bit to clear */
- IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
- readl, !(pmen & DMA_PMEN_PRS), pmen);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
- static void iommu_enable_translation(struct intel_iommu *iommu)
- {
- u32 sts;
- unsigned long flags;
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd |= DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
- static void iommu_disable_translation(struct intel_iommu *iommu)
- {
- u32 sts;
- unsigned long flag;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->gcmd &= ~DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- static int iommu_init_domains(struct intel_iommu *iommu)
- {
- u32 ndomains, nlongs;
- size_t size;
- ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%d>\n",
- iommu->name, ndomains);
- nlongs = BITS_TO_LONGS(ndomains);
- spin_lock_init(&iommu->lock);
- iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
- if (!iommu->domain_ids) {
- pr_err("%s: Allocating domain id array failed\n",
- iommu->name);
- return -ENOMEM;
- }
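- /*
- * The domains pointer array is allocated in 256-entry chunks so that
- * a large ndomains value doesn't require one huge allocation; a
- * domain-id is then resolved as domains[did >> 8][did & 0xff]. Only
- * the first chunk is populated up front.
- */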
- size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
- iommu->domains = kzalloc(size, GFP_KERNEL);
- if (iommu->domains) {
- size = 256 * sizeof(struct dmar_domain *);
- iommu->domains[0] = kzalloc(size, GFP_KERNEL);
- }
- if (!iommu->domains || !iommu->domains[0]) {
- pr_err("%s: Allocating domain array failed\n",
- iommu->name);
- kfree(iommu->domain_ids);
- kfree(iommu->domains);
- iommu->domain_ids = NULL;
- iommu->domains = NULL;
- return -ENOMEM;
- }
- /*
- * If Caching mode is set, then invalid translations are tagged
- * with domain-id 0, hence we need to pre-allocate it. We also
- * use domain-id 0 as a marker for non-allocated domain-id, so
- * make sure it is not used for a real domain.
- */
- set_bit(0, iommu->domain_ids);
- return 0;
- }
- static void disable_dmar_iommu(struct intel_iommu *iommu)
- {
- struct device_domain_info *info, *tmp;
- unsigned long flags;
- if (!iommu->domains || !iommu->domain_ids)
- return;
- again:
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- struct dmar_domain *domain;
- if (info->iommu != iommu)
- continue;
- if (!info->dev || !info->domain)
- continue;
- domain = info->domain;
- __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain)) {
- /*
- * The domain_exit() function can't be called under
- * device_domain_lock, as it takes this lock itself.
- * So release the lock here and re-run the loop
- * afterwards.
- */
- spin_unlock_irqrestore(&device_domain_lock, flags);
- domain_exit(domain);
- goto again;
- }
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (iommu->gcmd & DMA_GCMD_TE)
- iommu_disable_translation(iommu);
- }
- static void free_dmar_iommu(struct intel_iommu *iommu)
- {
- if ((iommu->domains) && (iommu->domain_ids)) {
- int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
- int i;
- for (i = 0; i < elems; i++)
- kfree(iommu->domains[i]);
- kfree(iommu->domains);
- kfree(iommu->domain_ids);
- iommu->domains = NULL;
- iommu->domain_ids = NULL;
- }
- g_iommus[iommu->seq_id] = NULL;
- /* free context mapping */
- free_context_table(iommu);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu)) {
- if (ecap_prs(iommu->ecap))
- intel_svm_finish_prq(iommu);
- intel_svm_free_pasid_tables(iommu);
- }
- #endif
- }
- static struct dmar_domain *alloc_domain(int flags)
- {
- struct dmar_domain *domain;
- domain = alloc_domain_mem();
- if (!domain)
- return NULL;
- memset(domain, 0, sizeof(*domain));
- domain->nid = -1;
- domain->flags = flags;
- domain->has_iotlb_device = false;
- INIT_LIST_HEAD(&domain->devices);
- return domain;
- }
- /* Must be called with device_domain_lock and iommu->lock held */
- static int domain_attach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
- {
- unsigned long ndomains;
- int num;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
- domain->iommu_refcnt[iommu->seq_id] += 1;
- domain->iommu_count += 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 1) {
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- domain->iommu_count -= 1;
- return -ENOSPC;
- }
- set_bit(num, iommu->domain_ids);
- set_iommu_domain(iommu, num, domain);
- domain->iommu_did[iommu->seq_id] = num;
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
- }
- return 0;
- }
- static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
- {
- int num, count = INT_MAX;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- count = --domain->iommu_count;
- if (domain->iommu_refcnt[iommu->seq_id] == 0) {
- num = domain->iommu_did[iommu->seq_id];
- clear_bit(num, iommu->domain_ids);
- set_iommu_domain(iommu, num, NULL);
- domain_update_iommu_cap(domain);
- domain->iommu_did[iommu->seq_id] = 0;
- }
- return count;
- }
- static struct iova_domain reserved_iova_list;
- static struct lock_class_key reserved_rbtree_key;
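- /*
- * Reserve IOVA ranges that must never be handed out for DMA: the
- * IOAPIC window and every PCI device's MMIO resources. These are
- * copied into each new domain by domain_reserve_special_ranges().
- */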
- static int dmar_init_reserved_ranges(void)
- {
- struct pci_dev *pdev = NULL;
- struct iova *iova;
- int i;
- init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
- lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
- &reserved_rbtree_key);
- /* IOAPIC ranges shouldn't be accessed by DMA */
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
- IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova) {
- pr_err("Reserve IOAPIC range failed\n");
- return -ENODEV;
- }
- /* Reserve all PCI MMIO to avoid peer-to-peer access */
- for_each_pci_dev(pdev) {
- struct resource *r;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- r = &pdev->resource[i];
- if (!r->flags || !(r->flags & IORESOURCE_MEM))
- continue;
- iova = reserve_iova(&reserved_iova_list,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!iova) {
- pr_err("Reserve iova failed\n");
- return -ENODEV;
- }
- }
- }
- return 0;
- }
- static void domain_reserve_special_ranges(struct dmar_domain *domain)
- {
- copy_reserved_iova(&reserved_iova_list, &domain->iovad);
- }
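- /* Round the guest address width up so that (agaw - 12) is a whole
- number of 9-bit page-table levels, capping at 64 bits. For
- example, gaw = 40 gives r = (40 - 12) % 9 = 1, and thus
- agaw = 40 + 9 - 1 = 48. */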
- static inline int guestwidth_to_adjustwidth(int gaw)
- {
- int agaw;
- int r = (gaw - 12) % 9;
- if (r == 0)
- agaw = gaw;
- else
- agaw = gaw + 9 - r;
- if (agaw > 64)
- agaw = 64;
- return agaw;
- }
- static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
- {
- int adjust_width, agaw;
- unsigned long sagaw;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
- domain_reserve_special_ranges(domain);
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
- domain->nid = iommu->node;
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
- }
- static void domain_exit(struct dmar_domain *domain)
- {
- struct page *freelist = NULL;
- /* Domain 0 is reserved, so don't process it */
- if (!domain)
- return;
- /* Flush any lazy unmaps that may reference this domain */
- if (!intel_iommu_strict) {
- int cpu;
- for_each_possible_cpu(cpu)
- flush_unmaps_timeout(cpu);
- }
- /* Remove associated devices and clear attached or cached domains */
- rcu_read_lock();
- domain_remove_dev_info(domain);
- rcu_read_unlock();
- /* destroy iovas */
- put_iova_domain(&domain->iovad);
- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
- dma_free_pagelist(freelist);
- free_domain_mem(domain);
- }
- static int domain_context_mapping_one(struct dmar_domain *domain,
- struct intel_iommu *iommu,
- u8 bus, u8 devfn)
- {
- u16 did = domain->iommu_did[iommu->seq_id];
- int translation = CONTEXT_TT_MULTI_LEVEL;
- struct device_domain_info *info = NULL;
- struct context_entry *context;
- unsigned long flags;
- struct dma_pte *pgd;
- int ret, agaw;
- WARN_ON(did == 0);
- if (hw_pass_through && domain_type_is_si(domain))
- translation = CONTEXT_TT_PASS_THROUGH;
- pr_debug("Set context mapping for %02x:%02x.%d\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- BUG_ON(!domain->pgd);
- spin_lock_irqsave(&device_domain_lock, flags);
- spin_lock(&iommu->lock);
- ret = -ENOMEM;
- context = iommu_context_addr(iommu, bus, devfn, 1);
- if (!context)
- goto out_unlock;
- ret = 0;
- if (context_present(context))
- goto out_unlock;
- /*
- * For kdump cases, old valid entries may be cached due to the
- * in-flight DMA and copied pgtable, but there is no unmapping
- * behaviour for them, thus we need an explicit cache flush for
- * the newly-mapped device. For kdump, at this point, the device
- * is supposed to finish reset at its driver probe stage, so no
- * in-flight DMA will exist, and we don't need to worry about it
- * hereafter.
- */
- if (context_copied(context)) {
- u16 did_old = context_domain_id(context);
- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
- iommu->flush.flush_context(iommu, did_old,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
- DMA_TLB_DSI_FLUSH);
- }
- }
- pgd = domain->pgd;
- context_clear_entry(context);
- context_set_domain_id(context, did);
- /*
- * Skip top levels of page tables for iommu which has less agaw
- * than default. Unnecessary for PT mode.
- */
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- ret = -ENOMEM;
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd))
- goto out_unlock;
- }
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
- if (info && info->ats_supported)
- translation = CONTEXT_TT_DEV_IOTLB;
- else
- translation = CONTEXT_TT_MULTI_LEVEL;
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, iommu->agaw);
- } else {
- /*
- * In pass through mode, AW must be programmed to
- * indicate the largest AGAW value supported by
- * hardware. And ASR is ignored by hardware.
- */
- context_set_address_width(context, iommu->msagaw);
- }
- context_set_translation_type(context, translation);
- context_set_fault_enable(context);
- context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
- /*
- * It's a non-present to present mapping. If hardware doesn't cache
- * non-present entries we only need to flush the write-buffer. If it
- * _does_ cache non-present entries, then it does so in the special
- * domain #0, which we have to flush:
- */
- if (cap_caching_mode(iommu->cap)) {
- iommu->flush.flush_context(iommu, 0,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- } else {
- iommu_flush_write_buffer(iommu);
- }
- iommu_enable_dev_iotlb(info);
- ret = 0;
- out_unlock:
- spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return ret;
- }
- struct domain_context_mapping_data {
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- };
- static int domain_context_mapping_cb(struct pci_dev *pdev,
- u16 alias, void *opaque)
- {
- struct domain_context_mapping_data *data = opaque;
- return domain_context_mapping_one(data->domain, data->iommu,
- PCI_BUS_NUM(alias), alias & 0xff);
- }
- static int
- domain_context_mapping(struct dmar_domain *domain, struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- struct domain_context_mapping_data data;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, bus, devfn);
- data.domain = domain;
- data.iommu = iommu;
- return pci_for_each_dma_alias(to_pci_dev(dev),
- &domain_context_mapping_cb, &data);
- }
- static int domain_context_mapped_cb(struct pci_dev *pdev,
- u16 alias, void *opaque)
- {
- struct intel_iommu *iommu = opaque;
- return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
- }
- static int domain_context_mapped(struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- if (!dev_is_pci(dev))
- return device_context_mapped(iommu, bus, devfn);
- return !pci_for_each_dma_alias(to_pci_dev(dev),
- domain_context_mapped_cb, iommu);
- }
- /* Returns a number of VTD pages, but aligned to MM page size */
- static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
- {
- host_addr &= ~PAGE_MASK;
- return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
- }
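- /* For example, with 4KiB pages, host_addr = 0x1234 and size = 0x2000
- leave an offset of 0x234, pushing the span into a third page, so
- this returns 3. */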
- /* Return largest possible superpage level for a given mapping */
- static inline int hardware_largepage_caps(struct dmar_domain *domain,
- unsigned long iov_pfn,
- unsigned long phy_pfn,
- unsigned long pages)
- {
- int support, level = 1;
- unsigned long pfnmerge;
- support = domain->iommu_superpage;
- /* To use a large page, the virtual *and* physical addresses
- must be aligned to 2MiB/1GiB/etc. Lower bits set in either
- of them will mean we have to use smaller pages. So just
- merge them and check both at once. */
- pfnmerge = iov_pfn | phy_pfn;
- while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
- pages >>= VTD_STRIDE_SHIFT;
- if (!pages)
- break;
- pfnmerge >>= VTD_STRIDE_SHIFT;
- level++;
- support--;
- }
- return level;
- }
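- /*
- * Core mapping loop: install PTEs for nr_pages starting at iov_pfn,
- * taking physical addresses either from 'sg' or, when sg is NULL,
- * from the contiguous range starting at phys_pfn. Superpages are
- * used whenever hardware_largepage_caps() says the alignment and
- * remaining length allow it.
- */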
- static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
- {
- struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t uninitialized_var(pteval);
- unsigned long sg_res = 0;
- unsigned int largepage_lvl = 0;
- unsigned long lvl_pages = 0;
- BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
- if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
- return -EINVAL;
- prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
- if (!sg) {
- sg_res = nr_pages;
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
- }
- while (nr_pages > 0) {
- uint64_t tmp;
- if (!sg_res) {
- unsigned int pgoff = sg->offset & ~PAGE_MASK;
- sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
- sg->dma_length = sg->length;
- pteval = (sg_phys(sg) - pgoff) | prot;
- phys_pfn = pteval >> VTD_PAGE_SHIFT;
- }
- if (!pte) {
- largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
- if (!pte)
- return -ENOMEM;
- /* It is a large page */
- if (largepage_lvl > 1) {
- unsigned long nr_superpages, end_pfn;
- pteval |= DMA_PTE_LARGE_PAGE;
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
- nr_superpages = sg_res / lvl_pages;
- end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
- /*
- * Ensure that old small page tables are
- * removed to make room for superpage(s).
- */
- dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
- } else {
- pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
- }
- }
- /* We don't need a lock here; nobody else
- * touches the iova range
- */
- tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
- if (tmp) {
- static int dumps = 5;
- pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
- iov_pfn, tmp, (unsigned long long)pteval);
- if (dumps) {
- dumps--;
- debug_dma_dump_mappings(NULL);
- }
- WARN_ON(1);
- }
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
- BUG_ON(nr_pages < lvl_pages);
- BUG_ON(sg_res < lvl_pages);
- nr_pages -= lvl_pages;
- iov_pfn += lvl_pages;
- phys_pfn += lvl_pages;
- pteval += lvl_pages * VTD_PAGE_SIZE;
- sg_res -= lvl_pages;
- /* If the next PTE would be the first in a new page, then we
- need to flush the cache on the entries we've just written.
- And then we'll need to recalculate 'pte', so clear it and
- let it get set again in the if (!pte) block above.
- If we're done (!nr_pages) we need to flush the cache too.
- Also if we've been setting superpages, we may need to
- recalculate 'pte' and switch back to smaller pages for the
- end of the mapping, if the trailing size is not enough to
- use another superpage (i.e. sg_res < lvl_pages). */
- pte++;
- if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && sg_res < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- pte = NULL;
- }
- if (!sg_res && nr_pages)
- sg = sg_next(sg);
- }
- return 0;
- }
- static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long nr_pages,
- int prot)
- {
- return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
- }
- static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages,
- int prot)
- {
- return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
- }
- static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
- {
- if (!iommu)
- return;
- clear_context_table(iommu, bus, devfn);
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- }
- static inline void unlink_domain_info(struct device_domain_info *info)
- {
- assert_spin_locked(&device_domain_lock);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->archdata.iommu = NULL;
- }
- static void domain_remove_dev_info(struct dmar_domain *domain)
- {
- struct device_domain_info *info, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
- /*
- * find_domain
- * Note: we use struct device->archdata.iommu to store the info
- */
- static struct dmar_domain *find_domain(struct device *dev)
- {
- struct device_domain_info *info;
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
- if (info)
- return info->domain;
- return NULL;
- }
- static inline struct device_domain_info *
- dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
- {
- struct device_domain_info *info;
- list_for_each_entry(info, &device_domain_list, global)
- if (info->iommu->segment == segment && info->bus == bus &&
- info->devfn == devfn)
- return info;
- return NULL;
- }
- static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
- int bus, int devfn,
- struct device *dev,
- struct dmar_domain *domain)
- {
- struct dmar_domain *found = NULL;
- struct device_domain_info *info;
- unsigned long flags;
- int ret;
- info = alloc_devinfo_mem();
- if (!info)
- return NULL;
- info->bus = bus;
- info->devfn = devfn;
- info->ats_supported = info->pasid_supported = info->pri_supported = 0;
- info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
- info->ats_qdep = 0;
- info->dev = dev;
- info->domain = domain;
- info->iommu = iommu;
- if (dev && dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(info->dev);
- if (ecap_dev_iotlb_support(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
- dmar_find_matched_atsr_unit(pdev))
- info->ats_supported = 1;
- if (ecs_enabled(iommu)) {
- if (pasid_enabled(iommu)) {
- int features = pci_pasid_features(pdev);
- if (features >= 0)
- info->pasid_supported = features | 1;
- }
- if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
- info->pri_supported = 1;
- }
- }
- spin_lock_irqsave(&device_domain_lock, flags);
- if (dev)
- found = find_domain(dev);
- if (!found) {
- struct device_domain_info *info2;
- info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
- if (info2) {
- found = info2->domain;
- info2->dev = dev;
- }
- }
- if (found) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- /* Caller must free the original domain */
- return found;
- }
- spin_lock(&iommu->lock);
- ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- return NULL;
- }
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- if (dev)
- dev->archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (dev && domain_context_mapping(domain, dev)) {
- pr_err("Domain context map for %s failed\n", dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
- return NULL;
- }
- return domain;
- }
- static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
- {
- *(u16 *)opaque = alias;
- return 0;
- }
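- /*
- * Find the domain already used by this device's DMA alias, if any;
- * otherwise allocate and initialize a fresh one. The caller attaches
- * it to the device via set_domain_for_dev().
- */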
- static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
- {
- struct device_domain_info *info = NULL;
- struct dmar_domain *domain = NULL;
- struct intel_iommu *iommu;
- u16 req_id, dma_alias;
- unsigned long flags;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
- req_id = ((u16)bus << 8) | devfn;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
- PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff);
- if (info) {
- iommu = info->iommu;
- domain = info->domain;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- /* DMA alias already has a domain, use it */
- if (info)
- goto out;
- }
- /* Allocate and initialize new domain for the device */
- domain = alloc_domain(0);
- if (!domain)
- return NULL;
- if (domain_init(domain, iommu, gaw)) {
- domain_exit(domain);
- return NULL;
- }
- out:
- return domain;
- }
- static struct dmar_domain *set_domain_for_dev(struct device *dev,
- struct dmar_domain *domain)
- {
- struct intel_iommu *iommu;
- struct dmar_domain *tmp;
- u16 req_id, dma_alias;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
- req_id = ((u16)bus << 8) | devfn;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
- /* register PCI DMA alias device */
- if (req_id != dma_alias) {
- tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
- if (!tmp || tmp != domain)
- return tmp;
- }
- }
- tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (!tmp || tmp != domain)
- return tmp;
- return domain;
- }
- static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
- {
- struct dmar_domain *domain, *tmp;
- domain = find_domain(dev);
- if (domain)
- goto out;
- domain = find_or_alloc_domain(dev, gaw);
- if (!domain)
- goto out;
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
- out:
- return domain;
- }
- static int iommu_domain_identity_map(struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
- {
- unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
- unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
- if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
- dma_to_mm_pfn(last_vpfn))) {
- pr_err("Reserving iova failed\n");
- return -ENOMEM;
- }
- pr_debug("Mapping reserved region %llx-%llx\n", start, end);
- /*
- * RMRR range might have overlap with physical memory range,
- * clear it first
- */
- dma_pte_clear_range(domain, first_vpfn, last_vpfn);
- return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
- last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
- }
- static int domain_prepare_identity_map(struct device *dev,
- struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
- {
- /* For _hardware_ passthrough, don't bother. But for software
- passthrough, we do it anyway -- it may indicate a memory
- range which is reserved in E820, and so didn't get set
- up to start with in si_domain */
- if (domain == si_domain && hw_pass_through) {
- pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
- return 0;
- }
- pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
- if (end >> agaw_to_width(domain->agaw)) {
- WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- agaw_to_width(domain->agaw),
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
- return iommu_domain_identity_map(domain, start, end);
- }
- static int iommu_prepare_identity_map(struct device *dev,
- unsigned long long start,
- unsigned long long end)
- {
- struct dmar_domain *domain;
- int ret;
- domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
- ret = domain_prepare_identity_map(dev, domain, start, end);
- if (ret)
- domain_exit(domain);
- return ret;
- }
- static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
- struct device *dev)
- {
- if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return 0;
- return iommu_prepare_identity_map(dev, rmrr->base_address,
- rmrr->end_address);
- }
- #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
- static inline void iommu_prepare_isa(void)
- {
- struct pci_dev *pdev;
- int ret;
- pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (!pdev)
- return;
- pr_info("Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
- if (ret)
- pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
- pci_dev_put(pdev);
- }
- #else
- static inline void iommu_prepare_isa(void)
- {
- return;
- }
- #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
- static int md_domain_init(struct dmar_domain *domain, int guest_width);
- static int __init si_domain_init(int hw)
- {
- int nid, ret = 0;
- si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
- if (!si_domain)
- return -EFAULT;
- if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- domain_exit(si_domain);
- return -EFAULT;
- }
- pr_debug("Identity mapping domain allocated\n");
- if (hw)
- return 0;
- for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn;
- int i;
- for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
- ret = iommu_domain_identity_map(si_domain,
- PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
- if (ret)
- return ret;
- }
- }
- return 0;
- }
- static int identity_mapping(struct device *dev)
- {
- struct device_domain_info *info;
- if (likely(!iommu_identity_mapping))
- return 0;
- info = dev->archdata.iommu;
- if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
- return (info->domain == si_domain);
- return 0;
- }
- static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
- {
- struct dmar_domain *ndomain;
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (ndomain != domain)
- return -EBUSY;
- return 0;
- }
- static bool device_has_rmrr(struct device *dev)
- {
- struct dmar_rmrr_unit *rmrr;
- struct device *tmp;
- int i;
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- /*
- * Return TRUE if this RMRR contains the device that
- * is passed in.
- */
- for_each_active_dev_scope(rmrr->devices,
- rmrr->devices_cnt, i, tmp)
- if (tmp == dev) {
- rcu_read_unlock();
- return true;
- }
- }
- rcu_read_unlock();
- return false;
- }
- /*
- * There are a couple cases where we need to restrict the functionality of
- * devices associated with RMRRs. The first is when evaluating a device for
- * identity mapping because problems exist when devices are moved in and out
- * of domains and their respective RMRR information is lost. This means that
- * a device with associated RMRRs will never be in a "passthrough" domain.
- * The second is use of the device through the IOMMU API. This interface
- * expects to have full control of the IOVA space for the device. We cannot
- * satisfy both the requirement that RMRR access is maintained and have an
- * unencumbered IOVA space. We also have no ability to quiesce the device's
- * use of the RMRR space or even inform the IOMMU API user of the restriction.
- * We therefore prevent devices associated with an RMRR from participating in
- * the IOMMU API, which eliminates them from device assignment.
- *
- * In both cases we assume that PCI USB devices with RMRRs have them largely
- * for historical reasons and that the RMRR space is not actively used post
- * boot. This exclusion may change if vendors begin to abuse it.
- *
- * The same exception is made for graphics devices, with the requirement that
- * any use of the RMRR regions will be torn down before assigning the device
- * to a guest.
- */
- static bool device_is_rmrr_locked(struct device *dev)
- {
- if (!device_has_rmrr(dev))
- return false;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
- return false;
- }
- return true;
- }
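- /*
- * Decide whether a device belongs in the static 1:1 domain:
- * RMRR-locked devices never do; Azalia/GFX devices do when the
- * corresponding IDENTMAP flag is set; everything else depends on
- * IDENTMAP_ALL, the bridge topology rules below and, once past
- * startup, the device's DMA mask.
- */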
- static int iommu_should_identity_map(struct device *dev, int startup)
- {
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- if (device_is_rmrr_locked(dev))
- return 0;
- if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
- return 1;
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return 1;
- if (!(iommu_identity_mapping & IDENTMAP_ALL))
- return 0;
- /*
- * We want to start off with all devices in the 1:1 domain, and
- * take them out later if we find they can't access all of memory.
- *
- * However, we can't do this for PCI devices behind bridges,
- * because all PCI devices behind the same bridge will end up
- * with the same source-id on their transactions.
- *
- * Practically speaking, we can't change things around for these
- * devices at run-time, because we can't be sure there'll be no
- * DMA transactions in flight for any of their siblings.
- *
- * So PCI devices (unless they're on the root bus) as well as
- * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
- * the 1:1 domain, just in _case_ one of their siblings turns out
- * not to be able to map all of memory.
- */
- if (!pci_is_pcie(pdev)) {
- if (!pci_is_root_bus(pdev->bus))
- return 0;
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return 0;
- } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
- } else {
- if (device_has_rmrr(dev))
- return 0;
- }
- /*
- * At boot time, we don't yet know if devices will be 64-bit capable.
- * Assume that they will -- if they turn out not to be, then we can
- * take them out of the 1:1 domain later.
- */
- if (!startup) {
- /*
- * If the device's dma_mask is less than the system's memory
- * size then this is not a candidate for identity mapping.
- */
- u64 dma_mask = *dev->dma_mask;
- if (dev->coherent_dma_mask &&
- dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
- return dma_mask >= dma_get_required_mask(dev);
- }
- return 1;
- }
- static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
- {
- int ret;
- if (!iommu_should_identity_map(dev, 1))
- return 0;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret)
- pr_info("%s identity mapping for device %s\n",
- hw ? "Hardware" : "Software", dev_name(dev));
- else if (ret == -ENODEV)
- /* device not associated with an iommu */
- ret = 0;
- return ret;
- }
- static int __init iommu_prepare_static_identity_mapping(int hw)
- {
- struct pci_dev *pdev = NULL;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- struct device *dev;
- int i;
- int ret = 0;
- for_each_pci_dev(pdev) {
- ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
- if (ret)
- return ret;
- }
- for_each_active_iommu(iommu, drhd)
- for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
- struct acpi_device_physical_node *pn;
- struct acpi_device *adev;
- if (dev->bus != &acpi_bus_type)
- continue;
- adev = to_acpi_device(dev);
- mutex_lock(&adev->physical_node_lock);
- list_for_each_entry(pn, &adev->physical_node_list, node) {
- ret = dev_prepare_static_identity_mapping(pn->dev, hw);
- if (ret)
- break;
- }
- mutex_unlock(&adev->physical_node_lock);
- if (ret)
- return ret;
- }
- return 0;
- }
- static void intel_iommu_init_qi(struct intel_iommu *iommu)
- {
- /*
- * Start from a sane IOMMU hardware state.
- * If queued invalidation was already initialized by us
- * (for example, while enabling interrupt-remapping) then
- * things are already rolling from a sane state.
- */
- if (!iommu->qi) {
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued invalidation not enabled, use register-based invalidation
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- pr_info("%s: Using Register based invalidation\n",
- iommu->name);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- pr_info("%s: Using Queued invalidation\n", iommu->name);
- }
- }
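- /*
- * Copy one bus's context table(s) over from the previous (kdump'd)
- * kernel. With extended context entries each bus needs two 4K
- * tables, which is why tbl_idx and idx are doubled in the ext case.
- */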
- static int copy_context_table(struct intel_iommu *iommu,
- struct root_entry *old_re,
- struct context_entry **tbl,
- int bus, bool ext)
- {
- int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
- struct context_entry *new_ce = NULL, ce;
- struct context_entry *old_ce = NULL;
- struct root_entry re;
- phys_addr_t old_ce_phys;
- tbl_idx = ext ? bus * 2 : bus;
- memcpy(&re, old_re, sizeof(re));
- for (devfn = 0; devfn < 256; devfn++) {
- /* First calculate the correct index */
- idx = (ext ? devfn * 2 : devfn) % 256;
- if (idx == 0) {
- /* First save what we may have and clean up */
- if (new_ce) {
- tbl[tbl_idx] = new_ce;
- __iommu_flush_cache(iommu, new_ce,
- VTD_PAGE_SIZE);
- pos = 1;
- }
- if (old_ce)
- iounmap(old_ce);
- ret = 0;
- if (devfn < 0x80)
- old_ce_phys = root_entry_lctp(&re);
- else
- old_ce_phys = root_entry_uctp(&re);
- if (!old_ce_phys) {
- if (ext && devfn == 0) {
- /* No LCTP, try UCTP */
- devfn = 0x7f;
- continue;
- } else {
- goto out;
- }
- }
- ret = -ENOMEM;
- old_ce = memremap(old_ce_phys, PAGE_SIZE,
- MEMREMAP_WB);
- if (!old_ce)
- goto out;
- new_ce = alloc_pgtable_page(iommu->node);
- if (!new_ce)
- goto out_unmap;
- ret = 0;
- }
- /* Now copy the context entry */
- memcpy(&ce, old_ce + idx, sizeof(ce));
- if (!__context_present(&ce))
- continue;
- did = context_domain_id(&ce);
- if (did >= 0 && did < cap_ndoms(iommu->cap))
- set_bit(did, iommu->domain_ids);
- /*
- * We need a marker for copied context entries. This
- * marker needs to work for the old format as well as
- * for extended context entries.
- *
- * Bit 67 of the context entry is used. In the old
- * format this bit is available to software, in the
- * extended format it is the PGE bit, but PGE is ignored
- * by HW if PASIDs are disabled (and thus still
- * available).
- *
- * So disable PASIDs first and then mark the entry
- * copied. This means that we don't copy PASID
- * translations from the old kernel, but this is fine as
- * faults there are not fatal.
- */
- context_clear_pasid_enable(&ce);
- context_set_copied(&ce);
- new_ce[idx] = ce;
- }
- tbl[tbl_idx + pos] = new_ce;
- __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
- out_unmap:
- memunmap(old_ce);
- out:
- return ret;
- }
- static int copy_translation_tables(struct intel_iommu *iommu)
- {
- struct context_entry **ctxt_tbls;
- struct root_entry *old_rt;
- phys_addr_t old_rt_phys;
- int ctxt_table_entries;
- unsigned long flags;
- u64 rtaddr_reg;
- int bus, ret;
- bool new_ext, ext;
- rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
- new_ext = !!ecap_ecs(iommu->ecap);
- /*
- * The RTT bit can only be changed when translation is disabled,
- * but disabling translation means to open a window for data
- * corruption. So bail out and don't copy anything if we would
- * have to change the bit.
- */
- if (new_ext != ext)
- return -EINVAL;
- old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
- if (!old_rt_phys)
- return -EINVAL;
- old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
- if (!old_rt)
- return -ENOMEM;
- /* This is too big for the stack - allocate it from slab */
- ctxt_table_entries = ext ? 512 : 256;
- ret = -ENOMEM;
- ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
- if (!ctxt_tbls)
- goto out_unmap;
- for (bus = 0; bus < 256; bus++) {
- ret = copy_context_table(iommu, &old_rt[bus],
- ctxt_tbls, bus, ext);
- if (ret) {
- pr_err("%s: Failed to copy context table for bus %d\n",
- iommu->name, bus);
- continue;
- }
- }
- spin_lock_irqsave(&iommu->lock, flags);
- /* Context tables are copied, now write them to the root_entry table */
- for (bus = 0; bus < 256; bus++) {
- int idx = ext ? bus * 2 : bus;
- u64 val;
- if (ctxt_tbls[idx]) {
- val = virt_to_phys(ctxt_tbls[idx]) | 1;
- iommu->root_entry[bus].lo = val;
- }
- if (!ext || !ctxt_tbls[idx + 1])
- continue;
- val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
- iommu->root_entry[bus].hi = val;
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- kfree(ctxt_tbls);
- __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
- ret = 0;
- out_unmap:
- memunmap(old_rt);
- return ret;
- }
- static int __init init_dmars(void)
- {
- struct dmar_drhd_unit *drhd;
- struct dmar_rmrr_unit *rmrr;
- bool copied_tables = false;
- struct device *dev;
- struct intel_iommu *iommu;
- int i, ret, cpu;
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- /*
- * Lock not needed, as this is only incremented in the
- * single-threaded kernel __init code path; all other accesses
- * are read-only.
- */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
- }
- /* Preallocate enough resources for IOMMU hot-addition */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
- g_num_of_iommus = DMAR_UNITS_SUPPORTED;
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- pr_err("Allocating global iommu array failed\n");
- ret = -ENOMEM;
- goto error;
- }
- for_each_possible_cpu(cpu) {
- struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
- cpu);
- dfd->tables = kzalloc(g_num_of_iommus *
- sizeof(struct deferred_flush_table),
- GFP_KERNEL);
- if (!dfd->tables) {
- ret = -ENOMEM;
- goto free_g_iommus;
- }
- spin_lock_init(&dfd->lock);
- setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
- }
- for_each_active_iommu(iommu, drhd) {
- g_iommus[iommu->seq_id] = iommu;
- intel_iommu_init_qi(iommu);
- ret = iommu_init_domains(iommu);
- if (ret)
- goto free_iommu;
- init_translation_status(iommu);
- if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
- iommu_disable_translation(iommu);
- clear_translation_pre_enabled(iommu);
- pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
- iommu->name);
- }
- /*
- * TBD:
- * we could share the same root & context tables
- * among all IOMMUs. Need to split it later.
- */
- ret = iommu_alloc_root_entry(iommu);
- if (ret)
- goto free_iommu;
- if (translation_pre_enabled(iommu)) {
- pr_info("Translation already enabled - trying to copy translation structures\n");
- ret = copy_translation_tables(iommu);
- if (ret) {
- /*
- * We found the IOMMU with translation
- * enabled - but failed to copy over the
- * old root-entry table. Try to proceed
- * by disabling translation now and
- * allocating a clean root-entry table.
- * This might cause DMAR faults, but
- * probably the dump will still succeed.
- */
- pr_err("Failed to copy translation tables from previous kernel for %s\n",
- iommu->name);
- iommu_disable_translation(iommu);
- clear_translation_pre_enabled(iommu);
- } else {
- pr_info("Copied translation tables from previous kernel for %s\n",
- iommu->name);
- copied_tables = true;
- }
- }
- if (!ecap_pass_through(iommu->ecap))
- hw_pass_through = 0;
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu))
- intel_svm_alloc_pasid_tables(iommu);
- #endif
- }
- /*
- * Now that qi is enabled on all iommus, set the root entry and flush
- * caches. This is required on some Intel X58 chipsets, otherwise the
- * flush_context function will loop forever and the boot hangs.
- */
- for_each_active_iommu(iommu, drhd) {
- iommu_flush_write_buffer(iommu);
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- }
- if (iommu_pass_through)
- iommu_identity_mapping |= IDENTMAP_ALL;
- #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
- #endif
- check_tylersburg_isoch();
- if (iommu_identity_mapping) {
- ret = si_domain_init(hw_pass_through);
- if (ret)
- goto free_iommu;
- }
- /*
- * If we copied translations from a previous kernel in the kdump
- * case, we cannot assign the devices to domains now, as that
- * would eliminate the old mappings. So skip this part and defer
- * the assignment to device driver initialization time.
- */
- if (copied_tables)
- goto domains_done;
- /*
- * If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa and may fall back to static
- * identity mapping if iommu_identity_mapping is set.
- */
- if (iommu_identity_mapping) {
- ret = iommu_prepare_static_identity_mapping(hw_pass_through);
- if (ret) {
- pr_crit("Failed to setup IOMMU pass-through\n");
- goto free_iommu;
- }
- }
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- pr_info("Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- /* Some BIOSes list non-existent devices in the DMAR table. */
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, dev) {
- ret = iommu_prepare_rmrr_dev(rmrr, dev);
- if (ret)
- pr_err("Mapping reserved region failed\n");
- }
- }
- iommu_prepare_isa();
- domains_done:
- /*
- * for each drhd
- * enable fault log
- * global invalidate context cache
- * global invalidate iotlb
- * enable translation
- */
- for_each_iommu(iommu, drhd) {
- if (drhd->ignored) {
- /*
- * we always have to disable PMRs or DMA may fail on
- * this device
- */
- if (force_on)
- iommu_disable_protect_mem_regions(iommu);
- continue;
- }
- iommu_flush_write_buffer(iommu);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
- ret = intel_svm_enable_prq(iommu);
- if (ret)
- goto free_iommu;
- }
- #endif
- ret = dmar_set_interrupt(iommu);
- if (ret)
- goto free_iommu;
- if (!translation_pre_enabled(iommu))
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- }
- return 0;
- free_iommu:
- for_each_active_iommu(iommu, drhd) {
- disable_dmar_iommu(iommu);
- free_dmar_iommu(iommu);
- }
- free_g_iommus:
- for_each_possible_cpu(cpu)
- kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
- kfree(g_iommus);
- error:
- return ret;
- }
- /* This takes a number of _MM_ pages, not VTD pages */
- static unsigned long intel_alloc_iova(struct device *dev,
- struct dmar_domain *domain,
- unsigned long nrpages, uint64_t dma_mask)
- {
- unsigned long iova_pfn = 0;
- /* Restrict dma_mask to the width that the iommu can handle */
- dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
- /* Ensure we reserve the whole size-aligned region */
- nrpages = __roundup_pow_of_two(nrpages);
- if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
- /*
- * First try to allocate an io virtual address in
- * DMA_BIT_MASK(32) and if that fails then try allocating
- * from higher range
- */
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)));
- if (iova_pfn)
- return iova_pfn;
- }
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
- if (unlikely(!iova_pfn)) {
- pr_err("Allocating %ld-page iova for %s failed",
- nrpages, dev_name(dev));
- return 0;
- }
- return iova_pfn;
- }
- static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
- {
- struct dmar_domain *domain, *tmp;
- struct dmar_rmrr_unit *rmrr;
- struct device *i_dev;
- int i, ret;
- domain = find_domain(dev);
- if (domain)
- goto out;
- domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- goto out;
- /* We have a new domain - setup possible RMRRs for the device */
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, i_dev) {
- if (i_dev != dev)
- continue;
- ret = domain_prepare_identity_map(dev, domain,
- rmrr->base_address,
- rmrr->end_address);
- if (ret)
- dev_err(dev, "Mapping reserved region failed\n");
- }
- }
- rcu_read_unlock();
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
- out:
- if (!domain)
- pr_err("Allocating domain for %s failed\n", dev_name(dev));
- return domain;
- }
- static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
- {
- struct device_domain_info *info;
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
- if (likely(info))
- return info->domain;
- return __get_valid_domain_for_dev(dev);
- }
- /* Check if the dev needs to go through the non-identity map and unmap process. */
- static int iommu_no_mapping(struct device *dev)
- {
- int found;
- if (iommu_dummy(dev))
- return 1;
- if (!iommu_identity_mapping)
- return 0;
- found = identity_mapping(dev);
- if (found) {
- if (iommu_should_identity_map(dev, 0))
- return 1;
- else {
- /*
- * The 32 bit DMA device is removed from si_domain and falls back
- * to non-identity mapping.
- */
- dmar_remove_one_dev_info(si_domain, dev);
- pr_info("32bit %s uses non-identity mapping\n",
- dev_name(dev));
- return 0;
- }
- } else {
- /*
- * In case a 64 bit DMA device is detached from a VM, the device
- * is put into si_domain for identity mapping.
- */
- if (iommu_should_identity_map(dev, 0)) {
- int ret;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret) {
- pr_info("64bit %s uses identity mapping\n",
- dev_name(dev));
- return 1;
- }
- }
- }
- return 0;
- }
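- /*
- * Map 'size' bytes at paddr for DMA: allocate a (size-aligned) IOVA
- * range, install the PTEs and flush -- PSI in caching mode, just the
- * write buffer otherwise. Returns the DMA address, paddr unchanged
- * for devices that bypass the IOMMU, or 0 on failure.
- */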
- static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int dir, u64 dma_mask)
- {
- struct dmar_domain *domain;
- phys_addr_t start_paddr;
- unsigned long iova_pfn;
- int prot = 0;
- int ret;
- struct intel_iommu *iommu;
- unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
- BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return paddr;
- domain = get_valid_domain_for_dev(dev);
- if (!domain)
- return 0;
- iommu = domain_get_iommu(domain);
- size = aligned_nrpages(paddr, size);
- iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova_pfn)
- goto error;
- /*
- * Check if DMAR supports zero-length reads on write-only
- * mappings.
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
- /*
- * paddr ~ (paddr + size) might span a partial page; we should map the
- * whole page. Note: if two parts of one page are separately mapped, we
- * might have two guest addresses mapping to the same host paddr, but
- * this is not a big problem.
- */
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
- mm_to_dma_pfn(paddr_pfn), size, prot);
- if (ret)
- goto error;
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
- start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
- start_paddr += paddr & ~PAGE_MASK;
- return start_paddr;
- error:
- if (iova_pfn)
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
- dev_name(dev), size, (unsigned long long)paddr, dir);
- return 0;
- }
- static dma_addr_t intel_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
- {
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, *dev->dma_mask);
- }
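- /*
- * Drain every deferred-unmap entry accumulated in @flush_data: for each
- * IOMMU issue one global IOTLB flush (or per-entry PSI flushes when in
- * caching mode), then release the IOVAs and any queued page-table
- * pages. Callers must hold flush_data->lock.
- */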
- static void flush_unmaps(struct deferred_flush_data *flush_data)
- {
- int i, j;
- flush_data->timer_on = 0;
- /* just flush them all */
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct deferred_flush_table *flush_table =
- &flush_data->tables[i];
- if (!iommu)
- continue;
- if (!flush_table->next)
- continue;
- /* In caching mode, global flushes make emulation expensive */
- if (!cap_caching_mode(iommu->cap))
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- for (j = 0; j < flush_table->next; j++) {
- unsigned long mask;
- struct deferred_flush_entry *entry =
- &flush_table->entries[j];
- unsigned long iova_pfn = entry->iova_pfn;
- unsigned long nrpages = entry->nrpages;
- struct dmar_domain *domain = entry->domain;
- struct page *freelist = entry->freelist;
- /* On real hardware multiple invalidations are expensive */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- nrpages, !freelist, 0);
- else {
- mask = ilog2(nrpages);
- iommu_flush_dev_iotlb(domain,
- (uint64_t)iova_pfn << PAGE_SHIFT, mask);
- }
- free_iova_fast(&domain->iovad, iova_pfn, nrpages);
- if (freelist)
- dma_free_pagelist(freelist);
- }
- flush_table->next = 0;
- }
- flush_data->size = 0;
- }
- static void flush_unmaps_timeout(unsigned long cpuid)
- {
- struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
- unsigned long flags;
- spin_lock_irqsave(&flush_data->lock, flags);
- flush_unmaps(flush_data);
- spin_unlock_irqrestore(&flush_data->lock, flags);
- }
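- /*
- * Queue a deferred unmap on this CPU's per-IOMMU flush table and arm a
- * 10ms timer to drain it. Once HIGH_WATER_MARK entries are pending,
- * every online CPU's tables are drained synchronously to bound the
- * backlog.
- */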
- static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
- unsigned long nrpages, struct page *freelist)
- {
- unsigned long flags;
- int entry_id, iommu_id;
- struct intel_iommu *iommu;
- struct deferred_flush_entry *entry;
- struct deferred_flush_data *flush_data;
- unsigned int cpuid;
- cpuid = get_cpu();
- flush_data = per_cpu_ptr(&deferred_flush, cpuid);
- /* Flush all CPUs' entries to avoid deferring too much. If
- * this becomes a bottleneck, we could flush only this CPU's
- * entries and rely on the flush timer for the rest.
- */
- if (flush_data->size == HIGH_WATER_MARK) {
- int cpu;
- for_each_online_cpu(cpu)
- flush_unmaps_timeout(cpu);
- }
- spin_lock_irqsave(&flush_data->lock, flags);
- iommu = domain_get_iommu(dom);
- iommu_id = iommu->seq_id;
- entry_id = flush_data->tables[iommu_id].next;
- ++(flush_data->tables[iommu_id].next);
- entry = &flush_data->tables[iommu_id].entries[entry_id];
- entry->domain = dom;
- entry->iova_pfn = iova_pfn;
- entry->nrpages = nrpages;
- entry->freelist = freelist;
- if (!flush_data->timer_on) {
- mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
- flush_data->timer_on = 1;
- }
- flush_data->size++;
- spin_unlock_irqrestore(&flush_data->lock, flags);
- put_cpu();
- }
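- /*
- * Tear down the translation for dev_addr..dev_addr+size. In strict mode
- * the IOTLB is flushed and the IOVA freed immediately; otherwise the
- * release is batched through the deferred-flush machinery above.
- */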
- static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
- {
- struct dmar_domain *domain;
- unsigned long start_pfn, last_pfn;
- unsigned long nrpages;
- unsigned long iova_pfn;
- struct intel_iommu *iommu;
- struct page *freelist;
- if (iommu_no_mapping(dev))
- return;
- domain = find_domain(dev);
- BUG_ON(!domain);
- iommu = domain_get_iommu(domain);
- iova_pfn = IOVA_PFN(dev_addr);
- nrpages = aligned_nrpages(dev_addr, size);
- start_pfn = mm_to_dma_pfn(iova_pfn);
- last_pfn = start_pfn + nrpages - 1;
- pr_debug("Device %s unmapping: pfn %lx-%lx\n",
- dev_name(dev), start_pfn, last_pfn);
- freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- nrpages, !freelist, 0);
- /* free iova */
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
- dma_free_pagelist(freelist);
- } else {
- /*
- * Queue up the release of the unmap; deferred batching
- * saves the roughly 1/6th of CPU time otherwise spent
- * on per-unmap iotlb flush operations.
- */
- add_unmap(domain, iova_pfn, nrpages, freelist);
- }
- }
- static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
- {
- intel_unmap(dev, dev_addr, size);
- }
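- /*
- * Allocate a zeroed, DMA-mapped coherent buffer. When the device goes
- * through the IOMMU, the GFP_DMA/GFP_DMA32 hints are dropped because the
- * coherent DMA mask is enforced at IOVA allocation time instead; CMA is
- * tried first for blocking allocations before falling back to
- * alloc_pages().
- */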
- static void *intel_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
- {
- struct page *page = NULL;
- int order;
- size = PAGE_ALIGN(size);
- order = get_order(size);
- if (!iommu_no_mapping(dev))
- flags &= ~(GFP_DMA | GFP_DMA32);
- else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- else
- flags |= GFP_DMA32;
- }
- if (gfpflags_allow_blocking(flags)) {
- unsigned int count = size >> PAGE_SHIFT;
- page = dma_alloc_from_contiguous(dev, count, order);
- if (page && iommu_no_mapping(dev) &&
- page_to_phys(page) + size > dev->coherent_dma_mask) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
- }
- if (!page)
- page = alloc_pages(flags, order);
- if (!page)
- return NULL;
- memset(page_address(page), 0, size);
- *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
- DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (*dma_handle)
- return page_address(page);
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, order);
- return NULL;
- }
- static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
- {
- int order;
- struct page *page = virt_to_page(vaddr);
- size = PAGE_ALIGN(size);
- order = get_order(size);
- intel_unmap(dev, dma_handle, size);
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, order);
- }
- static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
- {
- dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
- unsigned long nrpages = 0;
- struct scatterlist *sg;
- int i;
- for_each_sg(sglist, sg, nelems, i) {
- nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
- }
- intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
- }
- static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sglist, int nelems, int dir)
- {
- int i;
- struct scatterlist *sg;
- for_each_sg(sglist, sg, nelems, i) {
- BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nelems;
- }
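- /*
- * Map a scatterlist using a single IOVA allocation that covers the total
- * rounded-up length, then point the page tables at each segment via
- * domain_sg_mapping(). Falls back to a 1:1 physical mapping when the
- * device bypasses the IOMMU.
- */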
- static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
- {
- int i;
- struct dmar_domain *domain;
- size_t size = 0;
- int prot = 0;
- unsigned long iova_pfn;
- int ret;
- struct scatterlist *sg;
- unsigned long start_vpfn;
- struct intel_iommu *iommu;
- BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
- domain = get_valid_domain_for_dev(dev);
- if (!domain)
- return 0;
- iommu = domain_get_iommu(domain);
- for_each_sg(sglist, sg, nelems, i)
- size += aligned_nrpages(sg->offset, sg->length);
- iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
- *dev->dma_mask);
- if (!iova_pfn) {
- sglist->dma_length = 0;
- return 0;
- }
- /*
- * Check if DMAR supports zero-length reads on write only
- * mappings..
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
- start_vpfn = mm_to_dma_pfn(iova_pfn);
- ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
- if (unlikely(ret)) {
- dma_pte_free_pagetable(domain, start_vpfn,
- start_vpfn + size - 1);
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- return 0;
- }
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
- return nelems;
- }
- static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
- {
- return !dma_addr;
- }
- struct dma_map_ops intel_dma_ops = {
- .alloc = intel_alloc_coherent,
- .free = intel_free_coherent,
- .map_sg = intel_map_sg,
- .unmap_sg = intel_unmap_sg,
- .map_page = intel_map_page,
- .unmap_page = intel_unmap_page,
- .mapping_error = intel_mapping_error,
- };
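- /*
- * With dma_ops pointed at intel_dma_ops (see intel_iommu_init() below),
- * drivers use the generic DMA API as usual and land in the routines
- * above. An illustrative, hypothetical sketch:
- *
- *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
- *	if (dma_mapping_error(dev, handle))	/* -> intel_mapping_error() */
- *		return -ENOMEM;
- *	... device performs DMA to/from handle ...
- *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE); /* -> intel_unmap_page() */
- */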
- static inline int iommu_domain_cache_init(void)
- {
- int ret = 0;
- iommu_domain_cache = kmem_cache_create("iommu_domain",
- sizeof(struct dmar_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_domain_cache) {
- pr_err("Couldn't create iommu_domain cache\n");
- ret = -ENOMEM;
- }
- return ret;
- }
- static inline int iommu_devinfo_cache_init(void)
- {
- int ret = 0;
- iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_devinfo_cache) {
- pr_err("Couldn't create devinfo cache\n");
- ret = -ENOMEM;
- }
- return ret;
- }
- static int __init iommu_init_mempool(void)
- {
- int ret;
- ret = iova_cache_get();
- if (ret)
- return ret;
- ret = iommu_domain_cache_init();
- if (ret)
- goto domain_error;
- ret = iommu_devinfo_cache_init();
- if (!ret)
- return ret;
- kmem_cache_destroy(iommu_domain_cache);
- domain_error:
- iova_cache_put();
- return -ENOMEM;
- }
- static void __init iommu_exit_mempool(void)
- {
- kmem_cache_destroy(iommu_devinfo_cache);
- kmem_cache_destroy(iommu_domain_cache);
- iova_cache_put();
- }
- static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
- {
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
- /* we know that this iommu should be at offset 0xa000 from vtbar */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
- TAINT_FIRMWARE_WORKAROUND,
- "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
- pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
- }
- DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
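- /*
- * Walk the DRHD units and mark as ignored any that have no devices in
- * scope, plus (when dmar_map_gfx is clear) any unit that covers only
- * graphics devices; such devices are flagged with the dummy domain so
- * the DMA API bypasses translation for them.
- */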
- static void __init init_no_remapping_devices(void)
- {
- struct dmar_drhd_unit *drhd;
- struct device *dev;
- int i;
- for_each_drhd_unit(drhd) {
- if (!drhd->include_all) {
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- break;
- /* ignore DMAR unit if no devices exist */
- if (i == drhd->devices_cnt)
- drhd->ignored = 1;
- }
- }
- for_each_active_drhd_unit(drhd) {
- if (drhd->include_all)
- continue;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
- break;
- if (i < drhd->devices_cnt)
- continue;
- /* This IOMMU has *only* gfx devices. Either bypass it or
- set the gfx_mapped flag, as appropriate */
- if (dmar_map_gfx) {
- intel_iommu_gfx_mapped = 1;
- } else {
- drhd->ignored = 1;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
- }
- }
- }
- #ifdef CONFIG_SUSPEND
- static int init_iommu_hw(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- for_each_active_iommu(iommu, drhd)
- if (iommu->qi)
- dmar_reenable_qi(iommu);
- for_each_iommu(iommu, drhd) {
- if (drhd->ignored) {
- /*
- * we always have to disable PMRs or DMA may fail on
- * this device
- */
- if (force_on)
- iommu_disable_protect_mem_regions(iommu);
- continue;
- }
-
- iommu_flush_write_buffer(iommu);
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- }
- return 0;
- }
- static void iommu_flush_all(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- for_each_active_iommu(iommu, drhd) {
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- }
- }
- static int iommu_suspend(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- unsigned long flag;
- for_each_active_iommu(iommu, drhd) {
- iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
- GFP_ATOMIC);
- if (!iommu->iommu_state)
- goto nomem;
- }
- iommu_flush_all();
- for_each_active_iommu(iommu, drhd) {
- iommu_disable_translation(iommu);
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->iommu_state[SR_DMAR_FECTL_REG] =
- readl(iommu->reg + DMAR_FECTL_REG);
- iommu->iommu_state[SR_DMAR_FEDATA_REG] =
- readl(iommu->reg + DMAR_FEDATA_REG);
- iommu->iommu_state[SR_DMAR_FEADDR_REG] =
- readl(iommu->reg + DMAR_FEADDR_REG);
- iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
- readl(iommu->reg + DMAR_FEUADDR_REG);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- return 0;
- nomem:
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
- return -ENOMEM;
- }
- static void iommu_resume(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- unsigned long flag;
- if (init_iommu_hw()) {
- if (force_on)
- panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
- else
- WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
- return;
- }
- for_each_active_iommu(iommu, drhd) {
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
- iommu->reg + DMAR_FECTL_REG);
- writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
- iommu->reg + DMAR_FEDATA_REG);
- writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
- iommu->reg + DMAR_FEADDR_REG);
- writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
- iommu->reg + DMAR_FEUADDR_REG);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
- }
- static struct syscore_ops iommu_syscore_ops = {
- .resume = iommu_resume,
- .suspend = iommu_suspend,
- };
- static void __init init_iommu_pm_ops(void)
- {
- register_syscore_ops(&iommu_syscore_ops);
- }
- #else
- static inline void init_iommu_pm_ops(void) {}
- #endif /* CONFIG_SUSPEND */
- int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
- {
- struct acpi_dmar_reserved_memory *rmrr;
- struct dmar_rmrr_unit *rmrru;
- rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
- if (!rmrru)
- return -ENOMEM;
- rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
- rmrru->base_address = rmrr->base_address;
- rmrru->end_address = rmrr->end_address;
- rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt);
- if (rmrru->devices_cnt && rmrru->devices == NULL) {
- kfree(rmrru);
- return -ENOMEM;
- }
- list_add(&rmrru->list, &dmar_rmrr_units);
- return 0;
- }
- static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
- {
- struct dmar_atsr_unit *atsru;
- struct acpi_dmar_atsr *tmp;
- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
- tmp = (struct acpi_dmar_atsr *)atsru->hdr;
- if (atsr->segment != tmp->segment)
- continue;
- if (atsr->header.length != tmp->header.length)
- continue;
- if (memcmp(atsr, tmp, atsr->header.length) == 0)
- return atsru;
- }
- return NULL;
- }
- int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
- {
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
- return 0;
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = dmar_find_atsr(atsr);
- if (atsru)
- return 0;
- atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
- if (!atsru)
- return -ENOMEM;
- /*
- * If memory is allocated from slab by ACPI _DSM method, we need to
- * copy the memory content because the memory buffer will be freed
- * on return.
- */
- atsru->hdr = (void *)(atsru + 1);
- memcpy(atsru->hdr, hdr, hdr->length);
- atsru->include_all = atsr->flags & 0x1;
- if (!atsru->include_all) {
- atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt);
- if (atsru->devices_cnt && atsru->devices == NULL) {
- kfree(atsru);
- return -ENOMEM;
- }
- }
- list_add_rcu(&atsru->list, &dmar_atsr_units);
- return 0;
- }
- static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
- {
- dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
- kfree(atsru);
- }
- int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
- {
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = dmar_find_atsr(atsr);
- if (atsru) {
- list_del_rcu(&atsru->list);
- synchronize_rcu();
- intel_iommu_free_atsr(atsru);
- }
- return 0;
- }
- int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
- {
- int i;
- struct device *dev;
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = dmar_find_atsr(atsr);
- if (!atsru)
- return 0;
- if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
- for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
- i, dev)
- return -EBUSY;
- }
- return 0;
- }
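- /*
- * Bring a hot-added DMAR unit online: verify it supports the features
- * the rest of the system already relies on (pass-through, snooping,
- * superpages), allocate its domain bookkeeping and root entry, then
- * enable QI, interrupts and translation unless the unit is ignored.
- */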
- static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
- {
- int sp, ret = 0;
- struct intel_iommu *iommu = dmaru->iommu;
- if (g_iommus[iommu->seq_id])
- return 0;
- if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
- pr_warn("%s: Doesn't support hardware pass through.\n",
- iommu->name);
- return -ENXIO;
- }
- if (!ecap_sc_support(iommu->ecap) &&
- domain_update_iommu_snooping(iommu)) {
- pr_warn("%s: Doesn't support snooping.\n",
- iommu->name);
- return -ENXIO;
- }
- sp = domain_update_iommu_superpage(iommu) - 1;
- if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
- pr_warn("%s: Doesn't support large page.\n",
- iommu->name);
- return -ENXIO;
- }
- /*
- * Disable translation if already enabled prior to OS handover.
- */
- if (iommu->gcmd & DMA_GCMD_TE)
- iommu_disable_translation(iommu);
- g_iommus[iommu->seq_id] = iommu;
- ret = iommu_init_domains(iommu);
- if (ret == 0)
- ret = iommu_alloc_root_entry(iommu);
- if (ret)
- goto out;
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu))
- intel_svm_alloc_pasid_tables(iommu);
- #endif
- if (dmaru->ignored) {
- /*
- * we always have to disable PMRs or DMA may fail on this device
- */
- if (force_on)
- iommu_disable_protect_mem_regions(iommu);
- return 0;
- }
- intel_iommu_init_qi(iommu);
- iommu_flush_write_buffer(iommu);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
- ret = intel_svm_enable_prq(iommu);
- if (ret)
- goto disable_iommu;
- }
- #endif
- ret = dmar_set_interrupt(iommu);
- if (ret)
- goto disable_iommu;
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- return 0;
- disable_iommu:
- disable_dmar_iommu(iommu);
- out:
- free_dmar_iommu(iommu);
- return ret;
- }
- int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
- {
- int ret = 0;
- struct intel_iommu *iommu = dmaru->iommu;
- if (!intel_iommu_enabled)
- return 0;
- if (iommu == NULL)
- return -EINVAL;
- if (insert) {
- ret = intel_iommu_add(dmaru);
- } else {
- disable_dmar_iommu(iommu);
- free_dmar_iommu(iommu);
- }
- return ret;
- }
- static void intel_iommu_free_dmars(void)
- {
- struct dmar_rmrr_unit *rmrru, *rmrr_n;
- struct dmar_atsr_unit *atsru, *atsr_n;
- list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
- list_del(&rmrru->list);
- dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
- kfree(rmrru);
- }
- list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
- list_del(&atsru->list);
- intel_iommu_free_atsr(atsru);
- }
- }
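- /*
- * Decide whether ATS may be used for @dev: integrated (root-bus) devices
- * are always allowed, non-PCIe paths never, and anything below a root
- * port is looked up against the ATSR tables. Returns 1 if ATS is
- * permitted, 0 otherwise.
- */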
- int dmar_find_matched_atsr_unit(struct pci_dev *dev)
- {
- int i, ret = 1;
- struct pci_bus *bus;
- struct pci_dev *bridge = NULL;
- struct device *tmp;
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- dev = pci_physfn(dev);
- for (bus = dev->bus; bus; bus = bus->parent) {
- bridge = bus->self;
- /* If it's an integrated device, allow ATS */
- if (!bridge)
- return 1;
- /* Connected via non-PCIe: no ATS */
- if (!pci_is_pcie(bridge) ||
- pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
- /* If we found the root port, look it up in the ATSR */
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
- break;
- }
- rcu_read_lock();
- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (atsr->segment != pci_domain_nr(dev->bus))
- continue;
- for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
- if (tmp == &bridge->dev)
- goto out;
- if (atsru->include_all)
- goto out;
- }
- ret = 0;
- out:
- rcu_read_unlock();
- return ret;
- }
- int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
- {
- int ret = 0;
- struct dmar_rmrr_unit *rmrru;
- struct dmar_atsr_unit *atsru;
- struct acpi_dmar_atsr *atsr;
- struct acpi_dmar_reserved_memory *rmrr;
- if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
- return 0;
- list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
- rmrr = container_of(rmrru->hdr,
- struct acpi_dmar_reserved_memory, header);
- if (info->event == BUS_NOTIFY_ADD_DEVICE) {
- ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- rmrr->segment, rmrru->devices,
- rmrru->devices_cnt);
- if (ret < 0)
- return ret;
- } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
- dmar_remove_dev_scope(info, rmrr->segment,
- rmrru->devices, rmrru->devices_cnt);
- }
- }
- list_for_each_entry(atsru, &dmar_atsr_units, list) {
- if (atsru->include_all)
- continue;
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (info->event == BUS_NOTIFY_ADD_DEVICE) {
- ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- atsr->segment, atsru->devices,
- atsru->devices_cnt);
- if (ret > 0)
- break;
- else if (ret < 0)
- return ret;
- } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
- if (dmar_remove_dev_scope(info, atsr->segment,
- atsru->devices, atsru->devices_cnt))
- break;
- }
- }
- return 0;
- }
- /*
- * Here we only respond to the removal of a device that has been
- * unbound from its driver.
- *
- * A newly added device is not attached to its DMAR domain here yet;
- * that happens when the device is first mapped to an iova.
- */
- static int device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
- {
- struct device *dev = data;
- struct dmar_domain *domain;
- if (iommu_dummy(dev))
- return 0;
- if (action != BUS_NOTIFY_REMOVED_DEVICE)
- return 0;
- domain = find_domain(dev);
- if (!domain)
- return 0;
- dmar_remove_one_dev_info(domain, dev);
- if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
- domain_exit(domain);
- return 0;
- }
- static struct notifier_block device_nb = {
- .notifier_call = device_notifier,
- };
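- /*
- * Keep the static identity (si) domain in sync with memory hotplug:
- * extend the identity map when memory goes online, and unmap and flush
- * the corresponding IOVA ranges when it goes offline again.
- */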
- static int intel_iommu_memory_notifier(struct notifier_block *nb,
- unsigned long val, void *v)
- {
- struct memory_notify *mhp = v;
- unsigned long long start, end;
- unsigned long start_vpfn, last_vpfn;
- switch (val) {
- case MEM_GOING_ONLINE:
- start = mhp->start_pfn << PAGE_SHIFT;
- end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
- if (iommu_domain_identity_map(si_domain, start, end)) {
- pr_warn("Failed to build identity map for [%llx-%llx]\n",
- start, end);
- return NOTIFY_BAD;
- }
- break;
- case MEM_OFFLINE:
- case MEM_CANCEL_ONLINE:
- start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
- last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
- while (start_vpfn <= last_vpfn) {
- struct iova *iova;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- struct page *freelist;
- iova = find_iova(&si_domain->iovad, start_vpfn);
- if (iova == NULL) {
- pr_debug("Failed get IOVA for PFN %lx\n",
- start_vpfn);
- break;
- }
- iova = split_and_remove_iova(&si_domain->iovad, iova,
- start_vpfn, last_vpfn);
- if (iova == NULL) {
- pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
- start_vpfn, last_vpfn);
- return NOTIFY_BAD;
- }
- freelist = domain_unmap(si_domain, iova->pfn_lo,
- iova->pfn_hi);
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd)
- iommu_flush_iotlb_psi(iommu, si_domain,
- iova->pfn_lo, iova_size(iova),
- !freelist, 0);
- rcu_read_unlock();
- dma_free_pagelist(freelist);
- start_vpfn = iova->pfn_hi + 1;
- free_iova_mem(iova);
- }
- break;
- }
- return NOTIFY_OK;
- }
- static struct notifier_block intel_iommu_memory_nb = {
- .notifier_call = intel_iommu_memory_notifier,
- .priority = 0
- };
- static void free_all_cpu_cached_iovas(unsigned int cpu)
- {
- int i;
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct dmar_domain *domain;
- int did;
- if (!iommu)
- continue;
- for (did = 0; did < cap_ndoms(iommu->cap); did++) {
- domain = get_iommu_domain(iommu, (u16)did);
- if (!domain)
- continue;
- free_cpu_cached_iovas(cpu, &domain->iovad);
- }
- }
- }
- static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *v)
- {
- unsigned int cpu = (unsigned long)v;
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_all_cpu_cached_iovas(cpu);
- flush_unmaps_timeout(cpu);
- break;
- }
- return NOTIFY_OK;
- }
- static struct notifier_block intel_iommu_cpu_nb = {
- .notifier_call = intel_iommu_cpu_notifier,
- };
- static ssize_t intel_iommu_show_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- u32 ver = readl(iommu->reg + DMAR_VER_REG);
- return sprintf(buf, "%d:%d\n",
- DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
- }
- static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
- static ssize_t intel_iommu_show_address(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", iommu->reg_phys);
- }
- static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
- static ssize_t intel_iommu_show_cap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", iommu->cap);
- }
- static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
- static ssize_t intel_iommu_show_ecap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", iommu->ecap);
- }
- static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
- static ssize_t intel_iommu_show_ndoms(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
- }
- static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
- static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
- }
- static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
- static struct attribute *intel_iommu_attrs[] = {
- &dev_attr_version.attr,
- &dev_attr_address.attr,
- &dev_attr_cap.attr,
- &dev_attr_ecap.attr,
- &dev_attr_domains_supported.attr,
- &dev_attr_domains_used.attr,
- NULL,
- };
- static struct attribute_group intel_iommu_group = {
- .name = "intel-iommu",
- .attrs = intel_iommu_attrs,
- };
- const struct attribute_group *intel_iommu_groups[] = {
- &intel_iommu_group,
- NULL,
- };
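- /*
- * Late boot entry point: parse the DMAR tables, initialize every IOMMU
- * via init_dmars(), install intel_dma_ops as the platform dma_ops, and
- * register the PM ops and the bus, memory and CPU notifiers declared
- * above.
- */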
- int __init intel_iommu_init(void)
- {
- int ret = -ENODEV;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- /* VT-d is required for a TXT/tboot launch, so enforce that */
- force_on = tboot_force_iommu();
- if (iommu_init_mempool()) {
- if (force_on)
- panic("tboot: Failed to initialize iommu memory\n");
- return -ENOMEM;
- }
- down_write(&dmar_global_lock);
- if (dmar_table_init()) {
- if (force_on)
- panic("tboot: Failed to initialize DMAR table\n");
- goto out_free_dmar;
- }
- if (dmar_dev_scope_init() < 0) {
- if (force_on)
- panic("tboot: Failed to initialize DMAR device scope\n");
- goto out_free_dmar;
- }
- if (no_iommu || dmar_disabled)
- goto out_free_dmar;
- if (list_empty(&dmar_rmrr_units))
- pr_info("No RMRR found\n");
- if (list_empty(&dmar_atsr_units))
- pr_info("No ATSR found\n");
- if (dmar_init_reserved_ranges()) {
- if (force_on)
- panic("tboot: Failed to reserve iommu ranges\n");
- goto out_free_reserved_range;
- }
- init_no_remapping_devices();
- ret = init_dmars();
- if (ret) {
- if (force_on)
- panic("tboot: Failed to initialize DMARs\n");
- pr_err("Initialization failed\n");
- goto out_free_reserved_range;
- }
- up_write(&dmar_global_lock);
- pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
- #ifdef CONFIG_SWIOTLB
- swiotlb = 0;
- #endif
- dma_ops = &intel_dma_ops;
- init_iommu_pm_ops();
- for_each_active_iommu(iommu, drhd)
- iommu->iommu_dev = iommu_device_create(NULL, iommu,
- intel_iommu_groups,
- "%s", iommu->name);
- bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
- bus_register_notifier(&pci_bus_type, &device_nb);
- if (si_domain && !hw_pass_through)
- register_memory_notifier(&intel_iommu_memory_nb);
- register_hotcpu_notifier(&intel_iommu_cpu_nb);
- intel_iommu_enabled = 1;
- return 0;
- out_free_reserved_range:
- put_iova_domain(&reserved_iova_list);
- out_free_dmar:
- intel_iommu_free_dmars();
- up_write(&dmar_global_lock);
- iommu_exit_mempool();
- return ret;
- }
- static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
- {
- struct intel_iommu *iommu = opaque;
- domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
- return 0;
- }
- /*
- * NB - intel-iommu lacks any sort of reference counting for the users of
- * dependent devices. If multiple endpoints have intersecting dependent
- * devices, unbinding the driver from any one of them will possibly leave
- * the others unable to operate.
- */
- static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
- {
- if (!iommu || !dev || !dev_is_pci(dev))
- return;
- pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
- }
- static void __dmar_remove_one_dev_info(struct device_domain_info *info)
- {
- struct intel_iommu *iommu;
- unsigned long flags;
- assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
- iommu = info->iommu;
- if (info->dev) {
- iommu_disable_dev_iotlb(info);
- domain_context_clear(iommu, info->dev);
- }
- unlink_domain_info(info);
- spin_lock_irqsave(&iommu->lock, flags);
- domain_detach_iommu(info->domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
- free_devinfo_mem(info);
- }
- static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev)
- {
- struct device_domain_info *info;
- unsigned long flags;
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
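- /*
- * Minimal init for an externally managed (iommu API) domain: set up the
- * IOVA allocator and address-width bookkeeping and allocate the top
- * level page directory, leaving the IOMMU capability bits to be filled
- * in later by domain_update_iommu_cap().
- */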
- static int md_domain_init(struct dmar_domain *domain, int guest_width)
- {
- int adjust_width;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
- domain_reserve_special_ranges(domain);
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
- }
- static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
- {
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
- dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
- if (!dmar_domain) {
- pr_err("Can't allocate dmar_domain\n");
- return NULL;
- }
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- pr_err("Domain initialization failed\n");
- domain_exit(dmar_domain);
- return NULL;
- }
- domain_update_iommu_cap(dmar_domain);
- domain = &dmar_domain->domain;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
- domain->geometry.force_aperture = true;
- return domain;
- }
- static void intel_iommu_domain_free(struct iommu_domain *domain)
- {
- domain_exit(to_dmar_domain(domain));
- }
- static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
- int addr_width;
- u8 bus, devfn;
- if (device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
- /* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
- old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
- dmar_remove_one_dev_info(old_domain, dev);
- rcu_read_unlock();
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
- }
- }
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- /* check if this iommu agaw is sufficient for max mapped address */
- addr_width = agaw_to_width(iommu->agaw);
- if (addr_width > cap_mgaw(iommu->cap))
- addr_width = cap_mgaw(iommu->cap);
- if (dmar_domain->max_addr > (1LL << addr_width)) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, addr_width, dmar_domain->max_addr);
- return -EFAULT;
- }
- dmar_domain->gaw = addr_width;
- /*
- * Knock out extra levels of page tables if necessary
- */
- while (iommu->agaw < dmar_domain->agaw) {
- struct dma_pte *pte;
- pte = dmar_domain->pgd;
- if (dma_pte_present(pte)) {
- dmar_domain->pgd = (struct dma_pte *)
- phys_to_virt(dma_pte_addr(pte));
- free_pgtable_page(pte);
- }
- dmar_domain->agaw--;
- }
- return domain_add_dev_info(dmar_domain, dev);
- }
- static void intel_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
- {
- dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
- }
- static int intel_iommu_map(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- u64 max_addr;
- int prot = 0;
- int ret;
- if (iommu_prot & IOMMU_READ)
- prot |= DMA_PTE_READ;
- if (iommu_prot & IOMMU_WRITE)
- prot |= DMA_PTE_WRITE;
- if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
- prot |= DMA_PTE_SNP;
- max_addr = iova + size;
- if (dmar_domain->max_addr < max_addr) {
- u64 end;
- /* check if minimum agaw is sufficient for mapped address */
- end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
- if (end < max_addr) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, dmar_domain->gaw, max_addr);
- return -EFAULT;
- }
- dmar_domain->max_addr = max_addr;
- }
- /* Round up size to next multiple of PAGE_SIZE, if it and
- the low bits of hpa would take us onto the next page */
- size = aligned_nrpages(hpa, size);
- ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
- }
- static size_t intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct page *freelist = NULL;
- struct intel_iommu *iommu;
- unsigned long start_pfn, last_pfn;
- unsigned int npages;
- int iommu_id, level = 0;
- /* Cope with horrid API which requires us to unmap more than the
- size argument if it happens to be a large-page mapping. */
- BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
- if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
- size = VTD_PAGE_SIZE << level_to_offset_bits(level);
- start_pfn = iova >> VTD_PAGE_SHIFT;
- last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
- freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
- npages = last_pfn - start_pfn + 1;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
- }
- dma_free_pagelist(freelist);
- if (dmar_domain->max_addr == iova + size)
- dmar_domain->max_addr = iova;
- return size;
- }
- static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct dma_pte *pte;
- int level = 0;
- u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
- if (pte)
- phys = dma_pte_addr(pte);
- return phys;
- }
- static bool intel_iommu_capable(enum iommu_cap cap)
- {
- if (cap == IOMMU_CAP_CACHE_COHERENCY)
- return domain_update_iommu_snooping(NULL) == 1;
- if (cap == IOMMU_CAP_INTR_REMAP)
- return irq_remapping_enabled == 1;
- return false;
- }
- static int intel_iommu_add_device(struct device *dev)
- {
- struct intel_iommu *iommu;
- struct iommu_group *group;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- iommu_device_link(iommu->iommu_dev, dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
- iommu_group_put(group);
- return 0;
- }
- static void intel_iommu_remove_device(struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return;
- iommu_group_remove_device(dev);
- iommu_device_unlink(iommu->iommu_dev, dev);
- }
- #ifdef CONFIG_INTEL_IOMMU_SVM
- #define MAX_NR_PASID_BITS (20)
- static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
- {
- /*
- * Convert ecap_pss to extend context entry pts encoding, also
- * respect the soft pasid_max value set by the iommu.
- * - number of PASID bits = ecap_pss + 1
- * - number of PASID table entries = 2^(pts + 5)
- * Therefore, pts = ecap_pss - 4
- * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
- */
- if (ecap_pss(iommu->ecap) < 5)
- return 0;
- /* pasid_max is encoded as actual number of entries not the bits */
- return find_first_bit((unsigned long *)&iommu->pasid_max,
- MAX_NR_PASID_BITS) - 5;
- }
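- /*
- * Enable requests-with-PASID for @sdev's context entry: publish the
- * PASID table/state pointers in the extended context, set PASIDE (plus
- * DINVE/PRS where supported), and flush the context entry. Also records
- * the domain id, source id and device-IOTLB parameters in @sdev for
- * later invalidations.
- */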
- int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
- {
- struct device_domain_info *info;
- struct context_entry *context;
- struct dmar_domain *domain;
- unsigned long flags;
- u64 ctx_lo;
- int ret;
- domain = get_valid_domain_for_dev(sdev->dev);
- if (!domain)
- return -EINVAL;
- spin_lock_irqsave(&device_domain_lock, flags);
- spin_lock(&iommu->lock);
- ret = -EINVAL;
- info = sdev->dev->archdata.iommu;
- if (!info || !info->pasid_supported)
- goto out;
- context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
- if (WARN_ON(!context))
- goto out;
- ctx_lo = context[0].lo;
- sdev->did = domain->iommu_did[iommu->seq_id];
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
- if (!(ctx_lo & CONTEXT_PASIDE)) {
- context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
- context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
- intel_iommu_get_pts(iommu);
- wmb();
- /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
- * extended to permit requests-with-PASID if the PASIDE bit
- * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
- * however, the PASIDE bit is ignored and requests-with-PASID
- * are unconditionally blocked, which makes less sense.
- * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
- * "guest mode" translation types depending on whether ATS
- * is available or not. Annoyingly, we can't use the new
- * modes *unless* PASIDE is set. */
- if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
- ctx_lo &= ~CONTEXT_TT_MASK;
- if (info->ats_supported)
- ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
- else
- ctx_lo |= CONTEXT_TT_PT_PASID << 2;
- }
- ctx_lo |= CONTEXT_PASIDE;
- if (iommu->pasid_state_table)
- ctx_lo |= CONTEXT_DINVE;
- if (info->pri_supported)
- ctx_lo |= CONTEXT_PRS;
- context[0].lo = ctx_lo;
- wmb();
- iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- }
- /* Enable PASID support in the device, if it wasn't already */
- if (!info->pasid_enabled)
- iommu_enable_dev_iotlb(info);
- if (info->ats_enabled) {
- sdev->dev_iotlb = 1;
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
- }
- ret = 0;
- out:
- spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return ret;
- }
- struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- if (iommu_dummy(dev)) {
- dev_warn(dev,
- "No IOMMU translation for device; cannot enable SVM\n");
- return NULL;
- }
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu) {
- dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
- return NULL;
- }
- if (!iommu->pasid_table) {
- dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
- return NULL;
- }
- return iommu;
- }
- #endif /* CONFIG_INTEL_IOMMU_SVM */
- static const struct iommu_ops intel_iommu_ops = {
- .capable = intel_iommu_capable,
- .domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
- .device_group = pci_device_group,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
- };
- static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
- {
- /* G4x/GM45 integrated gfx dmar support is totally busted. */
- pr_info("Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
- static void quirk_iommu_rwbf(struct pci_dev *dev)
- {
- /*
- * Mobile 4 Series Chipset neglects to set RWBF capability,
- * but needs it. Same seems to hold for the desktop versions.
- */
- pr_info("Forcing write-buffer flush capability\n");
- rwbf_quirk = 1;
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
- #define GGC 0x52
- #define GGC_MEMORY_SIZE_MASK (0xf << 8)
- #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
- #define GGC_MEMORY_SIZE_1M (0x1 << 8)
- #define GGC_MEMORY_SIZE_2M (0x3 << 8)
- #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
- #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
- #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
- #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
- static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
- {
- unsigned short ggc;
- if (pci_read_config_word(dev, GGC, &ggc))
- return;
- if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
- pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
- dmar_map_gfx = 0;
- } else if (dmar_map_gfx) {
- /* we have to ensure the gfx device is idle before we flush */
- pr_info("Disabling batched IOTLB flush on Ironlake\n");
- intel_iommu_strict = 1;
- }
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
- /* On Tylersburg chipsets, some BIOSes have been known to enable the
- ISOCH DMAR unit for the Azalia sound device, but not give it any
- TLB entries, which causes it to deadlock. Check for that. We do
- this in a function called from init_dmars(), instead of in a PCI
- quirk, because we don't want to print the obnoxious "BIOS broken"
- message if VT-d is actually disabled.
- */
- static void __init check_tylersburg_isoch(void)
- {
- struct pci_dev *pdev;
- uint32_t vtisochctrl;
- /* If there's no Azalia in the system anyway, forget it. */
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
- if (!pdev)
- return;
- pci_dev_put(pdev);
- /* System Management Registers. Might be hidden, in which case
- we can't do the sanity check. But that's OK, because the
- known-broken BIOSes _don't_ actually hide it, so far. */
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
- if (!pdev)
- return;
- if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
- pci_dev_put(pdev);
- return;
- }
- pci_dev_put(pdev);
- /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
- if (vtisochctrl & 1)
- return;
- /* Drop all bits other than the number of TLB entries */
- vtisochctrl &= 0x1c;
- /* If we have the recommended number of TLB entries (16), fine. */
- if (vtisochctrl == 0x10)
- return;
- /* Zero TLB entries? You get to ride the short bus to school. */
- if (!vtisochctrl) {
- WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- iommu_identity_mapping |= IDENTMAP_AZALIA;
- return;
- }
- pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
- vtisochctrl);
- }
|