vgic-its.c 65 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595
  1. /*
  2. * GICv3 ITS emulation
  3. *
  4. * Copyright (C) 2015,2016 ARM Ltd.
  5. * Author: Andre Przywara <andre.przywara@arm.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/cpu.h>
  20. #include <linux/kvm.h>
  21. #include <linux/kvm_host.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/list.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/list_sort.h>
  26. #include <linux/irqchip/arm-gic-v3.h>
  27. #include <asm/kvm_emulate.h>
  28. #include <asm/kvm_arm.h>
  29. #include <asm/kvm_mmu.h>
  30. #include "vgic.h"
  31. #include "vgic-mmio.h"
  32. static int vgic_its_save_tables_v0(struct vgic_its *its);
  33. static int vgic_its_restore_tables_v0(struct vgic_its *its);
  34. static int vgic_its_commit_v0(struct vgic_its *its);
  35. static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
  36. struct kvm_vcpu *filter_vcpu, bool needs_inv);
  37. /*
  38. * Creates a new (reference to a) struct vgic_irq for a given LPI.
  39. * If this LPI is already mapped on another ITS, we increase its refcount
  40. * and return a pointer to the existing structure.
  41. * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
  42. * This function returns a pointer to the _unlocked_ structure.
  43. */
  44. static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
  45. struct kvm_vcpu *vcpu)
  46. {
  47. struct vgic_dist *dist = &kvm->arch.vgic;
  48. struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
  49. unsigned long flags;
  50. int ret;
  51. /* In this case there is no put, since we keep the reference. */
  52. if (irq)
  53. return irq;
  54. irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
  55. if (!irq)
  56. return ERR_PTR(-ENOMEM);
  57. INIT_LIST_HEAD(&irq->lpi_list);
  58. INIT_LIST_HEAD(&irq->ap_list);
  59. spin_lock_init(&irq->irq_lock);
  60. irq->config = VGIC_CONFIG_EDGE;
  61. kref_init(&irq->refcount);
  62. irq->intid = intid;
  63. irq->target_vcpu = vcpu;
  64. irq->group = 1;
  65. raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
  66. /*
  67. * There could be a race with another vgic_add_lpi(), so we need to
  68. * check that we don't add a second list entry with the same LPI.
  69. */
  70. list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
  71. if (oldirq->intid != intid)
  72. continue;
  73. /* Someone was faster with adding this LPI, lets use that. */
  74. kfree(irq);
  75. irq = oldirq;
  76. /*
  77. * This increases the refcount, the caller is expected to
  78. * call vgic_put_irq() on the returned pointer once it's
  79. * finished with the IRQ.
  80. */
  81. vgic_get_irq_kref(irq);
  82. goto out_unlock;
  83. }
  84. list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
  85. dist->lpi_list_count++;
  86. out_unlock:
  87. raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
  88. /*
  89. * We "cache" the configuration table entries in our struct vgic_irq's.
  90. * However we only have those structs for mapped IRQs, so we read in
  91. * the respective config data from memory here upon mapping the LPI.
  92. */
  93. ret = update_lpi_config(kvm, irq, NULL, false);
  94. if (ret)
  95. return ERR_PTR(ret);
  96. ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
  97. if (ret)
  98. return ERR_PTR(ret);
  99. return irq;
  100. }
  101. struct its_device {
  102. struct list_head dev_list;
  103. /* the head for the list of ITTEs */
  104. struct list_head itt_head;
  105. u32 num_eventid_bits;
  106. gpa_t itt_addr;
  107. u32 device_id;
  108. };
  109. #define COLLECTION_NOT_MAPPED ((u32)~0)
  110. struct its_collection {
  111. struct list_head coll_list;
  112. u32 collection_id;
  113. u32 target_addr;
  114. };
  115. #define its_is_collection_mapped(coll) ((coll) && \
  116. ((coll)->target_addr != COLLECTION_NOT_MAPPED))
  117. struct its_ite {
  118. struct list_head ite_list;
  119. struct vgic_irq *irq;
  120. struct its_collection *collection;
  121. u32 event_id;
  122. };
  123. /**
  124. * struct vgic_its_abi - ITS abi ops and settings
  125. * @cte_esz: collection table entry size
  126. * @dte_esz: device table entry size
  127. * @ite_esz: interrupt translation table entry size
  128. * @save tables: save the ITS tables into guest RAM
  129. * @restore_tables: restore the ITS internal structs from tables
  130. * stored in guest RAM
  131. * @commit: initialize the registers which expose the ABI settings,
  132. * especially the entry sizes
  133. */
  134. struct vgic_its_abi {
  135. int cte_esz;
  136. int dte_esz;
  137. int ite_esz;
  138. int (*save_tables)(struct vgic_its *its);
  139. int (*restore_tables)(struct vgic_its *its);
  140. int (*commit)(struct vgic_its *its);
  141. };
  142. #define ABI_0_ESZ 8
  143. #define ESZ_MAX ABI_0_ESZ
  144. static const struct vgic_its_abi its_table_abi_versions[] = {
  145. [0] = {
  146. .cte_esz = ABI_0_ESZ,
  147. .dte_esz = ABI_0_ESZ,
  148. .ite_esz = ABI_0_ESZ,
  149. .save_tables = vgic_its_save_tables_v0,
  150. .restore_tables = vgic_its_restore_tables_v0,
  151. .commit = vgic_its_commit_v0,
  152. },
  153. };
  154. #define NR_ITS_ABIS ARRAY_SIZE(its_table_abi_versions)
  155. inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
  156. {
  157. return &its_table_abi_versions[its->abi_rev];
  158. }
  159. static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
  160. {
  161. const struct vgic_its_abi *abi;
  162. its->abi_rev = rev;
  163. abi = vgic_its_get_abi(its);
  164. return abi->commit(its);
  165. }
  166. /*
  167. * Find and returns a device in the device table for an ITS.
  168. * Must be called with the its_lock mutex held.
  169. */
  170. static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
  171. {
  172. struct its_device *device;
  173. list_for_each_entry(device, &its->device_list, dev_list)
  174. if (device_id == device->device_id)
  175. return device;
  176. return NULL;
  177. }
  178. /*
  179. * Find and returns an interrupt translation table entry (ITTE) for a given
  180. * Device ID/Event ID pair on an ITS.
  181. * Must be called with the its_lock mutex held.
  182. */
  183. static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
  184. u32 event_id)
  185. {
  186. struct its_device *device;
  187. struct its_ite *ite;
  188. device = find_its_device(its, device_id);
  189. if (device == NULL)
  190. return NULL;
  191. list_for_each_entry(ite, &device->itt_head, ite_list)
  192. if (ite->event_id == event_id)
  193. return ite;
  194. return NULL;
  195. }
  196. /* To be used as an iterator this macro misses the enclosing parentheses */
  197. #define for_each_lpi_its(dev, ite, its) \
  198. list_for_each_entry(dev, &(its)->device_list, dev_list) \
  199. list_for_each_entry(ite, &(dev)->itt_head, ite_list)
  200. /*
  201. * We only implement 48 bits of PA at the moment, although the ITS
  202. * supports more. Let's be restrictive here.
  203. */
  204. #define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16))
  205. #define CBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12))
  206. #define GIC_LPI_OFFSET 8192
  207. #define VITS_TYPER_IDBITS 16
  208. #define VITS_TYPER_DEVBITS 16
  209. #define VITS_DTE_MAX_DEVID_OFFSET (BIT(14) - 1)
  210. #define VITS_ITE_MAX_EVENTID_OFFSET (BIT(16) - 1)
  211. /*
  212. * Finds and returns a collection in the ITS collection table.
  213. * Must be called with the its_lock mutex held.
  214. */
  215. static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
  216. {
  217. struct its_collection *collection;
  218. list_for_each_entry(collection, &its->collection_list, coll_list) {
  219. if (coll_id == collection->collection_id)
  220. return collection;
  221. }
  222. return NULL;
  223. }
  224. #define LPI_PROP_ENABLE_BIT(p) ((p) & LPI_PROP_ENABLED)
  225. #define LPI_PROP_PRIORITY(p) ((p) & 0xfc)
  226. /*
  227. * Reads the configuration data for a given LPI from guest memory and
  228. * updates the fields in struct vgic_irq.
  229. * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
  230. * VCPU. Unconditionally applies if filter_vcpu is NULL.
  231. */
  232. static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
  233. struct kvm_vcpu *filter_vcpu, bool needs_inv)
  234. {
  235. u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
  236. u8 prop;
  237. int ret;
  238. unsigned long flags;
  239. ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
  240. &prop, 1);
  241. if (ret)
  242. return ret;
  243. spin_lock_irqsave(&irq->irq_lock, flags);
  244. if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
  245. irq->priority = LPI_PROP_PRIORITY(prop);
  246. irq->enabled = LPI_PROP_ENABLE_BIT(prop);
  247. if (!irq->hw) {
  248. vgic_queue_irq_unlock(kvm, irq, flags);
  249. return 0;
  250. }
  251. }
  252. spin_unlock_irqrestore(&irq->irq_lock, flags);
  253. if (irq->hw)
  254. return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
  255. return 0;
  256. }
  257. /*
  258. * Create a snapshot of the current LPIs targeting @vcpu, so that we can
  259. * enumerate those LPIs without holding any lock.
  260. * Returns their number and puts the kmalloc'ed array into intid_ptr.
  261. */
  262. int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
  263. {
  264. struct vgic_dist *dist = &kvm->arch.vgic;
  265. struct vgic_irq *irq;
  266. unsigned long flags;
  267. u32 *intids;
  268. int irq_count, i = 0;
  269. /*
  270. * There is an obvious race between allocating the array and LPIs
  271. * being mapped/unmapped. If we ended up here as a result of a
  272. * command, we're safe (locks are held, preventing another
  273. * command). If coming from another path (such as enabling LPIs),
  274. * we must be careful not to overrun the array.
  275. */
  276. irq_count = READ_ONCE(dist->lpi_list_count);
  277. intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
  278. if (!intids)
  279. return -ENOMEM;
  280. raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
  281. list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
  282. if (i == irq_count)
  283. break;
  284. /* We don't need to "get" the IRQ, as we hold the list lock. */
  285. if (vcpu && irq->target_vcpu != vcpu)
  286. continue;
  287. intids[i++] = irq->intid;
  288. }
  289. raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
  290. *intid_ptr = intids;
  291. return i;
  292. }
  293. static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
  294. {
  295. int ret = 0;
  296. unsigned long flags;
  297. spin_lock_irqsave(&irq->irq_lock, flags);
  298. irq->target_vcpu = vcpu;
  299. spin_unlock_irqrestore(&irq->irq_lock, flags);
  300. if (irq->hw) {
  301. struct its_vlpi_map map;
  302. ret = its_get_vlpi(irq->host_irq, &map);
  303. if (ret)
  304. return ret;
  305. map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
  306. ret = its_map_vlpi(irq->host_irq, &map);
  307. }
  308. return ret;
  309. }
  310. /*
  311. * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  312. * is targeting) to the VGIC's view, which deals with target VCPUs.
  313. * Needs to be called whenever either the collection for a LPIs has
  314. * changed or the collection itself got retargeted.
  315. */
  316. static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
  317. {
  318. struct kvm_vcpu *vcpu;
  319. if (!its_is_collection_mapped(ite->collection))
  320. return;
  321. vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
  322. update_affinity(ite->irq, vcpu);
  323. }
  324. /*
  325. * Updates the target VCPU for every LPI targeting this collection.
  326. * Must be called with the its_lock mutex held.
  327. */
  328. static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
  329. struct its_collection *coll)
  330. {
  331. struct its_device *device;
  332. struct its_ite *ite;
  333. for_each_lpi_its(device, ite, its) {
  334. if (!ite->collection || coll != ite->collection)
  335. continue;
  336. update_affinity_ite(kvm, ite);
  337. }
  338. }
  339. static u32 max_lpis_propbaser(u64 propbaser)
  340. {
  341. int nr_idbits = (propbaser & 0x1f) + 1;
  342. return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
  343. }
  344. /*
  345. * Sync the pending table pending bit of LPIs targeting @vcpu
  346. * with our own data structures. This relies on the LPI being
  347. * mapped before.
  348. */
  349. static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
  350. {
  351. gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
  352. struct vgic_irq *irq;
  353. int last_byte_offset = -1;
  354. int ret = 0;
  355. u32 *intids;
  356. int nr_irqs, i;
  357. unsigned long flags;
  358. u8 pendmask;
  359. nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
  360. if (nr_irqs < 0)
  361. return nr_irqs;
  362. for (i = 0; i < nr_irqs; i++) {
  363. int byte_offset, bit_nr;
  364. byte_offset = intids[i] / BITS_PER_BYTE;
  365. bit_nr = intids[i] % BITS_PER_BYTE;
  366. /*
  367. * For contiguously allocated LPIs chances are we just read
  368. * this very same byte in the last iteration. Reuse that.
  369. */
  370. if (byte_offset != last_byte_offset) {
  371. ret = kvm_read_guest_lock(vcpu->kvm,
  372. pendbase + byte_offset,
  373. &pendmask, 1);
  374. if (ret) {
  375. kfree(intids);
  376. return ret;
  377. }
  378. last_byte_offset = byte_offset;
  379. }
  380. irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
  381. spin_lock_irqsave(&irq->irq_lock, flags);
  382. irq->pending_latch = pendmask & (1U << bit_nr);
  383. vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
  384. vgic_put_irq(vcpu->kvm, irq);
  385. }
  386. kfree(intids);
  387. return ret;
  388. }
  389. static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
  390. struct vgic_its *its,
  391. gpa_t addr, unsigned int len)
  392. {
  393. const struct vgic_its_abi *abi = vgic_its_get_abi(its);
  394. u64 reg = GITS_TYPER_PLPIS;
  395. /*
  396. * We use linear CPU numbers for redistributor addressing,
  397. * so GITS_TYPER.PTA is 0.
  398. * Also we force all PROPBASER registers to be the same, so
  399. * CommonLPIAff is 0 as well.
  400. * To avoid memory waste in the guest, we keep the number of IDBits and
  401. * DevBits low - as least for the time being.
  402. */
  403. reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
  404. reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
  405. reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;
  406. return extract_bytes(reg, addr & 7, len);
  407. }
  408. static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
  409. struct vgic_its *its,
  410. gpa_t addr, unsigned int len)
  411. {
  412. u32 val;
  413. val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
  414. val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
  415. return val;
  416. }
  417. static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
  418. struct vgic_its *its,
  419. gpa_t addr, unsigned int len,
  420. unsigned long val)
  421. {
  422. u32 rev = GITS_IIDR_REV(val);
  423. if (rev >= NR_ITS_ABIS)
  424. return -EINVAL;
  425. return vgic_its_set_abi(its, rev);
  426. }
  427. static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
  428. struct vgic_its *its,
  429. gpa_t addr, unsigned int len)
  430. {
  431. switch (addr & 0xffff) {
  432. case GITS_PIDR0:
  433. return 0x92; /* part number, bits[7:0] */
  434. case GITS_PIDR1:
  435. return 0xb4; /* part number, bits[11:8] */
  436. case GITS_PIDR2:
  437. return GIC_PIDR2_ARCH_GICv3 | 0x0b;
  438. case GITS_PIDR4:
  439. return 0x40; /* This is a 64K software visible page */
  440. /* The following are the ID registers for (any) GIC. */
  441. case GITS_CIDR0:
  442. return 0x0d;
  443. case GITS_CIDR1:
  444. return 0xf0;
  445. case GITS_CIDR2:
  446. return 0x05;
  447. case GITS_CIDR3:
  448. return 0xb1;
  449. }
  450. return 0;
  451. }
  452. int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
  453. u32 devid, u32 eventid, struct vgic_irq **irq)
  454. {
  455. struct kvm_vcpu *vcpu;
  456. struct its_ite *ite;
  457. if (!its->enabled)
  458. return -EBUSY;
  459. ite = find_ite(its, devid, eventid);
  460. if (!ite || !its_is_collection_mapped(ite->collection))
  461. return E_ITS_INT_UNMAPPED_INTERRUPT;
  462. vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
  463. if (!vcpu)
  464. return E_ITS_INT_UNMAPPED_INTERRUPT;
  465. if (!vcpu->arch.vgic_cpu.lpis_enabled)
  466. return -EBUSY;
  467. *irq = ite->irq;
  468. return 0;
  469. }
  470. struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
  471. {
  472. u64 address;
  473. struct kvm_io_device *kvm_io_dev;
  474. struct vgic_io_device *iodev;
  475. if (!vgic_has_its(kvm))
  476. return ERR_PTR(-ENODEV);
  477. if (!(msi->flags & KVM_MSI_VALID_DEVID))
  478. return ERR_PTR(-EINVAL);
  479. address = (u64)msi->address_hi << 32 | msi->address_lo;
  480. kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
  481. if (!kvm_io_dev)
  482. return ERR_PTR(-EINVAL);
  483. if (kvm_io_dev->ops != &kvm_io_gic_ops)
  484. return ERR_PTR(-EINVAL);
  485. iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
  486. if (iodev->iodev_type != IODEV_ITS)
  487. return ERR_PTR(-EINVAL);
  488. return iodev->its;
  489. }
  490. /*
  491. * Find the target VCPU and the LPI number for a given devid/eventid pair
  492. * and make this IRQ pending, possibly injecting it.
  493. * Must be called with the its_lock mutex held.
  494. * Returns 0 on success, a positive error value for any ITS mapping
  495. * related errors and negative error values for generic errors.
  496. */
  497. static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
  498. u32 devid, u32 eventid)
  499. {
  500. struct vgic_irq *irq = NULL;
  501. unsigned long flags;
  502. int err;
  503. err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
  504. if (err)
  505. return err;
  506. if (irq->hw)
  507. return irq_set_irqchip_state(irq->host_irq,
  508. IRQCHIP_STATE_PENDING, true);
  509. spin_lock_irqsave(&irq->irq_lock, flags);
  510. irq->pending_latch = true;
  511. vgic_queue_irq_unlock(kvm, irq, flags);
  512. return 0;
  513. }
  514. /*
  515. * Queries the KVM IO bus framework to get the ITS pointer from the given
  516. * doorbell address.
  517. * We then call vgic_its_trigger_msi() with the decoded data.
  518. * According to the KVM_SIGNAL_MSI API description returns 1 on success.
  519. */
  520. int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
  521. {
  522. struct vgic_its *its;
  523. int ret;
  524. its = vgic_msi_to_its(kvm, msi);
  525. if (IS_ERR(its))
  526. return PTR_ERR(its);
  527. mutex_lock(&its->its_lock);
  528. ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
  529. mutex_unlock(&its->its_lock);
  530. if (ret < 0)
  531. return ret;
  532. /*
  533. * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
  534. * if the guest has blocked the MSI. So we map any LPI mapping
  535. * related error to that.
  536. */
  537. if (ret)
  538. return 0;
  539. else
  540. return 1;
  541. }
  542. /* Requires the its_lock to be held. */
  543. static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
  544. {
  545. list_del(&ite->ite_list);
  546. /* This put matches the get in vgic_add_lpi. */
  547. if (ite->irq) {
  548. if (ite->irq->hw)
  549. WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
  550. vgic_put_irq(kvm, ite->irq);
  551. }
  552. kfree(ite);
  553. }
  554. static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
  555. {
  556. return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
  557. }
  558. #define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8)
  559. #define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32)
  560. #define its_cmd_get_size(cmd) (its_cmd_mask_field(cmd, 1, 0, 5) + 1)
  561. #define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32)
  562. #define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32)
  563. #define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16)
  564. #define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8)
  565. #define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32)
  566. #define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1)
  567. /*
  568. * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
  569. * Must be called with the its_lock mutex held.
  570. */
  571. static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
  572. u64 *its_cmd)
  573. {
  574. u32 device_id = its_cmd_get_deviceid(its_cmd);
  575. u32 event_id = its_cmd_get_id(its_cmd);
  576. struct its_ite *ite;
  577. ite = find_ite(its, device_id, event_id);
  578. if (ite && ite->collection) {
  579. /*
  580. * Though the spec talks about removing the pending state, we
  581. * don't bother here since we clear the ITTE anyway and the
  582. * pending state is a property of the ITTE struct.
  583. */
  584. its_free_ite(kvm, ite);
  585. return 0;
  586. }
  587. return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
  588. }
  589. /*
  590. * The MOVI command moves an ITTE to a different collection.
  591. * Must be called with the its_lock mutex held.
  592. */
  593. static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
  594. u64 *its_cmd)
  595. {
  596. u32 device_id = its_cmd_get_deviceid(its_cmd);
  597. u32 event_id = its_cmd_get_id(its_cmd);
  598. u32 coll_id = its_cmd_get_collection(its_cmd);
  599. struct kvm_vcpu *vcpu;
  600. struct its_ite *ite;
  601. struct its_collection *collection;
  602. ite = find_ite(its, device_id, event_id);
  603. if (!ite)
  604. return E_ITS_MOVI_UNMAPPED_INTERRUPT;
  605. if (!its_is_collection_mapped(ite->collection))
  606. return E_ITS_MOVI_UNMAPPED_COLLECTION;
  607. collection = find_collection(its, coll_id);
  608. if (!its_is_collection_mapped(collection))
  609. return E_ITS_MOVI_UNMAPPED_COLLECTION;
  610. ite->collection = collection;
  611. vcpu = kvm_get_vcpu(kvm, collection->target_addr);
  612. return update_affinity(ite->irq, vcpu);
  613. }
  614. /*
  615. * Check whether an ID can be stored into the corresponding guest table.
  616. * For a direct table this is pretty easy, but gets a bit nasty for
  617. * indirect tables. We check whether the resulting guest physical address
  618. * is actually valid (covered by a memslot and guest accessible).
  619. * For this we have to read the respective first level entry.
  620. */
  621. static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
  622. gpa_t *eaddr)
  623. {
  624. int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
  625. u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
  626. int esz = GITS_BASER_ENTRY_SIZE(baser);
  627. int index, idx;
  628. gfn_t gfn;
  629. bool ret;
  630. switch (type) {
  631. case GITS_BASER_TYPE_DEVICE:
  632. if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
  633. return false;
  634. break;
  635. case GITS_BASER_TYPE_COLLECTION:
  636. /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
  637. if (id >= BIT_ULL(16))
  638. return false;
  639. break;
  640. default:
  641. return false;
  642. }
  643. if (!(baser & GITS_BASER_INDIRECT)) {
  644. phys_addr_t addr;
  645. if (id >= (l1_tbl_size / esz))
  646. return false;
  647. addr = BASER_ADDRESS(baser) + id * esz;
  648. gfn = addr >> PAGE_SHIFT;
  649. if (eaddr)
  650. *eaddr = addr;
  651. goto out;
  652. }
  653. /* calculate and check the index into the 1st level */
  654. index = id / (SZ_64K / esz);
  655. if (index >= (l1_tbl_size / sizeof(u64)))
  656. return false;
  657. /* Each 1st level entry is represented by a 64-bit value. */
  658. if (kvm_read_guest_lock(its->dev->kvm,
  659. BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
  660. &indirect_ptr, sizeof(indirect_ptr)))
  661. return false;
  662. indirect_ptr = le64_to_cpu(indirect_ptr);
  663. /* check the valid bit of the first level entry */
  664. if (!(indirect_ptr & BIT_ULL(63)))
  665. return false;
  666. /*
  667. * Mask the guest physical address and calculate the frame number.
  668. * Any address beyond our supported 48 bits of PA will be caught
  669. * by the actual check in the final step.
  670. */
  671. indirect_ptr &= GENMASK_ULL(51, 16);
  672. /* Find the address of the actual entry */
  673. index = id % (SZ_64K / esz);
  674. indirect_ptr += index * esz;
  675. gfn = indirect_ptr >> PAGE_SHIFT;
  676. if (eaddr)
  677. *eaddr = indirect_ptr;
  678. out:
  679. idx = srcu_read_lock(&its->dev->kvm->srcu);
  680. ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
  681. srcu_read_unlock(&its->dev->kvm->srcu, idx);
  682. return ret;
  683. }
  684. static int vgic_its_alloc_collection(struct vgic_its *its,
  685. struct its_collection **colp,
  686. u32 coll_id)
  687. {
  688. struct its_collection *collection;
  689. if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
  690. return E_ITS_MAPC_COLLECTION_OOR;
  691. collection = kzalloc(sizeof(*collection), GFP_KERNEL);
  692. if (!collection)
  693. return -ENOMEM;
  694. collection->collection_id = coll_id;
  695. collection->target_addr = COLLECTION_NOT_MAPPED;
  696. list_add_tail(&collection->coll_list, &its->collection_list);
  697. *colp = collection;
  698. return 0;
  699. }
  700. static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
  701. {
  702. struct its_collection *collection;
  703. struct its_device *device;
  704. struct its_ite *ite;
  705. /*
  706. * Clearing the mapping for that collection ID removes the
  707. * entry from the list. If there wasn't any before, we can
  708. * go home early.
  709. */
  710. collection = find_collection(its, coll_id);
  711. if (!collection)
  712. return;
  713. for_each_lpi_its(device, ite, its)
  714. if (ite->collection &&
  715. ite->collection->collection_id == coll_id)
  716. ite->collection = NULL;
  717. list_del(&collection->coll_list);
  718. kfree(collection);
  719. }
  720. /* Must be called with its_lock mutex held */
  721. static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
  722. struct its_collection *collection,
  723. u32 event_id)
  724. {
  725. struct its_ite *ite;
  726. ite = kzalloc(sizeof(*ite), GFP_KERNEL);
  727. if (!ite)
  728. return ERR_PTR(-ENOMEM);
  729. ite->event_id = event_id;
  730. ite->collection = collection;
  731. list_add_tail(&ite->ite_list, &device->itt_head);
  732. return ite;
  733. }
  734. /*
  735. * The MAPTI and MAPI commands map LPIs to ITTEs.
  736. * Must be called with its_lock mutex held.
  737. */
  738. static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
  739. u64 *its_cmd)
  740. {
  741. u32 device_id = its_cmd_get_deviceid(its_cmd);
  742. u32 event_id = its_cmd_get_id(its_cmd);
  743. u32 coll_id = its_cmd_get_collection(its_cmd);
  744. struct its_ite *ite;
  745. struct kvm_vcpu *vcpu = NULL;
  746. struct its_device *device;
  747. struct its_collection *collection, *new_coll = NULL;
  748. struct vgic_irq *irq;
  749. int lpi_nr;
  750. device = find_its_device(its, device_id);
  751. if (!device)
  752. return E_ITS_MAPTI_UNMAPPED_DEVICE;
  753. if (event_id >= BIT_ULL(device->num_eventid_bits))
  754. return E_ITS_MAPTI_ID_OOR;
  755. if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
  756. lpi_nr = its_cmd_get_physical_id(its_cmd);
  757. else
  758. lpi_nr = event_id;
  759. if (lpi_nr < GIC_LPI_OFFSET ||
  760. lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
  761. return E_ITS_MAPTI_PHYSICALID_OOR;
  762. /* If there is an existing mapping, behavior is UNPREDICTABLE. */
  763. if (find_ite(its, device_id, event_id))
  764. return 0;
  765. collection = find_collection(its, coll_id);
  766. if (!collection) {
  767. int ret = vgic_its_alloc_collection(its, &collection, coll_id);
  768. if (ret)
  769. return ret;
  770. new_coll = collection;
  771. }
  772. ite = vgic_its_alloc_ite(device, collection, event_id);
  773. if (IS_ERR(ite)) {
  774. if (new_coll)
  775. vgic_its_free_collection(its, coll_id);
  776. return PTR_ERR(ite);
  777. }
  778. if (its_is_collection_mapped(collection))
  779. vcpu = kvm_get_vcpu(kvm, collection->target_addr);
  780. irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
  781. if (IS_ERR(irq)) {
  782. if (new_coll)
  783. vgic_its_free_collection(its, coll_id);
  784. its_free_ite(kvm, ite);
  785. return PTR_ERR(irq);
  786. }
  787. ite->irq = irq;
  788. return 0;
  789. }
  790. /* Requires the its_lock to be held. */
  791. static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
  792. {
  793. struct its_ite *ite, *temp;
  794. /*
  795. * The spec says that unmapping a device with still valid
  796. * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
  797. * since we cannot leave the memory unreferenced.
  798. */
  799. list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
  800. its_free_ite(kvm, ite);
  801. list_del(&device->dev_list);
  802. kfree(device);
  803. }
  804. /* its lock must be held */
  805. static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
  806. {
  807. struct its_device *cur, *temp;
  808. list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
  809. vgic_its_free_device(kvm, cur);
  810. }
  811. /* its lock must be held */
  812. static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
  813. {
  814. struct its_collection *cur, *temp;
  815. list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
  816. vgic_its_free_collection(its, cur->collection_id);
  817. }
  818. /* Must be called with its_lock mutex held */
  819. static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
  820. u32 device_id, gpa_t itt_addr,
  821. u8 num_eventid_bits)
  822. {
  823. struct its_device *device;
  824. device = kzalloc(sizeof(*device), GFP_KERNEL);
  825. if (!device)
  826. return ERR_PTR(-ENOMEM);
  827. device->device_id = device_id;
  828. device->itt_addr = itt_addr;
  829. device->num_eventid_bits = num_eventid_bits;
  830. INIT_LIST_HEAD(&device->itt_head);
  831. list_add_tail(&device->dev_list, &its->device_list);
  832. return device;
  833. }
  834. /*
  835. * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
  836. * Must be called with the its_lock mutex held.
  837. */
  838. static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
  839. u64 *its_cmd)
  840. {
  841. u32 device_id = its_cmd_get_deviceid(its_cmd);
  842. bool valid = its_cmd_get_validbit(its_cmd);
  843. u8 num_eventid_bits = its_cmd_get_size(its_cmd);
  844. gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
  845. struct its_device *device;
  846. if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
  847. return E_ITS_MAPD_DEVICE_OOR;
  848. if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
  849. return E_ITS_MAPD_ITTSIZE_OOR;
  850. device = find_its_device(its, device_id);
  851. /*
  852. * The spec says that calling MAPD on an already mapped device
  853. * invalidates all cached data for this device. We implement this
  854. * by removing the mapping and re-establishing it.
  855. */
  856. if (device)
  857. vgic_its_free_device(kvm, device);
  858. /*
  859. * The spec does not say whether unmapping a not-mapped device
  860. * is an error, so we are done in any case.
  861. */
  862. if (!valid)
  863. return 0;
  864. device = vgic_its_alloc_device(its, device_id, itt_addr,
  865. num_eventid_bits);
  866. return PTR_ERR_OR_ZERO(device);
  867. }
  868. /*
  869. * The MAPC command maps collection IDs to redistributors.
  870. * Must be called with the its_lock mutex held.
  871. */
  872. static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
  873. u64 *its_cmd)
  874. {
  875. u16 coll_id;
  876. u32 target_addr;
  877. struct its_collection *collection;
  878. bool valid;
  879. valid = its_cmd_get_validbit(its_cmd);
  880. coll_id = its_cmd_get_collection(its_cmd);
  881. target_addr = its_cmd_get_target_addr(its_cmd);
  882. if (target_addr >= atomic_read(&kvm->online_vcpus))
  883. return E_ITS_MAPC_PROCNUM_OOR;
  884. if (!valid) {
  885. vgic_its_free_collection(its, coll_id);
  886. } else {
  887. collection = find_collection(its, coll_id);
  888. if (!collection) {
  889. int ret;
  890. ret = vgic_its_alloc_collection(its, &collection,
  891. coll_id);
  892. if (ret)
  893. return ret;
  894. collection->target_addr = target_addr;
  895. } else {
  896. collection->target_addr = target_addr;
  897. update_affinity_collection(kvm, its, collection);
  898. }
  899. }
  900. return 0;
  901. }
  902. /*
  903. * The CLEAR command removes the pending state for a particular LPI.
  904. * Must be called with the its_lock mutex held.
  905. */
  906. static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
  907. u64 *its_cmd)
  908. {
  909. u32 device_id = its_cmd_get_deviceid(its_cmd);
  910. u32 event_id = its_cmd_get_id(its_cmd);
  911. struct its_ite *ite;
  912. ite = find_ite(its, device_id, event_id);
  913. if (!ite)
  914. return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
  915. ite->irq->pending_latch = false;
  916. if (ite->irq->hw)
  917. return irq_set_irqchip_state(ite->irq->host_irq,
  918. IRQCHIP_STATE_PENDING, false);
  919. return 0;
  920. }
  921. /*
  922. * The INV command syncs the configuration bits from the memory table.
  923. * Must be called with the its_lock mutex held.
  924. */
  925. static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
  926. u64 *its_cmd)
  927. {
  928. u32 device_id = its_cmd_get_deviceid(its_cmd);
  929. u32 event_id = its_cmd_get_id(its_cmd);
  930. struct its_ite *ite;
  931. ite = find_ite(its, device_id, event_id);
  932. if (!ite)
  933. return E_ITS_INV_UNMAPPED_INTERRUPT;
  934. return update_lpi_config(kvm, ite->irq, NULL, true);
  935. }
  936. /*
  937. * The INVALL command requests flushing of all IRQ data in this collection.
  938. * Find the VCPU mapped to that collection, then iterate over the VM's list
  939. * of mapped LPIs and update the configuration for each IRQ which targets
  940. * the specified vcpu. The configuration will be read from the in-memory
  941. * configuration table.
  942. * Must be called with the its_lock mutex held.
  943. */
  944. static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
  945. u64 *its_cmd)
  946. {
  947. u32 coll_id = its_cmd_get_collection(its_cmd);
  948. struct its_collection *collection;
  949. struct kvm_vcpu *vcpu;
  950. struct vgic_irq *irq;
  951. u32 *intids;
  952. int irq_count, i;
  953. collection = find_collection(its, coll_id);
  954. if (!its_is_collection_mapped(collection))
  955. return E_ITS_INVALL_UNMAPPED_COLLECTION;
  956. vcpu = kvm_get_vcpu(kvm, collection->target_addr);
  957. irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
  958. if (irq_count < 0)
  959. return irq_count;
  960. for (i = 0; i < irq_count; i++) {
  961. irq = vgic_get_irq(kvm, NULL, intids[i]);
  962. if (!irq)
  963. continue;
  964. update_lpi_config(kvm, irq, vcpu, false);
  965. vgic_put_irq(kvm, irq);
  966. }
  967. kfree(intids);
  968. if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
  969. its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
  970. return 0;
  971. }
  972. /*
  973. * The MOVALL command moves the pending state of all IRQs targeting one
  974. * redistributor to another. We don't hold the pending state in the VCPUs,
  975. * but in the IRQs instead, so there is really not much to do for us here.
  976. * However the spec says that no IRQ must target the old redistributor
  977. * afterwards, so we make sure that no LPI is using the associated target_vcpu.
  978. * This command affects all LPIs in the system that target that redistributor.
  979. */
  980. static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
  981. u64 *its_cmd)
  982. {
  983. u32 target1_addr = its_cmd_get_target_addr(its_cmd);
  984. u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
  985. struct kvm_vcpu *vcpu1, *vcpu2;
  986. struct vgic_irq *irq;
  987. u32 *intids;
  988. int irq_count, i;
  989. if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
  990. target2_addr >= atomic_read(&kvm->online_vcpus))
  991. return E_ITS_MOVALL_PROCNUM_OOR;
  992. if (target1_addr == target2_addr)
  993. return 0;
  994. vcpu1 = kvm_get_vcpu(kvm, target1_addr);
  995. vcpu2 = kvm_get_vcpu(kvm, target2_addr);
  996. irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
  997. if (irq_count < 0)
  998. return irq_count;
  999. for (i = 0; i < irq_count; i++) {
  1000. irq = vgic_get_irq(kvm, NULL, intids[i]);
  1001. update_affinity(irq, vcpu2);
  1002. vgic_put_irq(kvm, irq);
  1003. }
  1004. kfree(intids);
  1005. return 0;
  1006. }
  1007. /*
  1008. * The INT command injects the LPI associated with that DevID/EvID pair.
  1009. * Must be called with the its_lock mutex held.
  1010. */
  1011. static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
  1012. u64 *its_cmd)
  1013. {
  1014. u32 msi_data = its_cmd_get_id(its_cmd);
  1015. u64 msi_devid = its_cmd_get_deviceid(its_cmd);
  1016. return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
  1017. }
  1018. /*
  1019. * This function is called with the its_cmd lock held, but the ITS data
  1020. * structure lock dropped.
  1021. */
  1022. static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
  1023. u64 *its_cmd)
  1024. {
  1025. int ret = -ENODEV;
  1026. mutex_lock(&its->its_lock);
  1027. switch (its_cmd_get_command(its_cmd)) {
  1028. case GITS_CMD_MAPD:
  1029. ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
  1030. break;
  1031. case GITS_CMD_MAPC:
  1032. ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
  1033. break;
  1034. case GITS_CMD_MAPI:
  1035. ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
  1036. break;
  1037. case GITS_CMD_MAPTI:
  1038. ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
  1039. break;
  1040. case GITS_CMD_MOVI:
  1041. ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
  1042. break;
  1043. case GITS_CMD_DISCARD:
  1044. ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
  1045. break;
  1046. case GITS_CMD_CLEAR:
  1047. ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
  1048. break;
  1049. case GITS_CMD_MOVALL:
  1050. ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
  1051. break;
  1052. case GITS_CMD_INT:
  1053. ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
  1054. break;
  1055. case GITS_CMD_INV:
  1056. ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
  1057. break;
  1058. case GITS_CMD_INVALL:
  1059. ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
  1060. break;
  1061. case GITS_CMD_SYNC:
  1062. /* we ignore this command: we are in sync all of the time */
  1063. ret = 0;
  1064. break;
  1065. }
  1066. mutex_unlock(&its->its_lock);
  1067. return ret;
  1068. }
  1069. static u64 vgic_sanitise_its_baser(u64 reg)
  1070. {
  1071. reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
  1072. GITS_BASER_SHAREABILITY_SHIFT,
  1073. vgic_sanitise_shareability);
  1074. reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
  1075. GITS_BASER_INNER_CACHEABILITY_SHIFT,
  1076. vgic_sanitise_inner_cacheability);
  1077. reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
  1078. GITS_BASER_OUTER_CACHEABILITY_SHIFT,
  1079. vgic_sanitise_outer_cacheability);
  1080. /* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
  1081. reg &= ~GENMASK_ULL(15, 12);
  1082. /* We support only one (ITS) page size: 64K */
  1083. reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
  1084. return reg;
  1085. }
  1086. static u64 vgic_sanitise_its_cbaser(u64 reg)
  1087. {
  1088. reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
  1089. GITS_CBASER_SHAREABILITY_SHIFT,
  1090. vgic_sanitise_shareability);
  1091. reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
  1092. GITS_CBASER_INNER_CACHEABILITY_SHIFT,
  1093. vgic_sanitise_inner_cacheability);
  1094. reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
  1095. GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
  1096. vgic_sanitise_outer_cacheability);
  1097. /*
  1098. * Sanitise the physical address to be 64k aligned.
  1099. * Also limit the physical addresses to 48 bits.
  1100. */
  1101. reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));
  1102. return reg;
  1103. }
  1104. static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
  1105. struct vgic_its *its,
  1106. gpa_t addr, unsigned int len)
  1107. {
  1108. return extract_bytes(its->cbaser, addr & 7, len);
  1109. }
  1110. static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
  1111. gpa_t addr, unsigned int len,
  1112. unsigned long val)
  1113. {
  1114. /* When GITS_CTLR.Enable is 1, this register is RO. */
  1115. if (its->enabled)
  1116. return;
  1117. mutex_lock(&its->cmd_lock);
  1118. its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
  1119. its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
  1120. its->creadr = 0;
  1121. /*
  1122. * CWRITER is architecturally UNKNOWN on reset, but we need to reset
  1123. * it to CREADR to make sure we start with an empty command buffer.
  1124. */
  1125. its->cwriter = its->creadr;
  1126. mutex_unlock(&its->cmd_lock);
  1127. }
  1128. #define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12)
  1129. #define ITS_CMD_SIZE 32
  1130. #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
  1131. /* Must be called with the cmd_lock held. */
  1132. static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
  1133. {
  1134. gpa_t cbaser;
  1135. u64 cmd_buf[4];
  1136. /* Commands are only processed when the ITS is enabled. */
  1137. if (!its->enabled)
  1138. return;
  1139. cbaser = CBASER_ADDRESS(its->cbaser);
  1140. while (its->cwriter != its->creadr) {
  1141. int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
  1142. cmd_buf, ITS_CMD_SIZE);
  1143. /*
  1144. * If kvm_read_guest() fails, this could be due to the guest
  1145. * programming a bogus value in CBASER or something else going
  1146. * wrong from which we cannot easily recover.
  1147. * According to section 6.3.2 in the GICv3 spec we can just
  1148. * ignore that command then.
  1149. */
  1150. if (!ret)
  1151. vgic_its_handle_command(kvm, its, cmd_buf);
  1152. its->creadr += ITS_CMD_SIZE;
  1153. if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
  1154. its->creadr = 0;
  1155. }
  1156. }
  1157. /*
  1158. * By writing to CWRITER the guest announces new commands to be processed.
  1159. * To avoid any races in the first place, we take the its_cmd lock, which
  1160. * protects our ring buffer variables, so that there is only one user
  1161. * per ITS handling commands at a given time.
  1162. */
  1163. static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
  1164. gpa_t addr, unsigned int len,
  1165. unsigned long val)
  1166. {
  1167. u64 reg;
  1168. if (!its)
  1169. return;
  1170. mutex_lock(&its->cmd_lock);
  1171. reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
  1172. reg = ITS_CMD_OFFSET(reg);
  1173. if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
  1174. mutex_unlock(&its->cmd_lock);
  1175. return;
  1176. }
  1177. its->cwriter = reg;
  1178. vgic_its_process_commands(kvm, its);
  1179. mutex_unlock(&its->cmd_lock);
  1180. }
  1181. static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
  1182. struct vgic_its *its,
  1183. gpa_t addr, unsigned int len)
  1184. {
  1185. return extract_bytes(its->cwriter, addr & 0x7, len);
  1186. }
  1187. static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
  1188. struct vgic_its *its,
  1189. gpa_t addr, unsigned int len)
  1190. {
  1191. return extract_bytes(its->creadr, addr & 0x7, len);
  1192. }
  1193. static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
  1194. struct vgic_its *its,
  1195. gpa_t addr, unsigned int len,
  1196. unsigned long val)
  1197. {
  1198. u32 cmd_offset;
  1199. int ret = 0;
  1200. mutex_lock(&its->cmd_lock);
  1201. if (its->enabled) {
  1202. ret = -EBUSY;
  1203. goto out;
  1204. }
  1205. cmd_offset = ITS_CMD_OFFSET(val);
  1206. if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
  1207. ret = -EINVAL;
  1208. goto out;
  1209. }
  1210. its->creadr = cmd_offset;
  1211. out:
  1212. mutex_unlock(&its->cmd_lock);
  1213. return ret;
  1214. }
  1215. #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
  1216. static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
  1217. struct vgic_its *its,
  1218. gpa_t addr, unsigned int len)
  1219. {
  1220. u64 reg;
  1221. switch (BASER_INDEX(addr)) {
  1222. case 0:
  1223. reg = its->baser_device_table;
  1224. break;
  1225. case 1:
  1226. reg = its->baser_coll_table;
  1227. break;
  1228. default:
  1229. reg = 0;
  1230. break;
  1231. }
  1232. return extract_bytes(reg, addr & 7, len);
  1233. }
#define GITS_BASER_RO_MASK (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
                                      struct vgic_its *its,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 entry_size, table_type;
        u64 reg, *regptr, clearbits = 0;

        /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
        if (its->enabled)
                return;

        switch (BASER_INDEX(addr)) {
        case 0:
                regptr = &its->baser_device_table;
                entry_size = abi->dte_esz;
                table_type = GITS_BASER_TYPE_DEVICE;
                break;
        case 1:
                regptr = &its->baser_coll_table;
                entry_size = abi->cte_esz;
                table_type = GITS_BASER_TYPE_COLLECTION;
                clearbits = GITS_BASER_INDIRECT;
                break;
        default:
                return;
        }

        reg = update_64bit_reg(*regptr, addr & 7, len, val);
        reg &= ~GITS_BASER_RO_MASK;
        reg &= ~clearbits;

        reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
        reg |= table_type << GITS_BASER_TYPE_SHIFT;
        reg = vgic_sanitise_its_baser(reg);

        *regptr = reg;

        if (!(reg & GITS_BASER_VALID)) {
                /* Take the its_lock to prevent a race with a save/restore */
                mutex_lock(&its->its_lock);
                switch (table_type) {
                case GITS_BASER_TYPE_DEVICE:
                        vgic_its_free_device_list(kvm, its);
                        break;
                case GITS_BASER_TYPE_COLLECTION:
                        vgic_its_free_collection_list(kvm, its);
                        break;
                }
                mutex_unlock(&its->its_lock);
        }
}

static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
                                             struct vgic_its *its,
                                             gpa_t addr, unsigned int len)
{
        u32 reg = 0;

        mutex_lock(&its->cmd_lock);
        if (its->creadr == its->cwriter)
                reg |= GITS_CTLR_QUIESCENT;
        if (its->enabled)
                reg |= GITS_CTLR_ENABLE;
        mutex_unlock(&its->cmd_lock);

        return reg;
}
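
/*
 * In this emulation a GITS_CTLR write only affects the Enable bit, and the
 * ITS may only transition from disabled to enabled once GITS_CBASER and
 * both the device and collection GITS_BASERs are valid. Enabling the ITS
 * also kicks off processing of any commands the guest has already queued.
 */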
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        mutex_lock(&its->cmd_lock);

        /*
         * It is UNPREDICTABLE to enable the ITS if any of the CBASER,
         * device BASER or collection BASER registers is invalid.
         */
        if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
            (!(its->baser_device_table & GITS_BASER_VALID) ||
             !(its->baser_coll_table & GITS_BASER_VALID) ||
             !(its->cbaser & GITS_CBASER_VALID)))
                goto out;

        its->enabled = !!(val & GITS_CTLR_ENABLE);

        /*
         * Try to process any pending commands. This function bails out early
         * if the ITS is disabled or no commands have been queued.
         */
        vgic_its_process_commands(kvm, its);

out:
        mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
{                                                               \
        .reg_offset = off,                                      \
        .len = length,                                          \
        .access_flags = acc,                                    \
        .its_read = rd,                                         \
        .its_write = wr,                                        \
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{                                                               \
        .reg_offset = off,                                      \
        .len = length,                                          \
        .access_flags = acc,                                    \
        .its_read = rd,                                         \
        .its_write = wr,                                        \
        .uaccess_its_write = uwr,                               \
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
                              gpa_t addr, unsigned int len, unsigned long val)
{
        /* Ignore */
}
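
/*
 * The emulated GITS register frame. Offsets are relative to the ITS base
 * address that userspace programs via KVM_VGIC_ITS_ADDR_TYPE; guest writes
 * to read-only registers are silently discarded through its_mmio_write_wi().
 */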
static struct vgic_register_region its_registers[] = {
        REGISTER_ITS_DESC(GITS_CTLR,
                vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
                VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
                vgic_mmio_read_its_iidr, its_mmio_write_wi,
                vgic_mmio_uaccess_write_its_iidr, 4,
                VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_TYPER,
                vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_CBASER,
                vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_CWRITER,
                vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
                vgic_mmio_read_its_creadr, its_mmio_write_wi,
                vgic_mmio_uaccess_write_its_creadr, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_BASER,
                vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_IDREGS_BASE,
                vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
                VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
                its_sync_lpi_pending_table(vcpu);
}
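
/*
 * Register the ITS MMIO region (KVM_VGIC_V3_ITS_SIZE bytes) on the KVM I/O
 * bus once userspace has provided a base address. Setting the address a
 * second time fails with -EBUSY.
 */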
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
                                   u64 addr)
{
        struct vgic_io_device *iodev = &its->iodev;
        int ret;

        mutex_lock(&kvm->slots_lock);
        if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
                ret = -EBUSY;
                goto out;
        }

        its->vgic_its_base = addr;
        iodev->regions = its_registers;
        iodev->nr_regions = ARRAY_SIZE(its_registers);
        kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

        iodev->base_addr = its->vgic_its_base;
        iodev->iodev_type = IODEV_ITS;
        iodev->its = its;
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
        mutex_unlock(&kvm->slots_lock);

        return ret;
}

#define INITIAL_BASER_VALUE                                              \
        (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)               | \
         GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)        | \
         GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)            | \
         GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE                                          \
        (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)           | \
         GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)    | \
         GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
        struct vgic_its *its;

        if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
                return -ENODEV;

        its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
        if (!its)
                return -ENOMEM;

        if (vgic_initialized(dev->kvm)) {
                int ret = vgic_v4_init(dev->kvm);

                if (ret < 0) {
                        kfree(its);
                        return ret;
                }
        }

        mutex_init(&its->its_lock);
        mutex_init(&its->cmd_lock);

        its->vgic_its_base = VGIC_ADDR_UNDEF;

        INIT_LIST_HEAD(&its->device_list);
        INIT_LIST_HEAD(&its->collection_list);

        dev->kvm->arch.vgic.msis_require_devid = true;
        dev->kvm->arch.vgic.has_its = true;
        its->enabled = false;
        its->dev = dev;

        its->baser_device_table = INITIAL_BASER_VALUE |
                ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
        its->baser_coll_table = INITIAL_BASER_VALUE |
                ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
        dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

        dev->private = its;

        return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
        struct kvm *kvm = kvm_dev->kvm;
        struct vgic_its *its = kvm_dev->private;

        mutex_lock(&its->its_lock);

        vgic_its_free_device_list(kvm, its);
        vgic_its_free_collection_list(kvm, its);

        mutex_unlock(&its->its_lock);
        kfree(its);
        kfree(kvm_dev); /* alloc'd by kvm_ioctl_create_device(), freed by .destroy */
}
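
/*
 * GITS_CTLR, GITS_IIDR and the ID registers are 32-bit wide and take 4-byte
 * aligned offsets; everything from GITS_TYPER up to (but not including)
 * GITS_PIDR4 is 64-bit wide and requires 8-byte alignment.
 */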
int vgic_its_has_attr_regs(struct kvm_device *dev,
                           struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        gpa_t offset = attr->attr;
        int align;

        align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

        if (offset & align)
                return -EINVAL;

        region = vgic_find_mmio_region(its_registers,
                                       ARRAY_SIZE(its_registers),
                                       offset);
        if (!region)
                return -ENXIO;

        return 0;
}

int vgic_its_attr_regs_access(struct kvm_device *dev,
                              struct kvm_device_attr *attr,
                              u64 *reg, bool is_write)
{
        const struct vgic_register_region *region;
        struct vgic_its *its;
        gpa_t addr, offset;
        unsigned int len;
        int align, ret = 0;

        its = dev->private;
        offset = attr->attr;

        /*
         * Although the spec supports upper/lower 32-bit accesses to
         * 64-bit ITS registers, the userspace ABI requires 64-bit
         * accesses to all 64-bit wide registers. We therefore only
         * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
         * registers.
         */
        if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
                align = 0x3;
        else
                align = 0x7;

        if (offset & align)
                return -EINVAL;

        mutex_lock(&dev->kvm->lock);

        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
                ret = -ENXIO;
                goto out;
        }

        region = vgic_find_mmio_region(its_registers,
                                       ARRAY_SIZE(its_registers),
                                       offset);
        if (!region) {
                ret = -ENXIO;
                goto out;
        }

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        addr = its->vgic_its_base + offset;

        len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

        if (is_write) {
                if (region->uaccess_its_write)
                        ret = region->uaccess_its_write(dev->kvm, its, addr,
                                                        len, *reg);
                else
                        region->its_write(dev->kvm, its, addr, len, *reg);
        } else {
                *reg = region->its_read(dev->kvm, its, addr, len);
        }
        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}
static u32 compute_next_devid_offset(struct list_head *h,
                                     struct its_device *dev)
{
        struct its_device *next;
        u32 next_offset;

        if (list_is_last(&dev->dev_list, h))
                return 0;
        next = list_next_entry(dev, dev_list);
        next_offset = next->device_id - dev->device_id;

        return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
        struct its_ite *next;
        u32 next_offset;

        if (list_is_last(&ite->ite_list, h))
                return 0;
        next = list_next_entry(ite, ite_list);
        next_offset = next->event_id - ite->event_id;

        return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}

/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
                          void *opaque);

/**
 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non-zero for second level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
                          int start_id, entry_fn_t fn, void *opaque)
{
        struct kvm *kvm = its->dev->kvm;
        unsigned long len = size;
        int id = start_id;
        gpa_t gpa = base;
        char entry[ESZ_MAX];
        int ret;

        memset(entry, 0, esz);

        while (len > 0) {
                int next_offset;
                size_t byte_offset;

                ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
                if (ret)
                        return ret;

                next_offset = fn(its, id, entry, opaque);
                if (next_offset <= 0)
                        return next_offset;

                byte_offset = next_offset * esz;
                id += next_offset;
                gpa += byte_offset;
                len -= byte_offset;
        }
        return 1;
}
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
                             struct its_ite *ite, gpa_t gpa, int ite_esz)
{
        struct kvm *kvm = its->dev->kvm;
        u32 next_offset;
        u64 val;

        next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
        val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
              ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
              ite->collection->collection_id;
        val = cpu_to_le64(val);
        return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
}

/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                                void *ptr, void *opaque)
{
        struct its_device *dev = (struct its_device *)opaque;
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        struct kvm_vcpu *vcpu = NULL;
        u64 val;
        u64 *p = (u64 *)ptr;
        struct vgic_irq *irq;
        u32 coll_id, lpi_id;
        struct its_ite *ite;
        u32 offset;

        val = *p;

        val = le64_to_cpu(val);

        coll_id = val & KVM_ITS_ITE_ICID_MASK;
        lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

        if (!lpi_id)
                return 1; /* invalid entry, no choice but to scan next entry */

        if (lpi_id < VGIC_MIN_LPI)
                return -EINVAL;

        offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
        if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
                return -EINVAL;

        collection = find_collection(its, coll_id);
        if (!collection)
                return -EINVAL;

        ite = vgic_its_alloc_ite(dev, collection, event_id);
        if (IS_ERR(ite))
                return PTR_ERR(ite);

        if (its_is_collection_mapped(collection))
                vcpu = kvm_get_vcpu(kvm, collection->target_addr);

        irq = vgic_add_lpi(kvm, lpi_id, vcpu);
        if (IS_ERR(irq))
                return PTR_ERR(irq);
        ite->irq = irq;

        return offset;
}

static int vgic_its_ite_cmp(void *priv, struct list_head *a,
                            struct list_head *b)
{
        struct its_ite *itea = container_of(a, struct its_ite, ite_list);
        struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

        if (itea->event_id < iteb->event_id)
                return -1;
        else
                return 1;
}
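
/*
 * The ITT is written out in ascending event ID order, so the "next" delta
 * stored in each ITE (see vgic_its_save_ite()) is always non-negative.
 */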
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        gpa_t base = device->itt_addr;
        struct its_ite *ite;
        int ret;
        int ite_esz = abi->ite_esz;

        list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

        list_for_each_entry(ite, &device->itt_head, ite_list) {
                gpa_t gpa = base + ite->event_id * ite_esz;

                /*
                 * If an LPI carries the HW bit, this means that this
                 * interrupt is controlled by GICv4, and we do not
                 * have direct access to that state. Let's simply fail
                 * the save operation...
                 */
                if (ite->irq->hw)
                        return -EACCES;

                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        gpa_t base = dev->itt_addr;
        int ret;
        int ite_esz = abi->ite_esz;
        size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

        ret = scan_its_table(its, base, max_size, ite_esz, 0,
                             vgic_its_restore_ite, dev);

        /* scan_its_table returns +1 if all ITEs are invalid */
        if (ret > 0)
                ret = 0;

        return ret;
}

/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
                             gpa_t ptr, int dte_esz)
{
        struct kvm *kvm = its->dev->kvm;
        u64 val, itt_addr_field;
        u32 next_offset;

        itt_addr_field = dev->itt_addr >> 8;
        next_offset = compute_next_devid_offset(&its->device_list, dev);
        val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
               ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
               (dev->num_eventid_bits - 1));
        val = cpu_to_le64(val);
        return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
}

/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
                                void *ptr, void *opaque)
{
        struct its_device *dev;
        gpa_t itt_addr;
        u8 num_eventid_bits;
        u64 entry = *(u64 *)ptr;
        bool valid;
        u32 offset;
        int ret;

        entry = le64_to_cpu(entry);

        valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
        num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
        itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
                        >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

        if (!valid)
                return 1;

        /* dte entry is valid */
        offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

        dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        ret = vgic_its_restore_itt(its, dev);
        if (ret) {
                vgic_its_free_device(its->dev->kvm, dev);
                return ret;
        }

        return offset;
}

static int vgic_its_device_cmp(void *priv, struct list_head *a,
                               struct list_head *b)
{
        struct its_device *deva = container_of(a, struct its_device, dev_list);
        struct its_device *devb = container_of(b, struct its_device, dev_list);

        if (deva->device_id < devb->device_id)
                return -1;
        else
                return 1;
}
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 baser = its->baser_device_table;
        struct its_device *dev;
        int dte_esz = abi->dte_esz;

        if (!(baser & GITS_BASER_VALID))
                return 0;

        list_sort(NULL, &its->device_list, vgic_its_device_cmp);

        list_for_each_entry(dev, &its->device_list, dev_list) {
                int ret;
                gpa_t eaddr;

                if (!vgic_its_check_id(its, baser,
                                       dev->device_id, &eaddr))
                        return -EINVAL;

                ret = vgic_its_save_itt(its, dev);
                if (ret)
                        return ret;

                ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
                         void *opaque)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        int l2_start_id = id * (SZ_64K / abi->dte_esz);
        u64 entry = *(u64 *)addr;
        int dte_esz = abi->dte_esz;
        gpa_t gpa;
        int ret;

        entry = le64_to_cpu(entry);

        if (!(entry & KVM_ITS_L1E_VALID_MASK))
                return 1;

        gpa = entry & KVM_ITS_L1E_ADDR_MASK;

        ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
                             l2_start_id, vgic_its_restore_dte, NULL);

        return ret;
}

/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 baser = its->baser_device_table;
        int l1_esz, ret;
        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
        gpa_t l1_gpa;

        if (!(baser & GITS_BASER_VALID))
                return 0;

        l1_gpa = BASER_ADDRESS(baser);

        if (baser & GITS_BASER_INDIRECT) {
                l1_esz = GITS_LVL1_ENTRY_SIZE;
                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
                                     handle_l1_dte, NULL);
        } else {
                l1_esz = abi->dte_esz;
                ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
                                     vgic_its_restore_dte, NULL);
        }

        /* scan_its_table returns +1 if all entries are invalid */
        if (ret > 0)
                ret = 0;

        return ret;
}
static int vgic_its_save_cte(struct vgic_its *its,
                             struct its_collection *collection,
                             gpa_t gpa, int esz)
{
        u64 val;

        val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
               ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
               collection->collection_id);
        val = cpu_to_le64(val);
        return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}

static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        u32 target_addr, coll_id;
        u64 val;
        int ret;

        BUG_ON(esz > sizeof(val));
        ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
        if (ret)
                return ret;
        val = le64_to_cpu(val);
        if (!(val & KVM_ITS_CTE_VALID_MASK))
                return 0;

        target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
        coll_id = val & KVM_ITS_CTE_ICID_MASK;

        if (target_addr != COLLECTION_NOT_MAPPED &&
            target_addr >= atomic_read(&kvm->online_vcpus))
                return -EINVAL;

        collection = find_collection(its, coll_id);
        if (collection)
                return -EEXIST;
        ret = vgic_its_alloc_collection(its, &collection, coll_id);
        if (ret)
                return ret;
        collection->target_addr = target_addr;
        return 1;
}
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 baser = its->baser_coll_table;
        gpa_t gpa = BASER_ADDRESS(baser);
        struct its_collection *collection;
        u64 val;
        size_t max_size, filled = 0;
        int ret, cte_esz = abi->cte_esz;

        if (!(baser & GITS_BASER_VALID))
                return 0;

        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

        list_for_each_entry(collection, &its->collection_list, coll_list) {
                ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
                if (ret)
                        return ret;
                gpa += cte_esz;
                filled += cte_esz;
        }

        if (filled == max_size)
                return 0;

        /*
         * table is not fully filled, add a last dummy element
         * with valid bit unset
         */
        val = 0;
        BUG_ON(cte_esz > sizeof(val));
        ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
        return ret;
}

/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 baser = its->baser_coll_table;
        int cte_esz = abi->cte_esz;
        size_t max_size, read = 0;
        gpa_t gpa;
        int ret;

        if (!(baser & GITS_BASER_VALID))
                return 0;

        gpa = BASER_ADDRESS(baser);

        max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

        while (read < max_size) {
                ret = vgic_its_restore_cte(its, gpa, cte_esz);
                if (ret <= 0)
                        break;
                gpa += cte_esz;
                read += cte_esz;
        }

        if (ret > 0)
                return 0;

        return ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
        int ret;

        ret = vgic_its_save_device_tables(its);
        if (ret)
                return ret;

        return vgic_its_save_collection_table(its);
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
        int ret;

        ret = vgic_its_restore_collection_table(its);
        if (ret)
                return ret;

        return vgic_its_restore_device_tables(its);
}

static int vgic_its_commit_v0(struct vgic_its *its)
{
        const struct vgic_its_abi *abi;

        abi = vgic_its_get_abi(its);
        its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
        its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

        its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
                                        << GITS_BASER_ENTRY_SIZE_SHIFT);

        its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
                                        << GITS_BASER_ENTRY_SIZE_SHIFT);
        return 0;
}
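
/*
 * KVM_DEV_ARM_ITS_CTRL_RESET: drop all cached translation state and return
 * the command queue registers to their reset values, while preserving the
 * ABI specific fields of the BASER registers.
 */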
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
        /* We need to keep the ABI specific field values */
        its->baser_coll_table &= ~GITS_BASER_VALID;
        its->baser_device_table &= ~GITS_BASER_VALID;
        its->cbaser = 0;
        its->creadr = 0;
        its->cwriter = 0;
        its->enabled = 0;
        vgic_its_free_device_list(kvm, its);
        vgic_its_free_collection_list(kvm, its);
}

static int vgic_its_has_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_ITS_ADDR_TYPE:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_ITS_CTRL_RESET:
                        return 0;
                case KVM_DEV_ARM_ITS_SAVE_TABLES:
                        return 0;
                case KVM_DEV_ARM_ITS_RESTORE_TABLES:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
                return vgic_its_has_attr_regs(dev, attr);
        }
        return -ENXIO;
}
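
/*
 * Table save/restore and reset run with kvm->lock, the its_lock and every
 * vCPU mutex held, so no vCPU can run (and thus no ITS command can be
 * queued) while the tables are being walked.
 */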
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        int ret = 0;

        if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
                return 0;

        mutex_lock(&kvm->lock);
        mutex_lock(&its->its_lock);

        if (!lock_all_vcpus(kvm)) {
                mutex_unlock(&its->its_lock);
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }

        switch (attr) {
        case KVM_DEV_ARM_ITS_CTRL_RESET:
                vgic_its_reset(kvm, its);
                break;
        case KVM_DEV_ARM_ITS_SAVE_TABLES:
                ret = abi->save_tables(its);
                break;
        case KVM_DEV_ARM_ITS_RESTORE_TABLES:
                ret = abi->restore_tables(its);
                break;
        }

        unlock_all_vcpus(kvm);
        mutex_unlock(&its->its_lock);
        mutex_unlock(&kvm->lock);
        return ret;
}

static int vgic_its_set_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        struct vgic_its *its = dev->private;
        int ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                unsigned long type = (unsigned long)attr->attr;
                u64 addr;

                if (type != KVM_VGIC_ITS_ADDR_TYPE)
                        return -ENODEV;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
                                        addr, SZ_64K);
                if (ret)
                        return ret;

                return vgic_register_its_iodev(dev->kvm, its, addr);
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                return vgic_its_ctrl(dev->kvm, its, attr->attr);
        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_its_attr_regs_access(dev, attr, &reg, true);
        }
        }
        return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                struct vgic_its *its = dev->private;
                u64 addr = its->vgic_its_base;
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                unsigned long type = (unsigned long)attr->attr;

                if (type != KVM_VGIC_ITS_ADDR_TYPE)
                        return -ENODEV;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;
                int ret;

                ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        default:
                return -ENXIO;
        }

        return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
        .name = "kvm-arm-vgic-its",
        .create = vgic_its_create,
        .destroy = vgic_its_destroy,
        .set_attr = vgic_its_set_attr,
        .get_attr = vgic_its_get_attr,
        .has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
        return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
                                       KVM_DEV_TYPE_ARM_VGIC_ITS);
}