vgic-mmio.c

/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

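/*
 * Trivial handlers for reserved or unimplemented registers:
 * read-as-zero, read-as-all-ones and write-ignore.
 */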
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
		unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
		unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

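/*
 * Group registers (one bit per interrupt): reads return the configured
 * group, writes update it and call vgic_queue_irq_unlock() so an interrupt
 * that became deliverable gets queued.
 */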
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
		unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

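/*
 * GICD_ISENABLER enables and GICD_ICENABLER disables the interrupts whose
 * bits are set in the written value; zero bits are ignored.
 */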
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

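/*
 * Reads of the pending registers return the current pending state, one bit
 * per interrupt.  The per-IRQ lock is held while sampling the state to get
 * a consistent view.
 */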
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection.  We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

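/*
 * Reads of both the set-active and clear-active registers return the
 * current active state, one bit per interrupt.
 */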
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations.  Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

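/*
 * GICD_IPRIORITYR: one byte per interrupt.  Only the top VGIC_PRI_BITS bits
 * of each priority field are implemented; the remaining bits read as zero.
 */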
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU.  If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

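/*
 * GICD_ICFGR: two configuration bits per interrupt.  Bit[1] of each field is
 * set for edge-triggered interrupts and clear for level-sensitive ones; the
 * low bit of each field is ignored here.
 */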
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
		gpa_t addr, unsigned int len,
		unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED.  The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

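/*
 * Accessors for the line level of up to 32 interrupts at a time, used for
 * userspace save/restore of the level state.  SGIs and non-allocated
 * interrupts are skipped.
 */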
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

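/*
 * vgic_find_mmio_region() relies on the register region tables being sorted
 * by offset, so it can locate the region covering a given offset with
 * bsearch().
 */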
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

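/*
 * Check that the access width and alignment are allowed for this region and
 * that, for per-IRQ registers, the addressed interrupt actually exists.
 */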
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

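/*
 * MMIO dispatchers registered on the KVM I/O bus: look up the register
 * region for the accessed address and forward the access to its handler,
 * converting between the guest's little-endian format and the host's
 * native data format.
 */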
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

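/*
 * Register the distributor's MMIO range on the KVM MMIO bus, using the
 * register table that matches the emulated GIC model.
 */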
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}