vgic-v4.c

/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_schedule_vpe(). You
 * must be in a non-preemptible context. On exit, another call to
 * its_schedule_vpe() tells the redistributor that we're done with the
 * vcpu.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
 */
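
/*
 * For context, the VFIO callback mentioned above arrives through the
 * irq bypass framework. A minimal sketch of what such a caller is
 * expected to look like is shown below; the actual glue lives outside
 * this file, and the irqfd plumbing shown here is illustrative only:
 *
 *	int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 *					     struct irq_bypass_producer *prod)
 *	{
 *		struct kvm_kernel_irqfd *irqfd =
 *			container_of(cons, struct kvm_kernel_irqfd, consumer);
 *
 *		return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
 *						  &irqfd->irq_entry);
 *	}
 */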

#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
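
/*
 * Doorbell handler: a VLPI was made pending while the vcpu was not
 * resident. Record that fact and kick the vcpu so that it picks the
 * interrupt up on its next entry.
 */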
static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
        struct kvm_vcpu *vcpu = info;

        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return IRQ_HANDLED;
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm: Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. This relies on kvm->lock to be
 * held. In both cases, the number of vcpus should now be
 * fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, nr_vcpus, ret;

        if (!kvm_vgic_global_state.has_gicv4)
                return 0; /* Nothing to see here... move along. */

        if (dist->its_vm.vpes)
                return 0;

        nr_vcpus = atomic_read(&kvm->online_vcpus);

        dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
                                    GFP_KERNEL);
        if (!dist->its_vm.vpes)
                return -ENOMEM;

        dist->its_vm.nr_vpes = nr_vcpus;

        kvm_for_each_vcpu(i, vcpu, kvm)
                dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

        ret = its_alloc_vcpu_irqs(&dist->its_vm);
        if (ret < 0) {
                kvm_err("VPE IRQ allocation failure\n");
                kfree(dist->its_vm.vpes);
                dist->its_vm.nr_vpes = 0;
                dist->its_vm.vpes = NULL;
                return ret;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                int irq = dist->its_vm.vpes[i]->irq;

                /*
                 * Don't automatically enable the doorbell, as we're
                 * flipping it back and forth when the vcpu gets
                 * blocked. Also disable the lazy disabling, as the
                 * doorbell could kick us out of the guest too
                 * early...
                 */
                irq_set_status_flags(irq, DB_IRQ_FLAGS);
                ret = request_irq(irq, vgic_v4_doorbell_handler,
                                  0, "vcpu", vcpu);
                if (ret) {
                        kvm_err("failed to allocate vcpu IRQ%d\n", irq);
                        /*
                         * Trick: adjust the number of vpes so we know
                         * how many to nuke on teardown...
                         */
                        dist->its_vm.nr_vpes = i;
                        break;
                }
        }

        if (ret)
                vgic_v4_teardown(kvm);

        return ret;
}

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being destroyed
 *
 * Relies on kvm->lock to be held.
 */
void vgic_v4_teardown(struct kvm *kvm)
{
        struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
        int i;

        if (!its_vm->vpes)
                return;

        for (i = 0; i < its_vm->nr_vpes; i++) {
                struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
                int irq = its_vm->vpes[i]->irq;

                irq_clear_status_flags(irq, DB_IRQ_FLAGS);
                free_irq(irq, vcpu);
        }

        its_free_vcpu_irqs(its_vm);
        kfree(its_vm->vpes);
        its_vm->nr_vpes = 0;
        its_vm->vpes = NULL;
}
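
/*
 * Called on guest exit (from a non-preemptible context): make the VPE
 * non-resident, telling the redistributor that we are done running
 * this vcpu for now.
 */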
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
{
        if (!vgic_supports_direct_msis(vcpu->kvm))
                return 0;

        return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
}
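
/*
 * Called before entering the guest (from a non-preemptible context):
 * point the doorbell irq at the current CPU, make the VPE resident on
 * the corresponding redistributor, and clear any stale doorbell.
 */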
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
{
        int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
        int err;

        if (!vgic_supports_direct_msis(vcpu->kvm))
                return 0;

        /*
         * Before making the VPE resident, make sure the redistributor
         * corresponding to our current CPU expects us here. See the
         * doc in drivers/irqchip/irq-gic-v4.c to understand how this
         * turns into a VMOVP command at the ITS level.
         */
        err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
        if (err)
                return err;

        err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
        if (err)
                return err;

        /*
         * Now that the VPE is resident, let's get rid of a potential
         * doorbell interrupt that would still be pending.
         */
        err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);

        return err;
}
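
/*
 * Translate a kernel routing entry into the vITS it targets by
 * rebuilding a struct kvm_msi and reusing the normal MSI translation
 * path. Returns an ERR_PTR() if the doorbell address does not match
 * any of our vITSs.
 */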
static struct vgic_its *vgic_get_its(struct kvm *kvm,
                                     struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct kvm_msi msi = (struct kvm_msi) {
                .address_lo = irq_entry->msi.address_lo,
                .address_hi = irq_entry->msi.address_hi,
                .data       = irq_entry->msi.data,
                .flags      = irq_entry->msi.flags,
                .devid      = irq_entry->msi.devid,
        };

        return vgic_msi_to_its(kvm, &msi);
}
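
/**
 * kvm_vgic_v4_set_forwarding - Map a host LPI onto a guest VLPI
 * @kvm: Pointer to the VM
 * @virq: Linux irq number of the host interrupt to be forwarded
 * @irq_entry: Routing entry describing the MSI programmed for the guest
 *
 * Called by VFIO when a PCI device has been configured to deliver MSIs
 * to a vITS. Resolves the routing entry to a guest LPI and maps it as
 * a VLPI targeting the vcpu the guest programmed.
 */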
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
                               struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        struct its_vlpi_map map;
        int ret;

        if (!vgic_supports_direct_msis(kvm))
                return 0;

        /*
         * Get the ITS, and escape early on error (not a valid
         * doorbell for any of our vITSs).
         */
        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;

        mutex_lock(&its->its_lock);

        /* Perform the actual DevID/EventID -> LPI translation. */
        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                goto out;

        /*
         * Emit the mapping request. If it fails, the ITS probably
         * isn't v4 compatible, so let's silently bail out. Holding
         * the ITS lock should ensure that nothing can modify the
         * target vcpu.
         */
        map = (struct its_vlpi_map) {
                .vm = &kvm->arch.vgic.its_vm,
                .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
                .vintid = irq->intid,
                .properties = ((irq->priority & 0xfc) |
                               (irq->enabled ? LPI_PROP_ENABLED : 0) |
                               LPI_PROP_GROUP1),
                .db_enabled = true,
        };

        ret = its_map_vlpi(virq, &map);
        if (ret)
                goto out;

        irq->hw = true;
        irq->host_irq = virq;

out:
        mutex_unlock(&its->its_lock);
        return ret;
}
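
/**
 * kvm_vgic_v4_unset_forwarding - Remove a host LPI to guest VLPI mapping
 * @kvm: Pointer to the VM
 * @virq: Linux irq number of the host interrupt being unforwarded
 * @irq_entry: Routing entry that was used to establish the forwarding
 *
 * Called by VFIO when the PCI endpoint is deconfigured, reversing the
 * effect of kvm_vgic_v4_set_forwarding().
 */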
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
                                 struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        int ret;

        if (!vgic_supports_direct_msis(kvm))
                return 0;

        /*
         * Get the ITS, and escape early on error (not a valid
         * doorbell for any of our vITSs).
         */
        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;

        mutex_lock(&its->its_lock);

        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                goto out;

        WARN_ON(!(irq->hw && irq->host_irq == virq));
        if (irq->hw) {
                irq->hw = false;
                ret = its_unmap_vlpi(virq);
        }

out:
        mutex_unlock(&its->its_lock);
        return ret;
}
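
/*
 * Enable the doorbell interrupt when the vcpu blocks, so that a VLPI
 * made pending while the vcpu is not resident wakes it up.
 */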
void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
{
        if (vgic_supports_direct_msis(vcpu->kvm)) {
                int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;

                if (irq)
                        enable_irq(irq);
        }
}
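
/*
 * Disable the doorbell interrupt again once the vcpu is unblocked, as
 * described in the overview comment at the top of this file.
 */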
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
{
        if (vgic_supports_direct_msis(vcpu->kvm)) {
                int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;

                if (irq)
                        disable_irq(irq);
        }
}