vgic-v3.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
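
/*
 * Request an underflow maintenance interrupt: with ICH_HCR_EL2.UIE set, the
 * CPU interface raises a maintenance interrupt when none, or only one, of
 * the list registers holds a valid interrupt, giving us a chance to refill
 * them from the software model.
 */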
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}
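
/*
 * Check whether this LR signals an EOI maintenance interrupt: the LR state
 * is invalid (the guest has EOIed the interrupt), EOI maintenance was
 * requested, and it is not a HW (directly injected) interrupt.
 */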
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}
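
/*
 * Fold the state of the used list registers back into the software model
 * (struct vgic_irq) after running the guest, and clear any pending
 * underflow maintenance interrupt request.
 */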
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));

			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
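
/* Invalidate a list register by clearing its shadow copy. */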
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
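
/*
 * Pack the individual fields of struct vgic_vmcr into the shadow
 * ICH_VMCR_EL2 value kept in the CPU interface state.
 */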
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						 \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
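
/*
 * Reset the GICv3 CPU interface state for a vcpu: clear the shadow VMCR,
 * expose the SRE setting matching the emulated GIC model, and enable the
 * virtual CPU interface together with any requested sysreg traps.
 */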
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}
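
/*
 * Read the LPI's pending bit from the guest's pending table, latch it into
 * the software model and, if it was set, clear the bit in guest memory so
 * that the pending state is only held in one place at a time.
 */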
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of the region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
		    rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}
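
/* Return the registered redistributor region with the given index, or NULL. */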
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}
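
/*
 * Final sanity checks before the vgic is marked ready: every vcpu must have
 * a redistributor base, the distributor base must be set and must not
 * overlap the redistributors, and the distributor MMIO device is registered.
 */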
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	if (vgic_ready(kvm))
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			ret = -ENXIO;
			goto out;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
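
/*
 * Early command line parameters: "kvm-arm.vgic_v3_group0_trap",
 * "kvm-arm.vgic_v3_group1_trap" and "kvm-arm.vgic_v3_common_trap" force
 * trapping of the corresponding guest GICv3 system register accesses, and
 * "kvm-arm.vgic_v4_enable" opts in to GICv4 support.
 */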
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}
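
/*
 * Restore the CPU interface state when the vcpu is loaded on a physical CPU:
 * write the shadow VMCR (unless the GICv2-on-GICv3 case handles it in the
 * world switch), restore the active priority registers and, on VHE, activate
 * the traps.
 */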
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_activate_traps(vcpu);
}
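
/*
 * Read the current hardware VMCR back into the shadow copy (only when SRE
 * is set; otherwise the world switch code takes care of it).
 */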
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_deactivate_traps(vcpu);
}