vgic-v2.c

/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

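/* Write a raw value to list register @lr through the GICH MMIO window. */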
static inline void vgic_v2_write_lr(int lr, u32 val)
{
        void __iomem *base = kvm_vgic_global_state.vctrl_base;

        writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

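/* Start from a clean slate: mark every list register empty. */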
void vgic_v2_init_lrs(void)
{
        int i;

        for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
                vgic_v2_write_lr(i, 0);
}

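/*
 * Set the underflow maintenance interrupt enable bit, so that a
 * maintenance interrupt is raised once no more than one list register
 * is still in use, and the remaining interrupts queued on the ap_list
 * can be injected.
 */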
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

        cpuif->vgic_hcr |= GICH_HCR_UIE;
}

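/*
 * Check whether this LR signalled an EOI maintenance interrupt: the
 * guest must have EOI'ed it (no state left), the EOI bit must be set,
 * and it must not be a hardware-mapped interrupt.
 */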
static bool lr_signals_eoi_mi(u32 lr_val)
{
        return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
               !(lr_val & GICH_LR_HW);
}

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
        int lr;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        cpuif->vgic_hcr &= ~GICH_HCR_UIE;

        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                u32 val = cpuif->vgic_lr[lr];
                u32 cpuid, intid = val & GICH_LR_VIRTUALID;
                struct vgic_irq *irq;

                /* Extract the source vCPU id from the LR */
                cpuid = val & GICH_LR_PHYSID_CPUID;
                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
                cpuid &= 7;

                /* Notify fds when the guest EOI'ed a level-triggered SPI */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

                spin_lock(&irq->irq_lock);

                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);

                if (irq->active && vgic_irq_is_sgi(intid))
                        irq->active_source = cpuid;

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & GICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (vgic_irq_is_sgi(intid))
                                irq->source |= (1 << cpuid);
                }

                /*
                 * Clear soft pending state when level irqs have been acked.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
                        irq->pending_latch = false;

                /*
                 * Level-triggered mapped IRQs are special because we only
                 * observe rising edges as input to the VGIC.
                 *
                 * If the guest never acked the interrupt we have to sample
                 * the physical line and set the line level, because the
                 * device state could have changed or we simply need to
                 * process the still pending interrupt later.
                 *
                 * If this causes us to lower the level, we have to also clear
                 * the physical active state, since we will otherwise never be
                 * told when the interrupt becomes asserted again.
                 */
                if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
                        irq->line_level = vgic_get_phys_line_level(irq);

                        if (!irq->line_level)
                                vgic_irq_set_phys_active(irq, false);
                }

                spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        vgic_cpu->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 val = irq->intid;
        bool allow_pending = true;

        if (irq->active) {
                val |= GICH_LR_ACTIVE_BIT;
                if (vgic_irq_is_sgi(irq->intid))
                        val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
                if (vgic_irq_is_multi_sgi(irq)) {
                        allow_pending = false;
                        val |= GICH_LR_EOI;
                }
        }

        if (irq->group)
                val |= GICH_LR_GROUP1;

        if (irq->hw) {
                val |= GICH_LR_HW;
                val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
                /*
                 * Never set pending+active on a HW interrupt, as the
                 * pending state is kept at the physical distributor
                 * level.
                 */
                if (irq->active)
                        allow_pending = false;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        val |= GICH_LR_EOI;

                        /*
                         * Software resampling doesn't work very well
                         * if we allow P+A, so let's not do that.
                         */
                        if (irq->active)
                                allow_pending = false;
                }
        }

        if (allow_pending && irq_is_pending(irq)) {
                val |= GICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid)) {
                        u32 src = ffs(irq->source);

                        if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
                                           irq->intid))
                                return;

                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));

                        if (irq->source) {
                                irq->pending_latch = true;
                                val |= GICH_LR_EOI;
                        }
                }
        }

        /*
         * Level-triggered mapped IRQs are special because we only observe
         * rising edges as input to the VGIC. We therefore lower the line
         * level here, so that we can take new virtual IRQs. See
         * vgic_v2_fold_lr_state for more info.
         */
        if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
                irq->line_level = false;

        /* The GICv2 LR only holds five bits of priority. */
        val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

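/*
 * Pack the generic struct vgic_vmcr into the GICH_VMCR layout of the
 * shadow register, which is written back to hardware in vgic_v2_load().
 */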
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;

        vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
                GICH_VMCR_ENABLE_GRP0_MASK;
        vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
                GICH_VMCR_ENABLE_GRP1_MASK;
        vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
                GICH_VMCR_ACK_CTL_MASK;
        vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
                GICH_VMCR_FIQ_EN_MASK;
        vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
                GICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
                GICH_VMCR_EOI_MODE_MASK;
        vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
        vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
                 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

        cpu_if->vgic_vmcr = vmcr;
}

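/* The inverse of vgic_v2_set_vmcr(): unpack the cached GICH_VMCR value. */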
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
                GICH_VMCR_ENABLE_GRP0_SHIFT;
        vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
                GICH_VMCR_ENABLE_GRP1_SHIFT;
        vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
                GICH_VMCR_ACK_CTL_SHIFT;
        vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
                GICH_VMCR_FIQ_EN_SHIFT;
        vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
                GICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
                GICH_VMCR_EOI_MODE_SHIFT;
        vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
                GICH_VMCR_ALIAS_BINPOINT_SHIFT;
        vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
                GICH_VMCR_BINPOINT_SHIFT;
        vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
                      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

        /* Get the show on the road... */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
        if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
                return false;
        if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
                return false;

        if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
                return true;
        if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
                return true;

        return false;
}

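/*
 * Map the GICv2 frames at their guest physical addresses: register the
 * distributor MMIO device and, unless GICV accesses are trapped, map
 * the hardware GICV frame into the guest at the CPU interface base.
 */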
int vgic_v2_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
                kvm_err("VGIC CPU and dist frames overlap\n");
                ret = -EINVAL;
                goto out;
        }

        /*
         * Initialize the vgic if this hasn't already been done on demand by
         * accessing the vgic state from userspace.
         */
        ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to initialize VGIC dynamic data structures\n");
                goto out;
        }

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
        if (ret) {
                kvm_err("Unable to register VGIC MMIO regions\n");
                goto out;
        }

        if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
                                            kvm_vgic_global_state.vcpu_base,
                                            KVM_VGIC_V2_CPU_SIZE, true);
                if (ret) {
                        kvm_err("Unable to remap VGIC CPU to VCPU\n");
                        goto out;
                }
        }

        dist->ready = true;

out:
        return ret;
}

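/* True when guest accesses to the GICV frame must be trapped and emulated. */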
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, returns an error
 * code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
        int ret;
        u32 vtr;

        if (!info->vctrl.start) {
                kvm_err("GICH not present in the firmware table\n");
                return -ENXIO;
        }

        if (!PAGE_ALIGNED(info->vcpu.start) ||
            !PAGE_ALIGNED(resource_size(&info->vcpu))) {
                kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

                ret = create_hyp_io_mappings(info->vcpu.start,
                                             resource_size(&info->vcpu),
                                             &kvm_vgic_global_state.vcpu_base_va,
                                             &kvm_vgic_global_state.vcpu_hyp_va);
                if (ret) {
                        kvm_err("Cannot map GICV into hyp\n");
                        goto out;
                }

                static_branch_enable(&vgic_v2_cpuif_trap);
        }

        ret = create_hyp_io_mappings(info->vctrl.start,
                                     resource_size(&info->vctrl),
                                     &kvm_vgic_global_state.vctrl_base,
                                     &kvm_vgic_global_state.vctrl_hyp);
        if (ret) {
                kvm_err("Cannot map VCTRL into hyp\n");
                goto out;
        }

        vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
        kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
        if (ret) {
                kvm_err("Cannot register GICv2 KVM device\n");
                goto out;
        }

        kvm_vgic_global_state.can_emulate_gicv2 = true;
        kvm_vgic_global_state.vcpu_base = info->vcpu.start;
        kvm_vgic_global_state.type = VGIC_V2;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

        kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

        return 0;
out:
        if (kvm_vgic_global_state.vctrl_base)
                iounmap(kvm_vgic_global_state.vctrl_base);
        if (kvm_vgic_global_state.vcpu_base_va)
                iounmap(kvm_vgic_global_state.vcpu_base_va);
        return ret;
}

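/*
 * Fold the in-use list registers back into their shadow copies on exit.
 * A set bit in ELRSR flags an LR that the guest has emptied (EOI'ed);
 * for those we only clear the state bits in the shadow copy rather than
 * paying for an MMIO read.
 */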
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        u64 elrsr;
        int i;

        elrsr = readl_relaxed(base + GICH_ELRSR0);
        if (unlikely(used_lrs > 32))
                elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

        for (i = 0; i < used_lrs; i++) {
                if (elrsr & (1UL << i))
                        cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
                else
                        cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

                writel_relaxed(0, base + GICH_LR0 + (i * 4));
        }
}

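/*
 * On guest exit, save any in-use list registers and disable the GICH
 * interface by clearing GICH_HCR.
 */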
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
        void __iomem *base = kvm_vgic_global_state.vctrl_base;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

        if (!base)
                return;

        if (used_lrs) {
                save_lrs(vcpu, base);
                writel_relaxed(0, base + GICH_HCR);
        }
}

void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        void __iomem *base = kvm_vgic_global_state.vctrl_base;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;

        if (!base)
                return;

        if (used_lrs) {
                writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
                for (i = 0; i < used_lrs; i++) {
                        writel_relaxed(cpu_if->vgic_lr[i],
                                       base + GICH_LR0 + (i * 4));
                }
        }
}

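/* Restore the VMCR and active priority registers when the vcpu is loaded. */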
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

        writel_relaxed(cpu_if->vgic_vmcr,
                       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
        writel_relaxed(cpu_if->vgic_apr,
                       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

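/*
 * Snapshot GICH_VMCR into the shadow copy, so that it can be read while
 * the vcpu is not resident on a physical CPU.
 */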
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

        cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
}

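/* On vcpu put, sync the VMCR and save the active priority registers. */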
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

        vgic_v2_vmcr_sync(vcpu);
        cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}