/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */
  23. int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
  24. phys_addr_t addr, phys_addr_t alignment)
  25. {
  26. if (addr & ~KVM_PHYS_MASK)
  27. return -E2BIG;
  28. if (!IS_ALIGNED(addr, alignment))
  29. return -EINVAL;
  30. if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
  31. return -EEXIST;
  32. return 0;
  33. }
  34. static int vgic_check_type(struct kvm *kvm, int type_needed)
  35. {
  36. if (kvm->arch.vgic.vgic_model != type_needed)
  37. return -ENODEV;
  38. else
  39. return 0;
  40. }
  41. /**
  42. * kvm_vgic_addr - set or get vgic VM base addresses
  43. * @kvm: pointer to the vm struct
  44. * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
  45. * @addr: pointer to address value
  46. * @write: if true set the address in the VM address space, if false read the
  47. * address
  48. *
  49. * Set or get the vgic base addresses for the distributor and the virtual CPU
  50. * interface in the VM physical address space. These addresses are properties
  51. * of the emulated core/SoC and therefore user space initially knows this
  52. * information.
  53. * Check them for sanity (alignment, double assignment). We can't check for
  54. * overlapping regions in case of a virtual GICv3 here, since we don't know
  55. * the number of VCPUs yet, so we defer this check to map_resources().
  56. */
  57. int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
  58. {
  59. int r = 0;
  60. struct vgic_dist *vgic = &kvm->arch.vgic;
  61. phys_addr_t *addr_ptr, alignment;
  62. u64 undef_value = VGIC_ADDR_UNDEF;
  63. mutex_lock(&kvm->lock);
  64. switch (type) {
  65. case KVM_VGIC_V2_ADDR_TYPE_DIST:
  66. r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
  67. addr_ptr = &vgic->vgic_dist_base;
  68. alignment = SZ_4K;
  69. break;
  70. case KVM_VGIC_V2_ADDR_TYPE_CPU:
  71. r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
  72. addr_ptr = &vgic->vgic_cpu_base;
  73. alignment = SZ_4K;
  74. break;
  75. case KVM_VGIC_V3_ADDR_TYPE_DIST:
  76. r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
  77. addr_ptr = &vgic->vgic_dist_base;
  78. alignment = SZ_64K;
  79. break;
  80. case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
  81. struct vgic_redist_region *rdreg;
  82. r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
  83. if (r)
  84. break;
  85. if (write) {
  86. r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
  87. goto out;
  88. }
  89. rdreg = list_first_entry(&vgic->rd_regions,
  90. struct vgic_redist_region, list);
  91. if (!rdreg)
  92. addr_ptr = &undef_value;
  93. else
  94. addr_ptr = &rdreg->base;
  95. break;
  96. }
  97. case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
  98. {
  99. struct vgic_redist_region *rdreg;
  100. u8 index;
  101. r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
  102. if (r)
  103. break;
  104. index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
  105. if (write) {
  106. gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
  107. u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
  108. >> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
  109. u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
  110. >> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;
  111. if (!count || flags)
  112. r = -EINVAL;
  113. else
  114. r = vgic_v3_set_redist_base(kvm, index,
  115. base, count);
  116. goto out;
  117. }
  118. rdreg = vgic_v3_rdist_region_from_index(kvm, index);
  119. if (!rdreg) {
  120. r = -ENOENT;
  121. goto out;
  122. }
  123. *addr = index;
  124. *addr |= rdreg->base;
  125. *addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
  126. goto out;
  127. }
  128. default:
  129. r = -ENODEV;
  130. }
  131. if (r)
  132. goto out;
  133. if (write) {
  134. r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
  135. if (!r)
  136. *addr_ptr = *addr;
  137. } else {
  138. *addr = *addr_ptr;
  139. }
  140. out:
  141. mutex_unlock(&kvm->lock);
  142. return r;
  143. }
  144. static int vgic_set_common_attr(struct kvm_device *dev,
  145. struct kvm_device_attr *attr)
  146. {
  147. int r;
  148. switch (attr->group) {
  149. case KVM_DEV_ARM_VGIC_GRP_ADDR: {
  150. u64 __user *uaddr = (u64 __user *)(long)attr->addr;
  151. u64 addr;
  152. unsigned long type = (unsigned long)attr->attr;
  153. if (copy_from_user(&addr, uaddr, sizeof(addr)))
  154. return -EFAULT;
  155. r = kvm_vgic_addr(dev->kvm, type, &addr, true);
  156. return (r == -ENODEV) ? -ENXIO : r;
  157. }
  158. case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
  159. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  160. u32 val;
  161. int ret = 0;
  162. if (get_user(val, uaddr))
  163. return -EFAULT;
  164. /*
  165. * We require:
  166. * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
  167. * - at most 1024 interrupts
  168. * - a multiple of 32 interrupts
  169. */
  170. if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
  171. val > VGIC_MAX_RESERVED ||
  172. (val & 31))
  173. return -EINVAL;
  174. mutex_lock(&dev->kvm->lock);
  175. if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
  176. ret = -EBUSY;
  177. else
  178. dev->kvm->arch.vgic.nr_spis =
  179. val - VGIC_NR_PRIVATE_IRQS;
  180. mutex_unlock(&dev->kvm->lock);
  181. return ret;
  182. }
  183. case KVM_DEV_ARM_VGIC_GRP_CTRL: {
  184. switch (attr->attr) {
  185. case KVM_DEV_ARM_VGIC_CTRL_INIT:
  186. mutex_lock(&dev->kvm->lock);
  187. r = vgic_init(dev->kvm);
  188. mutex_unlock(&dev->kvm->lock);
  189. return r;
  190. }
  191. break;
  192. }
  193. }
  194. return -ENXIO;
  195. }
  196. static int vgic_get_common_attr(struct kvm_device *dev,
  197. struct kvm_device_attr *attr)
  198. {
  199. int r = -ENXIO;
  200. switch (attr->group) {
  201. case KVM_DEV_ARM_VGIC_GRP_ADDR: {
  202. u64 __user *uaddr = (u64 __user *)(long)attr->addr;
  203. u64 addr;
  204. unsigned long type = (unsigned long)attr->attr;
  205. r = kvm_vgic_addr(dev->kvm, type, &addr, false);
  206. if (r)
  207. return (r == -ENODEV) ? -ENXIO : r;
  208. if (copy_to_user(uaddr, &addr, sizeof(addr)))
  209. return -EFAULT;
  210. break;
  211. }
  212. case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
  213. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  214. r = put_user(dev->kvm->arch.vgic.nr_spis +
  215. VGIC_NR_PRIVATE_IRQS, uaddr);
  216. break;
  217. }
  218. }
  219. return r;
  220. }
  221. static int vgic_create(struct kvm_device *dev, u32 type)
  222. {
  223. return kvm_vgic_create(dev->kvm, type);
  224. }
  225. static void vgic_destroy(struct kvm_device *dev)
  226. {
  227. kfree(dev);
  228. }
  229. int kvm_register_vgic_device(unsigned long type)
  230. {
  231. int ret = -ENODEV;
  232. switch (type) {
  233. case KVM_DEV_TYPE_ARM_VGIC_V2:
  234. ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
  235. KVM_DEV_TYPE_ARM_VGIC_V2);
  236. break;
  237. case KVM_DEV_TYPE_ARM_VGIC_V3:
  238. ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
  239. KVM_DEV_TYPE_ARM_VGIC_V3);
  240. if (ret)
  241. break;
  242. ret = kvm_vgic_register_its_device();
  243. break;
  244. }
  245. return ret;
  246. }
  247. int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
  248. struct vgic_reg_attr *reg_attr)
  249. {
  250. int cpuid;
  251. cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
  252. KVM_DEV_ARM_VGIC_CPUID_SHIFT;
  253. if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
  254. return -EINVAL;
  255. reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
  256. reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
  257. return 0;
  258. }
  259. /* unlocks vcpus from @vcpu_lock_idx and smaller */
  260. static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
  261. {
  262. struct kvm_vcpu *tmp_vcpu;
  263. for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
  264. tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
  265. mutex_unlock(&tmp_vcpu->mutex);
  266. }
  267. }
  268. void unlock_all_vcpus(struct kvm *kvm)
  269. {
  270. unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
  271. }
  272. /* Returns true if all vcpus were locked, false otherwise */
  273. bool lock_all_vcpus(struct kvm *kvm)
  274. {
  275. struct kvm_vcpu *tmp_vcpu;
  276. int c;
  277. /*
  278. * Any time a vcpu is run, vcpu_load is called which tries to grab the
  279. * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
  280. * that no other VCPUs are run and fiddle with the vgic state while we
  281. * access it.
  282. */
  283. kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
  284. if (!mutex_trylock(&tmp_vcpu->mutex)) {
  285. unlock_vcpus(kvm, c - 1);
  286. return false;
  287. }
  288. }
  289. return true;
  290. }
  291. /**
  292. * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
  293. *
  294. * @dev: kvm device handle
  295. * @attr: kvm device attribute
  296. * @reg: address the value is read or written
  297. * @is_write: true if userspace is writing a register
  298. */
  299. static int vgic_v2_attr_regs_access(struct kvm_device *dev,
  300. struct kvm_device_attr *attr,
  301. u32 *reg, bool is_write)
  302. {
  303. struct vgic_reg_attr reg_attr;
  304. gpa_t addr;
  305. struct kvm_vcpu *vcpu;
  306. int ret;
  307. ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
  308. if (ret)
  309. return ret;
  310. vcpu = reg_attr.vcpu;
  311. addr = reg_attr.addr;
  312. mutex_lock(&dev->kvm->lock);
  313. ret = vgic_init(dev->kvm);
  314. if (ret)
  315. goto out;
  316. if (!lock_all_vcpus(dev->kvm)) {
  317. ret = -EBUSY;
  318. goto out;
  319. }
  320. switch (attr->group) {
  321. case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
  322. ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
  323. break;
  324. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  325. ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
  326. break;
  327. default:
  328. ret = -EINVAL;
  329. break;
  330. }
  331. unlock_all_vcpus(dev->kvm);
  332. out:
  333. mutex_unlock(&dev->kvm->lock);
  334. return ret;
  335. }
  336. static int vgic_v2_set_attr(struct kvm_device *dev,
  337. struct kvm_device_attr *attr)
  338. {
  339. int ret;
  340. ret = vgic_set_common_attr(dev, attr);
  341. if (ret != -ENXIO)
  342. return ret;
  343. switch (attr->group) {
  344. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  345. case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
  346. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  347. u32 reg;
  348. if (get_user(reg, uaddr))
  349. return -EFAULT;
  350. return vgic_v2_attr_regs_access(dev, attr, &reg, true);
  351. }
  352. }
  353. return -ENXIO;
  354. }
  355. static int vgic_v2_get_attr(struct kvm_device *dev,
  356. struct kvm_device_attr *attr)
  357. {
  358. int ret;
  359. ret = vgic_get_common_attr(dev, attr);
  360. if (ret != -ENXIO)
  361. return ret;
  362. switch (attr->group) {
  363. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  364. case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
  365. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  366. u32 reg = 0;
  367. ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
  368. if (ret)
  369. return ret;
  370. return put_user(reg, uaddr);
  371. }
  372. }
  373. return -ENXIO;
  374. }
  375. static int vgic_v2_has_attr(struct kvm_device *dev,
  376. struct kvm_device_attr *attr)
  377. {
  378. switch (attr->group) {
  379. case KVM_DEV_ARM_VGIC_GRP_ADDR:
  380. switch (attr->attr) {
  381. case KVM_VGIC_V2_ADDR_TYPE_DIST:
  382. case KVM_VGIC_V2_ADDR_TYPE_CPU:
  383. return 0;
  384. }
  385. break;
  386. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  387. case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
  388. return vgic_v2_has_attr_regs(dev, attr);
  389. case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
  390. return 0;
  391. case KVM_DEV_ARM_VGIC_GRP_CTRL:
  392. switch (attr->attr) {
  393. case KVM_DEV_ARM_VGIC_CTRL_INIT:
  394. return 0;
  395. }
  396. }
  397. return -ENXIO;
  398. }
  399. struct kvm_device_ops kvm_arm_vgic_v2_ops = {
  400. .name = "kvm-arm-vgic-v2",
  401. .create = vgic_create,
  402. .destroy = vgic_destroy,
  403. .set_attr = vgic_v2_set_attr,
  404. .get_attr = vgic_v2_get_attr,
  405. .has_attr = vgic_v2_has_attr,
  406. };
  407. int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
  408. struct vgic_reg_attr *reg_attr)
  409. {
  410. unsigned long vgic_mpidr, mpidr_reg;
  411. /*
  412. * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
  413. * attr might not hold MPIDR. Hence assume vcpu0.
  414. */
  415. if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
  416. vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
  417. KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
  418. mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
  419. reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
  420. } else {
  421. reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
  422. }
  423. if (!reg_attr->vcpu)
  424. return -EINVAL;
  425. reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
  426. return 0;
  427. }
  428. /*
  429. * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
  430. *
  431. * @dev: kvm device handle
  432. * @attr: kvm device attribute
  433. * @reg: address the value is read or written
  434. * @is_write: true if userspace is writing a register
  435. */
  436. static int vgic_v3_attr_regs_access(struct kvm_device *dev,
  437. struct kvm_device_attr *attr,
  438. u64 *reg, bool is_write)
  439. {
  440. struct vgic_reg_attr reg_attr;
  441. gpa_t addr;
  442. struct kvm_vcpu *vcpu;
  443. int ret;
  444. u32 tmp32;
  445. ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
  446. if (ret)
  447. return ret;
  448. vcpu = reg_attr.vcpu;
  449. addr = reg_attr.addr;
  450. mutex_lock(&dev->kvm->lock);
  451. if (unlikely(!vgic_initialized(dev->kvm))) {
  452. ret = -EBUSY;
  453. goto out;
  454. }
  455. if (!lock_all_vcpus(dev->kvm)) {
  456. ret = -EBUSY;
  457. goto out;
  458. }
  459. switch (attr->group) {
  460. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  461. if (is_write)
  462. tmp32 = *reg;
  463. ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
  464. if (!is_write)
  465. *reg = tmp32;
  466. break;
  467. case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
  468. if (is_write)
  469. tmp32 = *reg;
  470. ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
  471. if (!is_write)
  472. *reg = tmp32;
  473. break;
  474. case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
  475. u64 regid;
  476. regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
  477. ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
  478. regid, reg);
  479. break;
  480. }
  481. case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
  482. unsigned int info, intid;
  483. info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
  484. KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
  485. if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
  486. intid = attr->attr &
  487. KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
  488. ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
  489. intid, reg);
  490. } else {
  491. ret = -EINVAL;
  492. }
  493. break;
  494. }
  495. default:
  496. ret = -EINVAL;
  497. break;
  498. }
  499. unlock_all_vcpus(dev->kvm);
  500. out:
  501. mutex_unlock(&dev->kvm->lock);
  502. return ret;
  503. }
  504. static int vgic_v3_set_attr(struct kvm_device *dev,
  505. struct kvm_device_attr *attr)
  506. {
  507. int ret;
  508. ret = vgic_set_common_attr(dev, attr);
  509. if (ret != -ENXIO)
  510. return ret;
  511. switch (attr->group) {
  512. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  513. case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
  514. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  515. u32 tmp32;
  516. u64 reg;
  517. if (get_user(tmp32, uaddr))
  518. return -EFAULT;
  519. reg = tmp32;
  520. return vgic_v3_attr_regs_access(dev, attr, &reg, true);
  521. }
  522. case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
  523. u64 __user *uaddr = (u64 __user *)(long)attr->addr;
  524. u64 reg;
  525. if (get_user(reg, uaddr))
  526. return -EFAULT;
  527. return vgic_v3_attr_regs_access(dev, attr, &reg, true);
  528. }
  529. case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
  530. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  531. u64 reg;
  532. u32 tmp32;
  533. if (get_user(tmp32, uaddr))
  534. return -EFAULT;
  535. reg = tmp32;
  536. return vgic_v3_attr_regs_access(dev, attr, &reg, true);
  537. }
  538. case KVM_DEV_ARM_VGIC_GRP_CTRL: {
  539. int ret;
  540. switch (attr->attr) {
  541. case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
  542. mutex_lock(&dev->kvm->lock);
  543. if (!lock_all_vcpus(dev->kvm)) {
  544. mutex_unlock(&dev->kvm->lock);
  545. return -EBUSY;
  546. }
  547. ret = vgic_v3_save_pending_tables(dev->kvm);
  548. unlock_all_vcpus(dev->kvm);
  549. mutex_unlock(&dev->kvm->lock);
  550. return ret;
  551. }
  552. break;
  553. }
  554. }
  555. return -ENXIO;
  556. }
  557. static int vgic_v3_get_attr(struct kvm_device *dev,
  558. struct kvm_device_attr *attr)
  559. {
  560. int ret;
  561. ret = vgic_get_common_attr(dev, attr);
  562. if (ret != -ENXIO)
  563. return ret;
  564. switch (attr->group) {
  565. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  566. case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
  567. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  568. u64 reg;
  569. u32 tmp32;
  570. ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
  571. if (ret)
  572. return ret;
  573. tmp32 = reg;
  574. return put_user(tmp32, uaddr);
  575. }
  576. case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
  577. u64 __user *uaddr = (u64 __user *)(long)attr->addr;
  578. u64 reg;
  579. ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
  580. if (ret)
  581. return ret;
  582. return put_user(reg, uaddr);
  583. }
  584. case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
  585. u32 __user *uaddr = (u32 __user *)(long)attr->addr;
  586. u64 reg;
  587. u32 tmp32;
  588. ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
  589. if (ret)
  590. return ret;
  591. tmp32 = reg;
  592. return put_user(tmp32, uaddr);
  593. }
  594. }
  595. return -ENXIO;
  596. }
  597. static int vgic_v3_has_attr(struct kvm_device *dev,
  598. struct kvm_device_attr *attr)
  599. {
  600. switch (attr->group) {
  601. case KVM_DEV_ARM_VGIC_GRP_ADDR:
  602. switch (attr->attr) {
  603. case KVM_VGIC_V3_ADDR_TYPE_DIST:
  604. case KVM_VGIC_V3_ADDR_TYPE_REDIST:
  605. case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
  606. return 0;
  607. }
  608. break;
  609. case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
  610. case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
  611. case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
  612. return vgic_v3_has_attr_regs(dev, attr);
  613. case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
  614. return 0;
  615. case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
  616. if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
  617. KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
  618. VGIC_LEVEL_INFO_LINE_LEVEL)
  619. return 0;
  620. break;
  621. }
  622. case KVM_DEV_ARM_VGIC_GRP_CTRL:
  623. switch (attr->attr) {
  624. case KVM_DEV_ARM_VGIC_CTRL_INIT:
  625. return 0;
  626. case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
  627. return 0;
  628. }
  629. }
  630. return -ENXIO;
  631. }
  632. struct kvm_device_ops kvm_arm_vgic_v3_ops = {
  633. .name = "kvm-arm-vgic-v3",
  634. .create = vgic_create,
  635. .destroy = vgic_destroy,
  636. .set_attr = vgic_v3_set_attr,
  637. .get_attr = vgic_v3_get_attr,
  638. .has_attr = vgic_v3_has_attr,
  639. };