// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/android/staging/vsoc.c
 *
 * Android Virtual System on a Chip (VSoC) driver
 *
 * Copyright (C) 2017 Google, Inc.
 *
 * Author: ghartman@google.com
 *
 * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
 *         Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca>
 *
 * Based on cirrusfb.c and 8139cp.c:
 *         Copyright 1999-2001 Jeff Garzik
 *         Copyright 2001-2004 Jeff Garzik
 */

#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/futex.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include "uapi/vsoc_shm.h"

#define VSOC_DEV_NAME "vsoc"

/*
 * Description of the ivshmem-doorbell PCI device used by QEmu. These
 * constants follow docs/specs/ivshmem-spec.txt, which can be found in
 * the QEmu repository. This was last reconciled with the version that
 * came out with 2.8.
 */

/*
 * These constants are the KVM Inter-VM shared memory device
 * register offsets.
 */
enum {
	INTR_MASK = 0x00,	/* Interrupt Mask */
	INTR_STATUS = 0x04,	/* Interrupt Status */
	IV_POSITION = 0x08,	/* VM ID */
	DOORBELL = 0x0c,	/* Doorbell */
};

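/*
 * Doorbell usage, as this driver reads the ivshmem spec cited above (see
 * ivshmem-spec.txt for the authoritative layout): a 32-bit write to
 * DOORBELL interrupts a peer, with the low 16 bits selecting the MSI-X
 * vector and the high 16 bits selecting the peer. The DOORBELL writes in
 * vsoc_ioctl() use the bare region number, i.e. peer 0 (the host) with one
 * vector per region.
 */
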
static const int REGISTER_BAR;	/* Equal to 0 */
static const int MAX_REGISTER_BAR_LEN = 0x100;
/*
 * The MSI-x BAR is not used directly.
 *
 * static const int MSI_X_BAR = 1;
 */
static const int SHARED_MEMORY_BAR = 2;

struct vsoc_region_data {
	char name[VSOC_DEVICE_NAME_SZ + 1];
	wait_queue_head_t interrupt_wait_queue;
	/* TODO(b/73664181): Use multiple futex wait queues */
	wait_queue_head_t futex_wait_queue;
	/* Flag indicating that an interrupt has been signalled by the host. */
	atomic_t *incoming_signalled;
	/* Flag indicating the guest has signalled the host. */
	atomic_t *outgoing_signalled;
	bool irq_requested;
	bool device_created;
};

struct vsoc_device {
	/* Kernel virtual address of REGISTER_BAR. */
	void __iomem *regs;
	/* Physical address of SHARED_MEMORY_BAR. */
	phys_addr_t shm_phys_start;
	/* Kernel virtual address of SHARED_MEMORY_BAR. */
	void __iomem *kernel_mapped_shm;
	/* Size of the entire shared memory window in bytes. */
	size_t shm_size;
	/*
	 * Pointer to the virtual address of the shared memory layout
	 * structure. This is probably identical to kernel_mapped_shm, but
	 * saving this here saves a lot of annoying casts.
	 */
	struct vsoc_shm_layout_descriptor *layout;
	/*
	 * Points to a table of region descriptors in the kernel's virtual
	 * address space. Calculated from
	 * vsoc_shm_layout_descriptor.vsoc_region_desc_offset.
	 */
	struct vsoc_device_region *regions;
	/* Head of a list of permissions that have been granted. */
	struct list_head permissions;
	struct pci_dev *dev;
	/* Per-region (and therefore per-interrupt) information. */
	struct vsoc_region_data *regions_data;
	/*
	 * Table of msi-x entries. This has to be separated from struct
	 * vsoc_region_data because the kernel deals with them as an array.
	 */
	struct msix_entry *msix_entries;
	/* Mutex that protects the permission list. */
	struct mutex mtx;
	/* Major number assigned by the kernel. */
	int major;
	/* Character device assigned by the kernel. */
	struct cdev cdev;
	/* Device class assigned by the kernel. */
	struct class *class;
	/*
	 * Flags that indicate what we've initialized. These are used to do an
	 * orderly cleanup of the device.
	 */
	bool enabled_device;
	bool requested_regions;
	bool cdev_added;
	bool class_added;
	bool msix_enabled;
};

static struct vsoc_device vsoc_dev;

/*
 * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
 */

struct fd_scoped_permission_node {
	struct fd_scoped_permission permission;
	struct list_head list;
};

struct vsoc_private_data {
	struct fd_scoped_permission_node *fd_scoped_permission_node;
};

static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
static int vsoc_mmap(struct file *, struct vm_area_struct *);
static int vsoc_open(struct inode *, struct file *);
static int vsoc_release(struct inode *, struct file *);
static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
static int
do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
			       struct fd_scoped_permission_node *np,
			       struct fd_scoped_permission_arg __user *arg);
static void
do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
				struct fd_scoped_permission *perm);
static long do_vsoc_describe_region(struct file *,
				    struct vsoc_device_region __user *);
static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);

/**
 * Validate arguments on entry points to the driver.
 */
inline int vsoc_validate_inode(struct inode *inode)
{
	if (iminor(inode) >= vsoc_dev.layout->region_count) {
		dev_err(&vsoc_dev.dev->dev,
			"describe_region: invalid region %d\n", iminor(inode));
		return -ENODEV;
	}
	return 0;
}

inline int vsoc_validate_filep(struct file *filp)
{
	int ret = vsoc_validate_inode(file_inode(filp));

	if (ret)
		return ret;
	if (!filp->private_data) {
		dev_err(&vsoc_dev.dev->dev,
			"No private data on fd, region %d\n",
			iminor(file_inode(filp)));
		return -EBADFD;
	}
	return 0;
}

/* Converts from shared memory offset to virtual address */
static inline void *shm_off_to_virtual_addr(__u32 offset)
{
	return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
}

/* Converts from shared memory offset to physical address */
static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
{
	return vsoc_dev.shm_phys_start + offset;
}

/**
 * Convenience functions to obtain the region from the inode or file.
 * Dangerous to call before validating the inode/file.
 */
static
inline struct vsoc_device_region *vsoc_region_from_inode(struct inode *inode)
{
	return &vsoc_dev.regions[iminor(inode)];
}

static
inline struct vsoc_device_region *vsoc_region_from_filep(struct file *filp)
{
	return vsoc_region_from_inode(file_inode(filp));
}

static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
{
	return r->region_end_offset - r->region_begin_offset;
}

static const struct file_operations vsoc_ops = {
	.owner = THIS_MODULE,
	.open = vsoc_open,
	.mmap = vsoc_mmap,
	.read = vsoc_read,
	.unlocked_ioctl = vsoc_ioctl,
	.compat_ioctl = vsoc_ioctl,
	.write = vsoc_write,
	.llseek = vsoc_lseek,
	.release = vsoc_release,
};

static struct pci_device_id vsoc_id_table[] = {
	{0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0},
};

MODULE_DEVICE_TABLE(pci, vsoc_id_table);

static void vsoc_remove_device(struct pci_dev *pdev);
static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent);

static struct pci_driver vsoc_pci_driver = {
	.name = "vsoc",
	.id_table = vsoc_id_table,
	.probe = vsoc_probe_device,
	.remove = vsoc_remove_device,
};

static int
do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
			       struct fd_scoped_permission_node *np,
			       struct fd_scoped_permission_arg __user *arg)
{
	struct file *managed_filp;
	s32 managed_fd;
	atomic_t *owner_ptr = NULL;
	struct vsoc_device_region *managed_region_p;

	if (copy_from_user(&np->permission,
			   &arg->perm, sizeof(np->permission)) ||
	    copy_from_user(&managed_fd,
			   &arg->managed_region_fd, sizeof(managed_fd))) {
		return -EFAULT;
	}
	managed_filp = fdget(managed_fd).file;
	/* Check that it's a valid fd. */
	if (!managed_filp || vsoc_validate_filep(managed_filp))
		return -EPERM;
	/* EEXIST if the given fd already has a permission. */
	if (((struct vsoc_private_data *)managed_filp->private_data)->
	    fd_scoped_permission_node)
		return -EEXIST;
	managed_region_p = vsoc_region_from_filep(managed_filp);
	/* Check that the provided region is managed by this one */
	if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
		return -EPERM;
	/* The area must be well formed and have non-zero size */
	if (np->permission.begin_offset >= np->permission.end_offset)
		return -EINVAL;
	/* The area must fit in the memory window */
	if (np->permission.end_offset >
	    vsoc_device_region_size(managed_region_p))
		return -ERANGE;
	/* The area must be in the region data section */
	if (np->permission.begin_offset <
	    managed_region_p->offset_of_region_data)
		return -ERANGE;
	/* The area must be page aligned */
	if (!PAGE_ALIGNED(np->permission.begin_offset) ||
	    !PAGE_ALIGNED(np->permission.end_offset))
		return -EINVAL;
	/* Owner offset must be naturally aligned in the window */
	if (np->permission.owner_offset &
	    (sizeof(np->permission.owner_offset) - 1))
		return -EINVAL;
	/* The owner flag must reside in the owner memory */
	if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
	    vsoc_device_region_size(region_p))
		return -ERANGE;
	/* The owner flag must reside in the data section */
	if (np->permission.owner_offset < region_p->offset_of_region_data)
		return -EINVAL;
	/* The owner value must change to claim the memory */
	if (np->permission.owned_value == VSOC_REGION_FREE)
		return -EINVAL;
	owner_ptr =
	    (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
						np->permission.owner_offset);
	/* We've already verified that this is in the shared memory window, so
	 * it should be safe to write to this address.
	 */
	if (atomic_cmpxchg(owner_ptr,
			   VSOC_REGION_FREE,
			   np->permission.owned_value) != VSOC_REGION_FREE) {
		return -EBUSY;
	}
	((struct vsoc_private_data *)managed_filp->private_data)->
	    fd_scoped_permission_node = np;
	/* The file offset needs to be adjusted if the calling
	 * process did any read/write operations on the fd
	 * before creating the permission.
	 */
	if (managed_filp->f_pos) {
		if (managed_filp->f_pos > np->permission.end_offset) {
			/* If the offset is beyond the permission end, set it
			 * to the end.
			 */
			managed_filp->f_pos = np->permission.end_offset;
		} else {
			/* If the offset is within the permission interval
			 * keep it there otherwise reset it to zero.
			 */
			if (managed_filp->f_pos < np->permission.begin_offset) {
				managed_filp->f_pos = 0;
			} else {
				managed_filp->f_pos -=
					np->permission.begin_offset;
			}
		}
	}
	return 0;
}

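/*
 * Illustrative, untested userspace sketch of granting an fd-scoped
 * permission through the ioctl that wraps the function above. All fds and
 * field values below are placeholders:
 *
 *	struct fd_scoped_permission_arg arg = {
 *		.perm = {
 *			.begin_offset = page_size,
 *			.end_offset = 2 * page_size,
 *			.owner_offset = owner_flag_offset,
 *			.owned_value = my_owner_value,
 *		},
 *		.managed_region_fd = managed_fd,
 *	};
 *	if (ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &arg) < 0)
 *		perror("VSOC_CREATE_FD_SCOPED_PERMISSION");
 *
 * The permission is released automatically when managed_fd is closed; see
 * vsoc_release() and do_destroy_fd_scoped_permission_node().
 */
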
static void
do_destroy_fd_scoped_permission_node(struct vsoc_device_region *owner_region_p,
				     struct fd_scoped_permission_node *node)
{
	if (node) {
		do_destroy_fd_scoped_permission(owner_region_p,
						&node->permission);
		mutex_lock(&vsoc_dev.mtx);
		list_del(&node->list);
		mutex_unlock(&vsoc_dev.mtx);
		kfree(node);
	}
}

static void
do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
				struct fd_scoped_permission *perm)
{
	atomic_t *owner_ptr = NULL;
	int prev = 0;

	if (!perm)
		return;
	owner_ptr = (atomic_t *)shm_off_to_virtual_addr
		(owner_region_p->region_begin_offset + perm->owner_offset);
	prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
	if (prev != perm->owned_value)
		dev_err(&vsoc_dev.dev->dev,
			"%x-%x: owner (%s) %x: expected to be %x was %x",
			perm->begin_offset, perm->end_offset,
			owner_region_p->device_name, perm->owner_offset,
			perm->owned_value, prev);
}

static long do_vsoc_describe_region(struct file *filp,
				    struct vsoc_device_region __user *dest)
{
	struct vsoc_device_region *region_p;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	if (copy_to_user(dest, region_p, sizeof(*region_p)))
		return -EFAULT;
	return 0;
}

/**
 * Implements the inner logic of cond_wait. Copies to and from userspace are
 * done in the helper function below.
 */
static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
{
	DEFINE_WAIT(wait);
	u32 region_number = iminor(file_inode(filp));
	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
	struct hrtimer_sleeper timeout, *to = NULL;
	int ret = 0;
	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
	atomic_t *address = NULL;
	ktime_t wake_time;

	/* Ensure that the offset is aligned */
	if (arg->offset & (sizeof(uint32_t) - 1))
		return -EADDRNOTAVAIL;
	/* Ensure that the offset is within shared memory */
	if (((uint64_t)arg->offset) + region_p->region_begin_offset +
	    sizeof(uint32_t) > region_p->region_end_offset)
		return -E2BIG;
	address = shm_off_to_virtual_addr(region_p->region_begin_offset +
					  arg->offset);

	/* Ensure that the type of wait is valid */
	switch (arg->wait_type) {
	case VSOC_WAIT_IF_EQUAL:
		break;
	case VSOC_WAIT_IF_EQUAL_TIMEOUT:
		to = &timeout;
		break;
	default:
		return -EINVAL;
	}

	if (to) {
		/* Copy the user-supplied timespec into the kernel structure.
		 * We do things this way to flatten differences between 32 bit
		 * and 64 bit timespecs.
		 */
		if (arg->wake_time_nsec >= NSEC_PER_SEC)
			return -EINVAL;
		wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);

		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_set_expires_range_ns(&to->timer, wake_time,
					     current->timer_slack_ns);

		hrtimer_init_sleeper(to, current);
	}

	while (1) {
		prepare_to_wait(&data->futex_wait_queue, &wait,
				TASK_INTERRUPTIBLE);
		/*
		 * Check the sentinel value after prepare_to_wait. If the value
		 * changes after this check the writer will call signal,
		 * changing the task state from INTERRUPTIBLE to RUNNING. That
		 * will ensure that schedule() will eventually schedule this
		 * task.
		 */
		if (atomic_read(address) != arg->value) {
			ret = 0;
			break;
		}
		if (to) {
			hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
			if (likely(to->task))
				freezable_schedule();
			hrtimer_cancel(&to->timer);
			if (!to->task) {
				ret = -ETIMEDOUT;
				break;
			}
		} else {
			freezable_schedule();
		}
		/* Count the number of times that we woke up. This is useful
		 * for unit testing.
		 */
		++arg->wakes;
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&data->futex_wait_queue, &wait);
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret;
}

/**
 * Handles the details of copying from/to userspace to ensure that the copies
 * happen on all of the return paths of cond_wait.
 */
static int do_vsoc_cond_wait(struct file *filp,
			     struct vsoc_cond_wait __user *untrusted_in)
{
	struct vsoc_cond_wait arg;
	int rval = 0;

	if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
		return -EFAULT;
	/* wakes is an out parameter. Initialize it to something sensible. */
	arg.wakes = 0;
	rval = handle_vsoc_cond_wait(filp, &arg);
	if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
		return -EFAULT;
	return rval;
}

static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
{
	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
	u32 region_number = iminor(file_inode(filp));
	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;

	/* Ensure that the offset is aligned */
	if (offset & (sizeof(uint32_t) - 1))
		return -EADDRNOTAVAIL;
	/* Ensure that the offset is within shared memory */
	if (((uint64_t)offset) + region_p->region_begin_offset +
	    sizeof(uint32_t) > region_p->region_end_offset)
		return -E2BIG;
	/*
	 * TODO(b/73664181): Use multiple futex wait queues.
	 * We need to wake every sleeper when the condition changes. Typically
	 * only a single thread will be waiting on the condition, but there
	 * are exceptions. The worst case is about 10 threads.
	 */
	wake_up_interruptible_all(&data->futex_wait_queue);
	return 0;
}

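/*
 * Illustrative, untested sketch of the futex-style handshake these two
 * helpers implement for the VSOC_COND_WAIT/VSOC_COND_WAKE ioctls below.
 * region_fd, state_offset and OLD_VALUE are placeholders:
 *
 *	Waiter:
 *		struct vsoc_cond_wait w = {
 *			.offset = state_offset,
 *			.value = OLD_VALUE,
 *			.wait_type = VSOC_WAIT_IF_EQUAL,
 *		};
 *		ioctl(region_fd, VSOC_COND_WAIT, &w);
 *
 *	Waker, after storing the new value at state_offset:
 *		ioctl(region_fd, VSOC_COND_WAKE, state_offset);
 *
 * The wait returns immediately if the word no longer holds .value, so a
 * waker that stores the new value before waking cannot be missed.
 */
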
static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int rv = 0;
	struct vsoc_device_region *region_p;
	u32 reg_num;
	struct vsoc_region_data *reg_data;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	reg_num = iminor(file_inode(filp));
	reg_data = vsoc_dev.regions_data + reg_num;
	switch (cmd) {
	case VSOC_CREATE_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node = NULL;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			/* We can't allocate memory for the permission */
			if (!node)
				return -ENOMEM;
			INIT_LIST_HEAD(&node->list);
			rv = do_create_fd_scoped_permission
				(region_p,
				 node,
				 (struct fd_scoped_permission_arg __user *)arg);
			if (!rv) {
				mutex_lock(&vsoc_dev.mtx);
				list_add(&node->list, &vsoc_dev.permissions);
				mutex_unlock(&vsoc_dev.mtx);
			} else {
				kfree(node);
				return rv;
			}
		}
		break;
	case VSOC_GET_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node =
				((struct vsoc_private_data *)filp->private_data)->
				fd_scoped_permission_node;
			if (!node)
				return -ENOENT;
			if (copy_to_user
			    ((struct fd_scoped_permission __user *)arg,
			     &node->permission, sizeof(node->permission)))
				return -EFAULT;
		}
		break;
	case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
		if (!atomic_xchg(reg_data->outgoing_signalled, 1)) {
			writel(reg_num, vsoc_dev.regs + DOORBELL);
			return 0;
		} else {
			return -EBUSY;
		}
		break;
	case VSOC_SEND_INTERRUPT_TO_HOST:
		writel(reg_num, vsoc_dev.regs + DOORBELL);
		return 0;
	case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
		wait_event_interruptible
			(reg_data->interrupt_wait_queue,
			 (atomic_read(reg_data->incoming_signalled) != 0));
		break;
	case VSOC_DESCRIBE_REGION:
		return do_vsoc_describe_region
			(filp,
			 (struct vsoc_device_region __user *)arg);
	case VSOC_SELF_INTERRUPT:
		atomic_set(reg_data->incoming_signalled, 1);
		wake_up_interruptible(&reg_data->interrupt_wait_queue);
		break;
	case VSOC_COND_WAIT:
		return do_vsoc_cond_wait(filp,
					 (struct vsoc_cond_wait __user *)arg);
	case VSOC_COND_WAKE:
		return do_vsoc_cond_wake(filp, arg);
	default:
		return -EINVAL;
	}
	return 0;
}

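/*
 * Illustrative, untested sketch of the interrupt-signalling ioctls handled
 * above; region_fd is a placeholder for an open region device fd:
 *
 *	Guest -> host, skipping the doorbell if the last one is still pending:
 *		if (ioctl(region_fd, VSOC_MAYBE_SEND_INTERRUPT_TO_HOST) < 0 &&
 *		    errno == EBUSY)
 *			... the host has not consumed the previous interrupt ...
 *
 *	Host -> guest, blocking until the host rings this region's doorbell:
 *		ioctl(region_fd, VSOC_WAIT_FOR_INCOMING_INTERRUPT);
 *
 * Both signalled flags live in shared memory. This driver only ever sets
 * them (it never clears them); clearing is left to whichever side consumes
 * the interrupt.
 */
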
static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
			 loff_t *poffset)
{
	__u32 area_off;
	const void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_to_user(buffer, area_p, len))
		return -EFAULT;
	*poffset += len;
	return len;
}

static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
{
	ssize_t area_len = 0;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, NULL);
	switch (origin) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		if (offset > 0 && offset + filp->f_pos < 0)
			return -EOVERFLOW;
		offset += filp->f_pos;
		break;
	case SEEK_END:
		if (offset > 0 && offset + area_len < 0)
			return -EOVERFLOW;
		offset += area_len;
		break;
	case SEEK_DATA:
		if (offset >= area_len)
			return -EINVAL;
		if (offset < 0)
			offset = 0;
		break;
	case SEEK_HOLE:
		/* Next hole is always the end of the region, unless offset is
		 * beyond that.
		 */
		if (offset < area_len)
			offset = area_len;
		break;
	default:
		return -EINVAL;
	}
	if (offset < 0 || offset > area_len)
		return -EINVAL;
	filp->f_pos = offset;
	return offset;
}

static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
			  size_t len, loff_t *poffset)
{
	__u32 area_off;
	void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_from_user(area_p, buffer, len))
		return -EFAULT;
	*poffset += len;
	return len;
}

static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
{
	struct vsoc_region_data *region_data =
		(struct vsoc_region_data *)region_data_v;
	int reg_num = region_data - vsoc_dev.regions_data;

	if (unlikely(!region_data))
		return IRQ_NONE;
	if (unlikely(reg_num < 0 ||
		     reg_num >= vsoc_dev.layout->region_count)) {
		dev_err(&vsoc_dev.dev->dev,
			"invalid irq @%p reg_num=0x%04x\n",
			region_data, reg_num);
		return IRQ_NONE;
	}
	if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
		dev_err(&vsoc_dev.dev->dev,
			"irq not aligned @%p reg_num=0x%04x\n",
			region_data, reg_num);
		return IRQ_NONE;
	}
	wake_up_interruptible(&region_data->interrupt_wait_queue);
	return IRQ_HANDLED;
}

static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int result;
	int i;
	resource_size_t reg_size;
	dev_t devt;

	vsoc_dev.dev = pdev;
	result = pci_enable_device(pdev);
	if (result) {
		dev_err(&pdev->dev,
			"pci_enable_device failed %s: error %d\n",
			pci_name(pdev), result);
		return result;
	}
	vsoc_dev.enabled_device = true;
	result = pci_request_regions(pdev, "vsoc");
	if (result < 0) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.requested_regions = true;
	/* Set up the control registers in BAR 0 */
	reg_size = pci_resource_len(pdev, REGISTER_BAR);
	if (reg_size > MAX_REGISTER_BAR_LEN)
		vsoc_dev.regs =
		    pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
	else
		vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
	if (!vsoc_dev.regs) {
		dev_err(&pdev->dev,
			"cannot map registers of size %zu\n",
			(size_t)reg_size);
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	/* Map the shared memory in BAR 2 */
	vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
	vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);

	dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
		 &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
	vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
	if (!vsoc_dev.kernel_mapped_shm) {
		dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}

	vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
		vsoc_dev.kernel_mapped_shm;
	dev_info(&pdev->dev, "major_version: %d\n",
		 vsoc_dev.layout->major_version);
	dev_info(&pdev->dev, "minor_version: %d\n",
		 vsoc_dev.layout->minor_version);
	dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
	dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
	if (vsoc_dev.layout->major_version !=
	    CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
		dev_err(&vsoc_dev.dev->dev,
			"driver supports only major_version %d\n",
			CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
				     VSOC_DEV_NAME);
	if (result) {
		dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.major = MAJOR(devt);
	cdev_init(&vsoc_dev.cdev, &vsoc_ops);
	vsoc_dev.cdev.owner = THIS_MODULE;
	result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
	if (result) {
		dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.cdev_added = true;
	vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
	if (IS_ERR(vsoc_dev.class)) {
		dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
		vsoc_remove_device(pdev);
		return PTR_ERR(vsoc_dev.class);
	}
	vsoc_dev.class_added = true;
	vsoc_dev.regions = (struct vsoc_device_region __force *)
		((void *)vsoc_dev.layout +
		 vsoc_dev.layout->vsoc_region_desc_offset);
	vsoc_dev.msix_entries =
		kcalloc(vsoc_dev.layout->region_count,
			sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
	if (!vsoc_dev.msix_entries) {
		dev_err(&vsoc_dev.dev->dev,
			"unable to allocate msix_entries\n");
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	vsoc_dev.regions_data =
		kcalloc(vsoc_dev.layout->region_count,
			sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
	if (!vsoc_dev.regions_data) {
		dev_err(&vsoc_dev.dev->dev,
			"unable to allocate regions' data\n");
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	for (i = 0; i < vsoc_dev.layout->region_count; ++i)
		vsoc_dev.msix_entries[i].entry = i;

	result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
				       vsoc_dev.layout->region_count);
	if (result) {
		dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}

	/* Check that all regions are well formed */
	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
		const struct vsoc_device_region *region = vsoc_dev.regions + i;

		if (!PAGE_ALIGNED(region->region_begin_offset) ||
		    !PAGE_ALIGNED(region->region_end_offset)) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d not aligned (%x:%x)", i,
				region->region_begin_offset,
				region->region_end_offset);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
		if (region->region_begin_offset >= region->region_end_offset ||
		    region->region_end_offset > vsoc_dev.shm_size) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d offsets are wrong: %x %x %zx",
				i, region->region_begin_offset,
				region->region_end_offset, vsoc_dev.shm_size);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
		if (region->managed_by >= vsoc_dev.layout->region_count) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d has invalid owner: %u",
				i, region->managed_by);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
	}
	vsoc_dev.msix_enabled = true;
	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
		const struct vsoc_device_region *region = vsoc_dev.regions + i;
		size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
		const struct vsoc_signal_table_layout *h_to_g_signal_table =
			&region->host_to_guest_signal_table;
		const struct vsoc_signal_table_layout *g_to_h_signal_table =
			&region->guest_to_host_signal_table;

		vsoc_dev.regions_data[i].name[name_sz] = '\0';
		memcpy(vsoc_dev.regions_data[i].name, region->device_name,
		       name_sz);
		dev_info(&pdev->dev, "region %d name=%s\n",
			 i, vsoc_dev.regions_data[i].name);
		init_waitqueue_head
			(&vsoc_dev.regions_data[i].interrupt_wait_queue);
		init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
		vsoc_dev.regions_data[i].incoming_signalled =
			shm_off_to_virtual_addr(region->region_begin_offset) +
			h_to_g_signal_table->interrupt_signalled_offset;
		vsoc_dev.regions_data[i].outgoing_signalled =
			shm_off_to_virtual_addr(region->region_begin_offset) +
			g_to_h_signal_table->interrupt_signalled_offset;
		result = request_irq(vsoc_dev.msix_entries[i].vector,
				     vsoc_interrupt, 0,
				     vsoc_dev.regions_data[i].name,
				     vsoc_dev.regions_data + i);
		if (result) {
			dev_info(&pdev->dev,
				 "request_irq failed irq=%d vector=%d\n",
				 i, vsoc_dev.msix_entries[i].vector);
			vsoc_remove_device(pdev);
			return -ENOSPC;
		}
		vsoc_dev.regions_data[i].irq_requested = true;
		/* device_create() reports failure as ERR_PTR, not NULL. */
		if (IS_ERR(device_create(vsoc_dev.class, NULL,
					 MKDEV(vsoc_dev.major, i),
					 NULL,
					 vsoc_dev.regions_data[i].name))) {
			dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
			vsoc_remove_device(pdev);
			return -EBUSY;
		}
		vsoc_dev.regions_data[i].device_created = true;
	}
	return 0;
}

/*
 * This should undo all of the allocations in the probe function in reverse
 * order.
 *
 * Notes:
 *
 * The device may have been partially initialized, so double check
 * that the allocations happened.
 *
 * This function may be called multiple times, so mark resources as freed
 * as they are deallocated.
 */
static void vsoc_remove_device(struct pci_dev *pdev)
{
	int i;

	/*
	 * pdev is the first thing to be set on probe and the last thing
	 * to be cleared here. If it's NULL then there is no cleanup.
	 */
	if (!pdev || !vsoc_dev.dev)
		return;
	dev_info(&pdev->dev, "remove_device\n");
	if (vsoc_dev.regions_data) {
		for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
			if (vsoc_dev.regions_data[i].device_created) {
				device_destroy(vsoc_dev.class,
					       MKDEV(vsoc_dev.major, i));
				vsoc_dev.regions_data[i].device_created = false;
			}
			/* Pass the dev_id that was handed to request_irq(). */
			if (vsoc_dev.regions_data[i].irq_requested)
				free_irq(vsoc_dev.msix_entries[i].vector,
					 vsoc_dev.regions_data + i);
			vsoc_dev.regions_data[i].irq_requested = false;
		}
		kfree(vsoc_dev.regions_data);
		vsoc_dev.regions_data = NULL;
	}
	if (vsoc_dev.msix_enabled) {
		pci_disable_msix(pdev);
		vsoc_dev.msix_enabled = false;
	}
	kfree(vsoc_dev.msix_entries);
	vsoc_dev.msix_entries = NULL;
	vsoc_dev.regions = NULL;
	if (vsoc_dev.class_added) {
		class_destroy(vsoc_dev.class);
		vsoc_dev.class_added = false;
	}
	if (vsoc_dev.cdev_added) {
		cdev_del(&vsoc_dev.cdev);
		vsoc_dev.cdev_added = false;
	}
	if (vsoc_dev.major && vsoc_dev.layout) {
		unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
					 vsoc_dev.layout->region_count);
		vsoc_dev.major = 0;
	}
	vsoc_dev.layout = NULL;
	if (vsoc_dev.kernel_mapped_shm) {
		pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
		vsoc_dev.kernel_mapped_shm = NULL;
	}
	if (vsoc_dev.regs) {
		pci_iounmap(pdev, vsoc_dev.regs);
		vsoc_dev.regs = NULL;
	}
	if (vsoc_dev.requested_regions) {
		pci_release_regions(pdev);
		vsoc_dev.requested_regions = false;
	}
	if (vsoc_dev.enabled_device) {
		pci_disable_device(pdev);
		vsoc_dev.enabled_device = false;
	}
	/* Do this last: it indicates that the device is not initialized. */
	vsoc_dev.dev = NULL;
}

static void __exit vsoc_cleanup_module(void)
{
	vsoc_remove_device(vsoc_dev.dev);
	pci_unregister_driver(&vsoc_pci_driver);
}

static int __init vsoc_init_module(void)
{
	int err = -ENOMEM;

	INIT_LIST_HEAD(&vsoc_dev.permissions);
	mutex_init(&vsoc_dev.mtx);

	err = pci_register_driver(&vsoc_pci_driver);
	if (err < 0)
		return err;
	return 0;
}

static int vsoc_open(struct inode *inode, struct file *filp)
{
	/* Can't use vsoc_validate_filep because filp is still incomplete */
	int ret = vsoc_validate_inode(inode);

	if (ret)
		return ret;
	filp->private_data =
		kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;
	return 0;
}

static int vsoc_release(struct inode *inode, struct file *filp)
{
	struct vsoc_private_data *private_data = NULL;
	struct fd_scoped_permission_node *node = NULL;
	struct vsoc_device_region *owner_region_p = NULL;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	private_data = (struct vsoc_private_data *)filp->private_data;
	if (!private_data)
		return 0;
	node = private_data->fd_scoped_permission_node;
	if (node) {
		owner_region_p = vsoc_region_from_inode(inode);
		if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
			owner_region_p =
			    &vsoc_dev.regions[owner_region_p->managed_by];
		}
		do_destroy_fd_scoped_permission_node(owner_region_p, node);
		private_data->fd_scoped_permission_node = NULL;
	}
	kfree(private_data);
	filp->private_data = NULL;
	return 0;
}

/*
 * Returns the device-relative offset and length of the area specified by the
 * fd-scoped permission. If there is no fd-scoped permission set, a default
 * permission covering the entire region is assumed, unless the region is
 * owned by another one, in which case the default is a permission with zero
 * size.
 */
static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
{
	__u32 off = 0;
	ssize_t length = 0;
	struct vsoc_device_region *region_p;
	struct fd_scoped_permission_node *node;

	region_p = vsoc_region_from_filep(filp);
	off = region_p->region_begin_offset;
	node = ((struct vsoc_private_data *)filp->private_data)->
		fd_scoped_permission_node;
	if (node) {
		off += node->permission.begin_offset;
		length = node->permission.end_offset -
			node->permission.begin_offset;
	} else if (region_p->managed_by == VSOC_REGION_WHOLE) {
		/* No permission set and the region is not owned by another,
		 * default to full region access.
		 */
		length = vsoc_device_region_size(region_p);
	} else {
		/* Return zero length; access is denied. */
		length = 0;
	}
	if (area_offset)
		*area_offset = off;
	return length;
}

static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	__u32 area_off;
	phys_addr_t mem_off;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	/* Add the requested offset */
	area_off += (vma->vm_pgoff << PAGE_SHIFT);
	area_len -= (vma->vm_pgoff << PAGE_SHIFT);
	if (area_len < len)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	mem_off = shm_off_to_phys_addr(area_off);
	if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
			       len, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

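/*
 * Illustrative, untested userspace sketch of mapping a region. The device
 * path is a placeholder; the real node name comes from the region's
 * device_name via device_create() above:
 *
 *	int fd = open("/dev/some_vsoc_region", O_RDWR);
 *	off_t sz = lseek(fd, 0, SEEK_END);
 *	void *base = mmap(NULL, (size_t)sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * lseek(SEEK_END) reports the size of the area this fd may touch: the whole
 * region, or only the fd-scoped permission window if one is attached.
 */
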
module_init(vsoc_init_module);
module_exit(vsoc_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Hartman <ghartman@google.com>");
MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
MODULE_VERSION("1.0");