/*
 * VFIO generic eventfd code for IRQFD support.
 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/vfio.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>
#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "IRQFD support for VFIO bus drivers"
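
/*
 * A single-threaded workqueue defers virqfd teardown out of the eventfd
 * wakeup path, while virqfd_lock serializes updates to the caller-owned
 * struct virqfd pointer so a virqfd is only ever queued for release once.
 */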
static struct workqueue_struct *vfio_irqfd_cleanup_wq;
static DEFINE_SPINLOCK(virqfd_lock);

static int __init vfio_virqfd_init(void)
{
        vfio_irqfd_cleanup_wq =
                create_singlethread_workqueue("vfio-irqfd-cleanup");
        if (!vfio_irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

static void __exit vfio_virqfd_exit(void)
{
        destroy_workqueue(vfio_irqfd_cleanup_wq);
}
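
/* Queue a virqfd for release on the cleanup workqueue; called with
 * virqfd_lock held. */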
static void virqfd_deactivate(struct virqfd *virqfd)
{
        queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}
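
/*
 * Wait queue callback, invoked from the eventfd wakeup path in atomic
 * context.  POLLIN runs the fast-path handler and may defer the thread
 * callback to a work item; POLLHUP tears the virqfd down.
 */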
static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
        unsigned long flags = (unsigned long)key;

        if (flags & POLLIN) {
                /* An event has been signaled, call function */
                if ((!virqfd->handler ||
                     virqfd->handler(virqfd->opaque, virqfd->data)) &&
                    virqfd->thread)
                        schedule_work(&virqfd->inject);
        }

        if (flags & POLLHUP) {
                unsigned long flags;
                spin_lock_irqsave(&virqfd_lock, flags);

                /*
                 * The eventfd is closing; if the virqfd has not yet been
                 * queued for release, as determined by testing whether the
                 * virqfd pointer to it is still valid, queue it now.  As
                 * with kvm irqfds, we know we won't race against the virqfd
                 * going away because we hold the lock to get here.
                 */
                if (*(virqfd->pvirqfd) == virqfd) {
                        *(virqfd->pvirqfd) = NULL;
                        virqfd_deactivate(virqfd);
                }

                spin_unlock_irqrestore(&virqfd_lock, flags);
        }

        return 0;
}
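
/* Poll table callback: hook our wait entry into the eventfd's waitqueue. */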
static void virqfd_ptable_queue_proc(struct file *file,
                                     wait_queue_head_t *wqh, poll_table *pt)
{
        struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
        add_wait_queue(wqh, &virqfd->wait);
}
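
/*
 * Workqueue handler: detach from the eventfd's wait queue, flush any
 * pending inject work, then drop the eventfd reference and free the virqfd.
 */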
static void virqfd_shutdown(struct work_struct *work)
{
        struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
        u64 cnt;

        eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
        flush_work(&virqfd->inject);
        eventfd_ctx_put(virqfd->eventfd);

        kfree(virqfd);
}
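
/* Process-context work item that runs the caller's thread callback. */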
static void virqfd_inject(struct work_struct *work)
{
        struct virqfd *virqfd = container_of(work, struct virqfd, inject);
        if (virqfd->thread)
                virqfd->thread(virqfd->opaque, virqfd->data);
}
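
/*
 * vfio_virqfd_enable - link an eventfd to handler/thread callbacks
 * @opaque:  context passed back to both callbacks
 * @handler: optional fast-path callback, runs in wakeup (atomic) context;
 *           a non-zero return also schedules @thread
 * @thread:  optional slow-path callback, runs from a work item in
 *           process context
 * @data:    second argument passed to both callbacks
 * @pvirqfd: caller-owned slot tracking the active virqfd
 * @fd:      eventfd file descriptor to listen on
 */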
int vfio_virqfd_enable(void *opaque,
                       int (*handler)(void *, void *),
                       void (*thread)(void *, void *),
                       void *data, struct virqfd **pvirqfd, int fd)
{
        struct fd irqfd;
        struct eventfd_ctx *ctx;
        struct virqfd *virqfd;
        int ret = 0;
        unsigned int events;

        virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
        if (!virqfd)
                return -ENOMEM;

        virqfd->pvirqfd = pvirqfd;
        virqfd->opaque = opaque;
        virqfd->handler = handler;
        virqfd->thread = thread;
        virqfd->data = data;

        INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
        INIT_WORK(&virqfd->inject, virqfd_inject);
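
        /* Resolve the file descriptor to an eventfd context. */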
        irqfd = fdget(fd);
        if (!irqfd.file) {
                ret = -EBADF;
                goto err_fd;
        }

        ctx = eventfd_ctx_fileget(irqfd.file);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto err_ctx;
        }

        virqfd->eventfd = ctx;

        /*
         * virqfds can be released by closing the eventfd or directly
         * through ioctl.  These are both done through a workqueue, so
         * we update the pointer to the virqfd under lock to avoid
         * pushing multiple jobs to release the same virqfd.
         */
        spin_lock_irq(&virqfd_lock);

        if (*pvirqfd) {
                spin_unlock_irq(&virqfd_lock);
                ret = -EBUSY;
                goto err_busy;
        }
        *pvirqfd = virqfd;

        spin_unlock_irq(&virqfd_lock);

        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
        init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

        events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered and trigger it as if we didn't miss it.
         */
        if (events & POLLIN) {
                if ((!handler || handler(opaque, data)) && thread)
                        schedule_work(&virqfd->inject);
        }

        /*
         * Do not drop the file until the irqfd is fully initialized,
         * otherwise we might race against the POLLHUP.
         */
        fdput(irqfd);

        return 0;
err_busy:
        eventfd_ctx_put(ctx);
err_ctx:
        fdput(irqfd);
err_fd:
        kfree(virqfd);
        return ret;
}
EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
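
/*
 * Tear down the virqfd tracked in *@pvirqfd, if any, and wait for all
 * outstanding shutdown work to complete before returning.
 */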
void vfio_virqfd_disable(struct virqfd **pvirqfd)
{
        unsigned long flags;

        spin_lock_irqsave(&virqfd_lock, flags);

        if (*pvirqfd) {
                virqfd_deactivate(*pvirqfd);
                *pvirqfd = NULL;
        }

        spin_unlock_irqrestore(&virqfd_lock, flags);

        /*
         * Block until we know all outstanding shutdown jobs have completed.
         * Even if we don't queue the job, flush the wq to be sure it's
         * been released.
         */
        flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
module_init(vfio_virqfd_init);
module_exit(vfio_virqfd_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
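
/*
 * Usage sketch (illustrative only, not part of the upstream file): a VFIO
 * bus driver might arm an irqfd roughly as below, where my_device,
 * my_check_pending and my_unmask_thread are hypothetical names.
 *
 *	static int my_check_pending(void *opaque, void *data)
 *	{
 *		struct my_device *mdev = opaque;
 *
 *		// Fast path, atomic context: returning non-zero asks for
 *		// the thread callback to be scheduled as well.
 *		return my_device_irq_pending(mdev);
 *	}
 *
 *	static void my_unmask_thread(void *opaque, void *data)
 *	{
 *		struct my_device *mdev = opaque;
 *
 *		// Slow path, process context: safe to sleep here.
 *		my_device_unmask_irq(mdev);
 *	}
 *
 *	ret = vfio_virqfd_enable(mdev, my_check_pending, my_unmask_thread,
 *				 NULL, &mdev->unmask_virqfd, fd);
 *	...
 *	vfio_virqfd_disable(&mdev->unmask_virqfd);
 */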