  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /*
  3. * Linux host-side vring helpers; for when the kernel needs to access
  4. * someone else's vring.
  5. *
  6. * Copyright IBM Corporation, 2013.
  7. * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
  8. *
  9. * Written by: Rusty Russell <rusty@rustcorp.com.au>
  10. */
  11. #ifndef _LINUX_VRINGH_H
  12. #define _LINUX_VRINGH_H
  13. #include <uapi/linux/virtio_ring.h>
  14. #include <linux/virtio_byteorder.h>
  15. #include <linux/uio.h>
  16. #include <linux/slab.h>
  17. #include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};
/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};
/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	/* Inclusive [start, end_incl] guest-address range this entry covers. */
	u64 start, end_incl;
	/* Offset to add to a guest address to reach the host mapping. */
	u64 offset;
};
/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};
/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};
/* Flag on max_num to indicate we're kmalloced (so cleanup must kfree). */
#define VRINGH_IOV_ALLOCATED 0x8000000

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used);
  90. static inline void vringh_iov_init(struct vringh_iov *iov,
  91. struct iovec *iovec, unsigned num)
  92. {
  93. iov->used = iov->i = 0;
  94. iov->consumed = 0;
  95. iov->max_num = num;
  96. iov->iov = iovec;
  97. }
  98. static inline void vringh_iov_reset(struct vringh_iov *iov)
  99. {
  100. iov->iov[iov->i].iov_len += iov->consumed;
  101. iov->iov[iov->i].iov_base -= iov->consumed;
  102. iov->consumed = 0;
  103. iov->i = 0;
  104. }
  105. static inline void vringh_iov_cleanup(struct vringh_iov *iov)
  106. {
  107. if (iov->max_num & VRINGH_IOV_ALLOCATED)
  108. kfree(iov->iov);
  109. iov->max_num = iov->used = iov->i = iov->consumed = 0;
  110. iov->iov = NULL;
  111. }
/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);
  141. static inline void vringh_kiov_init(struct vringh_kiov *kiov,
  142. struct kvec *kvec, unsigned num)
  143. {
  144. kiov->used = kiov->i = 0;
  145. kiov->consumed = 0;
  146. kiov->max_num = num;
  147. kiov->iov = kvec;
  148. }
  149. static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
  150. {
  151. kiov->iov[kiov->i].iov_len += kiov->consumed;
  152. kiov->iov[kiov->i].iov_base -= kiov->consumed;
  153. kiov->consumed = 0;
  154. kiov->i = 0;
  155. }
  156. static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
  157. {
  158. if (kiov->max_num & VRINGH_IOV_ALLOCATED)
  159. kfree(kiov->iov);
  160. kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
  161. kiov->iov = NULL;
  162. }
/* Convert a descriptor into kvecs. */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

/* Copy bytes from readable kvecs, consuming them. */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);

/* Copy bytes into writable kvecs, consuming them. */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);

/* Mark a descriptor as used. */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

/* Do we need to notify the other side? */
int vringh_need_notify_kern(struct vringh *vrh);
  176. /* Notify the guest about buffers added to the used ring */
  177. static inline void vringh_notify(struct vringh *vrh)
  178. {
  179. if (vrh->notify)
  180. vrh->notify(vrh);
  181. }
  182. static inline bool vringh_is_little_endian(const struct vringh *vrh)
  183. {
  184. return vrh->little_endian ||
  185. virtio_legacy_is_little_endian();
  186. }
  187. static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
  188. {
  189. return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
  190. }
  191. static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
  192. {
  193. return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
  194. }
  195. static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
  196. {
  197. return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
  198. }
  199. static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
  200. {
  201. return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
  202. }
  203. static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
  204. {
  205. return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
  206. }
  207. static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
  208. {
  209. return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
  210. }
  211. #endif /* _LINUX_VRINGH_H */