  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_VIRTIO_RING_H
  3. #define _LINUX_VIRTIO_RING_H
  4. #include <asm/barrier.h>
  5. #include <linux/irqreturn.h>
  6. #include <uapi/linux/virtio_ring.h>
  7. /*
  8. * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
  9. * they're not on an SMP host system, so they need to assume real
  10. * barriers. Non-SMP virtio hosts could skip the barriers, but does
  11. * anyone care?
  12. *
  13. * For virtio_pci on SMP, we don't need to order with respect to MMIO
  14. * accesses through relaxed memory I/O windows, so virt_mb() et al are
  15. * sufficient.
  16. *
  17. * For using virtio to talk to real devices (eg. other heterogeneous
  18. * CPUs) we do need real barriers. In theory, we could be using both
  19. * kinds of virtio, so it's a runtime decision, and the branch is
  20. * actually quite cheap.
  21. */
  22. static inline void virtio_mb(bool weak_barriers)
  23. {
  24. if (weak_barriers)
  25. virt_mb();
  26. else
  27. mb();
  28. }
  29. static inline void virtio_rmb(bool weak_barriers)
  30. {
  31. if (weak_barriers)
  32. virt_rmb();
  33. else
  34. dma_rmb();
  35. }
  36. static inline void virtio_wmb(bool weak_barriers)
  37. {
  38. if (weak_barriers)
  39. virt_wmb();
  40. else
  41. dma_wmb();
  42. }
  43. static inline void virtio_store_mb(bool weak_barriers,
  44. __virtio16 *p, __virtio16 v)
  45. {
  46. if (weak_barriers) {
  47. virt_store_mb(*p, v);
  48. } else {
  49. WRITE_ONCE(*p, v);
  50. mb();
  51. }
  52. }
struct virtio_device;
struct virtqueue;

/*
 * vring_create_virtqueue - create a virtqueue and allocate the
 * descriptor ring for it.
 *
 * If @may_reduce_num is set, this may allocate a smaller ring than the
 * requested @num entries (e.g. under memory pressure — TODO confirm the
 * exact conditions with the implementation); the caller should then
 * query virtqueue_get_vring_size() to learn the actual ring size.
 *
 * @notify is invoked to kick the other side; @callback runs when used
 * buffers become available.  @name is used for diagnostics.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
/*
 * __vring_new_virtqueue - create a virtqueue with a custom layout.
 *
 * The caller supplies an already set-up struct vring (by value) instead
 * of having the ring allocated here; no ring memory is allocated or
 * freed by this variant.
 */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);
/*
 * vring_new_virtqueue - create a virtqueue with the standard ring
 * layout, but on a caller-allocated ring.
 *
 * @pages holds the caller-provided ring memory; ownership of that
 * memory stays with the caller (presumably the caller also frees it —
 * verify against vring_del_virtqueue()).
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
/*
 * vring_del_virtqueue - destroy a virtqueue.
 *
 * If the queue was created with vring_create_virtqueue(), this also
 * frees the ring it allocated; caller-provided rings are not freed.
 */
void vring_del_virtqueue(struct virtqueue *vq);
/*
 * Filter out transport-specific feature bits, so the driver core only
 * sees device-class features on @vdev.
 */
void vring_transport_features(struct virtio_device *vdev);
/* IRQ handler for a vring; @_vq is the struct virtqueue the IRQ is for. */
irqreturn_t vring_interrupt(int irq, void *_vq);
  102. #endif /* _LINUX_VIRTIO_RING_H */