ptr_ring.c

// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
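
/*
 * Userspace stand-ins for the kernel definitions that ptr_ring.h expects.
 * The cache-line size is hard-coded to 64 bytes, which matches typical
 * x86-64 parts; adjust it when profiling on hardware with a different
 * line size.
 */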
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX
#define BUG_ON(x) assert(!(x)) /* BUG_ON() must trip when the condition is true */
typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1
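
/*
 * Minimal kmalloc()/kzalloc()/kcalloc() emulation on top of malloc.
 * Allocations are cache-line aligned and only the __GFP_ZERO flag is
 * honoured; all other gfp flags are ignored.
 */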
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);

	if (!p)
		return p;
	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

#define kvmalloc_array kmalloc_array
#define kvfree kfree
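
/*
 * pthread spinlocks stand in for kernel spinlocks.  The _bh/_irq/_irqsave
 * variants all collapse to plain lock/unlock because userspace has no
 * softirqs or interrupts to mask.
 */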
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);

	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);

	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);

	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}
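
/*
 * With the shims above in place, pull in the real kernel ptr_ring
 * implementation and lay the ring out on its own cache line.
 * headcnt/tailcnt track how many buffers the test has queued and how many
 * it has pretended to complete (see the get_buf comment below).
 */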
#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);

	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */
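
/*
 * add_inbuf() maps the harness' "add buffer" callback onto
 * __ptr_ring_produce(), which returns 0 on success and a negative errno
 * (-ENOSPC) when the ring is full; headcnt counts successfully queued
 * buffers.
 */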
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will succeed again; fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}
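
/*
 * Call/kick notifications are not modelled for ptr_ring, so none of the
 * hooks below are ever expected to run; hitting one means the harness was
 * invoked in a mode this backend does not support.
 */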
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
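
/*
 * The host side consumes buffers straight off the ring with
 * __ptr_ring_consume(); avail_empty() reports whether anything is left to
 * consume.
 */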
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return __ptr_ring_empty(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
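
/*
 * Illustrative sketch only: a minimal single-threaded smoke test that
 * drives the callbacks above directly, without the threaded harness in
 * main.c.  The ring_size and param globals normally come from main.c; the
 * local definitions, their types, and the PTR_RING_SMOKE_TEST guard below
 * are assumptions made for this standalone example and must be matched to
 * the declarations in main.h before building.
 */
#ifdef PTR_RING_SMOKE_TEST
int ring_size = 256;	/* assumed type/value; match main.h */
int param;		/* 0: keep ptr_ring's default batch size */

int main(void)
{
	unsigned len;
	void *bufp;

	alloc_ring();
	/* guest queues one buffer, host consumes it, guest reaps the (faked) completion */
	assert(!add_inbuf(8, "Buffer\n", NULL));
	assert(use_buf(&len, &bufp));
	assert(get_buf(&len, &bufp));
	return 0;
}
#endif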