/*
 * kfifo_buf.c - kfifo-backed buffer implementation for the IIO subsystem
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/sched.h>
#include <linux/poll.h>

/* Per-buffer state: a plain kfifo wrapped behind the generic iio_buffer */
struct iio_kfifo {
	struct iio_buffer buffer;
	struct kfifo kf;
	struct mutex user_lock;
	int update_needed;
};

#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
				int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
			     bytes_per_datum, GFP_KERNEL);
}

static int iio_request_update_kfifo(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_kfifo *buf = iio_to_kfifo(r);

	mutex_lock(&buf->user_lock);
	if (buf->update_needed) {
		/* Size parameters changed (or first enable): reallocate */
		kfifo_free(&buf->kf);
		ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
					   buf->buffer.length);
		if (ret >= 0)
			buf->update_needed = false;
	} else {
		/* Parameters unchanged: just discard any stale samples */
		kfifo_reset_out(&buf->kf);
	}
	mutex_unlock(&buf->user_lock);

	return ret;
}
static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
	kf->update_needed = true;
	return 0;
}

static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		iio_mark_update_needed_kfifo(r);
	}
	return 0;
}

static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	if (r->length != length) {
		r->length = length;
		iio_mark_update_needed_kfifo(r);
	}
	return 0;
}
static int iio_store_to_kfifo(struct iio_buffer *r,
			      const void *data)
{
	int ret;
	struct iio_kfifo *kf = iio_to_kfifo(r);

	/* Push one sample; kfifo_in() returns the number of elements stored */
	ret = kfifo_in(&kf->kf, data, 1);
	if (ret != 1)
		return -EBUSY;
	return 0;
}

static int iio_read_first_n_kfifo(struct iio_buffer *r,
				  size_t n, char __user *buf)
{
	int ret, copied;
	struct iio_kfifo *kf = iio_to_kfifo(r);

	if (mutex_lock_interruptible(&kf->user_lock))
		return -ERESTARTSYS;

	/* Refuse reads smaller than a single sample */
	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
		ret = -EINVAL;
	else
		ret = kfifo_to_user(&kf->kf, buf, n, &copied);

	mutex_unlock(&kf->user_lock);
	if (ret < 0)
		return ret;

	return copied;
}
static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
	size_t samples;

	mutex_lock(&kf->user_lock);
	samples = kfifo_len(&kf->kf);
	mutex_unlock(&kf->user_lock);

	return samples;
}

static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
	struct iio_kfifo *kf = iio_to_kfifo(buffer);

	mutex_destroy(&kf->user_lock);
	kfifo_free(&kf->kf);
	kfree(kf);
}

/*
 * Ops table handed to the IIO core; these callbacks implement the generic
 * iio_buffer interface on top of a plain kfifo.
 */
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
	.store_to = &iio_store_to_kfifo,
	.read_first_n = &iio_read_first_n_kfifo,
	.data_available = iio_kfifo_buf_data_available,
	.request_update = &iio_request_update_kfifo,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
	.set_length = &iio_set_length_kfifo,
	.release = &iio_kfifo_buffer_release,

	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};
struct iio_buffer *iio_kfifo_allocate(void)
{
	struct iio_kfifo *kf;

	kf = kzalloc(sizeof(*kf), GFP_KERNEL);
	if (!kf)
		return NULL;

	kf->update_needed = true;
	iio_buffer_init(&kf->buffer);
	kf->buffer.access = &kfifo_access_funcs;
	kf->buffer.length = 2;
	mutex_init(&kf->user_lock);

	return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);

void iio_kfifo_free(struct iio_buffer *r)
{
	iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
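/*
 * Illustrative sketch (not part of the original file): how a driver might
 * pair iio_kfifo_allocate() and iio_kfifo_free() by hand. The "foo" driver
 * names are hypothetical, error handling is trimmed, and it assumes
 * iio_device_attach_buffer() from the IIO core is available.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct iio_buffer *buffer;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		buffer = iio_kfifo_allocate();
 *		if (!buffer)
 *			return -ENOMEM;
 *		iio_device_attach_buffer(indio_dev, buffer);
 *		platform_set_drvdata(pdev, indio_dev);
 *
 *		return iio_device_register(indio_dev);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 *
 *		iio_device_unregister(indio_dev);
 *		iio_kfifo_free(indio_dev->buffer);	// drop our reference
 *		return 0;
 *	}
 */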
static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	iio_kfifo_free(*(struct iio_buffer **)res);
}

static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
{
	struct iio_buffer **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
/**
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
 * @dev: Device to allocate kfifo buffer for
 *
 * The buffer is automatically freed when @dev is unbound from its driver.
 *
 * RETURNS:
 * Pointer to allocated iio_buffer on success, NULL on failure.
 */
struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
{
	struct iio_buffer **ptr, *r;

	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	r = iio_kfifo_allocate();
	if (r) {
		*ptr = r;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return r;
}
EXPORT_SYMBOL(devm_iio_kfifo_allocate);
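/*
 * Illustrative sketch (not part of the original file): typical devres usage.
 * The "foo" probe callback is hypothetical; with the managed variant there is
 * no explicit free in the remove path, since devres releases the buffer when
 * the device is unbound.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev = ...;	// allocated earlier in probe
 *		struct iio_buffer *buffer;
 *
 *		buffer = devm_iio_kfifo_allocate(&pdev->dev);
 *		if (!buffer)
 *			return -ENOMEM;
 *		iio_device_attach_buffer(indio_dev, buffer);
 *
 *		return iio_device_register(indio_dev);
 *	}
 */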
/**
 * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
 * @dev: Device the buffer belongs to
 * @r: The buffer associated with the device
 */
void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
{
	WARN_ON(devres_release(dev, devm_iio_kfifo_release,
			       devm_iio_kfifo_match, r));
}
EXPORT_SYMBOL(devm_iio_kfifo_free);
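/*
 * Illustrative sketch (not part of the original file): an explicit call to
 * devm_iio_kfifo_free() is only needed when the buffer must go away before
 * the driver is unbound; devres handles the normal teardown. The
 * foo_teardown() helper and its context are hypothetical.
 *
 *	static void foo_teardown(struct device *dev, struct iio_dev *indio_dev)
 *	{
 *		iio_device_unregister(indio_dev);
 *		devm_iio_kfifo_free(dev, indio_dev->buffer);
 *	}
 */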
MODULE_LICENSE("GPL");