scif_rb.c
  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2014 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * Intel SCIF driver.
  16. *
  17. */
  18. #include <linux/circ_buf.h>
  19. #include <linux/types.h>
  20. #include <linux/io.h>
  21. #include <linux/errno.h>
  22. #include "scif_rb.h"
  23. #define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
  24. #define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
  25. /**
  26. * scif_rb_init - Initializes the ring buffer
  27. * @rb: ring buffer
  28. * @read_ptr: A pointer to the read offset
  29. * @write_ptr: A pointer to the write offset
  30. * @rb_base: A pointer to the base of the ring buffer
  31. * @size: The size of the ring buffer in powers of two
  32. */
  33. void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
  34. void *rb_base, u8 size)
  35. {
  36. rb->rb_base = rb_base;
  37. rb->size = (1 << size);
  38. rb->read_ptr = read_ptr;
  39. rb->write_ptr = write_ptr;
  40. rb->current_read_offset = *read_ptr;
  41. rb->current_write_offset = *write_ptr;
  42. }
  43. /* Copies a message to the ring buffer -- handles the wrap around case */
  44. static void memcpy_torb(struct scif_rb *rb, void *header,
  45. void *msg, u32 size)
  46. {
  47. u32 size1, size2;
  48. if (header + size >= rb->rb_base + rb->size) {
  49. /* Need to call two copies if it wraps around */
  50. size1 = (u32)(rb->rb_base + rb->size - header);
  51. size2 = size - size1;
  52. memcpy_toio((void __iomem __force *)header, msg, size1);
  53. memcpy_toio((void __iomem __force *)rb->rb_base,
  54. msg + size1, size2);
  55. } else {
  56. memcpy_toio((void __iomem __force *)header, msg, size);
  57. }
  58. }
  59. /* Copies a message from the ring buffer -- handles the wrap around case */
  60. static void memcpy_fromrb(struct scif_rb *rb, void *header,
  61. void *msg, u32 size)
  62. {
  63. u32 size1, size2;
  64. if (header + size >= rb->rb_base + rb->size) {
  65. /* Need to call two copies if it wraps around */
  66. size1 = (u32)(rb->rb_base + rb->size - header);
  67. size2 = size - size1;
  68. memcpy_fromio(msg, (void __iomem __force *)header, size1);
  69. memcpy_fromio(msg + size1,
  70. (void __iomem __force *)rb->rb_base, size2);
  71. } else {
  72. memcpy_fromio(msg, (void __iomem __force *)header, size);
  73. }
  74. }
  75. /**
  76. * scif_rb_space - Query space available for writing to the RB
  77. * @rb: ring buffer
  78. *
  79. * Return: size available for writing to RB in bytes.
  80. */
  81. u32 scif_rb_space(struct scif_rb *rb)
  82. {
  83. rb->current_read_offset = *rb->read_ptr;
  84. /*
  85. * Update from the HW read pointer only once the peer has exposed the
  86. * new empty slot. This barrier is paired with the memory barrier
  87. * scif_rb_update_read_ptr()
  88. */
  89. mb();
  90. return scif_rb_ring_space(rb->current_write_offset,
  91. rb->current_read_offset, rb->size);
  92. }
  93. /**
  94. * scif_rb_write - Write a message to the RB
  95. * @rb: ring buffer
  96. * @msg: buffer to send the message. Must be at least size bytes long
  97. * @size: the size (in bytes) to be copied to the RB
  98. *
  99. * This API does not block if there isn't enough space in the RB.
  100. * Returns: 0 on success or -ENOMEM on failure
  101. */
  102. int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
  103. {
  104. void *header;
  105. if (scif_rb_space(rb) < size)
  106. return -ENOMEM;
  107. header = rb->rb_base + rb->current_write_offset;
  108. memcpy_torb(rb, header, msg, size);
  109. /*
  110. * Wait until scif_rb_commit(). Update the local ring
  111. * buffer data, not the shared data until commit.
  112. */
  113. rb->current_write_offset =
  114. (rb->current_write_offset + size) & (rb->size - 1);
  115. return 0;
  116. }
  117. /**
  118. * scif_rb_commit - To submit the message to let the peer fetch it
  119. * @rb: ring buffer
  120. */
  121. void scif_rb_commit(struct scif_rb *rb)
  122. {
  123. /*
  124. * We must ensure ordering between the all the data committed
  125. * previously before we expose the new message to the peer by
  126. * updating the write_ptr. This write barrier is paired with
  127. * the read barrier in scif_rb_count(..)
  128. */
  129. wmb();
  130. ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
  131. #ifdef CONFIG_INTEL_MIC_CARD
  132. /*
  133. * X100 Si bug: For the case where a Core is performing an EXT_WR
  134. * followed by a Doorbell Write, the Core must perform two EXT_WR to the
  135. * same address with the same data before it does the Doorbell Write.
  136. * This way, if ordering is violated for the Interrupt Message, it will
  137. * fall just behind the first Posted associated with the first EXT_WR.
  138. */
  139. ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
  140. #endif
  141. }
  142. /**
  143. * scif_rb_get - To get next message from the ring buffer
  144. * @rb: ring buffer
  145. * @size: Number of bytes to be read
  146. *
  147. * Return: NULL if no bytes to be read from the ring buffer, otherwise the
  148. * pointer to the next byte
  149. */
  150. static void *scif_rb_get(struct scif_rb *rb, u32 size)
  151. {
  152. void *header = NULL;
  153. if (scif_rb_count(rb, size) >= size)
  154. header = rb->rb_base + rb->current_read_offset;
  155. return header;
  156. }
  157. /*
  158. * scif_rb_get_next - Read from ring buffer.
  159. * @rb: ring buffer
  160. * @msg: buffer to hold the message. Must be at least size bytes long
  161. * @size: Number of bytes to be read
  162. *
  163. * Return: number of bytes read if available bytes are >= size, otherwise
  164. * returns zero.
  165. */
  166. u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
  167. {
  168. void *header = NULL;
  169. int read_size = 0;
  170. header = scif_rb_get(rb, size);
  171. if (header) {
  172. u32 next_cmd_offset =
  173. (rb->current_read_offset + size) & (rb->size - 1);
  174. read_size = size;
  175. rb->current_read_offset = next_cmd_offset;
  176. memcpy_fromrb(rb, header, msg, size);
  177. }
  178. return read_size;
  179. }
  180. /**
  181. * scif_rb_update_read_ptr
  182. * @rb: ring buffer
  183. */
  184. void scif_rb_update_read_ptr(struct scif_rb *rb)
  185. {
  186. u32 new_offset;
  187. new_offset = rb->current_read_offset;
  188. /*
  189. * We must ensure ordering between the all the data committed or read
  190. * previously before we expose the empty slot to the peer by updating
  191. * the read_ptr. This barrier is paired with the memory barrier in
  192. * scif_rb_space(..)
  193. */
  194. mb();
  195. ACCESS_ONCE(*rb->read_ptr) = new_offset;
  196. #ifdef CONFIG_INTEL_MIC_CARD
  197. /*
  198. * X100 Si Bug: For the case where a Core is performing an EXT_WR
  199. * followed by a Doorbell Write, the Core must perform two EXT_WR to the
  200. * same address with the same data before it does the Doorbell Write.
  201. * This way, if ordering is violated for the Interrupt Message, it will
  202. * fall just behind the first Posted associated with the first EXT_WR.
  203. */
  204. ACCESS_ONCE(*rb->read_ptr) = new_offset;
  205. #endif
  206. }
  207. /**
  208. * scif_rb_count
  209. * @rb: ring buffer
  210. * @size: Number of bytes expected to be read
  211. *
  212. * Return: number of bytes that can be read from the RB
  213. */
  214. u32 scif_rb_count(struct scif_rb *rb, u32 size)
  215. {
  216. if (scif_rb_ring_cnt(rb->current_write_offset,
  217. rb->current_read_offset,
  218. rb->size) < size) {
  219. rb->current_write_offset = *rb->write_ptr;
  220. /*
  221. * Update from the HW write pointer if empty only once the peer
  222. * has exposed the new message. This read barrier is paired
  223. * with the write barrier in scif_rb_commit(..)
  224. */
  225. smp_rmb();
  226. }
  227. return scif_rb_ring_cnt(rb->current_write_offset,
  228. rb->current_read_offset,
  229. rb->size);
  230. }