xframe_queue.c
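/*
 * xframe_queue.c -- xframe queue management for the XPP xbus layer.
 *
 * An xframe_queue holds xframes, either as a free pool or as a pending
 * frame queue. It keeps simple statistics (worst depth, worst queuing
 * lag, overflow count) and can grow or shrink its pool of frames
 * around a steady-state size via the transport's allocator.
 */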

#include "xframe_queue.h"
#include "xbus-core.h"
#include "dahdi_debug.h"

extern int debug;

static xframe_t *transport_alloc_xframe(xbus_t *xbus, gfp_t gfp_flags);
static void transport_free_xframe(xbus_t *xbus, xframe_t *xframe);
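
/*
 * Initialize a queue. Both the steady-state and maximum counts are
 * padded by XFRAME_QUEUE_MARGIN, leaving headroom for the one-frame
 * pool adjustments done in xframe_queue_adjust().
 */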
void xframe_queue_init(struct xframe_queue *q,
		unsigned int steady_state_count, unsigned int max_count,
		const char *name, void *priv)
{
	memset(q, 0, sizeof(*q));
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->head);
	q->max_count = XFRAME_QUEUE_MARGIN + max_count;
	q->steady_state_count = XFRAME_QUEUE_MARGIN + steady_state_count;
	q->name = name;
	q->priv = priv;
}

void xframe_queue_clearstats(struct xframe_queue *q)
{
	q->worst_count = 0;
	/* q->overflows is deliberately never cleared */
	q->worst_lag_usec = 0L;
}
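
/*
 * Dump the first packet of every queued frame, most recently queued
 * first, with each frame's time-in-queue. Called with q->lock held
 * (from the enqueue overflow path).
 */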
static void __xframe_dump_queue(struct xframe_queue *q)
{
	xframe_t *xframe;
	int i = 0;
	char prefix[30];
	struct timeval now;

	do_gettimeofday(&now);
	printk(KERN_DEBUG "%s: dump queue '%s' (first packet in each frame)\n",
		THIS_MODULE->name, q->name);
	list_for_each_entry_reverse(xframe, &q->head, frame_list) {
		xpacket_t *pack = (xpacket_t *)&xframe->packets[0];
		long usec = usec_diff(&now, &xframe->tv_queued);

		snprintf(prefix, ARRAY_SIZE(prefix), " %3d> %5ld.%03ld msec",
			i++, usec / 1000, usec % 1000);
		dump_packet(prefix, pack, 1);
	}
}
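
/*
 * Enqueue a frame; q->lock must be held by the caller. Fails (returns
 * 0) if the queue is disabled or already at max_count; an overflow is
 * then counted and the queue contents are dumped for diagnostics.
 */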
static bool __xframe_enqueue(struct xframe_queue *q, xframe_t *xframe)
{
	int ret = 1;

	if (unlikely(q->disabled)) {
		ret = 0;
		goto out;
	}
	if (q->count >= q->max_count) {
		q->overflows++;
		NOTICE("Overflow of %-15s: counts %3d, %3d, %3d worst %3d, overflows %3d worst_lag %02ld.%03ld ms\n",
			q->name,
			q->steady_state_count,
			q->count,
			q->max_count,
			q->worst_count,
			q->overflows,
			q->worst_lag_usec / 1000,
			q->worst_lag_usec % 1000);
		__xframe_dump_queue(q);
		ret = 0;
		goto out;
	}
	if (++q->count > q->worst_count)
		q->worst_count = q->count;
	list_add_tail(&xframe->frame_list, &q->head);
	do_gettimeofday(&xframe->tv_queued);
out:
	return ret;
}
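
/* Locked (IRQ-safe) wrapper around __xframe_enqueue(). */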
bool xframe_enqueue(struct xframe_queue *q, xframe_t *xframe)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&q->lock, flags);
	ret = __xframe_enqueue(q, xframe);
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}
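
/*
 * Dequeue the oldest frame (list head); q->lock must be held by the
 * caller. Updates worst_lag_usec from the frame's time in queue.
 * Returns NULL if the queue is empty.
 */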
static xframe_t *__xframe_dequeue(struct xframe_queue *q)
{
	xframe_t *frm = NULL;
	struct list_head *h;
	struct timeval now;
	unsigned long usec_lag;

	if (list_empty(&q->head))
		goto out;
	h = q->head.next;
	list_del_init(h);
	--q->count;
	frm = list_entry(h, xframe_t, frame_list);
	do_gettimeofday(&now);
	usec_lag = (now.tv_sec - frm->tv_queued.tv_sec) * 1000 * 1000 +
		(now.tv_usec - frm->tv_queued.tv_usec);
	if (q->worst_lag_usec < usec_lag)
		q->worst_lag_usec = usec_lag;
out:
	return frm;
}
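
/* Locked (IRQ-safe) wrapper around __xframe_dequeue(). */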
xframe_t *xframe_dequeue(struct xframe_queue *q)
{
	unsigned long flags;
	xframe_t *frm;

	spin_lock_irqsave(&q->lock, flags);
	frm = __xframe_dequeue(q);
	spin_unlock_irqrestore(&q->lock, flags);
	return frm;
}

void xframe_queue_disable(struct xframe_queue *q, bool disabled)
{
	q->disabled = disabled;
}
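
/*
 * Disable the queue, then drain it, handing every frame back to the
 * transport.
 */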
void xframe_queue_clear(struct xframe_queue *q)
{
	xframe_t *xframe;
	xbus_t *xbus = q->priv;
	int i = 0;

	xframe_queue_disable(q, 1);
	while ((xframe = xframe_dequeue(q)) != NULL) {
		transport_free_xframe(xbus, xframe);
		i++;
	}
	XBUS_DBG(DEVICES, xbus, "%s: finished queue clear (%d items)\n",
		q->name, i);
}

uint xframe_queue_count(struct xframe_queue *q)
{
	return q->count;
}

/*------------------------- Frame Alloc/Dealloc --------------------*/
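
/*
 * Allocate a frame via the transport's alloc_xframe() method. Takes a
 * reference on the transport ops with transportops_get(); on
 * allocation failure the reference is dropped and the error message is
 * rate-limited.
 */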
static xframe_t *transport_alloc_xframe(xbus_t *xbus, gfp_t gfp_flags)
{
	struct xbus_ops *ops;
	xframe_t *xframe;
	unsigned long flags;

	BUG_ON(!xbus);
	ops = transportops_get(xbus);
	if (unlikely(!ops)) {
		XBUS_ERR(xbus, "Missing transport\n");
		return NULL;
	}
	spin_lock_irqsave(&xbus->transport.lock, flags);
	/* XBUS_INFO(xbus, "%s (transport_refcount=%d)\n", __FUNCTION__, atomic_read(&xbus->transport.transport_refcount)); */
	xframe = ops->alloc_xframe(xbus, gfp_flags);
	if (!xframe) {
		static int rate_limit;

		if ((rate_limit++ % 3001) == 0)
			XBUS_ERR(xbus,
				"Failed xframe allocation from transport (%d)\n",
				rate_limit);
		transportops_put(xbus);
		/* fall through */
	}
	spin_unlock_irqrestore(&xbus->transport.lock, flags);
	return xframe;
}
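
/*
 * Return a frame via the transport's free_xframe() method and drop the
 * ops reference taken at allocation time.
 */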
static void transport_free_xframe(xbus_t *xbus, xframe_t *xframe)
{
	struct xbus_ops *ops;
	unsigned long flags;

	BUG_ON(!xbus);
	ops = xbus->transport.ops;
	BUG_ON(!ops);
	spin_lock_irqsave(&xbus->transport.lock, flags);
	/* XBUS_INFO(xbus, "%s (transport_refcount=%d)\n", __FUNCTION__, atomic_read(&xbus->transport.transport_refcount)); */
	ops->free_xframe(xbus, xframe);
	transportops_put(xbus);
	spin_unlock_irqrestore(&xbus->transport.lock, flags);
}
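
/*
 * Nudge the pool one frame toward steady_state_count: allocate one
 * frame if the count is more than XFRAME_QUEUE_MARGIN below it, free
 * one if more than XFRAME_QUEUE_MARGIN above it. Returns 0 on an
 * allocation/enqueue/dequeue failure, 1 otherwise.
 */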
static bool xframe_queue_adjust(struct xframe_queue *q)
{
	xbus_t *xbus;
	xframe_t *xframe;
	int delta;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!q);
	xbus = q->priv;
	BUG_ON(!xbus);
	spin_lock_irqsave(&q->lock, flags);
	delta = q->count - q->steady_state_count;
	if (delta < -XFRAME_QUEUE_MARGIN) {
		/* Increase pool by one frame */
		/* XBUS_INFO(xbus, "%s(%d): Allocate one\n", q->name, delta); */
		xframe = transport_alloc_xframe(xbus, GFP_ATOMIC);
		if (!xframe) {
			static int rate_limit;

			if ((rate_limit++ % 3001) == 0)
				XBUS_ERR(xbus, "%s: failed frame allocation\n",
					q->name);
			goto out;
		}
		if (!__xframe_enqueue(q, xframe)) {
			static int rate_limit;

			if ((rate_limit++ % 3001) == 0)
				XBUS_ERR(xbus, "%s: failed enqueueing frame\n",
					q->name);
			transport_free_xframe(xbus, xframe);
			goto out;
		}
	} else if (delta > XFRAME_QUEUE_MARGIN) {
		/* Decrease pool by one frame */
		/* XBUS_INFO(xbus, "%s(%d): Free one\n", q->name, delta); */
		xframe = __xframe_dequeue(q);
		if (!xframe) {
			static int rate_limit;

			if ((rate_limit++ % 3001) == 0)
				XBUS_ERR(xbus, "%s: failed dequeueing frame\n",
					q->name);
			goto out;
		}
		transport_free_xframe(xbus, xframe);
	}
	ret = 1;
out:
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}
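
/*
 * Take a frame from the pool for use. The frame is reset (length zero,
 * first_free rewound) but, per ticket:403 below, its data is
 * deliberately not memset() -- later code must initialize the headers
 * it writes.
 *
 * A usage sketch (the field name "send_pool" here is illustrative, not
 * taken from this file):
 *
 *	xframe_t *xframe = get_xframe(&xbus->send_pool);
 *	if (!xframe)
 *		return;		(pool empty; error already rate-limited)
 *	... fill in packets, then hand off, or give it back: ...
 *	put_xframe(&xbus->send_pool, xframe);
 */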
xframe_t *get_xframe(struct xframe_queue *q)
{
	xframe_t *xframe;
	xbus_t *xbus;

	BUG_ON(!q);
	xbus = (xbus_t *)q->priv;
	BUG_ON(!xbus);
	xframe_queue_adjust(q);
	xframe = xframe_dequeue(q);
	if (!xframe) {
		static int rate_limit;

		if ((rate_limit++ % 3001) == 0)
			XBUS_ERR(xbus, "%s STILL EMPTY (%d)\n",
				q->name, rate_limit);
		return NULL;
	}
	BUG_ON(xframe->xframe_magic != XFRAME_MAGIC);
	atomic_set(&xframe->frame_len, 0);
	xframe->first_free = xframe->packets;
	do_gettimeofday(&xframe->tv_created);
	/*
	 * If later parts bother to correctly initialize their
	 * headers, there is no need to memset() the whole data.
	 *
	 * ticket:403
	 *
	 * memset(xframe->packets, 0, xframe->frame_maxlen);
	 */
	/* XBUS_INFO(xbus, "%s\n", __FUNCTION__); */
	return xframe;
}
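
/*
 * Return a frame to its pool. If enqueueing fails (queue disabled or
 * full), the frame is freed back to the transport instead.
 */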
void put_xframe(struct xframe_queue *q, xframe_t *xframe)
{
	xbus_t *xbus;

	BUG_ON(!q);
	xbus = (xbus_t *)q->priv;
	BUG_ON(!xbus);
	/* XBUS_INFO(xbus, "%s\n", __FUNCTION__); */
	BUG_ON(!TRANSPORT_EXIST(xbus));
	if (unlikely(!xframe_enqueue(q, xframe))) {
		XBUS_ERR(xbus, "Failed returning xframe to %s\n", q->name);
		transport_free_xframe(xbus, xframe);
		return;
	}
	xframe_queue_adjust(q);
}

EXPORT_SYMBOL(xframe_queue_init);
EXPORT_SYMBOL(xframe_queue_clearstats);
EXPORT_SYMBOL(xframe_enqueue);
EXPORT_SYMBOL(xframe_dequeue);
EXPORT_SYMBOL(xframe_queue_disable);
EXPORT_SYMBOL(xframe_queue_clear);
EXPORT_SYMBOL(xframe_queue_count);
EXPORT_SYMBOL(get_xframe);
EXPORT_SYMBOL(put_xframe);