dma.c

/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}

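/* Build an skb for a single RX segment. For paged RX only the 802.11 header
 * (plus a few bytes of data) is copied into the skb head; the rest of the
 * frame is attached as a page fragment referencing the receive page.
 */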
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                memcpy(skb_put(skb, hdr_len), data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX allocated skb will always have enough space */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        memcpy(skb_put(skb, copy), data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}

static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* DMA_INFO field at the beginning of the segment contains only some of
         * the information, we need to read the FCE descriptor from the end.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        ieee80211_rx_ni(dev->hw, skb);
}

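/* Return the length of the next DMA segment (including the DMA headers),
 * or 0 when the buffer holds no further valid segment.
 */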
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON(!dma_len) ||
            WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}

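/* Process all segments aggregated in one RX URB. When the URB carries more
 * than a trivial amount of data a fresh page is allocated up front, so the
 * old one can be handed to the stack as skb fragments instead of copying.
 */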
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy if there is very little data in the buffer. */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        if (new_p) {
                /* we have one extra ref from the allocator */
                __free_pages(e->p, MT_RX_ORDER);

                e->p = new_p;
        }
}

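/* Pop the oldest completed entry off the RX ring under rx_lock, or return
 * NULL when nothing is pending.
 */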
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}

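/* RX URB completion callback - called in atomic context, so only account
 * the completed entry and defer the real work to the RX tasklet.
 */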
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (mt7601u_urb_has_error(urb))
                dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}

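/* RX bottom half - process each pending entry and resubmit its URB; entries
 * whose URB completed with an error are skipped.
 */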
static void mt7601u_rx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}

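/* TX URB completion callback - report the skb's status to mac80211, wake the
 * queue once enough entries have drained, and kick the stats worker.
 */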
static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (mt7601u_urb_has_error(urb))
                dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        trace_mt_tx_dma_done(dev, skb);
        mt7601u_tx_status(dev, skb);

        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;

        if (urb->status)
                goto out;

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}

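/* Submit a TX skb on the bulk OUT endpoint for the given hardware queue and
 * account for it in the per-endpoint ring; stop the mac80211 queue when the
 * ring fills up.
 */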
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it will
                 * often be the first ENODEV we see after device is removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}

int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}

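/* Poison all RX URBs so no new completions are delivered. The lock is dropped
 * around usb_poison_urb() because it may sleep waiting for an in-flight
 * completion handler to finish.
 */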
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        for (i = 0; i < dev->rx_q.entries; i++) {
                int next = dev->rx_q.end;

                spin_unlock_irqrestore(&dev->rx_lock, flags);
                usb_poison_urb(dev->rx_q.e[next].urb);
                spin_lock_irqsave(&dev->rx_lock, flags);
        }

        spin_unlock_irqrestore(&dev->rx_lock, flags);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        WARN_ON(q->used);

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                usb_free_urb(q->e[i].urb);
        }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}

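/* Allocate TX/RX bookkeeping and prime the RX ring; on any failure everything
 * allocated so far is torn down via mt7601u_dma_cleanup().
 */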
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);
}