txrx_edma.c

/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"

#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)

static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static int wil_find_free_sring(struct wil6210_priv *wil)
{
	int i;

	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
		if (!wil->srings[i].va)
			return i;
	}

	return -EINVAL;
}

static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	sring->pa = 0;
	sring->va = NULL;
}

static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is necessary
	 * since DR bit should be initialized to 0.
	 */
	sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}

static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];
	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

/**
 * Allocate one skb for Rx descriptor RING
 */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = ALIGN(wil->rx_buf_len, 4);
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	/**
	 * Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}

static inline
void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
{
	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
	       sring->elem_size);
}

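/* Advance the status ring software head. Status entries carry a descriptor
 * ready (DR) bit written by HW; the driver accepts an entry only when that
 * bit matches desc_rdy_pol, and the expected polarity flips every time
 * swhead wraps, so stale entries left over from the previous lap are not
 * mistaken for new completions and the ring never needs re-zeroing.
 */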
static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
	sring->swhead = (sring->swhead + 1) % sring->size;
	if (sring->swhead == 0)
		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}

static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;

	ring->swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring),
	     (next_head != ring->swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}

static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	struct list_head *active = &wil->rx_buff_mgmt.active;
	dma_addr_t pa;

	while (!list_empty(active)) {
		struct wil_rx_buff *rx_buff =
			list_first_entry(active, struct wil_rx_buff, list);
		struct sk_buff *skb = rx_buff->skb;

		if (unlikely(!skb)) {
			wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
		} else {
			rx_buff->skb = NULL;
			memcpy(&pa, skb->cb, sizeof(pa));
			dma_unmap_single(dev, pa, wil->rx_buf_len,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
		}

		/* Move the buffer from the active to the free list */
		list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
	}
}

static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case active list is
	 * not empty in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}

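/* Rx buffers are tracked by their index ("buff_id") into
 * rx_buff_mgmt.buff_arr. Unused IDs sit on the free list; IDs whose skb has
 * been posted to HW sit on the active list. HW echoes the buff_id back in
 * the Rx status message, which is how the completed skb is looked up in
 * wil_sring_reap_rx_edma().
 */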
static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Linkify the list */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 0; i < size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size;

	return 0;
}

static int wil_init_rx_sring(struct wil6210_priv *wil,
			     u16 status_ring_size,
			     size_t elem_size,
			     u16 ring_id)
{
	struct wil_status_ring *sring = &wil->srings[ring_id];
	int rc;

	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
		     ring_id);

	memset(&sring->rx_data, 0, sizeof(sring->rx_data));

	sring->is_rx = true;
	sring->size = status_ring_size;
	sring->elem_size = elem_size;
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_sring_add(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					    GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;
err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}

static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;
	int ring_index = 0;

	if (!ring->va)
		return;

	sz = ring->size * sizeof(ring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (ring->is_rx) {
		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
			     ring->size, ring->va,
			     &ring->pa, ring->ctx);

		wil_move_all_rx_buff_to_free_list(wil, ring);
		goto out;
	}

	/* TX ring */
	ring_index = ring - wil->ring_tx;

	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
		     ring_index, ring->size, ring->va,
		     &ring->pa, ring->ctx);

	while (!wil_ring_is_empty(ring)) {
		struct wil_ctx *ctx;

		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_d =
			(struct wil_tx_enhanced_desc *)
			&ring->va[ring->swtail].tx.enhanced;

		ctx = &ring->ctx[ring->swtail];
		if (!ctx) {
			wil_dbg_txrx(wil,
				     "ctx(%d) was already completed\n",
				     ring->swtail);
			ring->swtail = wil_ring_next_tail(ring);
			continue;
		}
		*d = *_d;
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		ring->swtail = wil_ring_next_tail(ring);
	}

out:
	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
	kfree(ring->ctx);
	ring->pa = 0;
	ring->va = NULL;
	ring->ctx = NULL;
}

static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;
out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}

static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast, int *retry)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
	*retry = wil_rx_status_get_retry(s);
}

static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}

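/* SW-reorder path only: the PN carried in the extended Rx status is checked
 * against the last PN accepted for this CID/TID (or group) key; any PN that
 * is not strictly increasing is rejected as a replay.
 */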
static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	const u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}

static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, msg);
		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}

static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}

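/* Rx init order: first the status ring(s) that HW writes completions to,
 * then the descriptor ring bound to WIL_DEFAULT_RX_STATUS_RING_ID, then the
 * buffer-ID array, and finally an initial refill that posts empty buffers
 * to HW.
 */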
static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
{
	u16 status_ring_size;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;
	u16 max_rx_pl_per_desc;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EINVAL;
	}

	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = -EFAULT;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;
err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}

static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
	wil->ring2cid_tid[ring_id][1] = 0;

out:
	return rc;
}

/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* print again all info. One can enable only this
		 * without overhead for printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}

static int wil_rx_error_check_edma(struct wil6210_priv *wil,
				   struct sk_buff *skb,
				   struct wil_net_stats *stats)
{
	int error;
	int l2_rx_status;
	int l3_rx_status;
	int l4_rx_status;
	void *msg = wil_skb_rxstatus(skb);

	error = wil_rx_status_get_error(msg);
	if (!error) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
	if (l2_rx_status != 0) {
		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
			     l2_rx_status);
		/* Due to HW issue, KEY error will trigger a MIC error */
		if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
			wil_err_ratelimited(wil,
					    "L2 MIC/KEY error, dropping packet\n");
			stats->rx_mic_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
			wil_err_ratelimited(wil,
					    "L2 KEY error, dropping packet\n");
			stats->rx_key_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
			wil_err_ratelimited(wil,
					    "L2 REPLAY error, dropping packet\n");
			stats->rx_replay++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
			wil_err_ratelimited(wil,
					    "L2 AMSDU error, dropping packet\n");
			stats->rx_amsdu_error++;
		}
		return -EFAULT;
	}

	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
	if (!l3_rx_status && !l4_rx_status)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* If HW reports a bad checksum, let the IP stack re-check it.
	 * For example, HW doesn't understand the Microsoft IP stack, which
	 * mis-calculates the TCP checksum: when it should be 0x0, it writes
	 * 0xffff in violation of RFC 1624.
	 */
	else
		stats->rx_csum_err++;

	return 0;
}

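/* Reap one packet from an Rx status ring. Non-EOP fragments are coalesced
 * into rxdata->skb and the loop continues until the EOP descriptor arrives,
 * so the caller only ever sees complete frames (or NULL once all ready
 * status messages have been consumed).
 */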
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = ALIGN(wil->rx_buf_len, 4);
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;

	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, msg);
	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		wil_sring_advance_swhead(sring);
		goto again;
	}

	wil_sring_advance_swhead(sring);

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		/* Move the buffer from the active list to the free list */
		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			  &wil->rx_buff_mgmt.free);
		goto again;
	}

	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		  &wil->rx_buff_mgmt.free);

	eop = wil_rx_status_get_eop(msg);

	cid = wil_rx_status_get_cid(msg);
	if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
			cid, sring->swhead);
		rxdata->skipping = true;
		goto skipping;
	}
	stats = &wil->sta[cid].stats;

	if (unlikely(skb->len < ETH_HLEN)) {
		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		rxdata->skipping = true;
		goto skipping;
	}

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		rxdata->skipping = true;
	}

skipping:
	/* skipping indicates if a certain SKB should be dropped.
	 * It is set in case there is an error on the current SKB or in case
	 * of RX chaining: as long as we manage to merge the SKBs it will
	 * be false. Once we have a bad SKB or we don't manage to merge SKBs
	 * it will be set to the !EOP value of the current SKB.
	 * This guarantees that all the following SKBs until EOP will also
	 * get dropped.
	 */
	if (unlikely(rxdata->skipping)) {
		kfree_skb(skb);
		if (rxdata->skb) {
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
		}
		rxdata->skipping = !eop;
		goto again;
	}

	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

	/* reaching here rxdata->skb always contains a full packet */
	skb = rxdata->skb;
	rxdata->skb = NULL;
	rxdata->skipping = false;

	if (stats) {
		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
			stats->rx_per_mcs[stats->last_mcs_rx]++;
	}

	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
		kfree_skb(skb);
		goto again;
	}

	/* Compensate for the HW data alignment according to the status
	 * message
	 */
	data_offset = wil_rx_status_get_data_offset(msg);
	if (data_offset == 0xFF ||
	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
		wil_err(wil, "Unexpected data offset %d\n", data_offset);
		kfree_skb(skb);
		goto again;
	}

	skb_pull(skb, data_offset);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	/* Has to be done after dma_unmap_single as skb->cb is also
	 * used for holding the pa
	 */
	s = wil_skb_rxstatus(skb);
	memcpy(s, msg, sring->elem_size);

	return skb;
}

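/* Rx completion handler: drains every initialized Rx status ring, up to
 * *quota packets in total, dispatching frames straight to the network stack
 * when HW reordering is enabled and through wil_rx_reorder() otherwise,
 * then refills the Rx descriptor ring.
 */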
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
			wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}

static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4 */
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*msg = *_msg;
}

/**
 * Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats = NULL;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &msg);
	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;
		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		cid = wil->ring2cid_tid[ring_id][0];
		if (cid < WIL6210_MAX_CID)
			stats = &wil->sta[cid].stats;

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		for (i = 0 ; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);

			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
								    &wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &msg);
		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
	}

	/* shall we wake net queues? */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	/* Update the HW tail ptr (RD ptr) */
	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family */
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}

static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	*_desc = *d;
	(*descs_used)++;

	return 0;
}

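/* TSO Tx path: the first descriptor carries only the MAC/IP/TCP headers,
 * the second carries the rest of the linear skb data, and one more is added
 * per page fragment. With SEG_EN set and the MSS programmed, HW segments
 * the payload and replicates the headers for each generated frame.
 */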
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

	/* Rest of the descriptors are from the SKB fragments */
	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = frag->size;

		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
			     len, descs_used);

		rc = wil_tx_tso_gen_desc(wil, NULL, len,
					 (swhead + descs_used) % ring->size,
					 (f != nr_frags - 1) ?
					 wil_tso_type_mid : wil_tso_type_lst,
					 frag, ring, skb, is_ipv4,
					 tcp_hdr_len, skb_net_hdr_len,
					 mss, &descs_used);
		if (rc)
			goto mem_error;
	}

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_ring_advance_head(ring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

mem_error:
	while (descs_used > 0) {
		struct device *dev = wil_to_dev(wil);
		struct wil_ctx *ctx;
		int i = (swhead + descs_used - 1) % ring->size;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_desc =
			(struct wil_tx_enhanced_desc *)
			&ring->va[i].tx.enhanced;

		*d = *_desc;
		ctx = &ring->ctx[i];
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
	return rc;
}

static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
				    int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	int rc;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
		     ring_id, wil->tx_sring_idx);

	lockdep_assert_held(&wil->mutex);

	wil_tx_data_init(txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
	wil->ring2cid_tid[ring_id][1] = 0; /* TID */

	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
	if (rc)
		goto out_free;

	return 0;

out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);

out:
	return rc;
}

static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];

	wil_dbg_misc(wil, "free TX sring\n");

	wil_sring_free(wil, sring);
}

static void wil_rx_data_free(struct wil_status_ring *sring)
{
	if (!sring)
		return;

	kfree_skb(sring->rx_data.skb);
	sring->rx_data.skb = NULL;
}

static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	int i;

	wil_dbg_misc(wil, "rx_fini_edma\n");

	wil_ring_free_edma(wil, ring);

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		wil_rx_data_free(&wil->srings[i]);
		wil_sring_free(wil, &wil->srings[i]);
	}

	wil_free_rx_buff_arr(wil);
}

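/* Populate wil->txrx_ops with the enhanced-DMA (EDMA) handlers; the driver
 * installs either these or the legacy handlers from txrx.c at init time,
 * depending on the hardware generation in use.
 */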
void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}