/* rx_reorder.c - Rx A-MPDU block-ack reordering for the wil6210 driver */
  1. /*
  2. * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  3. * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "wil6210.h"
  18. #include "txrx.h"
  19. #define SEQ_MODULO 0x1000
  20. #define SEQ_MASK 0xfff
  21. static inline int seq_less(u16 sq1, u16 sq2)
  22. {
  23. return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
  24. }
  25. static inline u16 seq_inc(u16 sq)
  26. {
  27. return (sq + 1) & SEQ_MASK;
  28. }
  29. static inline u16 seq_sub(u16 sq1, u16 sq2)
  30. {
  31. return (sq1 - sq2) & SEQ_MASK;
  32. }
  33. static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
  34. {
  35. return seq_sub(seq, r->ssn) % r->buf_size;
  36. }
  37. static void wil_release_reorder_frame(struct net_device *ndev,
  38. struct wil_tid_ampdu_rx *r,
  39. int index)
  40. {
  41. struct sk_buff *skb = r->reorder_buf[index];
  42. if (!skb)
  43. goto no_frame;
  44. /* release the frame from the reorder ring buffer */
  45. r->stored_mpdu_num--;
  46. r->reorder_buf[index] = NULL;
  47. wil_netif_rx_any(skb, ndev);
  48. no_frame:
  49. r->head_seq_num = seq_inc(r->head_seq_num);
  50. }
  51. static void wil_release_reorder_frames(struct net_device *ndev,
  52. struct wil_tid_ampdu_rx *r,
  53. u16 hseq)
  54. {
  55. int index;
  56. /* note: this function is never called with
  57. * hseq preceding r->head_seq_num, i.e it is always true
  58. * !seq_less(hseq, r->head_seq_num)
  59. * and thus on loop exit it should be
  60. * r->head_seq_num == hseq
  61. */
  62. while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
  63. index = reorder_index(r, r->head_seq_num);
  64. wil_release_reorder_frame(ndev, r, index);
  65. }
  66. r->head_seq_num = hseq;
  67. }
  68. static void wil_reorder_release(struct net_device *ndev,
  69. struct wil_tid_ampdu_rx *r)
  70. {
  71. int index = reorder_index(r, r->head_seq_num);
  72. while (r->reorder_buf[index]) {
  73. wil_release_reorder_frame(ndev, r, index);
  74. index = reorder_index(r, r->head_seq_num);
  75. }
  76. }
/* Main Rx reordering entry point; called in NAPI context.
 * Either delivers @skb to the stack (possibly together with previously
 * buffered frames), stores it in the reorder buffer, or drops it
 * (old/duplicate). In all paths ownership of @skb is consumed here.
 */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	struct wil6210_vif *vif;
	struct net_device *ndev;
	int tid, cid, mid, mcast, retry;
	u16 seq;
	struct wil_sta_info *sta;
	struct wil_tid_ampdu_rx *r;
	u16 hseq;
	int index;

	/* extract TID/CID/MID/seq/mcast/retry from the frame descriptor */
	wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
					 &mcast, &retry);
	sta = &wil->sta[cid];

	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
		     mid, cid, tid, seq, mcast);

	vif = wil->vifs[mid];
	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "invalid VIF, mid %d\n", mid);
		dev_kfree_skb(skb);
		return;
	}
	ndev = vif_to_ndev(vif);

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		/* no BACK/reorder context for this TID - deliver as-is */
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	if (unlikely(mcast)) {
		/* multicast frames are not reordered; only drop a retried
		 * duplicate of the last mcast sequence number seen
		 */
		if (retry && seq == r->mcast_last_seq) {
			r->drop_dup_mcast++;
			wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
				     seq);
			dev_kfree_skb(skb);
			goto out;
		}
		r->mcast_last_seq = seq;
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	r->total++;
	hseq = r->head_seq_num;

	/** Due to the race between WMI events, where BACK establishment
	 * reported, and data Rx, few packets may be pass up before reorder
	 * buffer get allocated. Catch up by pretending SSN is what we
	 * see in the 1-st Rx packet
	 *
	 * Another scenario, Rx get delayed and we got packet from before
	 * BACK. Pass it to the stack and wait.
	 */
	if (r->first_time) {
		r->first_time = false;
		if (seq != r->head_seq_num) {
			if (seq_less(seq, r->head_seq_num)) {
				/* pre-BACK frame: deliver it and keep waiting
				 * for the first in-window frame
				 */
				wil_err(wil,
					"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
					seq, r->head_seq_num);
				r->first_time = true;
				wil_netif_rx_any(skb, ndev);
				goto out;
			}
			/* resync the window to the observed sequence */
			wil_err(wil,
				"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
				seq, r->head_seq_num);
			r->head_seq_num = seq;
			r->ssn = seq;
		}
	}

	/* frame with out of date sequence number */
	if (seq_less(seq, r->head_seq_num)) {
		r->ssn_last_drop = seq;
		r->drop_old++;
		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
			     seq, r->head_seq_num);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If frame the sequence number exceeds our buffering window
	 * size release some previous frames to make room for this one.
	 */
	if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
		hseq = seq_inc(seq_sub(seq, r->buf_size));
		/* release stored frames up to new head to stack */
		wil_release_reorder_frames(ndev, r, hseq);
	}

	/* Now the new frame is always in the range of the reordering buffer */
	index = reorder_index(r, seq);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		r->drop_dup++;
		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = skb;
	r->stored_mpdu_num++;
	wil_reorder_release(ndev, r);

out:
	spin_unlock(&sta->tid_rx_lock);
}
/* process BAR frame, called in NAPI context
 *
 * A Block Ack Request moves the reorder window: release all frames
 * buffered before @seq to the stack and advance the head to @seq.
 * A BAR preceding the current head is ignored (stale).
 */
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
		u8 cid, u8 tid, u16 seq)
{
	struct wil_sta_info *sta = &wil->sta[cid];
	struct net_device *ndev = vif_to_ndev(vif);
	struct wil_tid_ampdu_rx *r;

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
		goto out;
	}
	if (seq_less(seq, r->head_seq_num)) {
		wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
			seq, r->head_seq_num);
		goto out;
	}
	wil_dbg_txrx(wil, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n",
		     cid, vif->mid, tid, seq, r->head_seq_num);
	wil_release_reorder_frames(ndev, r, seq);

out:
	spin_unlock(&sta->tid_rx_lock);
}
  216. struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
  217. int size, u16 ssn)
  218. {
  219. struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
  220. if (!r)
  221. return NULL;
  222. r->reorder_buf =
  223. kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
  224. if (!r->reorder_buf) {
  225. kfree(r->reorder_buf);
  226. kfree(r);
  227. return NULL;
  228. }
  229. r->ssn = ssn;
  230. r->head_seq_num = ssn;
  231. r->buf_size = size;
  232. r->stored_mpdu_num = 0;
  233. r->first_time = true;
  234. r->mcast_last_seq = U16_MAX;
  235. return r;
  236. }
  237. void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
  238. struct wil_tid_ampdu_rx *r)
  239. {
  240. int i;
  241. if (!r)
  242. return;
  243. /* Do not pass remaining frames to the network stack - it may be
  244. * not expecting to get any more Rx. Rx from here may lead to
  245. * kernel OOPS since some per-socket accounting info was already
  246. * released.
  247. */
  248. for (i = 0; i < r->buf_size; i++)
  249. kfree_skb(r->reorder_buf[i]);
  250. kfree(r->reorder_buf);
  251. kfree(r);
  252. }
  253. /* ADDBA processing */
  254. static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
  255. {
  256. u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
  257. (mtu_max + WIL_MAX_MPDU_OVERHEAD));
  258. if (!req_agg_wsize)
  259. return max_agg_size;
  260. return min(max_agg_size, req_agg_wsize);
  261. }
/* Block Ack - Rx side (recipient)
 *
 * Handle an ADDBA request from a peer: validate it, send the response
 * via WMI, and on success install a fresh reorder context for the TID
 * (replacing any existing one). Returns 0 on success or negative errno.
 * May sleep; takes sta->tid_rx_lock only for the final swap.
 */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
			 u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
			 __le16 ba_timeout, __le16 ba_seq_ctrl)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	u16 param_set = le16_to_cpu(ba_param_set);
	u16 agg_timeout = le16_to_cpu(ba_timeout);
	u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
	struct wil_sta_info *sta;
	u8 cid, tid;
	u16 agg_wsize = 0;
	/* bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 2..5: TID
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
	/* A-MSDU in A-MPDU only when enhanced-DMA HW reordering and the
	 * firmware capability are all present and the peer requested it
	 */
	bool agg_amsdu = wil->use_enhanced_dma_hw &&
		wil->use_rx_hw_reordering &&
		test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
		wil->amsdu_en && (param_set & BIT(0));
	int ba_policy = param_set & BIT(1);
	u16 status = WLAN_STATUS_SUCCESS;
	u16 ssn = seq_ctrl >> 4;
	struct wil_tid_ampdu_rx *r;
	int rc = 0;

	might_sleep();

	parse_cidxtid(cidxtid, &cid, &tid);

	/* sanity checks */
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);
		rc = -EINVAL;
		goto out;
	}

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);
		rc = -EINVAL;
		goto out;
	}

	wil_dbg_wmi(wil,
		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
		    cid, sta->addr, tid, req_agg_wsize, agg_timeout,
		    agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);

	/* apply policies */
	if (ba_policy) {
		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
		status = WLAN_STATUS_INVALID_QOS_PARAM;
	}
	if (status == WLAN_STATUS_SUCCESS) {
		if (req_agg_wsize == 0) {
			wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
				     wil->max_agg_wsize);
			agg_wsize = wil->max_agg_wsize;
		} else {
			agg_wsize = min_t(u16,
					  wil->max_agg_wsize, req_agg_wsize);
		}
	}

	/* respond to the peer first; only install the context if both the
	 * WMI call and the negotiated status indicate success
	 */
	rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
					     status, agg_amsdu, agg_wsize,
					     agg_timeout);
	if (rc || (status != WLAN_STATUS_SUCCESS)) {
		wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
			status);
		goto out;
	}

	/* apply */
	/* NOTE(review): a NULL r (alloc failure) is tolerated here - the Rx
	 * path treats a missing tid_rx context as "no reordering"
	 */
	r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
	spin_lock_bh(&sta->tid_rx_lock);
	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
	sta->tid_rx[tid] = r;
	spin_unlock_bh(&sta->tid_rx_lock);

out:
	return rc;
}
  339. /* BACK - Tx side (originator) */
  340. int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
  341. {
  342. u8 agg_wsize = wil_agg_size(wil, wsize);
  343. u16 agg_timeout = 0;
  344. struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
  345. int rc = 0;
  346. if (txdata->addba_in_progress) {
  347. wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
  348. ringid);
  349. goto out;
  350. }
  351. if (txdata->agg_wsize) {
  352. wil_dbg_misc(wil,
  353. "ADDBA for vring[%d] already done for wsize %d\n",
  354. ringid, txdata->agg_wsize);
  355. goto out;
  356. }
  357. txdata->addba_in_progress = true;
  358. rc = wmi_addba(wil, txdata->mid, ringid, agg_wsize, agg_timeout);
  359. if (rc) {
  360. wil_err(wil, "wmi_addba failed, rc (%d)", rc);
  361. txdata->addba_in_progress = false;
  362. }
  363. out:
  364. return rc;
  365. }