/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-agn-hw.h"
#include "dev.h"
#include "agn.h"
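
/*
 * Standard IEEE 802.11 user-priority to access-category mapping,
 * indexed by TID: TIDs 1 and 2 map to background (BK), 0 and 3 to
 * best effort (BE), 4 and 5 to video (VI), and 6 and 7 to voice (VO).
 */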
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};

static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}

/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->lib->bt_params &&
		 priv->lib->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     struct ieee80211_sta *sta,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(
				&priv->nvm_data->bands[info->band], sta);

	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
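
	/*
	 * Example: on 5 GHz, mac80211's lowest rate index (0) is 6 Mbps
	 * OFDM, while the driver's iwl_rates[] table begins with the four
	 * CCK rates, so the offset above shifts the index past them to
	 * the first OFDM entry.
	 */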

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;

	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->nvm_data->valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
					priv, priv->mgmt_tx_ant,
					priv->nvm_data->valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}

/**
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
				   struct ieee80211_sta *sta)
{
	int sta_id;

	if (!sta)
		return context->bcast_sta_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IWL_INVALID_STATION);

	return sta_id;
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
		  struct ieee80211_sta *sta,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false, is_data_qos = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break
	 * aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;
	/* From now on, we cannot access info->control */

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv,
				"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
				sta_id, tid,
				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d\n", tid_data->agg.state))
			goto drop_unlock_sta;
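
		/*
		 * The sequence control field packs the fragment number in
		 * bits 0-3 and the 12-bit sequence number in bits 4-15;
		 * adding 0x10 below therefore advances the SN by one for
		 * the next frame on this TID.
		 */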
		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			is_agg = true;
		is_data_qos = true;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	txq_id = info->hw_queue;

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	}

	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
		     txq_id, seq_number);

	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (is_data_qos && !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}

static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
{
	int q;

	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
	     q < priv->cfg->base_params->num_of_queues; q++) {
		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
			priv->queue_to_mac80211[q] = mq;
			return q;
		}
	}

	return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}
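
/*
 * Aggregation state machine for a <sta, tid> pair, as used below:
 * IWL_AGG_OFF -> IWL_AGG_STARTING once the addBA request has gone out,
 * then IWL_AGG_ON when the addBA response arrives and the aggregation
 * queue is operational. IWL_EMPTYING_HW_QUEUE_ADDBA / _DELBA are
 * transient states used while the shared AC queue (resp. the
 * aggregation queue) drains before the session can really start
 * (resp. stop).
 */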
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive ADDBA response
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv,
			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			 sta_id, tid, tid_data->agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv,
				    "Can't proceed: ssn %d, next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);

turn_off:
	agg_state = tid_data->agg.state;
	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop now. This can happen
		 * when we don't get the addBA response on time, or didn't
		 * have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id, ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
	if (txq_id < 0) {
		IWL_DEBUG_TX_QUEUES(priv,
			"No free aggregation queue for %pM/%d\n",
			sta->addr, tid);
		return txq_id;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_bh(&priv->sta_lock);
	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;

	*ssn = tid_data->agg.ssn;

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_bh(&priv->sta_lock);

	return ret;
}

int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	enum iwl_agg_state agg_state;
	int sta_id, txq_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
	 */
	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;
	agg_state = tid_data->agg.state;
	IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
			    sta_id, tid, txq_id, tid_data->agg.state);

	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
		IWL_ERR(priv, "Couldn't flush the AGG queue\n");

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop now. This can happen
		 * when we don't get the addBA response on time, or didn't
		 * have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	return 0;
}

int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
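	/*
	 * buf_size is the reorder-buffer size the peer advertised in its
	 * addBA response (at most 64 frames for HT); it is clamped to the
	 * driver's default link-quality aggregation frame limit before
	 * being handed to the transport and the link quality command.
	 */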

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn, 0);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}

static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			iwl_trans_txq_disable(priv->trans,
					      tid_data->agg.txq_id, true);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}

static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}
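
/*
 * The uCode appends the scheduler's sequence number as a __le32
 * immediately after the variable-length per-frame status array
 * (i.e. at status[frame_count]); masking with IEEE80211_MAX_SN
 * keeps the 12-bit sequence number.
 */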
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}

static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		  IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		     IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
				   "got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
			   agg->txq_id,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);
		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
			IWL_DEBUG_TX_REPLY(priv,
					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
					   i,
					   iwl_get_agg_tx_fail_reason(fstatus),
					   fstatus & AGG_TX_STATUS_MSK,
					   retry_cnt);
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
	AGG_TX_STATE_FAIL(UNDERRUN_MSK);
	AGG_TX_STATE_FAIL(BT_PRIO_MSK);
	AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
	AGG_TX_STATE_FAIL(ABORT_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
	AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
	AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
	AGG_TX_STATE_FAIL(RESPONSE_MSK);
	AGG_TX_STATE_FAIL(DUMP_TX_MSK);
	AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}

static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}

static void iwl_check_abort_status(struct iwl_priv *priv,
				   u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
	      IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		 IWLAGN_TX_RES_RA_POS;

	spin_lock_bh(&priv->sta_lock);

	if (is_agg) {
		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
			     tid >= IWL_MAX_TID_COUNT);
		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
				priv->tid_data[sta_id][tid].agg.txq_id);
		iwl_rx_reply_tx_agg(priv, tx_resp);
	}

	__skb_queue_head_init(&skbs);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
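		/*
		 * seq_ctl carries the sequence control of the frame this
		 * response refers to; adding 0x10 (one SN step) before
		 * converting yields the SN right after it, the next frame
		 * to reclaim in the non-aggregation case.
		 */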

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

		iwlagn_check_ratid_empty(priv, sta_id, tid);

		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv,
					"stop all queues: passive channel\n");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
					     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		if (!is_agg && freed != 1)
			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
				   iwl_get_tx_fail_reason(status), status);

		IWL_DEBUG_TX_REPLY(priv,
				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame,
				   SEQ_TO_INDEX(sequence), ssn,
				   le16_to_cpu(tx_resp->seq_ctl));
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	return 0;
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_cmd_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_bh(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can possibly happen very often and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
			"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
			scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
				   "bogus sent(%d) and ack(%d) count\n",
				   ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed = txed_2_done,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);

	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	return 0;
}