/* rsi_91x_core.c */
/**
 * Copyright (c) 2014 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_mgmt.h"
#include "rsi_common.h"
#include "rsi_hal.h"
#include "rsi_coex.h"
  20. /**
  21. * rsi_determine_min_weight_queue() - This function determines the queue with
  22. * the min weight.
  23. * @common: Pointer to the driver private structure.
  24. *
  25. * Return: q_num: Corresponding queue number.
  26. */
  27. static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
  28. {
  29. struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
  30. u32 q_len = 0;
  31. u8 ii = 0;
  32. for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
  33. q_len = skb_queue_len(&common->tx_queue[ii]);
  34. if ((tx_qinfo[ii].pkt_contended) && q_len) {
  35. common->min_weight = tx_qinfo[ii].weight;
  36. break;
  37. }
  38. }
  39. return ii;
  40. }
  41. /**
  42. * rsi_recalculate_weights() - This function recalculates the weights
  43. * corresponding to each queue.
  44. * @common: Pointer to the driver private structure.
  45. *
  46. * Return: recontend_queue bool variable
  47. */
  48. static bool rsi_recalculate_weights(struct rsi_common *common)
  49. {
  50. struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
  51. bool recontend_queue = false;
  52. u8 ii = 0;
  53. u32 q_len = 0;
  54. for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
  55. q_len = skb_queue_len(&common->tx_queue[ii]);
  56. /* Check for the need of contention */
  57. if (q_len) {
  58. if (tx_qinfo[ii].pkt_contended) {
  59. tx_qinfo[ii].weight =
  60. ((tx_qinfo[ii].weight > common->min_weight) ?
  61. tx_qinfo[ii].weight - common->min_weight : 0);
  62. } else {
  63. tx_qinfo[ii].pkt_contended = 1;
  64. tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
  65. recontend_queue = true;
  66. }
  67. } else { /* No packets so no contention */
  68. tx_qinfo[ii].weight = 0;
  69. tx_qinfo[ii].pkt_contended = 0;
  70. }
  71. }
  72. return recontend_queue;
  73. }
/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *				packets to be dequeued based on the number
 *				of bytes calculated using txop.
 *
 * @common: Pointer to the driver private structure.
 * @q_num: the queue from which pkts have to be dequeued
 *
 * Return: pkt_num: Number of pkts to be dequeued (0 if the queue is empty).
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
	/* txop is configured in units of 32 us; convert to microseconds. */
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	__le16 r_txop;
	struct ieee80211_rate rate;
	struct ieee80211_hdr *wh;
	struct ieee80211_vif *vif;

	rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */

	/* NOTE(review): scales the video-queue budget by 32/80 —
	 * presumably a hardware-specific adjustment; rationale not
	 * visible here.
	 */
	if (q_num == VI_Q)
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

	/* Charge each queued frame's estimated airtime against the txop
	 * budget; count how many frames fit before the budget runs out.
	 */
	do {
		wh = (struct ieee80211_hdr *)skb->data;
		vif = rsi_get_vif(adapter, wh->addr2);
		r_txop = ieee80211_generic_frame_duration(adapter->hw,
							  vif,
							  common->band,
							  skb->len, &rate);
		txop -= le16_to_cpu(r_txop);
		pkt_cnt += 1;
		/* Stop once every packet currently in the queue has been
		 * counted; otherwise step to the next buffer in the list.
		 * (Walks skb->next directly; the count guard above keeps
		 * us off the queue's list head.)
		 */
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;
	} while (txop > 0);

	return pkt_cnt;
}
/**
 * rsi_core_determine_hal_queue() - This function determines the queue from
 *				    which packet has to be dequeued.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number on success,
 *	   INVALID_QUEUE when nothing is eligible for transmission.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
	bool recontend_queue = false;
	u32 q_len = 0;
	u8 q_num = INVALID_QUEUE;
	u8 ii = 0;

	/* Beacons take absolute priority over everything else. */
	if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {
		q_num = MGMT_BEACON_Q;
		return q_num;
	}

	/* Management frames come next; if the mgmt queue is blocked we
	 * still return here (with INVALID_QUEUE) rather than fall through
	 * to data.
	 */
	if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
		if (!common->mgmt_q_block)
			q_num = MGMT_SOFT_Q;
		return q_num;
	}

	if (common->hw_data_qs_blocked)
		return q_num;

	/* A prior txop computation may entitle the previously selected
	 * queue to several back-to-back packets; consume that credit
	 * before re-running the backoff algorithm.
	 */
	if (common->pkt_cnt != 0) {
		--common->pkt_cnt;
		return common->selected_qnum;
	}

get_queue_num:
	recontend_queue = false;

	q_num = rsi_determine_min_weight_queue(common);

	ii = q_num;

	/* Selecting the queue with least back off */
	for (; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if (((common->tx_qinfo[ii].pkt_contended) &&
		     (common->tx_qinfo[ii].weight < common->min_weight)) &&
		    q_len) {
			common->min_weight = common->tx_qinfo[ii].weight;
			q_num = ii;
		}
	}

	/* The winner stops contending for this round. */
	if (q_num < NUM_EDCA_QUEUES)
		common->tx_qinfo[q_num].pkt_contended = 0;

	/* Adjust the back off values for all queues again */
	recontend_queue = rsi_recalculate_weights(common);

	q_len = skb_queue_len(&common->tx_queue[q_num]);
	if (!q_len) {
		/* If any queues are freshly contended and the selected queue
		 * doesn't have any packets
		 * then get the queue number again with fresh values
		 */
		if (recontend_queue)
			goto get_queue_num;

		q_num = INVALID_QUEUE;
		return q_num;
	}

	common->selected_qnum = q_num;
	q_len = skb_queue_len(&common->tx_queue[q_num]);

	/* For voice/video, pre-compute how many packets the txop budget
	 * allows so subsequent calls can short-circuit via pkt_cnt.
	 */
	if (q_num == VO_Q || q_num == VI_Q) {
		common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
		common->pkt_cnt -= 1;
	}

	return q_num;
}
  183. /**
  184. * rsi_core_queue_pkt() - This functions enqueues the packet to the queue
  185. * specified by the queue number.
  186. * @common: Pointer to the driver private structure.
  187. * @skb: Pointer to the socket buffer structure.
  188. *
  189. * Return: None.
  190. */
  191. static void rsi_core_queue_pkt(struct rsi_common *common,
  192. struct sk_buff *skb)
  193. {
  194. u8 q_num = skb->priority;
  195. if (q_num >= NUM_SOFT_QUEUES) {
  196. rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
  197. __func__, q_num);
  198. dev_kfree_skb(skb);
  199. return;
  200. }
  201. skb_queue_tail(&common->tx_queue[q_num], skb);
  202. }
  203. /**
  204. * rsi_core_dequeue_pkt() - This functions dequeues the packet from the queue
  205. * specified by the queue number.
  206. * @common: Pointer to the driver private structure.
  207. * @q_num: Queue number.
  208. *
  209. * Return: Pointer to sk_buff structure.
  210. */
  211. static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
  212. u8 q_num)
  213. {
  214. if (q_num >= NUM_SOFT_QUEUES) {
  215. rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
  216. __func__, q_num);
  217. return NULL;
  218. }
  219. return skb_dequeue(&common->tx_queue[q_num]);
  220. }
/**
 * rsi_core_qos_processor() - This function is used to determine the wmm queue
 *			      based on the backoff procedure. Data packets are
 *			      dequeued from the selected hal queue and sent to
 *			      the below layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}
		if (common->hibernate_resume)
			break;

		mutex_lock(&common->tx_lock);

		/* Bail out if the hardware queue has no room. */
		status = adapter->check_hw_queue_status(adapter, q_num);
		if ((status <= 0)) {
			mutex_unlock(&common->tx_lock);
			break;
		}

		/* Re-wake the mac80211 queue once our software queue has
		 * drained down to the low-water mark.
		 */
		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		      MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			rsi_dbg(ERR_ZONE, "skb null\n");
			mutex_unlock(&common->tx_lock);
			break;
		}

		if (q_num == MGMT_BEACON_Q) {
			/* Beacons bypass the normal mgmt/data send paths. */
			status = rsi_send_pkt_to_bus(common, skb);
			dev_kfree_skb(skb);
		} else {
#ifdef CONFIG_RSI_COEX
			/* With BT coexistence active, route through the
			 * coex arbiter instead of sending directly.
			 */
			if (common->coex_mode > 1) {
				status = rsi_coex_send_pkt(common, skb,
							   RSI_WLAN_Q);
			} else {
#endif
				if (q_num == MGMT_SOFT_Q)
					status = rsi_send_mgmt_pkt(common, skb);
				else
					status = rsi_send_data_pkt(common, skb);
#ifdef CONFIG_RSI_COEX
			}
#endif
		}

		if (status) {
			mutex_unlock(&common->tx_lock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_lock);

		/* Yield the CPU if this loop has run for ~300 ms. */
		if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
			schedule();
	}
}
  296. struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr)
  297. {
  298. int i;
  299. for (i = 0; i < common->max_stations; i++) {
  300. if (!common->stations[i].sta)
  301. continue;
  302. if (!(memcmp(common->stations[i].sta->addr,
  303. mac_addr, ETH_ALEN)))
  304. return &common->stations[i];
  305. }
  306. return NULL;
  307. }
  308. struct ieee80211_vif *rsi_get_vif(struct rsi_hw *adapter, u8 *mac)
  309. {
  310. struct ieee80211_vif *vif;
  311. int i;
  312. for (i = 0; i < RSI_MAX_VIFS; i++) {
  313. vif = adapter->vifs[i];
  314. if (!vif)
  315. continue;
  316. if (!memcmp(vif->addr, mac, ETH_ALEN))
  317. return vif;
  318. }
  319. return NULL;
  320. }
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Classifies the frame (mgmt vs. data), prepares the firmware descriptor,
 * and enqueues it on the appropriate software queue; frees the skb on any
 * failure path.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *wh = NULL;
	struct ieee80211_vif *vif;
	u8 q_num, tid = 0;
	struct rsi_sta *rsta = NULL;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}
	if (common->wow_flags & RSI_WOW_ENABLED) {
		rsi_dbg(ERR_ZONE,
			"%s: Blocking Tx_packets when WOWLAN is enabled\n",
			__func__);
		goto xmit_fail;
	}

	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	/* info->driver_data and wh may be zero-initialized here; addr2 is
	 * the transmitter address used to resolve the owning vif.
	 */
	wh = (struct ieee80211_hdr *)&skb->data[0];
	tx_params->sta_id = 0;

	vif = rsi_get_vif(adapter, wh->addr2);
	if (!vif)
		goto xmit_fail;
	tx_params->vif = vif;
	tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;

	if ((ieee80211_is_mgmt(wh->frame_control)) ||
	    (ieee80211_is_ctl(wh->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(wh->frame_control))) {
		/* (Re)association: tell firmware the station is connecting
		 * and reset the EAPOL-4 confirmation state.
		 */
		if (ieee80211_is_assoc_req(wh->frame_control) ||
		    ieee80211_is_reassoc_req(wh->frame_control)) {
			struct ieee80211_bss_conf *bss = &vif->bss_conf;

			common->eapol4_confirm = false;
			rsi_hal_send_sta_notify_frame(common,
						      RSI_IFTYPE_STATION,
						      STA_CONNECTED, bss->bssid,
						      bss->qos, bss->aid, 0,
						      vif);
		}

		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;

		if (rsi_prepare_mgmt_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
			goto xmit_fail;
		}
	} else {
		/* Data frame: map the QoS TID to a WME access class, or
		 * fall back to best effort for non-QoS data.
		 */
		if (ieee80211_is_data_qos(wh->frame_control)) {
			u8 *qos = ieee80211_get_qos_ctl(wh);

			tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}

		q_num = skb->priority;
		tx_params->tid = tid;

		/* In AP/GO mode, unicast frames need the destination
		 * station's firmware slot id.
		 */
		if (((vif->type == NL80211_IFTYPE_AP) ||
		     (vif->type == NL80211_IFTYPE_P2P_GO)) &&
		    (!is_broadcast_ether_addr(wh->addr1)) &&
		    (!is_multicast_ether_addr(wh->addr1))) {
			rsta = rsi_find_sta(common, wh->addr1);
			if (!rsta)
				goto xmit_fail;
			tx_params->sta_id = rsta->sta_id;
		} else {
			tx_params->sta_id = 0;
		}

		if (rsta) {
			/* Start aggregation if not done for this tid */
			if (!rsta->start_tx_aggr[tid]) {
				rsta->start_tx_aggr[tid] = true;
				ieee80211_start_tx_ba_session(rsta->sta,
							      tid, 0);
			}
		}

		/* EAPOL frames travel on the management queue so they are
		 * prioritized during the 4-way handshake.
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
			q_num = MGMT_SOFT_Q;
			skb->priority = q_num;
		}

		if (rsi_prepare_data_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
			goto xmit_fail;
		}
	}

	/* Throttle mac80211 when the software queue is about to overflow;
	 * the skb itself is dropped via xmit_fail.
	 */
	if ((q_num < MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}
  436. }