bh.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * Based on:
 * ST-Ericsson UMAC CW1200 driver, which is
 * Copyright (c) 2010, ST-Ericsson
 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
 */

#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>

#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"

static int cw1200_bh(void *arg);

#define DOWNLOAD_BLOCK_SIZE_WR  (0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1) * 2 bytes
 * ("* 2" to convert to bytes).
 */
#define MAX_SZ_RD_WR_BUFFERS    (DOWNLOAD_BLOCK_SIZE_WR * 2)
#define PIGGYBACK_CTRL_REG      (2)
#define EFFECTIVE_BUF_SIZE      (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)

/* Suspend state privates */
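/*
 * Host suspend is hand-shaken with the BH loop through bh_suspend:
 * cw1200_bh_suspend()/cw1200_bh_resume() set CW1200_BH_SUSPEND or
 * CW1200_BH_RESUME and wait on bh_evt_wq until the loop acknowledges
 * with CW1200_BH_SUSPENDED or CW1200_BH_RESUMED.
 */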
enum cw1200_bh_pm_state {
        CW1200_BH_RESUMED = 0,
        CW1200_BH_SUSPEND,
        CW1200_BH_SUSPENDED,
        CW1200_BH_RESUME,
};

typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
                                  u8 *data, size_t size);

static void cw1200_bh_work(struct work_struct *work)
{
        struct cw1200_common *priv =
                container_of(work, struct cw1200_common, bh_work);
        cw1200_bh(priv);
}

int cw1200_register_bh(struct cw1200_common *priv)
{
        int err = 0;

        /* Realtime workqueue */
        priv->bh_workqueue = alloc_workqueue("cw1200_bh",
                                             WQ_MEM_RECLAIM | WQ_HIGHPRI
                                             | WQ_CPU_INTENSIVE, 1);
        if (!priv->bh_workqueue)
                return -ENOMEM;

        INIT_WORK(&priv->bh_work, cw1200_bh_work);

        pr_debug("[BH] register.\n");

        atomic_set(&priv->bh_rx, 0);
        atomic_set(&priv->bh_tx, 0);
        atomic_set(&priv->bh_term, 0);
        atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
        priv->bh_error = 0;
        priv->hw_bufs_used = 0;
        priv->buf_id_tx = 0;
        priv->buf_id_rx = 0;
        init_waitqueue_head(&priv->bh_wq);
        init_waitqueue_head(&priv->bh_evt_wq);

        err = !queue_work(priv->bh_workqueue, &priv->bh_work);
        WARN_ON(err);
        return err;
}

void cw1200_unregister_bh(struct cw1200_common *priv)
{
        atomic_add(1, &priv->bh_term);
        wake_up(&priv->bh_wq);

        flush_workqueue(priv->bh_workqueue);

        destroy_workqueue(priv->bh_workqueue);
        priv->bh_workqueue = NULL;

        pr_debug("[BH] unregistered.\n");
}
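/*
 * Called from the bus driver's interrupt path with the hwbus lock held.
 * The device interrupt is masked here and re-enabled by the BH loop once
 * the event has been serviced; the atomic_add_return() == 1 check avoids
 * redundant wake-ups when a request is already pending.
 */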
void cw1200_irq_handler(struct cw1200_common *priv)
{
        pr_debug("[BH] irq.\n");

        /* Disable Interrupts! */
        /* NOTE: hwbus_ops->lock already held */
        __cw1200_irq_enable(priv, 0);

        if (/* WARN_ON */(priv->bh_error))
                return;

        if (atomic_add_return(1, &priv->bh_rx) == 1)
                wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);

void cw1200_bh_wakeup(struct cw1200_common *priv)
{
        pr_debug("[BH] wakeup.\n");
        if (priv->bh_error) {
                pr_err("[BH] wakeup failed (BH error)\n");
                return;
        }

        if (atomic_add_return(1, &priv->bh_tx) == 1)
                wake_up(&priv->bh_wq);
}

int cw1200_bh_suspend(struct cw1200_common *priv)
{
        pr_debug("[BH] suspend.\n");
        if (priv->bh_error) {
                wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
                return -EINVAL;
        }

        atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
        wake_up(&priv->bh_wq);
        return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
                (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
                1 * HZ) ? 0 : -ETIMEDOUT;
}

int cw1200_bh_resume(struct cw1200_common *priv)
{
        pr_debug("[BH] resume.\n");
        if (priv->bh_error) {
                wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
                return -EINVAL;
        }

        atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
        wake_up(&priv->bh_wq);
        return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
                (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
                1 * HZ) ? 0 : -ETIMEDOUT;
}

static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
        ++priv->hw_bufs_used;
}
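/*
 * Return hardware buffers to the pool.  Returns 1 when the device had been
 * at its buffer limit (so the caller should kick TX again), -1 on a
 * counting underflow, and 0 otherwise; waiters on bh_evt_wq are woken once
 * all buffers have drained.
 */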
int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
        int ret = 0;
        int hw_bufs_used = priv->hw_bufs_used;

        priv->hw_bufs_used -= count;
        if (WARN_ON(priv->hw_bufs_used < 0))
                ret = -1;
        else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
                ret = 1;
        if (!priv->hw_bufs_used)
                wake_up(&priv->bh_evt_wq);
        return ret;
}
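/* Read the control register, retrying once before reporting a failure. */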
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
                                   u16 *ctrl_reg)
{
        int ret;

        ret = cw1200_reg_read_16(priv,
                                 ST90TDS_CONTROL_REG_ID, ctrl_reg);
        if (ret) {
                ret = cw1200_reg_read_16(priv,
                                         ST90TDS_CONTROL_REG_ID, ctrl_reg);
                if (ret)
                        pr_err("[BH] Failed to read control register.\n");
        }

        return ret;
}
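/*
 * Wake the device: program the DPLL for the reference clock, set the
 * WLAN_UP (WUP) bit and then sample WLAN_RDY.  Returns 1 if the device
 * reports ready, 0 if the wakeup is still in progress and the caller must
 * try again later, or a negative error code.
 */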
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
        u16 ctrl_reg;
        int ret;

        pr_debug("[BH] Device wakeup.\n");

        /* First, set the dpll register */
        ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
                                  cw1200_dpll_from_clk(priv->hw_refclk));
        if (WARN_ON(ret))
                return ret;

        /* To force the device to be always-on, the host sets WLAN_UP to 1 */
        ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
                                  ST90TDS_CONT_WUP_BIT);
        if (WARN_ON(ret))
                return ret;

        ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
        if (WARN_ON(ret))
                return ret;

        /* If the device returns WLAN_RDY as 1, the device is active and will
         * remain active.
         */
        if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
                pr_debug("[BH] Device awake.\n");
                return 1;
        }

        return 0;
}

/* Must be called from the BH thread. */
void cw1200_enable_powersave(struct cw1200_common *priv,
                             bool enable)
{
        pr_debug("[BH] Powersave is %s.\n",
                 enable ? "enabled" : "disabled");
        priv->powersave_enabled = enable;
}
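/*
 * Receive one WSM frame from the device.  The frame length comes from the
 * control register; the read is padded so the device can piggy-back the
 * next control-register value at the end of the buffer, which is handed
 * back to the caller through *ctrl_reg.  TX-confirm indications release a
 * hardware buffer and set *tx so the main loop schedules another transmit.
 * Returns 0 on success (including "nothing to read") and -1 on error.
 */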
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
                               uint16_t *ctrl_reg,
                               int *tx)
{
        size_t read_len = 0;
        struct sk_buff *skb_rx = NULL;
        struct wsm_hdr *wsm;
        size_t wsm_len;
        u16 wsm_id;
        u8 wsm_seq;
        int rx_resync = 1;

        size_t alloc_len;
        u8 *data;

        read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
        if (!read_len)
                return 0; /* No more work */

        if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
                    (read_len > EFFECTIVE_BUF_SIZE))) {
                pr_debug("Invalid read len: %zu (%04x)",
                         read_len, *ctrl_reg);
                goto err;
        }

        /* Add SIZE of PIGGYBACK reg (CONTROL Reg)
         * to the NEXT Message length + 2 Bytes for SKB
         */
        read_len = read_len + 2;

        alloc_len = priv->hwbus_ops->align_size(
                priv->hwbus_priv, read_len);

        /* Check if not exceeding CW1200 capabilities */
        if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
                pr_debug("Read aligned len: %zu\n",
                         alloc_len);
        }

        skb_rx = dev_alloc_skb(alloc_len);
        if (WARN_ON(!skb_rx))
                goto err;

        skb_trim(skb_rx, 0);
        skb_put(skb_rx, read_len);
        data = skb_rx->data;
        if (WARN_ON(!data))
                goto err;

        if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
                pr_err("rx blew up, len %zu\n", alloc_len);
                goto err;
        }

        /* Piggyback */
        *ctrl_reg = __le16_to_cpu(
                ((__le16 *)data)[alloc_len / 2 - 1]);

        wsm = (struct wsm_hdr *)data;
        wsm_len = __le16_to_cpu(wsm->len);
        if (WARN_ON(wsm_len > read_len))
                goto err;

        if (priv->wsm_enable_wsm_dumps)
                print_hex_dump_bytes("<-- ",
                                     DUMP_PREFIX_NONE,
                                     data, wsm_len);

        wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
        wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

        skb_trim(skb_rx, wsm_len);

        if (wsm_id == 0x0800) {
                wsm_handle_exception(priv,
                                     &data[sizeof(*wsm)],
                                     wsm_len - sizeof(*wsm));
                goto err;
        } else if (!rx_resync) {
                if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
                        goto err;
        }
        priv->wsm_rx_seq = (wsm_seq + 1) & 7;
        rx_resync = 0;

        if (wsm_id & 0x0400) {
                int rc = wsm_release_tx_buffer(priv, 1);

                if (WARN_ON(rc < 0))
                        return rc;
                else if (rc > 0)
                        *tx = 1;
        }

        /* cw1200_wsm_rx takes care of the SKB lifetime */
        if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
                goto err;

        if (skb_rx) {
                dev_kfree_skb(skb_rx);
                skb_rx = NULL;
        }

        return 0;

err:
        if (skb_rx) {
                dev_kfree_skb(skb_rx);
                skb_rx = NULL;
        }
        return -1;
}
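/*
 * Transmit one frame queued by the WSM layer.  Wakes the device first if it
 * is allowed to sleep, claims a hardware buffer, stamps the outgoing WSM
 * sequence number into the header and writes the (bus-aligned) frame out.
 * Returns 1 if a TX burst is in progress and more frames should follow,
 * 0 when there is nothing (more) to send, and a negative value on error.
 */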
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
                               int *pending_tx,
                               int *tx_burst)
{
        size_t tx_len;
        u8 *data;
        int ret;
        struct wsm_hdr *wsm;

        if (priv->device_can_sleep) {
                ret = cw1200_device_wakeup(priv);
                if (WARN_ON(ret < 0)) { /* Error in wakeup */
                        *pending_tx = 1;
                        return 0;
                } else if (ret) { /* Woke up */
                        priv->device_can_sleep = false;
                } else { /* Did not awake */
                        *pending_tx = 1;
                        return 0;
                }
        }

        wsm_alloc_tx_buffer(priv);
        ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
        if (ret <= 0) {
                wsm_release_tx_buffer(priv, 1);
                if (WARN_ON(ret < 0))
                        return ret; /* Error */
                return 0; /* No work */
        }

        wsm = (struct wsm_hdr *)data;
        BUG_ON(tx_len < sizeof(*wsm));
        BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

        atomic_add(1, &priv->bh_tx);

        tx_len = priv->hwbus_ops->align_size(
                priv->hwbus_priv, tx_len);

        /* Check if not exceeding CW1200 capabilities */
        if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
                pr_debug("Write aligned len: %zu\n", tx_len);

        wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
        wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

        if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
                pr_err("tx blew up, len %zu\n", tx_len);
                wsm_release_tx_buffer(priv, 1);
                return -1; /* Error */
        }

        if (priv->wsm_enable_wsm_dumps)
                print_hex_dump_bytes("--> ",
                                     DUMP_PREFIX_NONE,
                                     data,
                                     __le16_to_cpu(wsm->len));

        wsm_txed(priv, data);
        priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

        if (*tx_burst > 1) {
                cw1200_debug_tx_burst(priv);
                return 1; /* Work remains */
        }

        return 0;
}
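/*
 * The BH ("bottom half") loop itself.  It sleeps on bh_wq until the IRQ
 * handler, a TX request, termination or a suspend request wakes it, using a
 * 1 s timeout whenever frames are outstanding so lost interrupts can be
 * detected.  Each pass drains pending RX (following the piggy-backed control
 * register), then transmits as long as the device still has input buffers,
 * and finally re-enables the device interrupt.  On a fatal error it sets
 * bh_error and exits.
 */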
static int cw1200_bh(void *arg)
{
        struct cw1200_common *priv = arg;
        int rx, tx, term, suspend;
        u16 ctrl_reg = 0;
        int tx_allowed;
        int pending_tx = 0;
        int tx_burst;
        long status;
        u32 dummy;
        int ret;

        for (;;) {
                if (!priv->hw_bufs_used &&
                    priv->powersave_enabled &&
                    !priv->device_can_sleep &&
                    !atomic_read(&priv->recent_scan)) {
                        status = 1 * HZ;
                        pr_debug("[BH] Device wakedown. No data.\n");
                        cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
                        priv->device_can_sleep = true;
                } else if (priv->hw_bufs_used) {
                        /* Interrupt loss detection */
                        status = 1 * HZ;
                } else {
                        status = MAX_SCHEDULE_TIMEOUT;
                }

                /* Dummy Read for SDIO retry mechanism */
                if ((priv->hw_type != -1) &&
                    (atomic_read(&priv->bh_rx) == 0) &&
                    (atomic_read(&priv->bh_tx) == 0))
                        cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
                                        &dummy, sizeof(dummy));

                pr_debug("[BH] waiting ...\n");
                status = wait_event_interruptible_timeout(priv->bh_wq, ({
                        rx = atomic_xchg(&priv->bh_rx, 0);
                        tx = atomic_xchg(&priv->bh_tx, 0);
                        term = atomic_xchg(&priv->bh_term, 0);
                        suspend = pending_tx ?
                                0 : atomic_read(&priv->bh_suspend);
                        (rx || tx || term || suspend || priv->bh_error);
                }), status);

                pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
                         rx, tx, term, suspend, priv->bh_error, status);

                /* Did an error occur? */
                if ((status < 0 && status != -ERESTARTSYS) ||
                    term || priv->bh_error) {
                        break;
                }
                if (!status) {  /* wait_event timed out */
                        unsigned long timestamp = jiffies;
                        long timeout;
                        int pending = 0;
                        int i;

                        /* Check to see if we have any outstanding frames */
                        if (priv->hw_bufs_used && (!rx || !tx)) {
                                wiphy_warn(priv->hw->wiphy,
                                           "Missed interrupt? (%d frames outstanding)\n",
                                           priv->hw_bufs_used);
                                rx = 1;

                                /* Get a timestamp of "oldest" frame */
                                for (i = 0; i < 4; ++i)
                                        pending += cw1200_queue_get_xmit_timestamp(
                                                &priv->tx_queue[i],
                                                &timestamp,
                                                priv->pending_frame_id);

                                /* Check if frame transmission is timed out.
                                 * Add an extra second with respect to possible
                                 * interrupt loss.
                                 */
                                timeout = timestamp +
                                        WSM_CMD_LAST_CHANCE_TIMEOUT +
                                        1 * HZ -
                                        jiffies;

                                /* And terminate BH thread if the frame is "stuck" */
                                if (pending && timeout < 0) {
                                        wiphy_warn(priv->hw->wiphy,
                                                   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
                                                   priv->hw_bufs_used, pending,
                                                   timestamp, jiffies);
                                        break;
                                }
                        } else if (!priv->device_can_sleep &&
                                   !atomic_read(&priv->recent_scan)) {
                                pr_debug("[BH] Device wakedown. Timeout.\n");
                                cw1200_reg_write_16(priv,
                                                    ST90TDS_CONTROL_REG_ID, 0);
                                priv->device_can_sleep = true;
                        }
                        goto done;
                } else if (suspend) {
                        pr_debug("[BH] Device suspend.\n");
                        if (priv->powersave_enabled) {
                                pr_debug("[BH] Device wakedown. Suspend.\n");
                                cw1200_reg_write_16(priv,
                                                    ST90TDS_CONTROL_REG_ID, 0);
                                priv->device_can_sleep = true;
                        }

                        atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
                        wake_up(&priv->bh_evt_wq);
                        status = wait_event_interruptible(priv->bh_wq,
                                CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
                        if (status < 0) {
                                wiphy_err(priv->hw->wiphy,
                                          "Failed to wait for resume: %ld.\n",
                                          status);
                                break;
                        }
                        pr_debug("[BH] Device resume.\n");
                        atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
                        wake_up(&priv->bh_evt_wq);
                        atomic_add(1, &priv->bh_rx);
                        goto done;
                }

        rx:
                tx += pending_tx;
                pending_tx = 0;

                if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
                        break;

                /* Don't bother trying to rx unless we have data to read */
                if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
                        ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
                        if (ret < 0)
                                break;
                        /* Double up here if there's more data.. */
                        if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
                                ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
                                if (ret < 0)
                                        break;
                        }
                }

        tx:
                if (tx) {
                        tx = 0;

                        BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
                        tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
                        tx_allowed = tx_burst > 0;

                        if (!tx_allowed) {
                                /* Buffers full. Ensure we process tx
                                 * after we handle rx..
                                 */
                                pending_tx = tx;
                                goto done_rx;
                        }

                        ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
                        if (ret < 0)
                                break;
                        if (ret > 0) /* More to transmit */
                                tx = ret;

                        /* Re-read ctrl reg */
                        if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
                                break;
                }

        done_rx:
                if (priv->bh_error)
                        break;
                if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
                        goto rx;
                if (tx)
                        goto tx;

        done:
                /* Re-enable device interrupts */
                priv->hwbus_ops->lock(priv->hwbus_priv);
                __cw1200_irq_enable(priv, 1);
                priv->hwbus_ops->unlock(priv->hwbus_priv);
        }

        /* Explicitly disable device interrupts */
        priv->hwbus_ops->lock(priv->hwbus_priv);
        __cw1200_irq_enable(priv, 0);
        priv->hwbus_ops->unlock(priv->hwbus_priv);

        if (!term) {
                pr_err("[BH] Fatal error, exiting.\n");
                priv->bh_error = 1;
                /* TODO: schedule_work(recovery) */
        }
        return 0;
}