mcu.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * (c) Copyright 2002-2010, Ralink Technology, Inc.
  4. * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
  5. * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/firmware.h>
  9. #include <linux/delay.h>
  10. #include <linux/usb.h>
  11. #include <linux/skbuff.h>
  12. #include "mt7601u.h"
  13. #include "dma.h"
  14. #include "mcu.h"
  15. #include "usb.h"
  16. #include "trace.h"
  17. #define MCU_FW_URB_MAX_PAYLOAD 0x3800
  18. #define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
  19. #define MCU_RESP_URB_SIZE 1024
  20. static inline int firmware_running(struct mt7601u_dev *dev)
  21. {
  22. return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
  23. }
  24. static inline void skb_put_le32(struct sk_buff *skb, u32 val)
  25. {
  26. put_unaligned_le32(val, skb_put(skb, 4));
  27. }
/* Prepend the DMA command header to @skb, stamping sequence number @seq
 * and command type @cmd into the descriptor word.  A failure of the wrap
 * helper is unexpected and only triggers a WARN.
 */
static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
					    u8 seq, enum mcu_cmd cmd)
{
	WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
				     FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
				     FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
}
  35. static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
  36. struct sk_buff *skb, bool need_resp)
  37. {
  38. u32 i, csum = 0;
  39. for (i = 0; i < skb->len / 4; i++)
  40. csum ^= get_unaligned_le32(skb->data + i * 4);
  41. trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
  42. }
  43. static struct sk_buff *mt7601u_mcu_msg_alloc(const void *data, int len)
  44. {
  45. struct sk_buff *skb;
  46. WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
  47. skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
  48. if (skb) {
  49. skb_reserve(skb, MT_DMA_HDR_LEN);
  50. skb_put_data(skb, data, len);
  51. }
  52. return skb;
  53. }
/* Wait for the MCU to acknowledge command @seq on the response endpoint.
 *
 * Up to 5 attempts, each with a 300 ms completion timeout.  On every
 * wakeup the response descriptor word is copied out first and the
 * response urb is immediately resubmitted, so the endpoint is never left
 * without a posted buffer.  Returns 0 on a matching CMD_DONE event,
 * the resubmit error, or -ETIMEDOUT when all attempts are exhausted.
 */
static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
{
	struct urb *urb = dev->mcu.resp.urb;
	u32 rxfce;
	int urb_status, ret, i = 5;

	while (i--) {
		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
						 msecs_to_jiffies(300))) {
			dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
			continue;
		}

		/* Make copies of important data before reusing the urb */
		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
		/* Zero when the urb completed cleanly, urb->status otherwise */
		urb_status = urb->status * mt7601u_urb_has_error(urb);

		ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
					     &dev->mcu.resp, GFP_KERNEL,
					     mt7601u_complete_urb,
					     &dev->mcu.resp_cmpl);
		if (ret)
			return ret;

		if (urb_status)
			dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
				urb_status);

		/* Done when the event carries our sequence number. */
		if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
		    FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
			return 0;

		dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
			FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "Error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}
  87. static int
  88. mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
  89. enum mcu_cmd cmd, bool wait_resp)
  90. {
  91. struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
  92. unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
  93. dev->out_eps[MT_EP_OUT_INBAND_CMD]);
  94. int sent, ret;
  95. u8 seq = 0;
  96. if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
  97. return 0;
  98. mutex_lock(&dev->mcu.mutex);
  99. if (wait_resp)
  100. while (!seq)
  101. seq = ++dev->mcu.msg_seq & 0xf;
  102. mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
  103. if (dev->mcu.resp_cmpl.done)
  104. dev_err(dev->dev, "Error: MCU response pre-completed!\n");
  105. trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
  106. trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
  107. ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
  108. if (ret) {
  109. dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
  110. goto out;
  111. }
  112. if (sent != skb->len)
  113. dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
  114. if (wait_resp)
  115. ret = mt7601u_mcu_wait_resp(dev, seq);
  116. out:
  117. mutex_unlock(&dev->mcu.mutex);
  118. consume_skb(skb);
  119. return ret;
  120. }
  121. static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
  122. enum mcu_function func, u32 val)
  123. {
  124. struct sk_buff *skb;
  125. struct {
  126. __le32 id;
  127. __le32 value;
  128. } __packed __aligned(4) msg = {
  129. .id = cpu_to_le32(func),
  130. .value = cpu_to_le32(val),
  131. };
  132. skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
  133. if (!skb)
  134. return -ENOMEM;
  135. return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
  136. }
  137. int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
  138. {
  139. int ret;
  140. if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
  141. return 0;
  142. ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
  143. use_hvga);
  144. if (ret) {
  145. dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
  146. return ret;
  147. }
  148. dev->tssi_read_trig = true;
  149. return 0;
  150. }
  151. int
  152. mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
  153. {
  154. struct sk_buff *skb;
  155. struct {
  156. __le32 id;
  157. __le32 value;
  158. } __packed __aligned(4) msg = {
  159. .id = cpu_to_le32(cal),
  160. .value = cpu_to_le32(val),
  161. };
  162. skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
  163. if (!skb)
  164. return -ENOMEM;
  165. return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
  166. }
  167. int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
  168. const struct mt76_reg_pair *data, int n)
  169. {
  170. const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
  171. struct sk_buff *skb;
  172. int cnt, i, ret;
  173. if (!n)
  174. return 0;
  175. cnt = min(max_vals_per_cmd, n);
  176. skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
  177. if (!skb)
  178. return -ENOMEM;
  179. skb_reserve(skb, MT_DMA_HDR_LEN);
  180. for (i = 0; i < cnt; i++) {
  181. skb_put_le32(skb, base + data[i].reg);
  182. skb_put_le32(skb, data[i].value);
  183. }
  184. ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
  185. if (ret)
  186. return ret;
  187. return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
  188. }
  189. int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
  190. const u32 *data, int n)
  191. {
  192. const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
  193. struct sk_buff *skb;
  194. int cnt, i, ret;
  195. if (!n)
  196. return 0;
  197. cnt = min(max_regs_per_cmd, n);
  198. skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
  199. if (!skb)
  200. return -ENOMEM;
  201. skb_reserve(skb, MT_DMA_HDR_LEN);
  202. skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
  203. for (i = 0; i < cnt; i++)
  204. skb_put_le32(skb, data[i]);
  205. ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
  206. if (ret)
  207. return ret;
  208. return mt7601u_burst_write_regs(dev, offset + cnt * 4,
  209. data + cnt, n - cnt);
  210. }
/* On-disk firmware image header (all multi-byte fields little-endian). */
struct mt76_fw_header {
	__le32 ilm_len;		/* ILM section length; includes the IVB */
	__le32 dlm_len;		/* DLM section length */
	__le16 build_ver;
	__le16 fw_ver;		/* version nibbles: major.minor.patch */
	u8 pad[4];
	char build_time[16];	/* not NUL-terminated; printed with %.16s */
};
/* In-memory view of the firmware file: header, boot vector (IVB),
 * then the ILM payload immediately followed by the DLM payload.
 */
struct mt76_fw {
	struct mt76_fw_header hdr;
	u8 ivb[MT_MCU_IVB_SIZE];
	u8 ilm[];		/* ILM (without IVB) followed by DLM */
};
/* Upload one firmware chunk (@data, @len bytes) to device address
 * @dst_addr through the shared DMA bounce buffer @dma_buf.
 *
 * The chunk is prefixed with a DMA packet descriptor and zero-padded,
 * the FCE destination address and length are programmed via vendor
 * register writes, then the bulk OUT transfer is submitted and awaited
 * (1 s timeout).  Returns 0 on success, a negative errno, or the urb's
 * error status.
 */
static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
			    const struct mt7601u_dma_buf *dma_buf,
			    const void *data, u32 len, u32 dst_addr)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
	__le32 reg;
	u32 val;
	int ret;

	/* DMA header: packet type, CPU TX port, payload length. */
	reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
			  FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
			  FIELD_PREP(MT_TXD_INFO_LEN, len));
	memcpy(buf.buf, &reg, sizeof(reg));
	memcpy(buf.buf + sizeof(reg), data, len);
	memset(buf.buf + sizeof(reg) + len, 0, 8); /* pad + zero trailer */

	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				       MT_FCE_DMA_ADDR, dst_addr);
	if (ret)
		return ret;
	len = roundup(len, 4);
	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				       MT_FCE_DMA_LEN, len << 16);
	if (ret)
		return ret;

	buf.len = MT_DMA_HDR_LEN + len + 4;
	ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
				     &buf, GFP_KERNEL,
				     mt7601u_complete_urb, &cmpl);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
		dev_err(dev->dev, "Error: firmware upload timed out\n");
		usb_kill_urb(buf.urb);
		return -ETIMEDOUT;
	}
	if (mt7601u_urb_has_error(buf.urb)) {
		dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
			buf.urb->status);
		return buf.urb->status;
	}

	/* Advance the FCE CPU descriptor index to record the sent packet. */
	val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}
  269. static int
  270. mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
  271. const void *data, int len, u32 dst_addr)
  272. {
  273. int n, ret;
  274. if (len == 0)
  275. return 0;
  276. n = min(MCU_FW_URB_MAX_PAYLOAD, len);
  277. ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
  278. if (ret)
  279. return ret;
  280. if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
  281. return -ETIMEDOUT;
  282. return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
  283. }
/* Upload the ILM and DLM firmware sections and start the MCU.
 *
 * The IVB (boot vector) is sent last, via a vendor control request;
 * afterwards we poll up to 1 s for firmware_running() to report success.
 * Returns 0 on success or a negative errno.
 */
static int
mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
{
	struct mt7601u_dma_buf dma_buf;
	void *ivb;
	u32 ilm_len, dlm_len;
	int i, ret;

	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
	if (!ivb)
		return -ENOMEM;
	if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
		ret = -ENOMEM;
		goto error;
	}

	/* hdr.ilm_len counts the IVB, which is not DMA-ed with the ILM. */
	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
	dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
		ilm_len, sizeof(fw->ivb));
	/* ILM lands right after the IVB in the device address space. */
	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
	if (ret)
		goto error;

	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
	dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
			     dlm_len, MT_MCU_DLM_OFFSET);
	if (ret)
		goto error;

	/* NOTE(review): vendor request 0x12 appears to deliver the IVB and
	 * kick off the MCU boot - the running poll below relies on it.
	 */
	ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
				     0x12, 0, ivb, sizeof(fw->ivb));
	if (ret < 0)
		goto error;
	ret = 0;

	for (i = 100; i && !firmware_running(dev); i--)
		msleep(10);
	if (!i) {
		ret = -ETIMEDOUT;
		goto error;
	}

	dev_dbg(dev->dev, "Firmware running!\n");
error:
	kfree(ivb);
	mt7601u_usb_free_buf(dev, &dma_buf);
	return ret;
}
/* Validate the firmware image, prepare the DMA/FCE engines and upload it.
 *
 * If the firmware is already running, only a cache request is issued so
 * the image can be restored after suspend/resume.  Returns 0 on success,
 * -ENOENT for a malformed image, or a negative errno from the firmware
 * loader / upload path.
 */
static int mt7601u_load_firmware(struct mt7601u_dev *dev)
{
	const struct firmware *fw;
	const struct mt76_fw_header *hdr;
	int len, ret;
	u32 val;

	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
					 MT_USB_DMA_CFG_TX_BULK_EN));
	if (firmware_running(dev))
		return firmware_request_cache(dev->dev, MT7601U_FIRMWARE);

	ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
	if (ret)
		return ret;

	/* Sanity checks: header present, ILM bigger than the IVB it
	 * contains, total size exactly header + ILM + DLM.
	 */
	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto err_inv_fw;

	hdr = (const struct mt76_fw_header *) fw->data;

	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
		goto err_inv_fw;

	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto err_inv_fw;

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->dev,
		 "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
		 le16_to_cpu(hdr->build_ver), hdr->build_time);

	len = le32_to_cpu(hdr->ilm_len);

	/* Pre-upload chip init.  NOTE(review): the bare register offsets
	 * below (0x94c, 0xa44, 0x230, 0x400, 0x800) are undocumented vendor
	 * init values - confirm against the reference driver before changing.
	 */
	mt7601u_wr(dev, 0x94c, 0);
	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);

	mt7601u_vendor_reset(dev);
	msleep(5);

	mt7601u_wr(dev, 0xa44, 0);
	mt7601u_wr(dev, 0x230, 0x84210);
	mt7601u_wr(dev, 0x400, 0x80c00);
	mt7601u_wr(dev, 0x800, 1);

	mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
					 MT_PBF_CFG_TX1Q_EN |
					 MT_PBF_CFG_TX2Q_EN |
					 MT_PBF_CFG_TX3Q_EN));

	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);

	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
					 MT_USB_DMA_CFG_TX_BULK_EN));

	/* Pulse the TX FIFO clear bit: set it, then write it back cleared. */
	val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
	val &= ~MT_USB_DMA_CFG_TX_CLR;
	mt7601u_wr(dev, MT_USB_DMA_CFG, val);

	/* FCE tx_fs_base_ptr */
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
	/* FCE pdma enable */
	mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);

	ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);

	release_firmware(fw);

	return ret;

err_inv_fw:
	dev_err(dev->dev, "Invalid firmware image\n");
	release_firmware(fw);
	return -ENOENT;
}
  390. int mt7601u_mcu_init(struct mt7601u_dev *dev)
  391. {
  392. int ret;
  393. mutex_init(&dev->mcu.mutex);
  394. ret = mt7601u_load_firmware(dev);
  395. if (ret)
  396. return ret;
  397. set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
  398. return 0;
  399. }
/* Prepare the MCU command path: issue the initial Q_SELECT command,
 * allocate the response buffer and post it on the response endpoint.
 * Returns 0 or a negative errno; on failure the response buffer is
 * released again.
 */
int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
{
	int ret;

	ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
	if (ret)
		return ret;

	init_completion(&dev->mcu.resp_cmpl);
	if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
		/* free whatever the failed alloc partially set up */
		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
		return -ENOMEM;
	}

	ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
				     &dev->mcu.resp, GFP_KERNEL,
				     mt7601u_complete_urb, &dev->mcu.resp_cmpl);
	if (ret) {
		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
		return ret;
	}

	return 0;
}
/* Tear down the MCU command path: stop the response urb and release its
 * buffer (reverse of mt7601u_mcu_cmd_init()).
 */
void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
{
	usb_kill_urb(dev->mcu.resp.urb);
	mt7601u_usb_free_buf(dev, &dev->mcu.resp);
}