wkup_m3_ipc.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * AMx3 Wkup M3 IPC driver
  4. *
  5. * Copyright (C) 2015 Texas Instruments, Inc.
  6. *
  7. * Dave Gerlach <d-gerlach@ti.com>
  8. */
  9. #include <linux/err.h>
  10. #include <linux/kernel.h>
  11. #include <linux/kthread.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/irq.h>
  14. #include <linux/module.h>
  15. #include <linux/of.h>
  16. #include <linux/omap-mailbox.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/remoteproc.h>
  19. #include <linux/suspend.h>
  20. #include <linux/wkup_m3_ipc.h>
/* AM33XX control-module IPC register bank: 8 registers, reg m at 0x4 + 4*m */
#define AM33XX_CTRL_IPC_REG_COUNT 0x8
#define AM33XX_CTRL_IPC_REG_OFFSET(m) (0x4 + 4 * (m))

/* AM33XX M3_TXEV_EOI register */
#define AM33XX_CONTROL_M3_TXEV_EOI 0x00

#define AM33XX_M3_TXEV_ACK (0x1 << 0)
#define AM33XX_M3_TXEV_ENABLE (0x0 << 0)

/* Power-state commands written to IPC reg 1 for the CM3 firmware */
#define IPC_CMD_DS0 0x4
#define IPC_CMD_STANDBY 0xc
#define IPC_CMD_IDLE 0x10
#define IPC_CMD_RESET 0xe

/* Value written to the IPC registers that are not used by a command */
#define DS_IPC_DEFAULT 0xffffffff

/* Firmware version word (IPC reg 2, low 16 bits) */
#define M3_VERSION_UNKNOWN 0x0000ffff
#define M3_BASELINE_VERSION 0x191

/* Field layout of the CM3 response/status words */
#define M3_STATUS_RESP_MASK (0xffff << 16)
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff

/* MPU-side view of the MPU<->CM3 handshake state machine */
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4
/* Singleton context; set by the boot thread once the CM3 is up, NULL before */
static struct wkup_m3_ipc *m3_ipc_state;

/*
 * Map of CM3 wakeup interrupt numbers to human-readable source names.
 * The final {0, "Unknown"} entry is the fallback when no irq_nr matches.
 */
static const struct wkup_m3_wakeup_src wakeups[] = {
	{.irq_nr = 16, .src = "PRCM"},
	{.irq_nr = 35, .src = "USB0_PHY"},
	{.irq_nr = 36, .src = "USB1_PHY"},
	{.irq_nr = 40, .src = "I2C0"},
	{.irq_nr = 41, .src = "RTC Timer"},
	{.irq_nr = 42, .src = "RTC Alarm"},
	{.irq_nr = 43, .src = "Timer0"},
	{.irq_nr = 44, .src = "Timer1"},
	{.irq_nr = 45, .src = "UART"},
	{.irq_nr = 46, .src = "GPIO0"},
	{.irq_nr = 48, .src = "MPU_WAKE"},
	{.irq_nr = 49, .src = "WDT0"},
	{.irq_nr = 50, .src = "WDT1"},
	{.irq_nr = 51, .src = "ADC_TSC"},
	{.irq_nr = 0, .src = "Unknown"},
};
  60. static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
  61. {
  62. writel(AM33XX_M3_TXEV_ACK,
  63. m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
  64. }
  65. static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
  66. {
  67. writel(AM33XX_M3_TXEV_ENABLE,
  68. m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
  69. }
  70. static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
  71. u32 val, int ipc_reg_num)
  72. {
  73. if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
  74. "ipc register operation out of range"))
  75. return;
  76. writel(val, m3_ipc->ipc_mem_base +
  77. AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
  78. }
  79. static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
  80. int ipc_reg_num)
  81. {
  82. if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
  83. "ipc register operation out of range"))
  84. return 0;
  85. return readl(m3_ipc->ipc_mem_base +
  86. AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
  87. }
  88. static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
  89. {
  90. int val;
  91. val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);
  92. return val & M3_FW_VERSION_MASK;
  93. }
  94. static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
  95. {
  96. struct wkup_m3_ipc *m3_ipc = ipc_data;
  97. struct device *dev = m3_ipc->dev;
  98. int ver = 0;
  99. am33xx_txev_eoi(m3_ipc);
  100. switch (m3_ipc->state) {
  101. case M3_STATE_RESET:
  102. ver = wkup_m3_fw_version_read(m3_ipc);
  103. if (ver == M3_VERSION_UNKNOWN ||
  104. ver < M3_BASELINE_VERSION) {
  105. dev_warn(dev, "CM3 Firmware Version %x not supported\n",
  106. ver);
  107. } else {
  108. dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
  109. }
  110. m3_ipc->state = M3_STATE_INITED;
  111. complete(&m3_ipc->sync_complete);
  112. break;
  113. case M3_STATE_MSG_FOR_RESET:
  114. m3_ipc->state = M3_STATE_INITED;
  115. complete(&m3_ipc->sync_complete);
  116. break;
  117. case M3_STATE_MSG_FOR_LP:
  118. complete(&m3_ipc->sync_complete);
  119. break;
  120. case M3_STATE_UNKNOWN:
  121. dev_warn(dev, "Unknown CM3 State\n");
  122. }
  123. am33xx_txev_enable(m3_ipc);
  124. return IRQ_HANDLED;
  125. }
  126. static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
  127. {
  128. struct device *dev = m3_ipc->dev;
  129. mbox_msg_t dummy_msg = 0;
  130. int ret;
  131. if (!m3_ipc->mbox) {
  132. dev_err(dev,
  133. "No IPC channel to communicate with wkup_m3!\n");
  134. return -EIO;
  135. }
  136. /*
  137. * Write a dummy message to the mailbox in order to trigger the RX
  138. * interrupt to alert the M3 that data is available in the IPC
  139. * registers. We must enable the IRQ here and disable it after in
  140. * the RX callback to avoid multiple interrupts being received
  141. * by the CM3.
  142. */
  143. ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
  144. if (ret < 0) {
  145. dev_err(dev, "%s: mbox_send_message() failed: %d\n",
  146. __func__, ret);
  147. return ret;
  148. }
  149. ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
  150. msecs_to_jiffies(500));
  151. if (!ret) {
  152. dev_err(dev, "MPU<->CM3 sync failure\n");
  153. m3_ipc->state = M3_STATE_UNKNOWN;
  154. return -EIO;
  155. }
  156. mbox_client_txdone(m3_ipc->mbox, 0);
  157. return 0;
  158. }
  159. static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
  160. {
  161. struct device *dev = m3_ipc->dev;
  162. mbox_msg_t dummy_msg = 0;
  163. int ret;
  164. if (!m3_ipc->mbox) {
  165. dev_err(dev,
  166. "No IPC channel to communicate with wkup_m3!\n");
  167. return -EIO;
  168. }
  169. ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
  170. if (ret < 0) {
  171. dev_err(dev, "%s: mbox_send_message() failed: %d\n",
  172. __func__, ret);
  173. return ret;
  174. }
  175. mbox_client_txdone(m3_ipc->mbox, 0);
  176. return 0;
  177. }
  178. static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
  179. {
  180. return ((m3_ipc->state != M3_STATE_RESET) &&
  181. (m3_ipc->state != M3_STATE_UNKNOWN));
  182. }
/* Public functions */
/**
 * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @mem_type: memory type value read directly from emif
 *
 * wkup_m3 must know what memory type is in use to properly suspend
 * and resume.  The value is cached here and programmed into the IPC
 * registers by wkup_m3_prepare_low_power().
 */
static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
{
	m3_ipc->mem_type = mem_type;
}
/**
 * wkup_m3_set_resume_address - Pass wkup_m3 resume address
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @addr: Physical address from which resume code should execute
 *
 * Cached here and written to IPC reg 0 by wkup_m3_prepare_low_power().
 */
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
{
	m3_ipc->resume_addr = (unsigned long)addr;
}
  203. /**
  204. * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
  205. *
  206. * Returns code representing the status of a low power mode transition.
  207. * 0 - Successful transition
  208. * 1 - Failure to transition to low power state
  209. */
  210. static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
  211. {
  212. unsigned int i;
  213. int val;
  214. val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);
  215. i = M3_STATUS_RESP_MASK & val;
  216. i >>= __ffs(M3_STATUS_RESP_MASK);
  217. return i;
  218. }
  219. /**
  220. * wkup_m3_prepare_low_power - Request preparation for transition to
  221. * low power state
  222. * @state: A kernel suspend state to enter, either MEM or STANDBY
  223. *
  224. * Returns 0 if preparation was successful, otherwise returns error code
  225. */
  226. static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
  227. {
  228. struct device *dev = m3_ipc->dev;
  229. int m3_power_state;
  230. int ret = 0;
  231. if (!wkup_m3_is_available(m3_ipc))
  232. return -ENODEV;
  233. switch (state) {
  234. case WKUP_M3_DEEPSLEEP:
  235. m3_power_state = IPC_CMD_DS0;
  236. break;
  237. case WKUP_M3_STANDBY:
  238. m3_power_state = IPC_CMD_STANDBY;
  239. break;
  240. case WKUP_M3_IDLE:
  241. m3_power_state = IPC_CMD_IDLE;
  242. break;
  243. default:
  244. return 1;
  245. }
  246. /* Program each required IPC register then write defaults to others */
  247. wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
  248. wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
  249. wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
  250. wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
  251. wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
  252. wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
  253. wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
  254. wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
  255. m3_ipc->state = M3_STATE_MSG_FOR_LP;
  256. if (state == WKUP_M3_IDLE)
  257. ret = wkup_m3_ping_noirq(m3_ipc);
  258. else
  259. ret = wkup_m3_ping(m3_ipc);
  260. if (ret) {
  261. dev_err(dev, "Unable to ping CM3\n");
  262. return ret;
  263. }
  264. return 0;
  265. }
  266. /**
  267. * wkup_m3_finish_low_power - Return m3 to reset state
  268. *
  269. * Returns 0 if reset was successful, otherwise returns error code
  270. */
  271. static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
  272. {
  273. struct device *dev = m3_ipc->dev;
  274. int ret = 0;
  275. if (!wkup_m3_is_available(m3_ipc))
  276. return -ENODEV;
  277. wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
  278. wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
  279. m3_ipc->state = M3_STATE_MSG_FOR_RESET;
  280. ret = wkup_m3_ping(m3_ipc);
  281. if (ret) {
  282. dev_err(dev, "Unable to ping CM3\n");
  283. return ret;
  284. }
  285. return 0;
  286. }
  287. /**
  288. * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
  289. * @m3_ipc: Pointer to wkup_m3_ipc context
  290. */
  291. static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
  292. {
  293. unsigned int wakeup_src_idx;
  294. int j, val;
  295. val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
  296. wakeup_src_idx = val & M3_WAKE_SRC_MASK;
  297. for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
  298. if (wakeups[j].irq_nr == wakeup_src_idx)
  299. return wakeups[j].src;
  300. }
  301. return wakeups[j].src;
  302. }
/**
 * wkup_m3_set_rtc_only - Set the rtc_only flag
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * NOTE(review): sets the flag on the global m3_ipc_state rather than on
 * @m3_ipc, so it is a no-op until the CM3 boot thread has published the
 * context.  The parameter is effectively unused — confirm intentional.
 */
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		m3_ipc_state->is_rtc_only = true;
}
/* Ops table handed to PM code through wkup_m3_ipc_get()->ops */
static struct wkup_m3_ipc_ops ipc_ops = {
	.set_mem_type = wkup_m3_set_mem_type,
	.set_resume_address = wkup_m3_set_resume_address,
	.prepare_low_power = wkup_m3_prepare_low_power,
	.finish_low_power = wkup_m3_finish_low_power,
	.request_pm_status = wkup_m3_request_pm_status,
	.request_wake_src = wkup_m3_request_wake_src,
	.set_rtc_only = wkup_m3_set_rtc_only,
};
  322. /**
  323. * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
  324. *
  325. * Returns NULL if the wkup_m3 is not yet available, otherwise returns
  326. * pointer to wkup_m3_ipc struct.
  327. */
  328. struct wkup_m3_ipc *wkup_m3_ipc_get(void)
  329. {
  330. if (m3_ipc_state)
  331. get_device(m3_ipc_state->dev);
  332. else
  333. return NULL;
  334. return m3_ipc_state;
  335. }
  336. EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);
/**
 * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
 * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
 *
 * NOTE(review): drops the reference on the global m3_ipc_state, not on
 * @m3_ipc; both name the same object today, but the parameter is
 * effectively unused — confirm intentional.
 */
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		put_device(m3_ipc_state->dev);
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);
  347. static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
  348. {
  349. struct device *dev = m3_ipc->dev;
  350. int ret;
  351. init_completion(&m3_ipc->sync_complete);
  352. ret = rproc_boot(m3_ipc->rproc);
  353. if (ret)
  354. dev_err(dev, "rproc_boot failed\n");
  355. else
  356. m3_ipc_state = m3_ipc;
  357. do_exit(0);
  358. }
  359. static int wkup_m3_ipc_probe(struct platform_device *pdev)
  360. {
  361. struct device *dev = &pdev->dev;
  362. int irq, ret;
  363. phandle rproc_phandle;
  364. struct rproc *m3_rproc;
  365. struct resource *res;
  366. struct task_struct *task;
  367. struct wkup_m3_ipc *m3_ipc;
  368. m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
  369. if (!m3_ipc)
  370. return -ENOMEM;
  371. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  372. m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
  373. if (IS_ERR(m3_ipc->ipc_mem_base)) {
  374. dev_err(dev, "could not ioremap ipc_mem\n");
  375. return PTR_ERR(m3_ipc->ipc_mem_base);
  376. }
  377. irq = platform_get_irq(pdev, 0);
  378. if (!irq) {
  379. dev_err(&pdev->dev, "no irq resource\n");
  380. return -ENXIO;
  381. }
  382. ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
  383. 0, "wkup_m3_txev", m3_ipc);
  384. if (ret) {
  385. dev_err(dev, "request_irq failed\n");
  386. return ret;
  387. }
  388. m3_ipc->mbox_client.dev = dev;
  389. m3_ipc->mbox_client.tx_done = NULL;
  390. m3_ipc->mbox_client.tx_prepare = NULL;
  391. m3_ipc->mbox_client.rx_callback = NULL;
  392. m3_ipc->mbox_client.tx_block = false;
  393. m3_ipc->mbox_client.knows_txdone = false;
  394. m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
  395. if (IS_ERR(m3_ipc->mbox)) {
  396. dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
  397. PTR_ERR(m3_ipc->mbox));
  398. return PTR_ERR(m3_ipc->mbox);
  399. }
  400. if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
  401. dev_err(&pdev->dev, "could not get rproc phandle\n");
  402. ret = -ENODEV;
  403. goto err_free_mbox;
  404. }
  405. m3_rproc = rproc_get_by_phandle(rproc_phandle);
  406. if (!m3_rproc) {
  407. dev_err(&pdev->dev, "could not get rproc handle\n");
  408. ret = -EPROBE_DEFER;
  409. goto err_free_mbox;
  410. }
  411. m3_ipc->rproc = m3_rproc;
  412. m3_ipc->dev = dev;
  413. m3_ipc->state = M3_STATE_RESET;
  414. m3_ipc->ops = &ipc_ops;
  415. /*
  416. * Wait for firmware loading completion in a thread so we
  417. * can boot the wkup_m3 as soon as it's ready without holding
  418. * up kernel boot
  419. */
  420. task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc,
  421. "wkup_m3_rproc_loader");
  422. if (IS_ERR(task)) {
  423. dev_err(dev, "can't create rproc_boot thread\n");
  424. ret = PTR_ERR(task);
  425. goto err_put_rproc;
  426. }
  427. return 0;
  428. err_put_rproc:
  429. rproc_put(m3_rproc);
  430. err_free_mbox:
  431. mbox_free_channel(m3_ipc->mbox);
  432. return ret;
  433. }
/*
 * Tear down in reverse probe order: release the mailbox channel, stop
 * and release the CM3 remoteproc, and clear the global handle.
 */
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
	mbox_free_channel(m3_ipc_state->mbox);

	rproc_shutdown(m3_ipc_state->rproc);
	rproc_put(m3_ipc_state->rproc);

	m3_ipc_state = NULL;

	return 0;
}
static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
{
	/*
	 * Nothing needs to be done on suspend even with rtc_only flag set
	 */
	return 0;
}
static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
{
	/*
	 * After an RTC-only suspend the CM3 has lost its state, so it must
	 * be rebooted.  NOTE(review): this dereferences m3_ipc_state
	 * unconditionally, i.e. assumes the boot thread has published it
	 * before any resume can run — confirm that ordering holds.
	 */
	if (m3_ipc_state->is_rtc_only) {
		rproc_shutdown(m3_ipc_state->rproc);
		rproc_boot(m3_ipc_state->rproc);
	}

	m3_ipc_state->is_rtc_only = false;

	return 0;
}
static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
};

/* Matches the wkup_m3 IPC node on AM335x and AM437x devicetrees */
static const struct of_device_id wkup_m3_ipc_of_match[] = {
	{ .compatible = "ti,am3352-wkup-m3-ipc", },
	{ .compatible = "ti,am4372-wkup-m3-ipc", },
	{},
};
MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);

static struct platform_driver wkup_m3_ipc_driver = {
	.probe = wkup_m3_ipc_probe,
	.remove = wkup_m3_ipc_remove,
	.driver = {
		.name = "wkup_m3_ipc",
		.of_match_table = wkup_m3_ipc_of_match,
		.pm = &wkup_m3_ipc_pm_ops,
	},
};
module_platform_driver(wkup_m3_ipc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");