mtk-cmdq-helper.c

// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_ARG_A_WRITE_MASK	0xffff
#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
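/*
 * Usage sketch (illustrative only, not part of this helper): a consumer
 * driver would typically acquire a GCE mailbox channel at probe time and
 * release it at remove time. The channel index 0 and the choice of
 * CMDQ_NO_TIMEOUT below are placeholder values for the example.
 *
 *	struct cmdq_client *cl;
 *
 *	cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 *
 *	...
 *
 *	cmdq_mbox_destroy(cl);
 */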
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
				   u32 arg_a, u32 arg_b)
{
	u64 *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * In the case that the allocated buffer size (pkt->buf_size)
		 * is used up, the real required size (pkt->cmd_buf_size) is
		 * still increased, so that the user knows how much memory
		 * should ultimately be allocated after appending all commands
		 * and flushing the command packet. Therefore, the user can
		 * call cmdq_pkt_create() again with the real required buffer
		 * size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}
	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	(*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
		    (subsys << CMDQ_SUBSYS_SHIFT);

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
}
EXPORT_SYMBOL(cmdq_pkt_write);
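/*
 * Usage sketch (illustrative only): as noted in cmdq_pkt_append_command()
 * above, once the packet buffer is full the helpers keep accounting the
 * real required size in pkt->cmd_buf_size while returning -ENOMEM, so a
 * caller can recover by recreating the packet with the size that was
 * actually needed (including the EOC/JUMP appended at flush time).
 * PKT_SIZE, subsys, offset and value below are hypothetical placeholders.
 *
 *	pkt = cmdq_pkt_create(cl, PKT_SIZE);
 *	err = cmdq_pkt_write(pkt, subsys, offset, value);
 *	if (err == -ENOMEM) {
 *		size_t needed = pkt->cmd_buf_size;
 *
 *		cmdq_pkt_destroy(pkt);
 *		pkt = cmdq_pkt_create(cl, needed);
 *	}
 */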
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	u32 offset_mask = offset;
	int err = 0;

	if (mask != 0xffffffff) {
		err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	u32 arg_b;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	/*
	 * WFE arg_b
	 * bit 0-11: wait value
	 * bit 15: 1 - wait, 0 - no wait
	 * bit 16-27: update value
	 * bit 31: 1 - update, 0 - no update
	 */
	arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
				       CMDQ_WFE_UPDATE);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);

	/* JUMP to end */
	err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);

	return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
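/*
 * Usage sketch (illustrative only): with cmdq_pkt_flush_async() the caller
 * supplies a callback that runs after the GCE hardware has consumed the
 * packet; data.sta can then be checked before the packet is released. The
 * my_flush_done() name and passing the packet itself as callback data are
 * choices made for this example only.
 *
 *	static void my_flush_done(struct cmdq_cb_data data)
 *	{
 *		struct cmdq_pkt *pkt = data.data;
 *
 *		if (data.sta != CMDQ_CB_NORMAL)
 *			pr_err("cmdq packet execution failed\n");
 *		cmdq_pkt_destroy(pkt);
 *	}
 *
 *	err = cmdq_pkt_flush_async(pkt, my_flush_done, pkt);
 */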
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
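/*
 * Usage sketch (illustrative only): a blocking flush ties the helpers above
 * together. The subsys, offset, value, mask and event numbers are
 * hypothetical placeholders; real values come from the GCE binding of the
 * target SoC. cmdq_pkt_flush() returns only after the packet has executed.
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 *
 *	cmdq_pkt_clear_event(pkt, event);
 *	cmdq_pkt_write_mask(pkt, subsys, offset, value, mask);
 *	cmdq_pkt_wfe(pkt, event);
 *
 *	err = cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 */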
MODULE_LICENSE("GPL v2");