shm_ipc.c

/*
 * Copyright (c) 2015-2016 Quantenna Communications, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/io.h>

#include "shm_ipc.h"

#undef pr_fmt
#define pr_fmt(fmt) "qtnfmac shm_ipc: %s: " fmt, __func__
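
/* Report whether the peer has raised QTNF_SHM_IPC_NEW_DATA in the shared
 * region header, i.e. whether an inbound message is waiting to be read.
 */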
static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
{
        const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);

        return (flags & QTNF_SHM_IPC_NEW_DATA);
}
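
/* Copy one inbound message out of the shared region, ACK it to the peer
 * and hand the payload to the registered RX callback. Zero-length or
 * oversized frames are still ACKed so the channel keeps moving, but their
 * payload is dropped and the RX callback is skipped.
 */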
static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
{
        size_t size;
        bool rx_buff_ok = true;
        struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

        shm_reg_hdr = &ipc->shm_region->headroom.hdr;

        size = readw(&shm_reg_hdr->data_len);

        if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
                pr_err("wrong rx packet size: %zu\n", size);
                rx_buff_ok = false;
        } else {
                memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
        }

        writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
        readl(&shm_reg_hdr->flags); /* flush PCIe write */

        ipc->interrupt.fn(ipc->interrupt.arg);

        if (likely(rx_buff_ok)) {
                ipc->rx_packet_count++;
                ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
        }
}
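
/* Workqueue handler for the inbound direction: keeps draining messages,
 * one at a time, until the NEW_DATA flag stays clear, since the peer may
 * post a new frame as soon as the previous one is ACKed.
 */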
static void qtnf_shm_ipc_irq_work(struct work_struct *work)
{
        struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
                                                irq_work);

        while (qtnf_shm_ipc_has_new_data(ipc))
                qtnf_shm_handle_new_data(ipc);
}
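
/* Inbound-direction IRQ hook: runs in interrupt context, so it only
 * checks the flag and defers the actual copy-out to the workqueue.
 */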
static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
{
        u32 flags;

        flags = readl(&ipc->shm_region->headroom.hdr.flags);

        if (flags & QTNF_SHM_IPC_NEW_DATA)
                queue_work(ipc->workqueue, &ipc->irq_work);
}
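
/* Outbound-direction IRQ hook: if a transmit is waiting for an ACK and
 * the peer has set QTNF_SHM_IPC_ACK, wake up the sender blocked in
 * qtnf_shm_ipc_send().
 */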
static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
{
        u32 flags;

        if (!READ_ONCE(ipc->waiting_for_ack))
                return;

        flags = readl(&ipc->shm_region->headroom.hdr.flags);

        if (flags & QTNF_SHM_IPC_ACK) {
                WRITE_ONCE(ipc->waiting_for_ack, 0);
                complete(&ipc->tx_completion);
        }
}
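
/* Set up one unidirectional SHM IPC channel over the given shared memory
 * region. The caller supplies the workqueue, the doorbell-interrupt hook
 * and the RX callback; the IRQ handler is chosen by channel direction.
 */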
int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
                      enum qtnf_shm_ipc_direction direction,
                      struct qtnf_shm_ipc_region __iomem *shm_region,
                      struct workqueue_struct *workqueue,
                      const struct qtnf_shm_ipc_int *interrupt,
                      const struct qtnf_shm_ipc_rx_callback *rx_callback)
{
        BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
                     QTN_IPC_REG_HDR_SZ);
        BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);

        ipc->shm_region = shm_region;
        ipc->direction = direction;
        ipc->interrupt = *interrupt;
        ipc->rx_callback = *rx_callback;
        ipc->tx_packet_count = 0;
        ipc->rx_packet_count = 0;
        ipc->workqueue = workqueue;
        ipc->waiting_for_ack = 0;
        ipc->tx_timeout_count = 0;

        switch (direction) {
        case QTNF_SHM_IPC_OUTBOUND:
                ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
                break;
        case QTNF_SHM_IPC_INBOUND:
                ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
                break;
        default:
                return -EINVAL;
        }

        INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
        init_completion(&ipc->tx_completion);

        return 0;
}
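
/* Tear down a channel: wake any sender still blocked on an ACK so it can
 * bail out before the IPC state goes away.
 */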
void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
{
        complete_all(&ipc->tx_completion);
}
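
/* Synchronous transmit: copy the payload into the shared region, raise
 * NEW_DATA, ring the peer's doorbell and block until the peer ACKs or
 * QTN_SHM_IPC_ACK_TIMEOUT expires. Must be called from process context.
 */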
int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
{
        int ret = 0;
        struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

        shm_reg_hdr = &ipc->shm_region->headroom.hdr;

        if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
                return -E2BIG;

        ipc->tx_packet_count++;

        writew(size, &shm_reg_hdr->data_len);
        memcpy_toio(ipc->shm_region->data, buf, size);

        /* sync previous writes before proceeding */
        dma_wmb();

        WRITE_ONCE(ipc->waiting_for_ack, 1);

        /* sync previous memory write before announcing new data ready */
        wmb();

        writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
        readl(&shm_reg_hdr->flags); /* flush PCIe write */

        ipc->interrupt.fn(ipc->interrupt.arg);

        if (!wait_for_completion_timeout(&ipc->tx_completion,
                                         QTN_SHM_IPC_ACK_TIMEOUT)) {
                ret = -ETIMEDOUT;
                ipc->tx_timeout_count++;
                pr_err("TX ACK timeout\n");
        }

        /* now we're not waiting for ACK even in case of timeout */
        WRITE_ONCE(ipc->waiting_for_ack, 0);

        return ret;
}
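
For context, a minimal sketch of how a bus driver could bring up an outbound channel over this API and push one command through it. Everything prefixed my_ (the doorbell helper, the workqueue name, the command buffer, the mapped register addresses) is a hypothetical placeholder, not part of this file; in the real driver the caller is the qtnfmac PCIe bus code, and the types and entry points come from shm_ipc.h as above.

/* Hypothetical usage sketch; all "my_*" names are placeholders. */

static void my_doorbell_kick(void *arg)
{
        void __iomem *doorbell = (__force void __iomem *)arg;

        /* ring the endpoint's doorbell so it inspects the SHM flags */
        writel(1, doorbell);
}

static void my_rx_handler(void *arg, const u8 *buf, size_t len)
{
        pr_info("rx: %zu bytes\n", len);
}

static int my_shm_demo(struct qtnf_shm_ipc *ipc,
                       struct qtnf_shm_ipc_region __iomem *shm,
                       void __iomem *doorbell)
{
        const struct qtnf_shm_ipc_int intr = {
                .fn = my_doorbell_kick,
                .arg = (__force void *)doorbell,
        };
        const struct qtnf_shm_ipc_rx_callback rx_cb = {
                .fn = my_rx_handler,
        };
        static const u8 my_cmd[] = { 0x01, 0x02, 0x03 };
        struct workqueue_struct *wq;
        int ret;

        wq = create_singlethread_workqueue("my_shm_ipc");
        if (!wq)
                return -ENOMEM;

        ret = qtnf_shm_ipc_init(ipc, QTNF_SHM_IPC_OUTBOUND, shm, wq,
                                &intr, &rx_cb);
        if (ret) {
                destroy_workqueue(wq);
                return ret;
        }

        /* process context only: qtnf_shm_ipc_send() sleeps on the ACK */
        return qtnf_shm_ipc_send(ipc, my_cmd, sizeof(my_cmd));
}

On the real bus, the interrupt handler dispatches to ipc->irq_handler(ipc) when the endpoint raises its interrupt; for an outbound channel that is qtnf_shm_ipc_irq_outbound_handler(), which completes tx_completion and unblocks the send above.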