cvmx-cmd-queue.c

/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 */

#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>

#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/**
 * This application uses this pointer to access the global queue
 * state. It points to a bootmem named block.
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);

/**
 * Initialize the Global queue state pointer.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
        char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
        extern uint64_t octeon_reserve32_memory;
#endif

        if (likely(__cvmx_cmd_queue_state_ptr))
                return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
        if (octeon_reserve32_memory)
                __cvmx_cmd_queue_state_ptr =
                        cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
                                                       octeon_reserve32_memory,
                                                       octeon_reserve32_memory +
                                                       (CONFIG_CAVIUM_RESERVE32 << 20) - 1,
                                                       128, alloc_name);
        else
#endif
                __cvmx_cmd_queue_state_ptr =
                        cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
                                                 128, alloc_name);

        if (__cvmx_cmd_queue_state_ptr)
                memset(__cvmx_cmd_queue_state_ptr, 0,
                       sizeof(*__cvmx_cmd_queue_state_ptr));
        else {
                struct cvmx_bootmem_named_block_desc *block_desc =
                        cvmx_bootmem_find_named_block(alloc_name);
                if (block_desc)
                        __cvmx_cmd_queue_state_ptr =
                                cvmx_phys_to_ptr(block_desc->base_addr);
                else {
                        cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
                                     alloc_name);
                        return CVMX_CMD_QUEUE_NO_MEMORY;
                }
        }
        return CVMX_CMD_QUEUE_SUCCESS;
}
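
/*
 * Illustrative sketch (not part of the original driver): because the queue
 * state lives in a bootmem named block, a second image sharing the chip can
 * attach to the existing state instead of allocating its own, using the same
 * lookup path as the fallback branch above. "desc" is just a local name
 * chosen for the example; only the block name "cvmx_cmd_queues" matters:
 *
 *      struct cvmx_bootmem_named_block_desc *desc =
 *              cvmx_bootmem_find_named_block("cvmx_cmd_queues");
 *      if (desc)
 *              __cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(desc->base_addr);
 */
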
/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued.
 * @fpa_pool:  FPA pool the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
                                                  int max_depth, int fpa_pool,
                                                  int pool_size)
{
        __cvmx_cmd_queue_state_t *qstate;
        cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();

        if (result != CVMX_CMD_QUEUE_SUCCESS)
                return result;

        qstate = __cvmx_cmd_queue_get_state(queue_id);
        if (qstate == NULL)
                return CVMX_CMD_QUEUE_INVALID_PARAM;

        /*
         * We artificially limit max_depth to 1<<20 words. It is an
         * arbitrary limit.
         */
        if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
                if ((max_depth < 0) || (max_depth > 1 << 20))
                        return CVMX_CMD_QUEUE_INVALID_PARAM;
        } else if (max_depth != 0)
                return CVMX_CMD_QUEUE_INVALID_PARAM;

        if ((fpa_pool < 0) || (fpa_pool > 7))
                return CVMX_CMD_QUEUE_INVALID_PARAM;

        if ((pool_size < 128) || (pool_size > 65536))
                return CVMX_CMD_QUEUE_INVALID_PARAM;

        /* See if someone else has already initialized the queue */
        if (qstate->base_ptr_div128) {
                if (max_depth != (int)qstate->max_depth) {
                        cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
                                     "Queue already initialized with different "
                                     "max_depth (%d).\n",
                                     (int)qstate->max_depth);
                        return CVMX_CMD_QUEUE_INVALID_PARAM;
                }
                if (fpa_pool != qstate->fpa_pool) {
                        cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
                                     "Queue already initialized with different "
                                     "FPA pool (%u).\n",
                                     qstate->fpa_pool);
                        return CVMX_CMD_QUEUE_INVALID_PARAM;
                }
                if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
                        cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
                                     "Queue already initialized with different "
                                     "FPA pool size (%u).\n",
                                     (qstate->pool_size_m1 + 1) << 3);
                        return CVMX_CMD_QUEUE_INVALID_PARAM;
                }
                CVMX_SYNCWS;
                return CVMX_CMD_QUEUE_ALREADY_SETUP;
        } else {
                union cvmx_fpa_ctl_status status;
                void *buffer;

                status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
                if (!status.s.enb) {
                        cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
                                     "FPA is not enabled.\n");
                        return CVMX_CMD_QUEUE_NO_MEMORY;
                }
                buffer = cvmx_fpa_alloc(fpa_pool);
                if (buffer == NULL) {
                        cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
                                     "Unable to allocate initial buffer.\n");
                        return CVMX_CMD_QUEUE_NO_MEMORY;
                }

                memset(qstate, 0, sizeof(*qstate));
                qstate->max_depth = max_depth;
                qstate->fpa_pool = fpa_pool;
                qstate->pool_size_m1 = (pool_size >> 3) - 1;
                qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
                /*
                 * We zeroed the now serving field so we need to also
                 * zero the ticket.
                 */
                __cvmx_cmd_queue_state_ptr->
                        ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
                CVMX_SYNCWS;
                return CVMX_CMD_QUEUE_SUCCESS;
        }
}
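
/*
 * Usage sketch (illustrative, not part of the original driver): a caller
 * bringing up PKO command queue 0 might look like the following. The
 * CVMX_CMD_QUEUE_PKO() queue-id helper comes from cvmx-cmd-queue.h, and the
 * CVMX_FPA_OUTPUT_BUFFER_POOL* macros are assumed to be provided by the
 * board's cvmx-config.h:
 *
 *      cvmx_cmd_queue_result_t r;
 *
 *      r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0),
 *                                    0, // max_depth (see checks above)
 *                                    CVMX_FPA_OUTPUT_BUFFER_POOL,
 *                                    CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
 *      if (r != CVMX_CMD_QUEUE_SUCCESS && r != CVMX_CMD_QUEUE_ALREADY_SETUP)
 *              ; // handle the failure
 *
 * Per the checks above, a max_depth of 0 is accepted in either configuration;
 * non-zero depths are rejected unless CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH is
 * enabled.
 */
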
/**
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shut down
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
        __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

        if (qptr == NULL) {
                cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
                             "get queue information.\n");
                return CVMX_CMD_QUEUE_INVALID_PARAM;
        }

        if (cvmx_cmd_queue_length(queue_id) > 0) {
                cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
                             "has data in it.\n");
                return CVMX_CMD_QUEUE_FULL;
        }

        __cvmx_cmd_queue_lock(queue_id, qptr);
        if (qptr->base_ptr_div128) {
                cvmx_fpa_free(cvmx_phys_to_ptr
                              ((uint64_t) qptr->base_ptr_div128 << 7),
                              qptr->fpa_pool, 0);
                qptr->base_ptr_div128 = 0;
        }
        __cvmx_cmd_queue_unlock(qptr);

        return CVMX_CMD_QUEUE_SUCCESS;
}
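
/*
 * Teardown sketch (illustrative, not part of the original driver): because
 * cvmx_cmd_queue_shutdown() refuses to free buffers while commands are still
 * pending, callers normally stop the hardware unit first, wait for the
 * doorbell count to drain, and only then shut the queue down:
 *
 *      // the hardware block feeding this queue is assumed to be stopped
 *      while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0)) > 0)
 *              ; // real code would bound this wait with a timeout
 *      cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(0));
 */
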
/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
        if (CVMX_ENABLE_PARAMETER_CHECKING) {
                if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
                        return CVMX_CMD_QUEUE_INVALID_PARAM;
        }
        /*
         * The cast is here so gcc will check that all values in the
         * cvmx_cmd_queue_id_t enumeration are handled here.
         */
        switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
        case CVMX_CMD_QUEUE_PKO_BASE:
                /*
                 * FIXME: Need atomic lock on
                 * CVMX_PKO_REG_READ_IDX. Right now we are normally
                 * called with the queue lock, so that is a SLIGHT
                 * amount of protection.
                 */
                cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
                if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
                        union cvmx_pko_mem_debug9 debug9;
                        debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
                        return debug9.cn38xx.doorbell;
                } else {
                        union cvmx_pko_mem_debug8 debug8;
                        debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
                        return debug8.cn58xx.doorbell;
                }
        case CVMX_CMD_QUEUE_ZIP:
        case CVMX_CMD_QUEUE_DFA:
        case CVMX_CMD_QUEUE_RAID:
                /* FIXME: Implement other lengths */
                return 0;
        case CVMX_CMD_QUEUE_DMA_BASE:
                {
                        union cvmx_npei_dmax_counts dmax_counts;
                        dmax_counts.u64 =
                                cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
                        return dmax_counts.s.dbell;
                }
        case CVMX_CMD_QUEUE_END:
                return CVMX_CMD_QUEUE_INVALID_PARAM;
        }
        return CVMX_CMD_QUEUE_INVALID_PARAM;
}
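
/*
 * Note (illustrative, not part of the original driver): the return value
 * mixes a word count with negative error codes, so callers should test for
 * errors before treating it as a length. CVMX_CMD_QUEUE_DMA() is the DMA
 * queue-id helper from cvmx-cmd-queue.h:
 *
 *      int pending = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DMA(0));
 *      if (pending < 0)
 *              ; // invalid or unsupported queue id
 *      else if (pending == 0)
 *              ; // queue has fully drained
 */
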
/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
        __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

        if (qptr && qptr->base_ptr_div128)
                return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
        else
                return NULL;
}
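
/*
 * Setup sketch (illustrative, not part of the original driver): hardware
 * bring-up code typically converts the returned virtual pointer back to a
 * physical address when programming a unit's command-buffer base register.
 * EXAMPLE_UNIT_CMD_BUF_REG below is a placeholder, not a real CSR name:
 *
 *      void *buf = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO(0));
 *      if (buf)
 *              cvmx_write_csr(EXAMPLE_UNIT_CMD_BUF_REG, cvmx_ptr_to_phys(buf));
 */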