/* drivers/net/mlx4/cmd.c — mlx4 HCA firmware command interface */
  1. /*
  2. * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  3. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. #include <linux/sched.h>
  35. #include <linux/slab.h>
  36. #include <linux/pci.h>
  37. #include <linux/errno.h>
  38. #include <linux/mlx4/cmd.h>
  39. #include <asm/io.h>
  40. #include "mlx4.h"
/* Token written to the HCR token field for polled (non-event) commands. */
#define CMD_POLL_TOKEN 0xffff

/*
 * Command status codes returned by firmware in the HCR status byte;
 * mlx4_status_to_errno() maps these onto negative errno values.
 */
enum {
	/* command completed successfully: */
	CMD_STAT_OK = 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR = 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP = 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM = 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE = 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE = 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY = 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM = 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE = 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX = 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM = 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR = 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM = 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND = 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE = 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT = 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE = 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ = 0x50,
};
/*
 * Byte offsets of the HCR (Host Command Register) fields, plus bit
 * positions within the 7th HCR dword (the opcode/control word written
 * last in mlx4_cmd_post()).
 */
enum {
	HCR_IN_PARAM_OFFSET = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET = 0x0c,
	HCR_TOKEN_OFFSET = 0x14,
	HCR_STATUS_OFFSET = 0x18,
	HCR_OPMOD_SHIFT = 12,	/* opcode modifier position in control word */
	HCR_T_BIT = 21,		/* toggle bit, flipped per posted command */
	HCR_E_BIT = 22,		/* event bit: request completion via EQ */
	HCR_GO_BIT = 23		/* set to hand the command to firmware */
};
enum {
	/* How long mlx4_cmd_post() waits (event mode only) for a prior
	 * command's GO bit to clear before giving up with -EAGAIN. */
	GO_BIT_TIMEOUT_MSECS = 10000
};
/*
 * Per-command bookkeeping for event-driven commands.  The issuing
 * thread sleeps on 'done' in mlx4_cmd_wait(); mlx4_cmd_event() fills
 * in 'result'/'out_param' and completes it.
 */
struct mlx4_cmd_context {
	struct completion done;	/* signalled by mlx4_cmd_event() */
	int result;		/* FW status translated to -errno */
	int next;		/* next index in the free-context list */
	u64 out_param;		/* immediate output from the completion */
	u16 token;		/* tag matching command to its completion */
};
  103. static int mlx4_status_to_errno(u8 status)
  104. {
  105. static const int trans_table[] = {
  106. [CMD_STAT_INTERNAL_ERR] = -EIO,
  107. [CMD_STAT_BAD_OP] = -EPERM,
  108. [CMD_STAT_BAD_PARAM] = -EINVAL,
  109. [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
  110. [CMD_STAT_BAD_RESOURCE] = -EBADF,
  111. [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
  112. [CMD_STAT_EXCEED_LIM] = -ENOMEM,
  113. [CMD_STAT_BAD_RES_STATE] = -EBADF,
  114. [CMD_STAT_BAD_INDEX] = -EBADF,
  115. [CMD_STAT_BAD_NVMEM] = -EFAULT,
  116. [CMD_STAT_ICM_ERROR] = -ENFILE,
  117. [CMD_STAT_BAD_QP_STATE] = -EINVAL,
  118. [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
  119. [CMD_STAT_REG_BOUND] = -EBUSY,
  120. [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
  121. [CMD_STAT_BAD_PKT] = -EINVAL,
  122. [CMD_STAT_BAD_SIZE] = -ENOMEM,
  123. [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
  124. };
  125. if (status >= ARRAY_SIZE(trans_table) ||
  126. (status != CMD_STAT_OK && trans_table[status] == 0))
  127. return -EIO;
  128. return trans_table[status];
  129. }
  130. static int cmd_pending(struct mlx4_dev *dev)
  131. {
  132. u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
  133. return (status & swab32(1 << HCR_GO_BIT)) ||
  134. (mlx4_priv(dev)->cmd.toggle ==
  135. !!(status & swab32(1 << HCR_T_BIT)));
  136. }
/*
 * Post one command to the HCR.
 *
 * @in_param/@out_param: immediate values or mailbox DMA addresses.
 * @token:  tag echoed back in the completion (CMD_POLL_TOKEN when
 *          polling).
 * @event:  non-zero when completion is reported via an EQ event; only
 *          then do we wait up to GO_BIT_TIMEOUT_MSECS for a previous
 *          command to leave the HCR.
 *
 * hcr_mutex serializes HCR writers.  Returns 0 on success or -EAGAIN
 * if the HCR did not become free in time.
 */
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	/* Pollers never overlap commands, so the deadline ('end' with
	 * no slack added) only matters in event mode. */
	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (time_after_eq(jiffies, end))
			goto out;
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	/* The GO bit in this final word hands the command to firmware,
	 * so it must be written after all the parameter words above. */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
					       (cmd->toggle << HCR_T_BIT) |
					       (event ? (1 << HCR_E_BIT) : 0) |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
/*
 * Execute a firmware command in polling mode: post it, then busy-wait
 * (yielding via cond_resched()) on the HCR GO/toggle bits until the
 * firmware finishes or @timeout milliseconds elapse.
 *
 * If @out_is_imm, the 64-bit immediate result is read back from the
 * HCR out-param words into *@out_param.
 *
 * poll_sem serializes polled commands — only one may own the HCR.
 * Returns 0, the FW status translated to -errno, or -ETIMEDOUT.
 */
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;

	down(&priv->cmd.poll_sem);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		/* Reassemble the big-endian 64-bit immediate result. */
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

	/* The status byte is the top 8 bits of the status word. */
	err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
					       __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
	up(&priv->cmd.poll_sem);
	return err;
}
  216. void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
  217. {
  218. struct mlx4_priv *priv = mlx4_priv(dev);
  219. struct mlx4_cmd_context *context =
  220. &priv->cmd.context[token & priv->cmd.token_mask];
  221. /* previously timed out command completing at long last */
  222. if (token != context->token)
  223. return;
  224. context->result = mlx4_status_to_errno(status);
  225. context->out_param = out_param;
  226. complete(&context->done);
  227. }
  228. static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
  229. int out_is_imm, u32 in_modifier, u8 op_modifier,
  230. u16 op, unsigned long timeout)
  231. {
  232. struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
  233. struct mlx4_cmd_context *context;
  234. int err = 0;
  235. down(&cmd->event_sem);
  236. spin_lock(&cmd->context_lock);
  237. BUG_ON(cmd->free_head < 0);
  238. context = &cmd->context[cmd->free_head];
  239. context->token += cmd->token_mask + 1;
  240. cmd->free_head = context->next;
  241. spin_unlock(&cmd->context_lock);
  242. init_completion(&context->done);
  243. mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
  244. in_modifier, op_modifier, op, context->token, 1);
  245. if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
  246. err = -EBUSY;
  247. goto out;
  248. }
  249. err = context->result;
  250. if (err)
  251. goto out;
  252. if (out_is_imm)
  253. *out_param = context->out_param;
  254. out:
  255. spin_lock(&cmd->context_lock);
  256. context->next = cmd->free_head;
  257. cmd->free_head = context - cmd->context;
  258. spin_unlock(&cmd->context_lock);
  259. up(&cmd->event_sem);
  260. return err;
  261. }
  262. int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
  263. int out_is_imm, u32 in_modifier, u8 op_modifier,
  264. u16 op, unsigned long timeout)
  265. {
  266. if (mlx4_priv(dev)->cmd.use_events)
  267. return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
  268. in_modifier, op_modifier, op, timeout);
  269. else
  270. return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
  271. in_modifier, op_modifier, op, timeout);
  272. }
  273. EXPORT_SYMBOL_GPL(__mlx4_cmd);
  274. int mlx4_cmd_init(struct mlx4_dev *dev)
  275. {
  276. struct mlx4_priv *priv = mlx4_priv(dev);
  277. mutex_init(&priv->cmd.hcr_mutex);
  278. sema_init(&priv->cmd.poll_sem, 1);
  279. priv->cmd.use_events = 0;
  280. priv->cmd.toggle = 1;
  281. priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
  282. MLX4_HCR_SIZE);
  283. if (!priv->cmd.hcr) {
  284. mlx4_err(dev, "Couldn't map command register.");
  285. return -ENOMEM;
  286. }
  287. priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
  288. MLX4_MAILBOX_SIZE,
  289. MLX4_MAILBOX_SIZE, 0);
  290. if (!priv->cmd.pool) {
  291. iounmap(priv->cmd.hcr);
  292. return -ENOMEM;
  293. }
  294. return 0;
  295. }
  296. void mlx4_cmd_cleanup(struct mlx4_dev *dev)
  297. {
  298. struct mlx4_priv *priv = mlx4_priv(dev);
  299. pci_pool_destroy(priv->cmd.pool);
  300. iounmap(priv->cmd.hcr);
  301. }
  302. /*
  303. * Switch to using events to issue FW commands (can only be called
  304. * after event queue for command events has been initialized).
  305. */
  306. int mlx4_cmd_use_events(struct mlx4_dev *dev)
  307. {
  308. struct mlx4_priv *priv = mlx4_priv(dev);
  309. int i;
  310. priv->cmd.context = kmalloc(priv->cmd.max_cmds *
  311. sizeof (struct mlx4_cmd_context),
  312. GFP_KERNEL);
  313. if (!priv->cmd.context)
  314. return -ENOMEM;
  315. for (i = 0; i < priv->cmd.max_cmds; ++i) {
  316. priv->cmd.context[i].token = i;
  317. priv->cmd.context[i].next = i + 1;
  318. }
  319. priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
  320. priv->cmd.free_head = 0;
  321. sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
  322. spin_lock_init(&priv->cmd.context_lock);
  323. for (priv->cmd.token_mask = 1;
  324. priv->cmd.token_mask < priv->cmd.max_cmds;
  325. priv->cmd.token_mask <<= 1)
  326. ; /* nothing */
  327. --priv->cmd.token_mask;
  328. priv->cmd.use_events = 1;
  329. down(&priv->cmd.poll_sem);
  330. return 0;
  331. }
  332. /*
  333. * Switch back to polling (used when shutting down the device)
  334. */
  335. void mlx4_cmd_use_polling(struct mlx4_dev *dev)
  336. {
  337. struct mlx4_priv *priv = mlx4_priv(dev);
  338. int i;
  339. priv->cmd.use_events = 0;
  340. for (i = 0; i < priv->cmd.max_cmds; ++i)
  341. down(&priv->cmd.event_sem);
  342. kfree(priv->cmd.context);
  343. up(&priv->cmd.poll_sem);
  344. }
  345. struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
  346. {
  347. struct mlx4_cmd_mailbox *mailbox;
  348. mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
  349. if (!mailbox)
  350. return ERR_PTR(-ENOMEM);
  351. mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
  352. &mailbox->dma);
  353. if (!mailbox->buf) {
  354. kfree(mailbox);
  355. return ERR_PTR(-ENOMEM);
  356. }
  357. return mailbox;
  358. }
  359. EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
  360. void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
  361. {
  362. if (!mailbox)
  363. return;
  364. pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
  365. kfree(mailbox);
  366. }
  367. EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);