cmd.c

/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
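
/*
 * Illustrative note (not part of the driver): command entries are
 * fixed-size slots in the command buffer, so the lookup above is pure
 * pointer arithmetic. With a hypothetical log_stride of 6 (64-byte
 * slots), get_inst(cmd, 3) would return cmd->cmd_buf + 3 * 64.
 */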

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}
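
/*
 * Worked example (illustrative only): the first sizeof(msg->first.data)
 * bytes of a message travel inline in the command layout, and only the
 * remainder needs mailbox blocks. Assuming 16 inline bytes and a
 * 512-byte MLX5_CMD_DATA_BLOCK_SIZE, a 16-byte message needs 0 blocks,
 * a 520-byte message needs DIV_ROUND_UP(504, 512) = 1, and a 1040-byte
 * message needs DIV_ROUND_UP(1024, 512) = 2.
 */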

static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
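
/*
 * Illustrative property of the XOR-8 scheme above (not driver code):
 * because the signature byte stores the complement of the XOR of the
 * covered bytes, XOR-ing the full span, signature included, always
 * yields 0xff, which is exactly what verify_block_sig() checks:
 *
 *	u8 b[4] = { 0x12, 0x34, 0x56, 0 };
 *
 *	b[3] = ~(b[0] ^ b[1] ^ b[2]);
 *	// now (b[0] ^ b[1] ^ b[2] ^ b[3]) == 0xff
 */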

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
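
/*
 * Note on the loop above: polling waits for firmware to clear the
 * CMD_OWNER_HW bit in status_own, and allows an extra 1000 msec of
 * grace beyond the nominal MLX5_CMD_TIMEOUT_MSEC before giving up
 * with -ETIMEDOUT.
 */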

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
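
/*
 * Note on the table above: when the device is in internal-error state,
 * teardown-style commands (TEARDOWN_HCA, DESTROY_*, DEALLOC_*, 2ERR_QP,
 * 2RST_QP, ...) are faked as successful so that cleanup paths can still
 * make progress, while every other known command is reported as
 * MLX5_DRIVER_STATUS_ABORTED with the driver's own syndrome and -EIO.
 */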

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	default: return "unknown command opcode";
	}
}

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

struct mlx5_ifc_mbox_out_bits {
	u8	status[0x8];
	u8	reserved_at_8[0x18];

	u8	syndrome[0x20];

	u8	reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	opcode[0x10];
	u8	uid[0x10];

	u8	reserved_at_20[0x10];
	u8	op_mod[0x10];

	u8	reserved_at_40[0x40];
};

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8 status;
	u16 opcode;
	u16 op_mod;
	u16 uid;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid = MLX5_GET(mbox_in, in, uid);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_core_err_rl(dev,
			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			mlx5_command_str(opcode), opcode, op_mod,
			cmd_status_str(status), status, syndrome);
	else
		mlx5_core_dbg(dev,
			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			mlx5_command_str(opcode),
			opcode, op_mod,
			cmd_status_str(status),
			status,
			syndrome);

	return cmd_status_to_err(status);
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i < n && next; i++) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);

static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;
	bool poll_cmd = ent->polling;
	int alloc_ret;
	int cmd_mode;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = alloc_ent(cmd);
		if (alloc_ret < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				free_cmd(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* Skip sending command to fw if internal error */
	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
		return;
	}

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}
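
/*
 * Note on the ownership handshake above: the driver fills the command
 * layout, marks status_own with CMD_OWNER_HW and only then rings the
 * doorbell; the wmb() orders the descriptor writes before the MMIO
 * write. Firmware clears the ownership bit on completion, which is
 * what poll_timeout() spins on in polling mode; in event mode the
 * completion arrives through mlx5_cmd_comp_handler() instead.
 */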

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/*  Notes:
 *	1. Callback functions may not sleep
 *	2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;
	ent->polling = force_polling;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (count < sizeof(lbuf) - 1)
		return -EINVAL;

	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
				       &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;
	*pos = count;

	return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!dbg->out_msg)
		return -ENOMEM;

	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
				       dbg->outlen);
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, outlen, err);
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->parent) {
		spin_lock_irqsave(&msg->parent->lock, flags);
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					free_ent(cmd, ent->idx);
					free_cmd(ent);
				}
				continue;
			}

			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			/* only real completion will free the entry slot */
			if (!forced)
				free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									 ent->in->first.data,
									 ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				if (!forced)
					free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
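
/*
 * Illustrative note (not driver code): 'vec' carries a bitmask of
 * completed entry indexes in its low 32 bits, so firmware completing
 * slots 0 and 3 in one event would arrive here as vec == 0x9. The
 * MLX5_TRIGGERED_CMD_COMP flag in vec marks driver-triggered
 * completions, whose entries are reported as
 * MLX5_DRIVER_STATUS_ABORTED rather than a real firmware status.
 */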
  1342. static int status_to_err(u8 status)
  1343. {
  1344. return status ? -1 : 0; /* TBD more meaningful codes */
  1345. }
  1346. static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
  1347. gfp_t gfp)
  1348. {
  1349. struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
  1350. struct cmd_msg_cache *ch = NULL;
  1351. struct mlx5_cmd *cmd = &dev->cmd;
  1352. int i;
  1353. if (in_size <= 16)
  1354. goto cache_miss;
  1355. for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
  1356. ch = &cmd->cache[i];
  1357. if (in_size > ch->max_inbox_size)
  1358. continue;
  1359. spin_lock_irq(&ch->lock);
  1360. if (list_empty(&ch->head)) {
  1361. spin_unlock_irq(&ch->lock);
  1362. continue;
  1363. }
  1364. msg = list_entry(ch->head.next, typeof(*msg), list);
  1365. /* For cached lists, we must explicitly state what is
  1366. * the real size
  1367. */
  1368. msg->len = in_size;
  1369. list_del(&msg->list);
  1370. spin_unlock_irq(&ch->lock);
  1371. break;
  1372. }
  1373. if (!IS_ERR(msg))
  1374. return msg;
  1375. cache_miss:
  1376. msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
  1377. return msg;
  1378. }
  1379. static int is_manage_pages(void *in)
  1380. {
  1381. return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
  1382. }
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);

		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token, force_polling);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

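/* Blocking command execution: the caller sleeps until the command
 * completes or times out, and the firmware status is then checked.
 *
 * Illustrative usage sketch (not from this file; mirrors the MLX5_SET /
 * mlx5_cmd_exec pattern callers elsewhere in the driver use, here with
 * the ENABLE_HCA command as the example):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */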
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

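/* Asynchronous command execution: returns once the command is posted;
 * callback(err, context) is invoked from the completion path (see
 * mlx5_cmd_comp_handler() above). Internal allocations use GFP_ATOMIC
 * on this path, so it is safe to call from atomic context.
 */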
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context,
			false);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

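/* Blocking execution that forces polling-mode completion, for callers
 * that cannot rely on event-driven completions (e.g. while event queues
 * are unavailable).
 */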
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

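/* Release every pre-allocated message still sitting in the caches. */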
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

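/* Cache bucket sizing: many small inboxes, progressively fewer of the
 * larger ones. Each entry size is 16 inline bytes plus a number of
 * mailbox data blocks of MLX5_CMD_DATA_BLOCK_SIZE bytes.
 */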
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

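/* Initialize the command message caches and pre-populate each bucket.
 * Allocation failures here are not fatal: the bucket simply starts out
 * smaller, and alloc_msg() falls back to on-demand allocation.
 */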
static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

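/* Allocate the 4K-aligned DMA buffer backing the command queue. If the
 * first coherent allocation happens to be unaligned, it is released and
 * replaced with a (2 * page - 1)-sized allocation, from which an
 * aligned window is carved with PTR_ALIGN()/ALIGN().
 */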
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

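/* Bring up the command interface: verify the command interface
 * revision, allocate the mailbox pool and command page, read the queue
 * geometry from the initialization segment, program the queue address
 * into the device, and create the message caches, workqueue and debugfs
 * files. The interface starts in polling mode; one command slot is
 * reserved for the page-request queue, the rest are tracked in bitmask.
 */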
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
				    0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

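/* Tear down everything mlx5_cmd_init() created, in reverse order. */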
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);