/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS	= 2,
	NUM_MED_LISTS	= 64,
	LONG_LIST_SIZE	= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	= 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}
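/* The 8-bit token is stamped into the command descriptor and every
 * mailbox block of a message; firmware validates it on delivery (see
 * MLX5_CMD_DELIVERY_STAT_TOK_ERR), which presumably helps catch
 * mismatched or corrupted descriptors. Zero is skipped so a live token
 * never matches a freshly zeroed descriptor.
 */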
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}
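/* Free command-queue slots are tracked in cmd->bitmask: a set bit means
 * the slot is free. alloc_ent() claims the lowest free slot under
 * alloc_lock and free_ent() returns it. The last slot (index
 * cmd->max_reg_cmds) is reserved for the page queue and is never handed
 * out here.
 */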
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
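/* Integrity of a command is protected by 8-bit XOR signatures: each
 * signed region, including its signature byte, must XOR to 0xff.
 * calc_block_sig()/verify_block_sig() below cover individual mailbox
 * blocks, while set_signature()/verify_signature() cover the command
 * descriptor and, when checksumming is enabled, the whole mailbox chain.
 */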
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}
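/* Polling-mode completion: firmware clears CMD_OWNER_HW in the
 * descriptor's status_own byte when it is done with the command. The
 * poll budget is the normal command timeout plus a one second grace
 * period.
 */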
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;
	int size = ent->out->len;
	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
	default: return "unknown command opcode";
	}
}
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
struct mlx5_ifc_mbox_out_bits {
	u8	status[0x8];
	u8	reserved_at_8[0x18];

	u8	syndrome[0x20];

	u8	reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	opcode[0x10];
	u8	reserved_at_10[0x10];

	u8	reserved_at_20[0x10];
	u8	op_mod[0x10];

	u8	reserved_at_40[0x40];
};

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8  status;
	u16 opcode;
	u16 op_mod;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);

	mlx5_core_err(dev,
		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
		      mlx5_command_str(opcode),
		      opcode, op_mod,
		      cmd_status_str(status),
		      status,
		      syndrome);

	return cmd_status_to_err(status);
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
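/* Work handler that actually submits a command: it claims a queue slot
 * (the page queue has its own dedicated slot), builds and signs the
 * hardware descriptor from ent->in/ent->out, and rings the doorbell.
 * In polling mode it then busy-waits and calls the completion handler
 * itself; in event mode the completion is expected to arrive through
 * mlx5_cmd_comp_handler() from the EQ path instead.
 */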
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;
	int alloc_ret;
	int cmd_mode;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = alloc_ent(cmd);
		if (alloc_ret < 0) {
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				free_cmd(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* Skip sending command to fw if internal error */
	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
		return;
	}

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
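/* A command message is laid out as a small inline area (msg->first.data)
 * followed by a chain of DMA mailbox blocks of MLX5_CMD_DATA_BLOCK_SIZE
 * data bytes each. The two helpers below scatter a flat caller buffer
 * into that layout and gather it back out.
 */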
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
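/* Build the mailbox chain for a message of 'size' bytes: whatever does
 * not fit in the inline first.data area is spread over n blocks. The
 * chain is built by inserting at the head, so the final list runs from
 * block_num 0 upward, linked both for the driver (tmp->next) and for
 * the device via the big-endian 'next' DMA pointer.
 */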
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;
	*pos = count;
	return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
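/* Switching between polling and event mode must not race in-flight
 * commands, so the switch drains the queue first: taking all
 * max_reg_cmds units of cmd->sem plus pages_sem guarantees nothing is
 * executing while cmd->mode is updated.
 */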
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
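/* Completion path shared by genuine firmware completions (EQE or
 * polling) and forced completions injected on timeout or internal
 * error. 'vec' is a bitmask of finished queue slots. A forced
 * completion deliberately leaves the slot allocated: only a real
 * completion from firmware may recycle it.
 */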
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					free_ent(cmd, ent->idx);
				}
				continue;
			}

			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			/* only real completion will free the entry slot */
			if (!forced)
				free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									 ent->in->first.data,
									 ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
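/* Fast-path allocation of an input message: requests up to
 * MED_LIST_SIZE or LONG_LIST_SIZE bytes are served from the
 * pre-allocated med/large caches when one is available, falling back
 * to a fresh mlx5_alloc_cmd_msg() otherwise.
 */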
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);

	return msg;
}

static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);

		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
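/* Typical caller pattern (a sketch; MLX5_ST_SZ_DW and MLX5_SET are the
 * standard mlx5 ifc accessors, and 'nop' here just stands in for any
 * command layout defined in mlx5_ifc.h):
 *
 *	u32 out[MLX5_ST_SZ_DW(nop_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(nop_in)] = {0};
 *
 *	MLX5_SET(nop_in, in, opcode, MLX5_CMD_OP_NOP);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */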
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
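/* The command queue page handed to firmware must be 4K aligned. Try a
 * plain MLX5_ADAPTER_PAGE_SIZE coherent allocation first; if it comes
 * back misaligned, reallocate twice the size minus one byte and align
 * the CPU pointer and DMA address by hand.
 */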
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}
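/* One-time bring-up of the command interface: check the command
 * interface revision against CMD_IF_REV, allocate the mailbox pool and
 * the command queue page, read the queue geometry from the init
 * segment, publish the queue address back to firmware, and set up the
 * message cache, workqueue and debugfs files. The interface starts in
 * polling mode.
 */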
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);