rxe_verbs.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

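/* map an Ethernet link speed (in Mb/s) to the closest IB width/speed pair */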
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe->ifc_ops->link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

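/* build an rxe_av from an ib_ah_attr: look up the source GID, then fill in
 * the address and IP info
 */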
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

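/* copy one receive work request into the WQE at the receive queue's
 * producer slot
 */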
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);

	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err1;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err2;

	return &qp->ibqp;

err2:
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

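/* check a send work request against the send queue's SGE count, inline
 * size and atomic alignment limits
 */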
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - also needs the RDMA address and rkey */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

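/* build a send WQE from a work request, copying inline data into the WQE
 * when IB_SEND_INLINE is set
 */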
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (__user void *)
					   (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

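/* validate a send work request and copy it into the next free send queue
 * slot under the SQ lock
 */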
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of GSI QP because ib_send_mad() holds an irq
	 * lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);
	rxe_run_task(&qp->req.task, must_sched);

	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	return 0;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

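/* ib_sg_to_pages() callback: record one page address in the MR's physical
 * buffer map
 */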
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);
	char *name;

	name = rxe->ifc_ops->parent_name(rxe, 1);
	return snprintf(buf, 16, "%s\n", name);
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

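/* fill in the ib_device attributes and verbs entry points, register the
 * device with the IB core and create its sysfs attribute files
 */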
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}