rxe_verbs.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

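/* Report the device attributes computed at init time. rxe takes no
 * driver-private data for this verb, so any udata payload is rejected.
 */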
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
			struct rxe_av *av)
{
	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr);
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(rxe, attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(rxe, attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

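/* Copy one receive work request into the receive queue. The WQE is
 * fully written before smp_wmb(), so a consumer never observes the
 * advanced producer index with a partially built WQE.
 */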
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

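/* Translate a generic ib_send_wr into the driver's rxe_send_wr. UD-type
 * QPs (UD/SMI/GSI) carry the remote QPN and Q_Key; for connected QPs the
 * opcode selects which union member (rdma, atomic, reg, ...) is filled.
 */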
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);
			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

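/* Validate one send work request and copy it into the send queue under
 * the sq lock; as in post_one_recv(), a write barrier orders the WQE
 * contents before the producer index is advanced.
 */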
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

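/* Post receive work requests to the QP's own receive queue. A QP that
 * was created with an SRQ has no receive queue of its own, so posting
 * here is rejected; use rxe_post_srq_recv() instead.
 */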
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

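/* Page-list callback handed to ib_sg_to_pages() in rxe_map_mr_sg():
 * records one page address in the MR's map table, failing with -ENOMEM
 * once every buffer slot is filled.
 */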
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

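/* Wire up the verbs entry points above, allocate the crc32 shash used
 * for ICRC computation, then register the device with the RDMA core and
 * create its sysfs attribute files.
 */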
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->get_netdev = rxe_get_netdev;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}