/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;
	*pkey = 0xffff;
	return 0;
}
int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
			struct ib_udata *uhw)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_SHUTDOWN_PORT |
				 IB_DEVICE_SYS_IMAGE_GUID |
				 IB_DEVICE_LOCAL_DMA_LKEY |
				 IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_send_sge = dev->attr.max_send_sge;
	attr->max_recv_sge = dev->attr.max_recv_sge;
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}
struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct ocrdma_dev *dev;
	struct net_device *ndev = NULL;

	rcu_read_lock();
	dev = get_ocrdma_dev(ibdev);
	if (dev)
		ndev = dev->nic_info.netdev;
	if (ndev)
		dev_hold(ndev);
	rcu_read_unlock();

	return ndev;
}
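
/* Translate the PHY link speed reported by firmware into the closest
 * IB (speed, width) pair: 10 Gbps maps to QDR x1 (10 Gbps/lane),
 * 20 Gbps to DDR x4 (5 Gbps x 4 lanes) and 40 Gbps to QDR x4
 * (10 Gbps x 4 lanes), since the IB enums have no native single-lane
 * 20G/40G encodings.
 */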
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}
int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	/* props being zeroed by the caller, avoid zeroing it here */
	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}
int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}
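
/* Every physical range that user space is allowed to mmap() (doorbell
 * pages, DPP pages, queue memory) must first be recorded on
 * uctx->mm_head via ocrdma_add_mmap(); ocrdma_mmap() later rejects any
 * request whose (address, length) is not found on this list.
 */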
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}
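
/* PD manager: when firmware has pre-allocated PD ranges
 * (pd_prealloc_valid), individual PDs are handed out by scanning a
 * per-pool bitmap (DPP pool and normal pool). The pd_*_thrsh fields
 * only track the high-watermark of PDs in use, for resource
 * statistics.
 */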
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}
	return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			     bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}
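
/* A user PD on SKH-R ASICs is created DPP-enabled so that WQEs can be
 * pushed directly into the adapter's DPP aperture. If the mailbox
 * allocation of a DPP PD fails, the allocation is retried once as a
 * plain PD before giving up.
 */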
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx && dev->attr.max_dpp_pds) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid) {
		status = ocrdma_get_pd_num(dev, pd);
		if (status == 0) {
			return pd;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	return pd;
}
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status;

	if (dev->pd_mgr->pd_prealloc_valid)
		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		status = ocrdma_mbx_dealloc_pd(dev, pd);

	kfree(pd);
	return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	(void)_ocrdma_dealloc_pd(dev, pd);
	return 0;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
					     &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}
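
/* mmap() dispatch: offsets falling in the unmapped doorbell BAR are
 * mapped uncached, offsets in the DPP aperture are mapped
 * write-combined, and anything else (queue memory previously recorded
 * via ocrdma_add_mmap) is mapped as ordinary cached memory. Doorbell
 * and DPP pages must not be mapped readable from user space.
 */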
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
	    (len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		   (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		   (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
				      dev->nic_info.dpp_unmapped_len)) &&
		   (len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;
	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}
struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		if (_ocrdma_dealloc_pd(dev, pd))
			pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
	}
exit:
	return ERR_PTR(status);
}
int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status = 0;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return status;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}
static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}
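
/* Pick the smallest PBL size whose resulting PBL count fits the
 * device limit. Each PBL entry (PBE) is a u64 page address, so a PBL
 * of pbl_size bytes holds pbl_size / 8 PBEs. Assuming
 * OCRDMA_MIN_HPAGE_SIZE is 4 KB: registering 1M pages needs
 * 1M / 512 = 2048 PBLs at 4 KB; if max_num_mr_pbl were, say, 256,
 * the loop below doubles pbl_size (8 KB, 16 KB, ...) until the count
 * drops under the limit or MAX_OCRDMA_PBL_SIZE is exceeded.
 */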
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}
static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
				GFP_KERNEL);
	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct scatterlist *sg;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = umem->page_shift;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo =
			    cpu_to_le32(sg_dma_address(sg) +
					(pg_cnt << shift));
			pbe->pa_hi =
			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
						      (pg_cnt << shift)));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, issue the mbx cmd. */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt ==
				(mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}
}
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
	mr->hwmr.fbo = ib_umem_offset(mr->umem);
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}
int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	kfree(mr->pages);
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return 0;
}
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}
int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}
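
/* Before a CQ is destroyed, walk the ring once, count the CQEs that
 * are still valid (produced by hardware but never polled) and ring the
 * CQ doorbell once for all of them, so the hardware's CQE accounting
 * is balanced before the CQ goes away.
 */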
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	BUG_ON(indx == -EINVAL);

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	(void)ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return 0;
}
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}
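
/* dev->qp_tbl maps the hardware QPN (qp->id) back to the driver QP so
 * that completion and async-event processing can resolve which QP a
 * hardware event refers to.
 */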
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}
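
/* The create-QP response hands the user library everything it needs
 * to drive the QP from user space: the doorbell page address and SQ/RQ
 * doorbell offsets, the physical addresses of the SQ/RQ rings (which
 * the library then mmap()s), and, when DPP is enabled, the DPP offset
 * and credit limit.
 */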
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}
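
/* Each PD owns one doorbell page in the doorbell BAR. The SQ and RQ
 * doorbell registers sit at fixed offsets inside that page, and the
 * offsets differ between the SKH-R (gen-2) ASIC and the older
 * BE-family parts, hence the two cases below.
 */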
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}
static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}
static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user-space QP wr_id tables are managed by the library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't
	 * need to know about it.
	 */
	if (status < 0)
		return status;
	return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change or
	 * retrieve the qp state.
	 */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}
static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

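/*
 * E.g. ocrdma_mtu_int_to_enum(2048) == IB_MTU_2048; any value that is
 * not a valid IB MTU (including 0) falls back to IB_MTU_1024 rather
 * than failing, so callers always get a usable enum.
 */
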
static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	/* mask then shift the raw field before converting it to an enum */
	qp_attr->path_mtu =
	    ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
				    OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				   OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;

	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
			params.rnt_rc_sl_fl &
			  OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
			qp->sgid_idx,
			(params.hop_lmt_rq_psn &
			 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
			 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
			(params.tclass_sq_psn &
			 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
			 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
					   OCRDMA_QP_PARAMS_SL_MASK) >>
					   OCRDMA_QP_PARAMS_SL_SHIFT);
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->qp_state = get_ibqp_state(qp_state);
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
	/* Sync driver QP state with FW */
	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
mbx_err:
	return status;
}

static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
{
	unsigned int i = idx / 32;
	u32 mask = (1U << (idx % 32));

	srq->idx_bit_fields[i] ^= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

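/*
 * Illustrative free-count arithmetic, assuming a queue with
 * max_cnt = 16 and max_wqe_idx = 15 (head/tail wrap via the mask in
 * ocrdma_hwq_inc_head()/ocrdma_hwq_inc_tail() below):
 *
 *	head == tail == 0 (empty):  (15 - 0 + 0) % 16 == 15
 *	head == 5, tail == 2:       (15 - 5 + 2) % 16 == 12
 *
 * One slot is always held back, so an empty queue reports
 * max_cnt - 1 free entries and head == tail never means "full".
 */
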
static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqes for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0, wqe_idx = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQEs for a given qp,
	 * mark the matching ones discarded by clearing the qpn.
	 * ring the doorbell only in poll_cq(), as
	 * we don't complete cqes out of order.
	 */
	cur_getp = cq->getp;
	/* find up to where we reap the cq */
	stop_getp = cur_getp;
	do {
		/* exit if (a) we are done reaping the whole hw cq, or
		 * (b) the qp's SQ/RQ becomes empty.
		 */
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;
		cqe = cq->va + cur_getp;
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* a previously discarded cqe (qpn == 0) is skipped too */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
					   OCRDMA_CQE_BUFTAG_SHIFT) &
					   qp->srq->rq.max_wqe_idx;
				BUG_ON(wqe_idx < 1);
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
		/* mark the cqe discarded so that it is not picked up later
		 * in poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	/* sync with any active CQ poll */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	pd = qp->pd;

	/* change the QP state to ERROR */
	if (qp->state != OCRDMA_QPS_RST) {
		attrs.qp_state = IB_QPS_ERR;
		attr_mask = IB_QP_STATE;
		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
	}
	/* ensure that CQEs for a newly created QP (whose id may be the
	 * same as the one just being destroyed) don't get discarded
	 * until the old CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	(void) ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire the CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
		spin_lock(&qp->rq_cq->cq_lock);
		ocrdma_del_qpn_map(dev, qp);
		spin_unlock(&qp->rq_cq->cq_lock);
	} else {
		ocrdma_del_qpn_map(dev, qp);
	}
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
				PAGE_ALIGN(qp->sq.len));
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
					PAGE_ALIGN(qp->rq.len));
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return 0;
}

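/*
 * Teardown lock ordering above: dev->dev_lock (serializes against QP
 * create/destroy and qpn reuse), then sq_cq->cq_lock, then
 * rq_cq->cq_lock when the two CQs differ. Pollers take cq_lock, so
 * removing the qpn mapping under both CQ locks guarantees no poller
 * can look up this QP while its CQEs are being discarded.
 */
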
static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
				 struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = dev->nic_info.unmapped_db +
	    (srq->pd->id * dev->nic_info.db_page_size);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
			       uresp.rq_page_size);
}

struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_srq *srq;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		goto err;

	if (udata == NULL) {
		status = -ENOMEM;
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL)
			goto arm_err;

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
				  GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
			goto arm_err;
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}
	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(dev, srq, udata);
		if (status)
			goto arm_err;
	}

	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}

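/*
 * Illustrative bitmap sizing for the kernel-space SRQ above: with
 * rq.max_cnt == 40, bit_fields_len == 40/32 + 1 == 2, i.e. two u32
 * words track 40 RQE slots. The 0xff memset marks every slot free;
 * ocrdma_srq_get_idx() below clears a bit when a tag is handed out
 * and the completion path toggles it back to free.
 */
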
int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
				PAGE_ALIGN(srq->rq.len));

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				const struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);

	ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = ud_wr(wr)->remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
	ud_hdr->hdr_type = ah->hdr_type;
	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}

static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
{
	uint32_t total_len = 0, i;

	for (i = 0; i < num_sge; i++)
		total_len += sg_list[i].length;
	return total_len;
}

static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    const struct ib_send_wr *wr, u32 wqe_size)
{
	int i;
	char *dpp_addr;

	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
		if (unlikely(hdr->total_len > qp->max_inline_data)) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, hdr->total_len);
			return -EINVAL;
		}
		dpp_addr = (char *)sge;
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dpp_addr,
			       (void *)(unsigned long)wr->sg_list[i].addr,
			       wr->sg_list[i].length);
			dpp_addr += wr->sg_list[i].length;
		}

		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (0 == hdr->total_len)
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

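/*
 * Illustrative WQE sizing (the actual values come from
 * OCRDMA_WQE_STRIDE and OCRDMA_WQE_ALIGN_BYTES in the headers): if
 * OCRDMA_WQE_ALIGN_BYTES were 16, a 20-byte inline payload would be
 * rounded up to 32 bytes before being added to wqe_size, and the final
 * size is encoded into hdr->cw in units of OCRDMA_WQE_STRIDE. A
 * zero-length inline send still reserves room for one ocrdma_sge, so
 * the WQE is never header-only.
 */
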
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
		sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
}

static int get_encoded_page_size(int pg_sz)
{
	/* Max size is 256M, 4096 << 16 */
	int i = 0;

	for (; i < 17; i++)
		if (pg_sz == (4096 << i))
			break;
	return i;
}

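/*
 * The encoding is log2(pg_sz / 4096): 4K -> 0, 8K -> 1, ..., 256M -> 16.
 * A size that is not a power-of-two multiple of 4K falls out of the
 * loop and returns 17, which no valid page size maps to.
 */
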
static int ocrdma_build_reg(struct ocrdma_qp *qp,
			    struct ocrdma_hdr_wqe *hdr,
			    const struct ib_reg_wr *wr)
{
	u64 fbo;
	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
	struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ocrdma_pbe *pbe;
	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
	int num_pbes = 0, i;

	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

	if (wr->access & IB_ACCESS_LOCAL_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
	if (wr->access & IB_ACCESS_REMOTE_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
	if (wr->access & IB_ACCESS_REMOTE_READ)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
	hdr->lkey = wr->key;
	hdr->total_len = mr->ibmr.length;

	fbo = mr->ibmr.iova - mr->pages[0];

	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
	fast_reg->fbo_hi = upper_32_bits(fbo);
	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
	fast_reg->num_sges = mr->npages;
	fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);

	pbe = pbl_tbl->va;
	for (i = 0; i < mr->npages; i++) {
		u64 buf_addr = mr->pages[i];

		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
		num_pbes += 1;
		pbe++;

		/* if the pbl is full storing the pbes,
		 * move to next pbl.
		 */
		if (num_pbes == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
		}
	}
	return 0;
}

static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);

	iowrite32(val, qp->sq_db);
}

int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		     const struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		if (qp->qp_type == IB_QPT_UD &&
		    (wr->opcode != IB_WR_SEND &&
		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
			*bad_wr = wr;
			status = -EINVAL;
			break;
		}
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
					sizeof(struct ocrdma_sge)) /
				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
					OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

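/*
 * Typical caller flow (illustrative only; "dma_addr", "len", "mr_lkey"
 * and "cookie" are placeholders supplied by whichever RDMA ULP posts
 * through the ib_qp wrapping this driver):
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge, .num_sge = 1, .wr_id = cookie,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(ibqp, &wr, &bad_wr);
 *
 * On failure bad_wr points at the first WR that was not consumed;
 * everything before it has already been handed to the hardware.
 */
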
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

	iowrite32(val, qp->rq_db);
}

static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
			     const struct ib_recv_wr *wr, u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
		   OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

/* CQEs for an SRQ's RQEs can potentially arrive out of order.
 * The index gives the entry in the shadow table where the wr_id is
 * stored; the tag/index is returned in the cqe to reference back
 * to a given rqe.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}
	BUG_ON(row == srq->bit_fields_len);
	return indx + 1; /* Use from index 1 */
}

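/*
 * Worked example: if idx_bit_fields[0] == 0x00000005 (bits 0 and 2
 * free), ffs() returns 1, so indx == 0; the toggle clears bit 0,
 * leaving 0x00000004, and the function hands out tag 1. Tags are
 * 1-based, matching the BUG_ON(wqe_idx < 1) checks in the completion
 * paths above and below.
 */
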
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}

static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;

	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}

static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* if a wqe/rqe is pending for which a cqe needs to be returned,
	 * trigger inflating it.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when the hw sq is empty but the rq is not, keep the cqe in
	 * order to get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when the cq for rq and sq is the same, it is safe to
		 * return a flush cqe for the RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqes, as this cqe is used
			 * for triggering a cq event on the buddy cq of the RQ.
			 * When the QP is destroyed, this cqe will be removed
			 * from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;    /* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

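/*
 * "expand" captures CQE coalescing: the adapter may report a single
 * CQE whose wqeidx is ahead of the software tail, covering several
 * (possibly unsignaled) WQEs at once. The poll loop in
 * ocrdma_poll_hwcq() then re-processes the same hardware CQE,
 * advancing the tail one WQE per iteration, until the tail catches up
 * with the reported index.
 */
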
static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
				 struct ocrdma_cqe *cqe)
{
	int status;
	u16 hdr_type = 0;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
						OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = 0;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
			  OCRDMA_CQE_UD_XFER_LEN_MASK;

	if (ocrdma_is_udp_encap_supported(dev)) {
		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			    OCRDMA_CQE_UD_L3TYPE_SHIFT) &
			    OCRDMA_CQE_UD_L3TYPE_MASK;
		ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		ibwc->network_hdr_type = hdr_type;
	}

	return status;
}

static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	BUG_ON(wqe_idx < 1);

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}

static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when the hw rq is empty but the sq is not, keep the cqe to get
	 * the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(qp->ibqp.device);
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
				OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}

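/*
 * Two CQE-consumption schemes coexist here. With phase_change set, the
 * valid bit is left in place and the expected phase flips each time the
 * get pointer wraps to 0, so a CQE counts as new only when its valid
 * bit matches cq->phase. Without phase_change, the driver consumes a
 * CQE destructively by zeroing flags_status_srcqpn.
 */
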
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether the cqe is valid or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqes */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}

/* insert error cqes if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* the adapter returns a single error cqe when the qp moves
		 * to the error state. So insert error cqes with wc_status
		 * FLUSHED for the pending WQEs and RQEs of every SQ and RQ
		 * that uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
			      enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		status = -ENOMEM;
		goto pl_err;
	}

	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr->pages);
pl_err:
	kfree(mr);
	/* propagate the real error rather than a blanket -ENOMEM */
	return ERR_PTR(status);
}

static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	if (unlikely(mr->npages == mr->hwmr.num_pbes))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}