ib_verbs.h

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791

struct ib_gid_attr {
	struct net_device *ndev;
	struct ib_device *device;
	union ib_gid gid;
	enum ib_gid_type gid_type;
	u16 index;
	u8 port_num;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}
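
/*
 * Illustrative sketch only (not part of the upstream header): how a
 * consumer might combine the two helpers above to classify a GID.
 * The function name example_gid_is_roce_v2 is hypothetical.
 */
#if 0
static bool example_gid_is_roce_v2(const struct ib_gid_attr *attr)
{
	enum rdma_network_type nt = rdma_gid_attr_network_type(attr);

	/* RoCE v2 GIDs are exactly those with an IP-based network type */
	return ib_network_to_gid_type(nt) == IB_GID_TYPE_ROCE_UDP_ENCAP;
}
#endif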
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	/* Not in use, former INIT_TYPE = (1 << 9), */
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory. Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey field in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	/* Reserved, old SEND_W_INV = (1 << 16), */
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),
	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec. iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC = 1,
	IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int cqe;
	u32 comp_vector;
	u32 flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64 length;
	u64 offset;
	u32 access_flags;
};

struct ib_dm_alloc_attr {
	u64 length;
	u32 alignment;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_send_sge;
	int max_recv_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in kHz */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
	u64 max_dm_size;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:          return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
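
/*
 * Illustrative sketch only (not part of the upstream header): the two MTU
 * helpers above compose into a "round down to a supported IB MTU" clamp,
 * e.g. a 9000-byte netdev MTU maps to IB_MTU_4096, whose byte value is
 * 4096. The function name is hypothetical.
 */
#if 0
static int example_clamp_mtu(int netdev_mtu)
{
	enum ib_mtu ib_mtu = ib_mtu_int_to_enum(netdev_mtu);

	return ib_mtu_enum_to_int(ib_mtu); /* 9000 -> 4096, 1500 -> 1024 */
}
#endif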
enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:           return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again. Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are. If names is
 *   shorter than this number, a kernel oops will result. Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	struct mutex lock; /* Protect lifespan and values[] */
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;

	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
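
/*
 * Illustrative sketch only (not part of the upstream header): how a driver
 * might call the helper above, following the BUILD_BUG_ON advice from the
 * rdma_hw_stats kernel-doc. The counter names and function are hypothetical.
 */
#if 0
static const char * const example_counter_names[] = {
	"rx_pkts",
	"tx_pkts",
};

static struct rdma_hw_stats *example_alloc_hw_stats(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(example_counter_names) != 2);

	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
#endif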
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB	(RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE	(RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD   \
					| RDMA_CORE_CAP_IB_CM    \
					| RDMA_CORE_CAP_AF_IB    \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP	(RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
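
/*
 * Illustrative sketch only (not part of the upstream header): a RoCE v2
 * provider would typically advertise one of the composite masks above via
 * its port-immutable data. struct ib_port_immutable is declared further
 * down in this header; the simplified helper below is hypothetical (the
 * real get_port_immutable() callback also takes the device and port).
 */
#if 0
static int example_fill_port_immutable(struct ib_port_immutable *immutable)
{
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	return 0;
}
#endif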
struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	unsigned int ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u32 sm_lid;
	u32 lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3),
	IB_PORT_OPA_MASK_CHG = (1<<4)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device  = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)
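
/*
 * Illustrative sketch only (not part of the upstream header): typical use
 * of INIT_IB_EVENT_HANDLER. The handler body and names are hypothetical;
 * ib_register_event_handler() is declared further down in this header.
 */
#if 0
static void example_event_handler(struct ib_event_handler *handler,
				  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("port %u became active\n", event->element.port_num);
}

static void example_watch_device(struct ib_device *device)
{
	static struct ib_event_handler handler;

	INIT_IB_EVENT_HANDLER(&handler, device, example_event_handler);
	ib_register_event_handler(&handler);
}
#endif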
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the GRH.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG: memory region that is used for
 *   normal registration
 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
 *   signature operations (data-integrity capable regions)
 * @IB_MR_TYPE_SG_GAPS: memory region that is capable of
 *   registering any arbitrary sg lists (without the normal
 *   mr constraints - see ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *   domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16 pi_interval;
	u16 bg;
	u16 app_tag;
	u32 ref_tag;
	bool ref_remap;
	bool app_escape;
	bool ref_escape;
	u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *   be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8 check_mask;
	struct ib_sig_domain mem;
	struct ib_sig_domain wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * Signature check masks (8 bytes in total) according to the T10-PI standard:
 *  -------- -------- ------------
 * | GUARD  | APPTAG |   REFTAG   |
 * |   2B   |   2B   |     4B     |
 *  -------- -------- ------------
 */
enum {
	IB_SIG_CHECK_GUARD = 0xc0,
	IB_SIG_CHECK_APPTAG = 0x30,
	IB_SIG_CHECK_REFTAG = 0x0f,
};
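
/*
 * Illustrative note (not part of the upstream header): a ULP that wants
 * guard and reference-tag checking but no application-tag checking would
 * set check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG, i.e. 0xcf.
 */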
/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type err_type;
	u32 expected;
	u32 actual;
	u64 sig_err_offset;
	u32 key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *   failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *   failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
	bool make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	u32 slid;
	int wc_flags;
	u16 pkey_index;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num; /* valid only for DR SMPs on switches */
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};
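
/*
 * Illustrative sketch only (not part of the upstream header): the
 * IB_WC_RECV bit in the opcode lets a completion handler distinguish
 * receives without enumerating every receive opcode, as the comment in
 * enum ib_wc_opcode describes. The function name is hypothetical.
 */
#if 0
static bool example_wc_is_recv(const struct ib_wc *wc)
{
	return wc->opcode & IB_WC_RECV;
}
#endif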
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;
	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;
			struct {
				u32 max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,
	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
	/* FREE = 1 << 7, */
	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
	IB_QP_CREATE_SOURCE_QPN = 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */
struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	u32 create_flags;
	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32 source_qpn;
};
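
/*
 * Illustrative sketch only (not part of the upstream header): minimal
 * attribute setup for an RC QP. The sizing values are arbitrary
 * placeholders; ib_create_qp() is declared further down in this header.
 */
#if 0
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IB_QPT_RC,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr = 16,
			.max_recv_wr = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return ib_create_qp(pd, &init_attr);
}
#endif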
struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	/* These are kernel only and cannot be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
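
/*
 * Illustrative sketch only (not part of the upstream header): building and
 * posting a single signaled RDMA WRITE via the ib_rdma_wr wrapper above.
 * All values are placeholders; ib_post_send() is declared further down in
 * this header.
 */
#if 0
static int example_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
			      u64 remote_addr, u32 rkey)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list = sge,
			.num_sge = 1,
		},
		.remote_addr = remote_addr,
		.rkey = rkey,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
#endif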
  1231. struct ib_atomic_wr {
  1232. struct ib_send_wr wr;
  1233. u64 remote_addr;
  1234. u64 compare_add;
  1235. u64 swap;
  1236. u64 compare_add_mask;
  1237. u64 swap_mask;
  1238. u32 rkey;
  1239. };
  1240. static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
  1241. {
  1242. return container_of(wr, struct ib_atomic_wr, wr);
  1243. }
  1244. struct ib_ud_wr {
  1245. struct ib_send_wr wr;
  1246. struct ib_ah *ah;
  1247. void *header;
  1248. int hlen;
  1249. int mss;
  1250. u32 remote_qpn;
  1251. u32 remote_qkey;
  1252. u16 pkey_index; /* valid for GSI only */
  1253. u8 port_num; /* valid for DR SMPs on switch only */
  1254. };
  1255. static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
  1256. {
  1257. return container_of(wr, struct ib_ud_wr, wr);
  1258. }
  1259. struct ib_reg_wr {
  1260. struct ib_send_wr wr;
  1261. struct ib_mr *mr;
  1262. u32 key;
  1263. int access;
  1264. };
  1265. static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
  1266. {
  1267. return container_of(wr, struct ib_reg_wr, wr);
  1268. }

struct ib_sig_handover_wr {
	struct ib_send_wr wr;
	struct ib_sig_attrs *sig_attrs;
	struct ib_mr *sig_mr;
	int access_flags;
	struct ib_sge *prot;
};

static inline const struct ib_sig_handover_wr *
sig_handover_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};
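
/*
 * Example (illustrative sketch): posting one receive buffer. "qp", "sge"
 * and "my_ctx" are assumptions; the wr_id is echoed back in the matching
 * work completion so the consumer can find its context.
 *
 *	struct ib_recv_wr wr = {
 *		.wr_id	 = (uintptr_t)my_ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */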

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,

	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1 << 1),
	IB_MR_REREG_ACCESS	= (1 << 2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail.
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg;	/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;
	/*
	 * 'closing' can be read by the driver only during a destroy callback,
	 * it is set when we are closing the file descriptor and indicates
	 * that mm_sem may be locked.
	 */
	int closing;

	bool cleanup_retryable;

	struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root_cached umem_tree;
	/*
	 * Protects umem_tree, as well as odp_mrs_count and
	 * mmu notifier registration.
	 */
	struct rw_semaphore umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier mn;
	atomic_t notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head no_private_counters;
	int odp_mrs_count;
#endif

	struct ib_rdmacg_object cg_obj;
};

struct ib_uobject {
	u64 user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext *context;	/* associated user context */
	void *object;			/* containing object */
	struct list_head list;		/* link to context's list */
	struct ib_rdmacg_object cg_obj;	/* rdmacg object */
	int id;				/* index into kernel idr */
	struct kref ref;
	atomic_t usecnt;		/* protects exclusive access */
	struct rcu_head rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;	/* count all resources */

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt;	/* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,			/* caller context, no hw completions */
	IB_POLL_SOFTIRQ,		/* poll from softirq context */
	IB_POLL_WORKQUEUE,		/* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE,	/* poll from unbound workqueue */
};
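
/*
 * Example (illustrative sketch): allocating a completion queue with the
 * ib_alloc_cq() helper declared later in this header. IB_POLL_WORKQUEUE
 * runs the ib_cqe "done" callbacks from process context, IB_POLL_SOFTIRQ
 * polls from softirq for lower latency, and IB_POLL_DIRECT leaves polling
 * entirely to the caller. "dev" and "nr_cqe" are assumptions.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(dev, NULL, nr_cqe, 0, IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */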

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt;	/* count number of work queues */
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
	struct workqueue_struct *comp_wq;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32 srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the CVLAN from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags;	/* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE	= 1 << 0,
	IB_WQ_CUR_STATE	= 1 << 1,
	IB_WQ_FLAGS	= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags;		/* Use enum ib_wq_flags */
	u32 flags_mask;		/* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles. Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;	/* XRC TGT QPs only */
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
	u8 port;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_dm {
	struct ib_device *device;
	u32 length;
	u32 flags;
	struct ib_uobject *uobject;
	atomic_t usecnt;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u64 length;
	unsigned int page_size;
	bool need_inval;
	union {
		struct ib_uobject *uobject;	/* user */
		struct list_head qp_entry;	/* FR */
	};

	struct ib_dm *dm;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	/* L4 headers */
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};

#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	10

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM	/* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP	= 1UL << 1,	/* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS	= 1UL << 2,	/* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED	= 1UL << 3	/* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2,	/* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4		/* All fragmented packets except the
					 * last have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel;
 * the tunnel_id from val has the vni value.
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32 spi;
	__be32 seq;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_esp {
	u32 type;
	u16 size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	__be16 protocol;
	__be32 key;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_gre {
	u32 type;
	u16 size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32 tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_mpls {
	u32 type;
	u16 size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type type;
	u16 size;
	u32 tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type type;
	u16 size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_flow_action *act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_counters *counters;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
	struct ib_flow_spec_esp esp;
	struct ib_flow_spec_gre gre;
	struct ib_flow_spec_mpls mpls;
	struct ib_flow_spec_action_tag flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	union ib_flow_spec flows[];
};

struct ib_flow {
	struct ib_qp *qp;
	struct ib_device *device;
	struct ib_uobject *uobject;
};

enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
	 */

	/* Kernel flags */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};

struct ib_flow_spec_list {
	struct ib_flow_spec_list *next;
	union ib_flow_spec spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats *keymat;
	struct ib_flow_action_attrs_esp_replays *replay;
	struct ib_flow_spec_list *encap;
	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
	 * A value of 0 is a valid value.
	 */
	u32 esn;
	u32 spi;
	u32 seq;
	u32 tfc_pad;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64 flags;
	u64 hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device *device;
	struct ib_uobject *uobject;
	enum ib_flow_action_type type;
	atomic_t usecnt;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE	= 0,	  /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS	= 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY	= 1 << 1, /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED	= 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	u64 subnet_prefix;
	struct ib_pkey_cache *pkey;
	struct ib_gid_table *gid;
	u8 lmc;
	enum ib_port_state port_state;
};

struct ib_cache {
	rwlock_t lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache *ports;
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int pkey_tbl_len;
	int gid_tbl_len;
	u32 core_cap_flags;
	u32 max_mad_size;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void *clnt_priv;
	struct ib_device *hca;
	u8 port_num;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct ib_port_pkey_list {
	/* Lock to hold while modifying the list. */
	spinlock_t list_lock;
	struct list_head pkey_list;
};

struct ib_counters {
	struct ib_device *device;
	struct ib_uobject *uobject;
	/* num of objects attached */
	atomic_t usecnt;
};

struct ib_counters_read_attr {
	u64 *counters_buff;
	u32 ncounters;
	u32 flags;	/* use enum ib_read_counters_flags */
};

struct uverbs_attr_bundle;

struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device *dma_device;

	char name[IB_DEVICE_NAME_MAX];

	struct list_head event_handler_list;
	spinlock_t event_handler_lock;

	spinlock_t client_data_lock;
	struct list_head core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock and the lists_rwsem read-write semaphore
	 */
	struct list_head client_data_list;

	struct ib_cache cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable *port_immutable;

	int num_comp_vectors;

	struct ib_port_pkey_list *port_pkey_list;

	struct iw_cm_verbs *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 * driver initialized data. The struct is kfree()'ed by the sysfs
	 * core when the device is removed. A lifespan of -1 in the return
	 * struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats,
			    u8 port, int index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*query_port)(struct ib_device *device,
			  u8 port_num,
			  struct ib_port_attr *port_attr);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u8 port_num);
	/* query_gid should return the GID value for @device, when the
	 * @port_num link layer is either IB or iWARP. It is a no-op if
	 * the @port_num port is a RoCE link layer port.
	 */
	int (*query_gid)(struct ib_device *device,
			 u8 port_num, int index,
			 union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver may
	 * allocate memory to contain this information and store it in @context
	 * when a new GID entry is written to. Params are consistent until the
	 * next call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr,
		       void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr,
		       void **context);
	int (*query_pkey)(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey);
	int (*modify_device)(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify);
	int (*modify_port)(struct ib_device *device,
			   u8 port_num, int port_modify_mask,
			   struct ib_port_modify *port_modify);
	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
					      struct ib_udata *udata);
	int (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context,
		    struct vm_area_struct *vma);
	struct ib_pd *(*alloc_pd)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *(*create_ah)(struct ib_pd *pd,
				   struct rdma_ah_attr *ah_attr,
				   struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah,
			 struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah,
			struct rdma_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *(*create_srq)(struct ib_pd *pd,
				     struct ib_srq_init_attr *srq_init_attr,
				     struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq,
			  struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq,
			 struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp,
			 struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp);
	int (*post_send)(struct ib_qp *qp,
			 const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp,
			 const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *(*create_cq)(struct ib_device *device,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
			 u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq);
	int (*resize_cq)(struct ib_cq *cq, int cqe,
			 struct ib_udata *udata);
	int (*poll_cq)(struct ib_cq *cq, int num_entries,
		       struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq,
			     enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq,
			       int wc_cnt);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
				    int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
				     u64 start, u64 length,
				     u64 virt_addr,
				     int mr_access_flags,
				     struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr,
			     int flags,
			     u64 start, u64 length,
			     u64 virt_addr,
			     int mr_access_flags,
			     struct ib_pd *pd,
			     struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg);
	int (*map_mr_sg)(struct ib_mr *mr,
			 struct scatterlist *sg,
			 int sg_nents,
			 unsigned int *sg_offset);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd,
				  enum ib_mw_type type,
				  struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
				    int mr_access_flags,
				    struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr,
			    u64 *page_list, int list_len,
			    u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	int (*attach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*detach_mcast)(struct ib_qp *qp,
			    union ib_gid *gid,
			    u16 lid);
	int (*process_mad)(struct ib_device *device,
			   int process_mad_flags,
			   u8 port_num,
			   const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad_hdr *in_mad,
			   size_t in_mad_size,
			   struct ib_mad_hdr *out_mad,
			   size_t *out_mad_size,
			   u16 *out_mad_pkey_index);
	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
				      struct ib_ucontext *ucontext,
				      struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       int domain,
				       struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	int (*destroy_wq)(struct ib_wq *wq);
	int (*modify_wq)(struct ib_wq *wq,
			 struct ib_wq_attr *attr,
			 u32 wq_attr_mask,
			 struct ib_udata *udata);
	struct ib_rwq_ind_table *(*create_rwq_ind_table)(struct ib_device *device,
							 struct ib_rwq_ind_table_init_attr *init_attr,
							 struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_flow_action *(*create_flow_action_esp)(struct ib_device *device,
							 const struct ib_flow_action_attrs_esp *attr,
							 struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(struct ib_flow_action *action,
				      const struct ib_flow_action_attrs_esp *attr,
				      struct uverbs_attr_bundle *attrs);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	struct ib_counters *(*create_counters)(struct ib_device *device,
					       struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);

	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
	 * doesn't support the specified rdma netdev type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	struct module *owner;
	struct device dev;
	struct kobject *ports_parent;
	struct list_head port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int uverbs_abi_ver;
	u64 uverbs_cmd_mask;
	u64 uverbs_ex_cmd_mask;

	char node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64 node_guid;
	u32 local_dma_lkey;
	u16 is_switch:1;
	u8 node_type;
	u8 phys_port_cnt;
	struct ib_device_attr attrs;
	struct attribute_group *hw_stats_ag;
	struct rdma_hw_stats *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device cg_device;
#endif

	u32 index;
	/*
	 * Implementation details of the RDMA core, don't use in drivers
	 */
	struct rdma_restrack_root res;

	/**
	 * The following mandatory functions are used only at device
	 * registration. Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	const struct uverbs_object_tree_def *const *driver_specs;
	enum rdma_driver_id driver_id;
};

struct ib_client {
	char *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev: An RDMA device that the net_dev uses for communication.
	 * @port: A physical port number on the RDMA device.
	 * @pkey: P_Key that the net_dev uses if applicable.
	 * @gid: A GID that the net_dev uses to communicate.
	 * @addr: An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev.
	 */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
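
/*
 * Example (illustrative sketch): a driver verb returning a response to
 * userspace through ib_udata. "struct my_alloc_resp", "resp" and "obj" are
 * hypothetical names used only for illustration.
 *
 *	struct my_alloc_resp resp = {};
 *
 *	resp.handle = obj->id;
 *	if (udata->outlen < sizeof(resp))
 *		return -ENOSPC;
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return -EFAULT;
 */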

static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}

/**
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This function is a helper function that the IB layer and low-level drivers
 * can use to decide whether the destruction of the given uobject is
 * retryable.
 * It checks the original return code: if it was not success, the destruction
 * is retryable depending on the ucontext state (i.e. cleanup_retryable) and
 * the remove reason (i.e. why).
 * Must be called with the object locked for destroy.
 */
static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
					   struct ib_uobject *uobj)
{
	return ret && (why == RDMA_REMOVE_DESTROY ||
		       uobj->context->cleanup_retryable);
}

/**
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Non-zero usecnts will block destruction unless destruction was triggered by
 * a ucontext cleanup.
 */
static inline int ib_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why,
				    struct ib_uobject *uobj)
{
	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
		return -EBUSY;
	return 0;
}
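
/*
 * Example (illustrative sketch): a destroy handler refusing to tear down a
 * still-referenced object. "struct my_obj" and its fields are hypothetical.
 *
 *	static int my_destroy(struct ib_uobject *uobj,
 *			      enum rdma_remove_reason why)
 *	{
 *		struct my_obj *obj = uobj->object;
 *		int ret;
 *
 *		ret = ib_destroy_usecnt(&obj->usecnt, why, uobj);
 *		if (ret)
 *			return ret;
 *		return my_free_obj(obj);
 *	}
 */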

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask,
			enum rdma_link_layer ll);
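
/*
 * Example (illustrative sketch): validating the mask at the top of a
 * driver's modify_qp method. "cur_state" and "new_state" would normally be
 * derived from the QP and from the supplied attributes/attr_mask.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask,
 *				rdma_port_get_link_layer(ibqp->device,
 *							 ibqp->port)))
 *		return -EINVAL;
 */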

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}
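
/*
 * Example (illustrative sketch): iterating over every valid port of a
 * device. Port numbering starts at 0 for switches and at 1 for all other
 * devices, which is why the loop bounds come from the helpers above.
 * "handle_roce_port" is a hypothetical callback.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++) {
 *		if (rdma_protocol_roce(device, port))
 *			handle_roce_port(device, port);
 *	}
 */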

static inline bool rdma_is_grh_required(const struct ib_device *device,
					u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
	       rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_PROT_USNIC;
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices. A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions. These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access. Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI). This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric. These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination. The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface. This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration. Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group. It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group. And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID. RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port. Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
	       RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload. No other headers
 * are included.
 *
 * Return the max MAD size required by the Port. Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey. This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments. Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)

void ib_dealloc_pd(struct ib_pd *pd);
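
/*
 * Example (illustrative sketch, not part of this header): typical kernel
 * ULP usage of the PD helpers declared above. "my_device" is a hypothetical
 * ib_device pointer, e.g. one handed to an ib_client's add() callback.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(my_device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	... create QPs, CQs, and MRs under this PD ...
 *
 *	ib_dealloc_pd(pd);
 */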

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves the destination MAC address for an ah_attr of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 * the provider driver.
 *
 * Returns a valid address handle on success and an ERR_PTR-encoded error
 * code on failure. The address handle is used to reference a local or
 * global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);

/**
 * ib_get_gids_from_rdma_hdr - Get the source and destination GIDs from the
 * GRH or IPv4 header of a received message.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store the source GID
 * @dgid: place to store the destination GID
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 * work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 * ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 * handle for replying to the message.
 * When ib_init_ah_attr_from_wc() returns success, ah_attr
 * (a) for the IB link layer optionally contains a reference to the SGID
 * attribute when a GRH is present, and
 * (b) for the RoCE link layer always contains a reference to the SGID
 * attribute.
 * The user must invoke rdma_destroy_ah_attr() to release the reference to
 * SGID attributes which were initialized by ib_init_ah_attr_from_wc().
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 * sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 * ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 * address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 * handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int rdma_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 * SRQ. If SRQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);
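
/*
 * Example (illustrative sketch, not part of this header): creating an SRQ
 * under an existing PD. The sizes and the "my_srq_event" handler are
 * hypothetical; real consumers size the SRQ for their workload.
 *
 *	struct ib_srq_init_attr srq_attr = {};
 *	struct ib_srq *srq;
 *
 *	srq_attr.event_handler = my_srq_event;
 *	srq_attr.attr.max_wr = 256;
 *	srq_attr.attr.max_sge = 1;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */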

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 * the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 * are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 * QP. If QP creation succeeds, then the attributes are updated to
 * the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
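
/*
 * Example (illustrative sketch, not part of this header): creating an RC QP
 * whose send and receive queues complete on the same CQ. All queue sizes
 * here are hypothetical placeholders.
 *
 *	struct ib_qp_init_attr qp_attr = {};
 *	struct ib_qp *qp;
 *
 *	qp_attr.send_cq = cq;
 *	qp_attr.recv_cq = cq;
 *	qp_attr.qp_type = IB_QPT_RC;
 *	qp_attr.cap.max_send_wr = 64;
 *	qp_attr.cap.max_recv_wr = 64;
 *	qp_attr.cap.max_send_sge = 1;
 *	qp_attr.cap.max_recv_sge = 1;
 *
 *	qp = ib_create_qp(pd, &qp_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */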

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 * the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 * @udata: pointer to the user's input/output buffer information.
 *
 * It returns 0 on success and an appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 * the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
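
/*
 * Example (illustrative sketch, not part of this header): moving a newly
 * created QP from RESET to INIT. The pkey index, port, and access flags
 * are hypothetical and normally come from connection setup (e.g. the CM).
 *
 *	struct ib_qp_attr attr = {};
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.pkey_index = 0;
 *	attr.port_num = 1;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */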

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller. The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
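
/*
 * Example (illustrative sketch, not part of this header): posting a single
 * signaled SEND of one SGE. The wr_id cookie and the pre-built "sge" are
 * hypothetical; the cookie is reported back in the matching ib_wc.
 *
 *	struct ib_send_wr wr = {};
 *	const struct ib_send_wr *bad_wr;
 *
 *	wr.wr_id = (uintptr_t)my_request;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.opcode = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 *	if (ret)
 *		pr_err("post_send failed at wr %p\n", bad_wr);
 */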

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
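
/*
 * Example (illustrative sketch, not part of this header): replenishing the
 * receive queue with one buffer described by a previously DMA-mapped SGE.
 *
 *	struct ib_recv_wr wr = {};
 *	const struct ib_recv_wr *bad_wr;
 *
 *	wr.wr_id = (uintptr_t)my_buffer;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */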

struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller);
#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)

void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
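
/*
 * Example (illustrative sketch, not part of this header): allocating a CQ
 * with the kernel CQ API, which polls on the consumer's behalf and invokes
 * the done callback of each work request's ib_cqe. "my_done" and
 * "struct my_request" are hypothetical.
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *		...
 *	}
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * Work requests then set wr.wr_cqe = &req->cqe with req->cqe.done = my_done.
 */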

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 * completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 * asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 * the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller);
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 * will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
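
/*
 * Example (illustrative sketch, not part of this header): draining all
 * currently queued completions from a CQ, one at a time.
 * "handle_completion" is a hypothetical consumer hook.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("wr_id %llu failed: %d\n", wc.wr_id, wc.status);
 *		else
 *			handle_completion(&wc);
 *	}
 */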

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
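
/*
 * Example (illustrative sketch, not part of this header): the canonical
 * arm-then-repoll pattern that closes the race described above.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq,
 *				  IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */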

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 * CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}
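
/*
 * Example (illustrative sketch, not part of this header): mapping a kernel
 * buffer for device access, checking the mapping, and unmapping it after
 * the corresponding work request has completed.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *
 *	... post a WR whose SGE carries dma_addr, wait for its CQE ...
 *
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */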

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
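
/*
 * Example (illustrative sketch, not part of this header): mapping a
 * scatterlist and walking the mapped entries to build SGEs. Per the notes
 * above, sg_dma_address()/sg_dma_len() are used directly. "sge" is a
 * hypothetical caller-owned array of struct ib_sge.
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = ib_dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *
 *	for_each_sg(sgl, sg, n, i) {
 *		sge[i].addr = sg_dma_address(sg);
 *		sge[i].length = sg_dma_len(sg);
 *		sge[i].lkey = pd->local_dma_lkey;
 *	}
 */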

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
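
/*
 * Example (illustrative sketch, not part of this header): rotating the key
 * portion of a fast-registration MR before reusing it, so stale remote
 * references to the old rkey no longer match. Only the low byte of the
 * incremented rkey is passed on, matching the u8 parameter above.
 *
 *	u32 new_rkey = ib_inc_rkey(mr->rkey);
 *
 *	ib_update_fast_reg_key(mr, new_rkey & 0xff);
 */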

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 * IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @caller: Module name for kernel consumers
 */
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
#define ib_alloc_xrcd(device) \
	__ib_alloc_xrcd((device), KBUILD_MODNAME)

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	if (flags & ~IB_ACCESS_SUPPORTED)
		return -EINVAL;

	return 0;
}

static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set. "Local write" and "remote write" obviously
	 * require write access. "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 * This routine may provide status checks on a selected
 * ib_mr. The first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 * ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 * Failed checks will be indicated in the status bitmask
 * and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);

struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *
						 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
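
/*
 * Example (illustrative sketch, not part of this header): mapping a DMA-
 * mapped scatterlist onto an MR allocated with ib_alloc_mr(), prior to
 * posting an IB_WR_REG_MR work request that makes the mapping live.
 *
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < 0 || n < nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	... fill a struct ib_reg_wr referencing mr and post it ...
 */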

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the GRH */
static inline struct ib_global_route
*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}
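
/*
 * Example (illustrative sketch, not part of this header): filling a
 * rdma_ah_attr for a global (GRH-carrying) destination using the accessors
 * above. The destination GID/LID, SGID index, and hop limit are
 * hypothetical values that would normally come from path resolution.
 *
 *	struct rdma_ah_attr ah_attr = {};
 *
 *	ah_attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_dlid(&ah_attr, remote_lid);
 *	rdma_ah_set_grh(&ah_attr, &remote_gid, 0, sgid_index, 64, 0);
 *
 *	ah = rdma_create_ah(pd, &ah_attr);
 */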

void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);

void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);

void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);

void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);

void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 * In the current implementation the only way to
 * get the 32bit lid is from other sources for OPA.
 * For IB, lids will always be 16bits so cast the
 * value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 * vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->get_vector_affinity)
		return NULL;

	return device->get_vector_affinity(device, comp_vector);
}

static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
			       struct ib_qp *qp, struct ib_device *device)
{
	uobj->object = ibflow;
	ibflow->uobject = uobj;
	if (qp) {
		atomic_inc(&qp->usecnt);
		ibflow->qp = qp;
	}
	ibflow->device = device;
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their GIDs, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
			       struct uverbs_attr_bundle *attrs);

#endif /* IB_VERBS_H */