ena_com.c

  1. /*
  2. * Copyright 2015 Amazon.com, Inc. or its affiliates.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include "ena_com.h"
  33. /*****************************************************************************/
  34. /*****************************************************************************/
  35. /* Timeout in micro-sec */
  36. #define ADMIN_CMD_TIMEOUT_US (3000000)
  37. #define ENA_ASYNC_QUEUE_DEPTH 16
  38. #define ENA_ADMIN_QUEUE_DEPTH 32
  39. #define ENA_CTRL_MAJOR 0
  40. #define ENA_CTRL_MINOR 0
  41. #define ENA_CTRL_SUB_MINOR 1
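/* Minimum controller version required by the driver, packed with the same
 * field layout as the controller version register (major/minor/sub-minor).
 */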
  42. #define MIN_ENA_CTRL_VER \
  43. (((ENA_CTRL_MAJOR) << \
  44. (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
  45. ((ENA_CTRL_MINOR) << \
  46. (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
  47. (ENA_CTRL_SUB_MINOR))
  48. #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
  49. #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
  50. #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
  51. #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
  52. #define ENA_REGS_ADMIN_INTR_MASK 1
  53. #define ENA_POLL_MS 5
  54. /*****************************************************************************/
  55. /*****************************************************************************/
  56. /*****************************************************************************/
  57. enum ena_cmd_status {
  58. ENA_CMD_SUBMITTED,
  59. ENA_CMD_COMPLETED,
  60. /* Abort - canceled by the driver */
  61. ENA_CMD_ABORTED,
  62. };
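/* Per-command completion context; one entry is allocated for each admin
 * queue slot and reused as commands are submitted and completed.
 */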
  63. struct ena_comp_ctx {
  64. struct completion wait_event;
  65. struct ena_admin_acq_entry *user_cqe;
  66. u32 comp_size;
  67. enum ena_cmd_status status;
  68. /* status from the device */
  69. u8 comp_status;
  70. u8 cmd_opcode;
  71. bool occupied;
  72. };
  73. struct ena_com_stats_ctx {
  74. struct ena_admin_aq_get_stats_cmd get_cmd;
  75. struct ena_admin_acq_get_stats_resp get_resp;
  76. };
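/* Split a DMA address into the low/high words of an ena_common_mem_addr,
 * after verifying it fits within the device's supported DMA address width.
 */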
  77. static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
  78. struct ena_common_mem_addr *ena_addr,
  79. dma_addr_t addr)
  80. {
  81. if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
   82. pr_err("dma address has more bits than the device supports\n");
  83. return -EINVAL;
  84. }
  85. ena_addr->mem_addr_low = lower_32_bits(addr);
  86. ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
  87. return 0;
  88. }
  89. static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
  90. {
  91. struct ena_com_admin_sq *sq = &queue->sq;
  92. u16 size = ADMIN_SQ_SIZE(queue->q_depth);
  93. sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
  94. GFP_KERNEL);
  95. if (!sq->entries) {
  96. pr_err("memory allocation failed\n");
  97. return -ENOMEM;
  98. }
  99. sq->head = 0;
  100. sq->tail = 0;
  101. sq->phase = 1;
  102. sq->db_addr = NULL;
  103. return 0;
  104. }
  105. static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
  106. {
  107. struct ena_com_admin_cq *cq = &queue->cq;
  108. u16 size = ADMIN_CQ_SIZE(queue->q_depth);
  109. cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
  110. GFP_KERNEL);
  111. if (!cq->entries) {
  112. pr_err("memory allocation failed\n");
  113. return -ENOMEM;
  114. }
  115. cq->head = 0;
  116. cq->phase = 1;
  117. return 0;
  118. }
  119. static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
  120. struct ena_aenq_handlers *aenq_handlers)
  121. {
  122. struct ena_com_aenq *aenq = &dev->aenq;
  123. u32 addr_low, addr_high, aenq_caps;
  124. u16 size;
  125. dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
  126. size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
  127. aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
  128. GFP_KERNEL);
  129. if (!aenq->entries) {
  130. pr_err("memory allocation failed\n");
  131. return -ENOMEM;
  132. }
  133. aenq->head = aenq->q_depth;
  134. aenq->phase = 1;
  135. addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
  136. addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
  137. writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
  138. writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
  139. aenq_caps = 0;
  140. aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
  141. aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
  142. << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
  143. ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
  144. writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
  145. if (unlikely(!aenq_handlers)) {
  146. pr_err("aenq handlers pointer is NULL\n");
  147. return -EINVAL;
  148. }
  149. aenq->aenq_handlers = aenq_handlers;
  150. return 0;
  151. }
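/* Illustrative sketch (not part of this file): a driver typically passes a
 * statically defined ena_aenq_handlers table when initializing the admin
 * queue; the handler names below are hypothetical, while the field layout
 * and group indices follow the ENA admin definitions in ena_com.h:
 *
 *	static struct ena_aenq_handlers my_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_handler,
 *			[ENA_ADMIN_KEEP_ALIVE] = my_keep_alive_handler,
 *		},
 *		.unimplemented_handler = my_unimplemented_handler,
 *	};
 */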
  152. static void comp_ctxt_release(struct ena_com_admin_queue *queue,
  153. struct ena_comp_ctx *comp_ctx)
  154. {
  155. comp_ctx->occupied = false;
  156. atomic_dec(&queue->outstanding_cmds);
  157. }
  158. static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
  159. u16 command_id, bool capture)
  160. {
  161. if (unlikely(!queue->comp_ctx)) {
  162. pr_err("Completion context is NULL\n");
  163. return NULL;
  164. }
  165. if (unlikely(command_id >= queue->q_depth)) {
  166. pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
  167. command_id, queue->q_depth);
  168. return NULL;
  169. }
  170. if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
  171. pr_err("Completion context is occupied\n");
  172. return NULL;
  173. }
  174. if (capture) {
  175. atomic_inc(&queue->outstanding_cmds);
  176. queue->comp_ctx[command_id].occupied = true;
  177. }
  178. return &queue->comp_ctx[command_id];
  179. }
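/* Copy a command into the next admin SQ slot, tag it with the current phase
 * bit and a command id (which also indexes the completion context array),
 * and ring the SQ doorbell. The caller must hold the admin queue lock.
 */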
  180. static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
  181. struct ena_admin_aq_entry *cmd,
  182. size_t cmd_size_in_bytes,
  183. struct ena_admin_acq_entry *comp,
  184. size_t comp_size_in_bytes)
  185. {
  186. struct ena_comp_ctx *comp_ctx;
  187. u16 tail_masked, cmd_id;
  188. u16 queue_size_mask;
  189. u16 cnt;
  190. queue_size_mask = admin_queue->q_depth - 1;
  191. tail_masked = admin_queue->sq.tail & queue_size_mask;
  192. /* In case of queue FULL */
  193. cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
  194. if (cnt >= admin_queue->q_depth) {
  195. pr_debug("admin queue is full.\n");
  196. admin_queue->stats.out_of_space++;
  197. return ERR_PTR(-ENOSPC);
  198. }
  199. cmd_id = admin_queue->curr_cmd_id;
  200. cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
  201. ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
  202. cmd->aq_common_descriptor.command_id |= cmd_id &
  203. ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
  204. comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
  205. if (unlikely(!comp_ctx))
  206. return ERR_PTR(-EINVAL);
  207. comp_ctx->status = ENA_CMD_SUBMITTED;
  208. comp_ctx->comp_size = (u32)comp_size_in_bytes;
  209. comp_ctx->user_cqe = comp;
  210. comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
  211. reinit_completion(&comp_ctx->wait_event);
  212. memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
  213. admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
  214. queue_size_mask;
  215. admin_queue->sq.tail++;
  216. admin_queue->stats.submitted_cmd++;
  217. if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
  218. admin_queue->sq.phase = !admin_queue->sq.phase;
  219. writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
  220. return comp_ctx;
  221. }
  222. static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
  223. {
  224. size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
  225. struct ena_comp_ctx *comp_ctx;
  226. u16 i;
  227. queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
  228. if (unlikely(!queue->comp_ctx)) {
  229. pr_err("memory allocation failed\n");
  230. return -ENOMEM;
  231. }
  232. for (i = 0; i < queue->q_depth; i++) {
  233. comp_ctx = get_comp_ctxt(queue, i, false);
  234. if (comp_ctx)
  235. init_completion(&comp_ctx->wait_event);
  236. }
  237. return 0;
  238. }
  239. static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
  240. struct ena_admin_aq_entry *cmd,
  241. size_t cmd_size_in_bytes,
  242. struct ena_admin_acq_entry *comp,
  243. size_t comp_size_in_bytes)
  244. {
  245. unsigned long flags = 0;
  246. struct ena_comp_ctx *comp_ctx;
  247. spin_lock_irqsave(&admin_queue->q_lock, flags);
  248. if (unlikely(!admin_queue->running_state)) {
  249. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  250. return ERR_PTR(-ENODEV);
  251. }
  252. comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
  253. cmd_size_in_bytes,
  254. comp,
  255. comp_size_in_bytes);
  256. if (IS_ERR(comp_ctx))
  257. admin_queue->running_state = false;
  258. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  259. return comp_ctx;
  260. }
  261. static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
  262. struct ena_com_create_io_ctx *ctx,
  263. struct ena_com_io_sq *io_sq)
  264. {
  265. size_t size;
  266. int dev_node = 0;
  267. memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
  268. io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
  269. io_sq->desc_entry_size =
  270. (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
  271. sizeof(struct ena_eth_io_tx_desc) :
  272. sizeof(struct ena_eth_io_rx_desc);
  273. size = io_sq->desc_entry_size * io_sq->q_depth;
  274. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
  275. dev_node = dev_to_node(ena_dev->dmadev);
  276. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  277. io_sq->desc_addr.virt_addr =
  278. dma_zalloc_coherent(ena_dev->dmadev, size,
  279. &io_sq->desc_addr.phys_addr,
  280. GFP_KERNEL);
  281. set_dev_node(ena_dev->dmadev, dev_node);
  282. if (!io_sq->desc_addr.virt_addr) {
  283. io_sq->desc_addr.virt_addr =
  284. dma_zalloc_coherent(ena_dev->dmadev, size,
  285. &io_sq->desc_addr.phys_addr,
  286. GFP_KERNEL);
  287. }
  288. if (!io_sq->desc_addr.virt_addr) {
  289. pr_err("memory allocation failed\n");
  290. return -ENOMEM;
  291. }
  292. }
  293. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
  294. /* Allocate bounce buffers */
  295. io_sq->bounce_buf_ctrl.buffer_size =
  296. ena_dev->llq_info.desc_list_entry_size;
  297. io_sq->bounce_buf_ctrl.buffers_num =
  298. ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
  299. io_sq->bounce_buf_ctrl.next_to_use = 0;
  300. size = io_sq->bounce_buf_ctrl.buffer_size *
  301. io_sq->bounce_buf_ctrl.buffers_num;
  302. dev_node = dev_to_node(ena_dev->dmadev);
  303. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  304. io_sq->bounce_buf_ctrl.base_buffer =
  305. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  306. set_dev_node(ena_dev->dmadev, dev_node);
  307. if (!io_sq->bounce_buf_ctrl.base_buffer)
  308. io_sq->bounce_buf_ctrl.base_buffer =
  309. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  310. if (!io_sq->bounce_buf_ctrl.base_buffer) {
  311. pr_err("bounce buffer memory allocation failed\n");
  312. return -ENOMEM;
  313. }
  314. memcpy(&io_sq->llq_info, &ena_dev->llq_info,
  315. sizeof(io_sq->llq_info));
   316. /* Initialize the first bounce buffer */
  317. io_sq->llq_buf_ctrl.curr_bounce_buf =
  318. ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
  319. memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
  320. 0x0, io_sq->llq_info.desc_list_entry_size);
  321. io_sq->llq_buf_ctrl.descs_left_in_line =
  322. io_sq->llq_info.descs_num_before_header;
  323. if (io_sq->llq_info.max_entries_in_tx_burst > 0)
  324. io_sq->entries_in_tx_burst_left =
  325. io_sq->llq_info.max_entries_in_tx_burst;
  326. }
  327. io_sq->tail = 0;
  328. io_sq->next_to_comp = 0;
  329. io_sq->phase = 1;
  330. return 0;
  331. }
  332. static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
  333. struct ena_com_create_io_ctx *ctx,
  334. struct ena_com_io_cq *io_cq)
  335. {
  336. size_t size;
  337. int prev_node = 0;
  338. memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
  339. /* Use the basic completion descriptor for Rx */
  340. io_cq->cdesc_entry_size_in_bytes =
  341. (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
  342. sizeof(struct ena_eth_io_tx_cdesc) :
  343. sizeof(struct ena_eth_io_rx_cdesc_base);
  344. size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
  345. prev_node = dev_to_node(ena_dev->dmadev);
  346. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  347. io_cq->cdesc_addr.virt_addr =
  348. dma_zalloc_coherent(ena_dev->dmadev, size,
  349. &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
  350. set_dev_node(ena_dev->dmadev, prev_node);
  351. if (!io_cq->cdesc_addr.virt_addr) {
  352. io_cq->cdesc_addr.virt_addr =
  353. dma_zalloc_coherent(ena_dev->dmadev, size,
  354. &io_cq->cdesc_addr.phys_addr,
  355. GFP_KERNEL);
  356. }
  357. if (!io_cq->cdesc_addr.virt_addr) {
  358. pr_err("memory allocation failed\n");
  359. return -ENOMEM;
  360. }
  361. io_cq->phase = 1;
  362. io_cq->head = 0;
  363. return 0;
  364. }
  365. static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
  366. struct ena_admin_acq_entry *cqe)
  367. {
  368. struct ena_comp_ctx *comp_ctx;
  369. u16 cmd_id;
  370. cmd_id = cqe->acq_common_descriptor.command &
  371. ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
  372. comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
  373. if (unlikely(!comp_ctx)) {
  374. pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
  375. admin_queue->running_state = false;
  376. return;
  377. }
  378. comp_ctx->status = ENA_CMD_COMPLETED;
  379. comp_ctx->comp_status = cqe->acq_common_descriptor.status;
  380. if (comp_ctx->user_cqe)
  381. memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
  382. if (!admin_queue->polling)
  383. complete(&comp_ctx->wait_event);
  384. }
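/* Walk the admin CQ from the current head, handling every entry whose phase
 * bit matches the expected phase; the phase is toggled on each wrap-around.
 */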
  385. static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
  386. {
  387. struct ena_admin_acq_entry *cqe = NULL;
  388. u16 comp_num = 0;
  389. u16 head_masked;
  390. u8 phase;
  391. head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
  392. phase = admin_queue->cq.phase;
  393. cqe = &admin_queue->cq.entries[head_masked];
  394. /* Go over all the completions */
  395. while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
  396. ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
  397. /* Do not read the rest of the completion entry before the
  398. * phase bit was validated
  399. */
  400. dma_rmb();
  401. ena_com_handle_single_admin_completion(admin_queue, cqe);
  402. head_masked++;
  403. comp_num++;
  404. if (unlikely(head_masked == admin_queue->q_depth)) {
  405. head_masked = 0;
  406. phase = !phase;
  407. }
  408. cqe = &admin_queue->cq.entries[head_masked];
  409. }
  410. admin_queue->cq.head += comp_num;
  411. admin_queue->cq.phase = phase;
  412. admin_queue->sq.head += comp_num;
  413. admin_queue->stats.completed_cmd += comp_num;
  414. }
  415. static int ena_com_comp_status_to_errno(u8 comp_status)
  416. {
  417. if (unlikely(comp_status != 0))
  418. pr_err("admin command failed[%u]\n", comp_status);
  419. if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
  420. return -EINVAL;
  421. switch (comp_status) {
  422. case ENA_ADMIN_SUCCESS:
  423. return 0;
  424. case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
  425. return -ENOMEM;
  426. case ENA_ADMIN_UNSUPPORTED_OPCODE:
  427. return -EOPNOTSUPP;
  428. case ENA_ADMIN_BAD_OPCODE:
  429. case ENA_ADMIN_MALFORMED_REQUEST:
  430. case ENA_ADMIN_ILLEGAL_PARAMETER:
  431. case ENA_ADMIN_UNKNOWN_ERROR:
  432. return -EINVAL;
  433. }
  434. return 0;
  435. }
  436. static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
  437. struct ena_com_admin_queue *admin_queue)
  438. {
  439. unsigned long flags = 0;
  440. unsigned long timeout;
  441. int ret;
  442. timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
  443. while (1) {
  444. spin_lock_irqsave(&admin_queue->q_lock, flags);
  445. ena_com_handle_admin_completion(admin_queue);
  446. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  447. if (comp_ctx->status != ENA_CMD_SUBMITTED)
  448. break;
  449. if (time_is_before_jiffies(timeout)) {
  450. pr_err("Wait for completion (polling) timeout\n");
  451. /* ENA didn't have any completion */
  452. spin_lock_irqsave(&admin_queue->q_lock, flags);
  453. admin_queue->stats.no_completion++;
  454. admin_queue->running_state = false;
  455. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  456. ret = -ETIME;
  457. goto err;
  458. }
  459. msleep(ENA_POLL_MS);
  460. }
  461. if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
  462. pr_err("Command was aborted\n");
  463. spin_lock_irqsave(&admin_queue->q_lock, flags);
  464. admin_queue->stats.aborted_cmd++;
  465. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  466. ret = -ENODEV;
  467. goto err;
  468. }
  469. WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
  470. comp_ctx->status);
  471. ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
  472. err:
  473. comp_ctxt_release(admin_queue, comp_ctx);
  474. return ret;
  475. }
  476. /**
  477. * Set the LLQ configurations of the firmware
  478. *
  479. * The driver provides only the enabled feature values to the device,
   480. * which, in turn, checks whether they are supported.
  481. */
  482. static int ena_com_set_llq(struct ena_com_dev *ena_dev)
  483. {
  484. struct ena_com_admin_queue *admin_queue;
  485. struct ena_admin_set_feat_cmd cmd;
  486. struct ena_admin_set_feat_resp resp;
  487. struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
  488. int ret;
  489. memset(&cmd, 0x0, sizeof(cmd));
  490. admin_queue = &ena_dev->admin_queue;
  491. cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
  492. cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
  493. cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
  494. cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
  495. cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
  496. cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
  497. ret = ena_com_execute_admin_command(admin_queue,
  498. (struct ena_admin_aq_entry *)&cmd,
  499. sizeof(cmd),
  500. (struct ena_admin_acq_entry *)&resp,
  501. sizeof(resp));
  502. if (unlikely(ret))
  503. pr_err("Failed to set LLQ configurations: %d\n", ret);
  504. return ret;
  505. }
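/* Resolve the LLQ configuration: intersect the driver's default settings with
 * the capabilities reported by the device, fall back to a supported value
 * (logging a warning) when a default isn't available, and then push the
 * result to the device via ena_com_set_llq().
 */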
  506. static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
  507. struct ena_admin_feature_llq_desc *llq_features,
  508. struct ena_llq_configurations *llq_default_cfg)
  509. {
  510. struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
  511. u16 supported_feat;
  512. int rc;
  513. memset(llq_info, 0, sizeof(*llq_info));
  514. supported_feat = llq_features->header_location_ctrl_supported;
  515. if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
  516. llq_info->header_location_ctrl =
  517. llq_default_cfg->llq_header_location;
  518. } else {
  519. pr_err("Invalid header location control, supported: 0x%x\n",
  520. supported_feat);
  521. return -EINVAL;
  522. }
  523. if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
  524. supported_feat = llq_features->descriptors_stride_ctrl_supported;
  525. if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
  526. llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
  527. } else {
  528. if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
  529. llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
  530. } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
  531. llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
  532. } else {
  533. pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
  534. supported_feat);
  535. return -EINVAL;
  536. }
  537. pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
  538. llq_default_cfg->llq_stride_ctrl, supported_feat,
  539. llq_info->desc_stride_ctrl);
  540. }
  541. } else {
  542. llq_info->desc_stride_ctrl = 0;
  543. }
  544. supported_feat = llq_features->entry_size_ctrl_supported;
  545. if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
  546. llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
  547. llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
  548. } else {
  549. if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
  550. llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
  551. llq_info->desc_list_entry_size = 128;
  552. } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
  553. llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
  554. llq_info->desc_list_entry_size = 192;
  555. } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
  556. llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
  557. llq_info->desc_list_entry_size = 256;
  558. } else {
  559. pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
  560. supported_feat);
  561. return -EINVAL;
  562. }
  563. pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
  564. llq_default_cfg->llq_ring_entry_size, supported_feat,
  565. llq_info->desc_list_entry_size);
  566. }
  567. if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
   568. /* The desc list entry size should be a whole multiple of 8.
  569. * This requirement comes from __iowrite64_copy()
  570. */
  571. pr_err("illegal entry size %d\n",
  572. llq_info->desc_list_entry_size);
  573. return -EINVAL;
  574. }
  575. if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
  576. llq_info->descs_per_entry = llq_info->desc_list_entry_size /
  577. sizeof(struct ena_eth_io_tx_desc);
  578. else
  579. llq_info->descs_per_entry = 1;
  580. supported_feat = llq_features->desc_num_before_header_supported;
  581. if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
  582. llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
  583. } else {
  584. if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
  585. llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
  586. } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
  587. llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
  588. } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
  589. llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
  590. } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
  591. llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
  592. } else {
  593. pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
  594. supported_feat);
  595. return -EINVAL;
  596. }
  597. pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
  598. llq_default_cfg->llq_num_decs_before_header,
  599. supported_feat, llq_info->descs_num_before_header);
  600. }
  601. llq_info->max_entries_in_tx_burst =
  602. (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
  603. rc = ena_com_set_llq(ena_dev);
  604. if (rc)
  605. pr_err("Cannot set LLQ configuration: %d\n", rc);
  606. return rc;
  607. }
  608. static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
  609. struct ena_com_admin_queue *admin_queue)
  610. {
  611. unsigned long flags = 0;
  612. int ret;
  613. wait_for_completion_timeout(&comp_ctx->wait_event,
  614. usecs_to_jiffies(
  615. admin_queue->completion_timeout));
   616. /* In case the command wasn't completed, find out the root cause.
   617. * There might be two kinds of errors:
   618. * 1) No completion (timeout reached).
   619. * 2) There is a completion, but the driver didn't receive the MSI-X interrupt.
  620. */
  621. if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
  622. spin_lock_irqsave(&admin_queue->q_lock, flags);
  623. ena_com_handle_admin_completion(admin_queue);
  624. admin_queue->stats.no_completion++;
  625. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  626. if (comp_ctx->status == ENA_CMD_COMPLETED) {
  627. pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
  628. comp_ctx->cmd_opcode,
  629. admin_queue->auto_polling ? "ON" : "OFF");
  630. /* Check if fallback to polling is enabled */
  631. if (admin_queue->auto_polling)
  632. admin_queue->polling = true;
  633. } else {
  634. pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
  635. comp_ctx->cmd_opcode, comp_ctx->status);
  636. }
  637. /* Check if shifted to polling mode.
  638. * This will happen if there is a completion without an interrupt
   639. * and autopolling mode is enabled. Continue normal execution in such a case.
  640. */
  641. if (!admin_queue->polling) {
  642. admin_queue->running_state = false;
  643. ret = -ETIME;
  644. goto err;
  645. }
  646. }
  647. ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
  648. err:
  649. comp_ctxt_release(admin_queue, comp_ctx);
  650. return ret;
  651. }
   652. /* This method reads a hardware device register by posting a write
   653. * and waiting for the response.
   654. * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
  655. */
  656. static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
  657. {
  658. struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
  659. volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
  660. mmio_read->read_resp;
  661. u32 mmio_read_reg, ret, i;
  662. unsigned long flags = 0;
  663. u32 timeout = mmio_read->reg_read_to;
  664. might_sleep();
  665. if (timeout == 0)
  666. timeout = ENA_REG_READ_TIMEOUT;
  667. /* If readless is disabled, perform regular read */
  668. if (!mmio_read->readless_supported)
  669. return readl(ena_dev->reg_bar + offset);
  670. spin_lock_irqsave(&mmio_read->lock, flags);
  671. mmio_read->seq_num++;
  672. read_resp->req_id = mmio_read->seq_num + 0xDEAD;
  673. mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
  674. ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
  675. mmio_read_reg |= mmio_read->seq_num &
  676. ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
  677. writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
  678. for (i = 0; i < timeout; i++) {
  679. if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
  680. break;
  681. udelay(1);
  682. }
  683. if (unlikely(i == timeout)) {
  684. pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
  685. mmio_read->seq_num, offset, read_resp->req_id,
  686. read_resp->reg_off);
  687. ret = ENA_MMIO_READ_TIMEOUT;
  688. goto err;
  689. }
  690. if (read_resp->reg_off != offset) {
  691. pr_err("Read failure: wrong offset provided\n");
  692. ret = ENA_MMIO_READ_TIMEOUT;
  693. } else {
  694. ret = read_resp->reg_val;
  695. }
  696. err:
  697. spin_unlock_irqrestore(&mmio_read->lock, flags);
  698. return ret;
  699. }
   700. /* There are two ways to wait for a completion.
   701. * Polling mode - wait until the completion is available.
   702. * Async mode - wait on a wait queue until the completion is ready
   703. * (or the timeout expires).
   704. * In async mode it is expected that the IRQ handler calls
   705. * ena_com_handle_admin_completion() to mark the completions.
  706. */
  707. static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
  708. struct ena_com_admin_queue *admin_queue)
  709. {
  710. if (admin_queue->polling)
  711. return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
  712. admin_queue);
  713. return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
  714. admin_queue);
  715. }
  716. static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
  717. struct ena_com_io_sq *io_sq)
  718. {
  719. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  720. struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
  721. struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
  722. u8 direction;
  723. int ret;
  724. memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
  725. if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
  726. direction = ENA_ADMIN_SQ_DIRECTION_TX;
  727. else
  728. direction = ENA_ADMIN_SQ_DIRECTION_RX;
  729. destroy_cmd.sq.sq_identity |= (direction <<
  730. ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
  731. ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
  732. destroy_cmd.sq.sq_idx = io_sq->idx;
  733. destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
  734. ret = ena_com_execute_admin_command(admin_queue,
  735. (struct ena_admin_aq_entry *)&destroy_cmd,
  736. sizeof(destroy_cmd),
  737. (struct ena_admin_acq_entry *)&destroy_resp,
  738. sizeof(destroy_resp));
  739. if (unlikely(ret && (ret != -ENODEV)))
  740. pr_err("failed to destroy io sq error: %d\n", ret);
  741. return ret;
  742. }
  743. static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
  744. struct ena_com_io_sq *io_sq,
  745. struct ena_com_io_cq *io_cq)
  746. {
  747. size_t size;
  748. if (io_cq->cdesc_addr.virt_addr) {
  749. size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
  750. dma_free_coherent(ena_dev->dmadev, size,
  751. io_cq->cdesc_addr.virt_addr,
  752. io_cq->cdesc_addr.phys_addr);
  753. io_cq->cdesc_addr.virt_addr = NULL;
  754. }
  755. if (io_sq->desc_addr.virt_addr) {
  756. size = io_sq->desc_entry_size * io_sq->q_depth;
  757. dma_free_coherent(ena_dev->dmadev, size,
  758. io_sq->desc_addr.virt_addr,
  759. io_sq->desc_addr.phys_addr);
  760. io_sq->desc_addr.virt_addr = NULL;
  761. }
  762. if (io_sq->bounce_buf_ctrl.base_buffer) {
  763. devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
  764. io_sq->bounce_buf_ctrl.base_buffer = NULL;
  765. }
  766. }
  767. static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
  768. u16 exp_state)
  769. {
  770. u32 val, i;
  771. /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
  772. timeout = (timeout * 100) / ENA_POLL_MS;
  773. for (i = 0; i < timeout; i++) {
  774. val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
  775. if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
  776. pr_err("Reg read timeout occurred\n");
  777. return -ETIME;
  778. }
  779. if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
  780. exp_state)
  781. return 0;
  782. msleep(ENA_POLL_MS);
  783. }
  784. return -ETIME;
  785. }
  786. static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
  787. enum ena_admin_aq_feature_id feature_id)
  788. {
  789. u32 feature_mask = 1 << feature_id;
   790. /* The device attributes feature is always supported */
  791. if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
  792. !(ena_dev->supported_features & feature_mask))
  793. return false;
  794. return true;
  795. }
  796. static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
  797. struct ena_admin_get_feat_resp *get_resp,
  798. enum ena_admin_aq_feature_id feature_id,
  799. dma_addr_t control_buf_dma_addr,
  800. u32 control_buff_size,
  801. u8 feature_ver)
  802. {
  803. struct ena_com_admin_queue *admin_queue;
  804. struct ena_admin_get_feat_cmd get_cmd;
  805. int ret;
  806. if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
  807. pr_debug("Feature %d isn't supported\n", feature_id);
  808. return -EOPNOTSUPP;
  809. }
  810. memset(&get_cmd, 0x0, sizeof(get_cmd));
  811. admin_queue = &ena_dev->admin_queue;
  812. get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
  813. if (control_buff_size)
  814. get_cmd.aq_common_descriptor.flags =
  815. ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
  816. else
  817. get_cmd.aq_common_descriptor.flags = 0;
  818. ret = ena_com_mem_addr_set(ena_dev,
  819. &get_cmd.control_buffer.address,
  820. control_buf_dma_addr);
  821. if (unlikely(ret)) {
  822. pr_err("memory address set failed\n");
  823. return ret;
  824. }
  825. get_cmd.control_buffer.length = control_buff_size;
  826. get_cmd.feat_common.feature_version = feature_ver;
  827. get_cmd.feat_common.feature_id = feature_id;
  828. ret = ena_com_execute_admin_command(admin_queue,
  829. (struct ena_admin_aq_entry *)
  830. &get_cmd,
  831. sizeof(get_cmd),
  832. (struct ena_admin_acq_entry *)
  833. get_resp,
  834. sizeof(*get_resp));
  835. if (unlikely(ret))
  836. pr_err("Failed to submit get_feature command %d error: %d\n",
  837. feature_id, ret);
  838. return ret;
  839. }
  840. static int ena_com_get_feature(struct ena_com_dev *ena_dev,
  841. struct ena_admin_get_feat_resp *get_resp,
  842. enum ena_admin_aq_feature_id feature_id,
  843. u8 feature_ver)
  844. {
  845. return ena_com_get_feature_ex(ena_dev,
  846. get_resp,
  847. feature_id,
  848. 0,
  849. 0,
  850. feature_ver);
  851. }
  852. static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
  853. {
  854. struct ena_admin_feature_rss_flow_hash_control *hash_key =
  855. (ena_dev->rss).hash_key;
  856. netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
   857. /* The key is stored in the device as an array of u32, and the API
   858. * requires the key to be passed in the same format. Thus the number of
   859. * entries is the key size in bytes divided by the size of u32.
  860. */
  861. hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
  862. }
  863. int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
  864. {
  865. return ena_dev->rss.hash_func;
  866. }
  867. static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
  868. {
  869. struct ena_rss *rss = &ena_dev->rss;
  870. rss->hash_key =
  871. dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
  872. &rss->hash_key_dma_addr, GFP_KERNEL);
  873. if (unlikely(!rss->hash_key))
  874. return -ENOMEM;
  875. return 0;
  876. }
  877. static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
  878. {
  879. struct ena_rss *rss = &ena_dev->rss;
  880. if (rss->hash_key)
  881. dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
  882. rss->hash_key, rss->hash_key_dma_addr);
  883. rss->hash_key = NULL;
  884. }
  885. static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
  886. {
  887. struct ena_rss *rss = &ena_dev->rss;
  888. rss->hash_ctrl =
  889. dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
  890. &rss->hash_ctrl_dma_addr, GFP_KERNEL);
  891. if (unlikely(!rss->hash_ctrl))
  892. return -ENOMEM;
  893. return 0;
  894. }
  895. static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
  896. {
  897. struct ena_rss *rss = &ena_dev->rss;
  898. if (rss->hash_ctrl)
  899. dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
  900. rss->hash_ctrl, rss->hash_ctrl_dma_addr);
  901. rss->hash_ctrl = NULL;
  902. }
  903. static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
  904. u16 log_size)
  905. {
  906. struct ena_rss *rss = &ena_dev->rss;
  907. struct ena_admin_get_feat_resp get_resp;
  908. size_t tbl_size;
  909. int ret;
  910. ret = ena_com_get_feature(ena_dev, &get_resp,
  911. ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
  912. if (unlikely(ret))
  913. return ret;
  914. if ((get_resp.u.ind_table.min_size > log_size) ||
  915. (get_resp.u.ind_table.max_size < log_size)) {
  916. pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
  917. 1 << log_size, 1 << get_resp.u.ind_table.min_size,
  918. 1 << get_resp.u.ind_table.max_size);
  919. return -EINVAL;
  920. }
  921. tbl_size = (1ULL << log_size) *
  922. sizeof(struct ena_admin_rss_ind_table_entry);
  923. rss->rss_ind_tbl =
  924. dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
  925. &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
  926. if (unlikely(!rss->rss_ind_tbl))
  927. goto mem_err1;
  928. tbl_size = (1ULL << log_size) * sizeof(u16);
  929. rss->host_rss_ind_tbl =
  930. devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
  931. if (unlikely(!rss->host_rss_ind_tbl))
  932. goto mem_err2;
  933. rss->tbl_log_size = log_size;
  934. return 0;
  935. mem_err2:
  936. tbl_size = (1ULL << log_size) *
  937. sizeof(struct ena_admin_rss_ind_table_entry);
  938. dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
  939. rss->rss_ind_tbl_dma_addr);
  940. rss->rss_ind_tbl = NULL;
  941. mem_err1:
  942. rss->tbl_log_size = 0;
  943. return -ENOMEM;
  944. }
  945. static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
  946. {
  947. struct ena_rss *rss = &ena_dev->rss;
  948. size_t tbl_size = (1ULL << rss->tbl_log_size) *
  949. sizeof(struct ena_admin_rss_ind_table_entry);
  950. if (rss->rss_ind_tbl)
  951. dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
  952. rss->rss_ind_tbl_dma_addr);
  953. rss->rss_ind_tbl = NULL;
  954. if (rss->host_rss_ind_tbl)
  955. devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
  956. rss->host_rss_ind_tbl = NULL;
  957. }
  958. static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
  959. struct ena_com_io_sq *io_sq, u16 cq_idx)
  960. {
  961. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  962. struct ena_admin_aq_create_sq_cmd create_cmd;
  963. struct ena_admin_acq_create_sq_resp_desc cmd_completion;
  964. u8 direction;
  965. int ret;
  966. memset(&create_cmd, 0x0, sizeof(create_cmd));
  967. create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
  968. if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
  969. direction = ENA_ADMIN_SQ_DIRECTION_TX;
  970. else
  971. direction = ENA_ADMIN_SQ_DIRECTION_RX;
  972. create_cmd.sq_identity |= (direction <<
  973. ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
  974. ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
  975. create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
  976. ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
  977. create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
  978. ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
  979. ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
  980. create_cmd.sq_caps_3 |=
  981. ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
  982. create_cmd.cq_idx = cq_idx;
  983. create_cmd.sq_depth = io_sq->q_depth;
  984. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
  985. ret = ena_com_mem_addr_set(ena_dev,
  986. &create_cmd.sq_ba,
  987. io_sq->desc_addr.phys_addr);
  988. if (unlikely(ret)) {
  989. pr_err("memory address set failed\n");
  990. return ret;
  991. }
  992. }
  993. ret = ena_com_execute_admin_command(admin_queue,
  994. (struct ena_admin_aq_entry *)&create_cmd,
  995. sizeof(create_cmd),
  996. (struct ena_admin_acq_entry *)&cmd_completion,
  997. sizeof(cmd_completion));
  998. if (unlikely(ret)) {
  999. pr_err("Failed to create IO SQ. error: %d\n", ret);
  1000. return ret;
  1001. }
  1002. io_sq->idx = cmd_completion.sq_idx;
  1003. io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  1004. (uintptr_t)cmd_completion.sq_doorbell_offset);
  1005. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
  1006. io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
  1007. + cmd_completion.llq_headers_offset);
  1008. io_sq->desc_addr.pbuf_dev_addr =
  1009. (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
  1010. cmd_completion.llq_descriptors_offset);
  1011. }
  1012. pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
  1013. return ret;
  1014. }
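/* Translate the host-side RSS indirection table (host queue ids) into the
 * device-side table of device queue indices before it is given to the device.
 */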
  1015. static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
  1016. {
  1017. struct ena_rss *rss = &ena_dev->rss;
  1018. struct ena_com_io_sq *io_sq;
  1019. u16 qid;
  1020. int i;
  1021. for (i = 0; i < 1 << rss->tbl_log_size; i++) {
  1022. qid = rss->host_rss_ind_tbl[i];
  1023. if (qid >= ENA_TOTAL_NUM_QUEUES)
  1024. return -EINVAL;
  1025. io_sq = &ena_dev->io_sq_queues[qid];
  1026. if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
  1027. return -EINVAL;
  1028. rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
  1029. }
  1030. return 0;
  1031. }
  1032. static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
  1033. {
  1034. u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
  1035. struct ena_rss *rss = &ena_dev->rss;
  1036. u8 idx;
  1037. u16 i;
  1038. for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
  1039. dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
  1040. for (i = 0; i < 1 << rss->tbl_log_size; i++) {
  1041. if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
  1042. return -EINVAL;
  1043. idx = (u8)rss->rss_ind_tbl[i].cq_idx;
  1044. if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
  1045. return -EINVAL;
  1046. rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
  1047. }
  1048. return 0;
  1049. }
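/* Rescale the stored interrupt moderation intervals when the device reports a
 * new delay resolution. For example (hypothetical values): with the previous
 * resolution at the 1 usec default and a device-reported resolution of 4 usec,
 * a stored Rx interval of 64 becomes 64 * 1 / 4 = 16 units.
 */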
  1050. static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
  1051. u16 intr_delay_resolution)
  1052. {
  1053. /* Initial value of intr_delay_resolution might be 0 */
  1054. u16 prev_intr_delay_resolution =
  1055. ena_dev->intr_delay_resolution ?
  1056. ena_dev->intr_delay_resolution :
  1057. ENA_DEFAULT_INTR_DELAY_RESOLUTION;
  1058. if (!intr_delay_resolution) {
  1059. pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
  1060. intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
  1061. }
  1062. /* update Rx */
  1063. ena_dev->intr_moder_rx_interval =
  1064. ena_dev->intr_moder_rx_interval *
  1065. prev_intr_delay_resolution /
  1066. intr_delay_resolution;
  1067. /* update Tx */
  1068. ena_dev->intr_moder_tx_interval =
  1069. ena_dev->intr_moder_tx_interval *
  1070. prev_intr_delay_resolution /
  1071. intr_delay_resolution;
  1072. ena_dev->intr_delay_resolution = intr_delay_resolution;
  1073. }
  1074. /*****************************************************************************/
  1075. /******************************* API ******************************/
  1076. /*****************************************************************************/
  1077. int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
  1078. struct ena_admin_aq_entry *cmd,
  1079. size_t cmd_size,
  1080. struct ena_admin_acq_entry *comp,
  1081. size_t comp_size)
  1082. {
  1083. struct ena_comp_ctx *comp_ctx;
  1084. int ret;
  1085. comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
  1086. comp, comp_size);
  1087. if (IS_ERR(comp_ctx)) {
  1088. if (comp_ctx == ERR_PTR(-ENODEV))
  1089. pr_debug("Failed to submit command [%ld]\n",
  1090. PTR_ERR(comp_ctx));
  1091. else
  1092. pr_err("Failed to submit command [%ld]\n",
  1093. PTR_ERR(comp_ctx));
  1094. return PTR_ERR(comp_ctx);
  1095. }
  1096. ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
  1097. if (unlikely(ret)) {
  1098. if (admin_queue->running_state)
  1099. pr_err("Failed to process command. ret = %d\n", ret);
  1100. else
  1101. pr_debug("Failed to process command. ret = %d\n", ret);
  1102. }
  1103. return ret;
  1104. }
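/* Illustrative sketch (not part of this file): callers in this file all follow
 * the same pattern when issuing an admin command - zero a command struct, set
 * the opcode, and pass the command and response buffers cast to the generic
 * AQ/ACQ entry types (see e.g. ena_com_destroy_io_cq() below):
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_<OPCODE>;
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */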
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

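/* ena_com_abort_admin_commands - Mark every allocated completion context as
 * aborted and wake up its waiter, so callers blocked in
 * ena_com_execute_admin_command() can bail out during a device reset.
 * ena_com_wait_for_abort_completion() then polls in ENA_POLL_MS steps until
 * the outstanding command counter drops to zero.
 */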
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(ENA_POLL_MS);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

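/* ena_com_set_aenq_config - Enable the requested AENQ event groups. The
 * device's supported_groups are fetched first; asking for an unsupported
 * group fails with -EOPNOTSUPP instead of being silently dropped.
 */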
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	return -ENOMEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

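/* ena_com_admin_init - Bring up the admin queue: verify the device reports
 * ready, allocate the completion contexts, admin SQ and admin CQ, program
 * their base addresses and the AQ/ACQ capability registers, and finally set
 * up the AENQ. Any failure unwinds through ena_com_admin_destroy().
 */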
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

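/* ena_com_create_io_queue - Create one I/O queue pair: initialize the host
 * side SQ/CQ bookkeeping, then issue CREATE_CQ followed by CREATE_SQ (the SQ
 * needs the CQ index returned by the device). If creating the SQ fails, the
 * already-created CQ is destroyed and the host resources are freed.
 */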
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}

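/* ena_com_get_dev_attr_feat - Populate the feature context used by the
 * driver at probe time: device attributes, max queues (extended or legacy,
 * depending on what the device advertises), AENQ groups, stateless offloads,
 * and the optional HW hints and LLQ descriptors (zeroed when the device
 * answers -EOPNOTSUPP).
 */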
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp =
			(unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom, timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	mmiowb();
}

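/* ena_com_dev_reset - Trigger a device reset: write the reset bit together
 * with the reset reason, wait for the device to report reset-in-progress,
 * clear the reset bit, and wait for the indication to drop. The reset
 * timeout and the admin completion timeout are both derived from the CAPS
 * register (the latter in units of 100 ms, falling back to
 * ADMIN_CMD_TIMEOUT_US when unset).
 */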
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

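/* ena_get_dev_stats - Helper shared by the statistics getters: fills a
 * GET_STATS admin command of the requested type and executes it, leaving the
 * response in ctx->get_resp for the caller to copy out.
 */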
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

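/* ena_com_set_hash_function - Push the currently selected RSS hash function
 * and its init value to the device. The hash key itself is passed indirectly
 * through the control buffer (rss->hash_key_dma_addr), so the key must have
 * been filled beforehand, e.g. by ena_com_fill_hash_function().
 */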
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

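/* ena_com_fill_hash_function - Configure the RSS hash function on the host
 * side and push it to the device. For Toeplitz an explicit key may be
 * provided (its length must match the device key size and be a multiple of
 * 4 bytes); for CRC32 only the init value is used. On failure the previous
 * device configuration is read back so the host copy stays consistent.
 */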
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
				       key_len, sizeof(hash_key->key));
				return -EINVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->keys_num = key_len >> 2;
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ffs() returns 1 in case the lsb is set */
	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

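/* ena_com_set_default_hash_ctrl - Select a default hash-input layout:
 * L3 + L4 fields for TCP/UDP over IPv4/IPv6, L3 only for plain and
 * fragmented IP, and L2 for non-IP traffic. If the device doesn't support
 * one of the selected field sets, the whole configuration is rejected with
 * -EOPNOTSUPP before anything is written to the device.
 */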
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return 0;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

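/* ena_com_indirect_table_set - Convert the host indirection table to the
 * device's queue-id representation and push it with a SET_FEATURE command;
 * the table itself travels through the indirect control buffer, sized as
 * 2^tbl_log_size entries.
 */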
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

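/* ena_com_rss_init - Allocate all RSS resources in order: the indirection
 * table (2^indr_tbl_log_size entries), the hash key (filled with a default
 * key), and the hash control structure. On failure the already-allocated
 * pieces are torn down in reverse order.
 */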
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	ena_com_hash_key_fill_default_key(ena_dev);

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

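/* The non-adaptive moderation helpers below convert a coalescing time given
 * in microseconds into device units of intr_delay_resolution:
 *
 *	interval = coalesce_usecs / intr_delay_resolution
 *
 * For example (illustrative values only): with a device-reported delay
 * resolution of 4 usec, a requested 64 usec of coalescing is stored as
 * 64 / 4 = 16 device units. A zero resolution is rejected with -EFAULT.
 */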
static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

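/* ena_com_config_dev_mode - Decide between host-memory and LLQ (device
 * memory) TX placement. Without LLQ support the host policy is used; with
 * LLQ, tx_max_header_size becomes whatever is left of the LLQ entry after
 * the descriptors that precede the header. Illustrative numbers only: a
 * 128-byte entry with 2 descriptors before the header would leave
 * 128 - 2 * sizeof(struct ena_eth_io_tx_desc) bytes for the pushed header.
 */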
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		pr_err("the size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}