- /*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
- #include "ena_com.h"
- /*****************************************************************************/
- /*****************************************************************************/
- /* Timeout in micro-sec */
- #define ADMIN_CMD_TIMEOUT_US (3000000)
- #define ENA_ASYNC_QUEUE_DEPTH 16
- #define ENA_ADMIN_QUEUE_DEPTH 32
- #define ENA_CTRL_MAJOR 0
- #define ENA_CTRL_MINOR 0
- #define ENA_CTRL_SUB_MINOR 1
- #define MIN_ENA_CTRL_VER \
- (((ENA_CTRL_MAJOR) << \
- (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
- ((ENA_CTRL_MINOR) << \
- (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
- (ENA_CTRL_SUB_MINOR))
- #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
- #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
- #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
- #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
- #define ENA_REGS_ADMIN_INTR_MASK 1
- #define ENA_POLL_MS 5
- /*****************************************************************************/
- /*****************************************************************************/
- /*****************************************************************************/
- enum ena_cmd_status {
- ENA_CMD_SUBMITTED,
- ENA_CMD_COMPLETED,
- /* Abort - canceled by the driver */
- ENA_CMD_ABORTED,
- };
- struct ena_comp_ctx {
- struct completion wait_event;
- struct ena_admin_acq_entry *user_cqe;
- u32 comp_size;
- enum ena_cmd_status status;
- /* status from the device */
- u8 comp_status;
- u8 cmd_opcode;
- bool occupied;
- };
- struct ena_com_stats_ctx {
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
- };
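- /* Split a DMA address into the low 32 bits and high 16 bits expected by
-  * struct ena_common_mem_addr, after verifying that the address fits in
-  * the number of DMA address bits the device reported (dma_addr_bits).
-  */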
- static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
- struct ena_common_mem_addr *ena_addr,
- dma_addr_t addr)
- {
- if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- pr_err("dma address has more bits that the device supports\n");
- return -EINVAL;
- }
- ena_addr->mem_addr_low = lower_32_bits(addr);
- ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
- return 0;
- }
- static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
- {
- struct ena_com_admin_sq *sq = &queue->sq;
- u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
- if (!sq->entries) {
- pr_err("memory allocation failed\n");
- return -ENOMEM;
- }
- sq->head = 0;
- sq->tail = 0;
- sq->phase = 1;
- sq->db_addr = NULL;
- return 0;
- }
- static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
- {
- struct ena_com_admin_cq *cq = &queue->cq;
- u16 size = ADMIN_CQ_SIZE(queue->q_depth);
- cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
- if (!cq->entries) {
- pr_err("memory allocation failed\n");
- return -ENOMEM;
- }
- cq->head = 0;
- cq->phase = 1;
- return 0;
- }
- static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
- struct ena_aenq_handlers *aenq_handlers)
- {
- struct ena_com_aenq *aenq = &dev->aenq;
- u32 addr_low, addr_high, aenq_caps;
- u16 size;
- dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
- size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
- if (!aenq->entries) {
- pr_err("memory allocation failed\n");
- return -ENOMEM;
- }
- aenq->head = aenq->q_depth;
- aenq->phase = 1;
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
- writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
- aenq_caps = 0;
- aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
- if (unlikely(!aenq_handlers)) {
- pr_err("aenq handlers pointer is NULL\n");
- return -EINVAL;
- }
- aenq->aenq_handlers = aenq_handlers;
- return 0;
- }
- static void comp_ctxt_release(struct ena_com_admin_queue *queue,
- struct ena_comp_ctx *comp_ctx)
- {
- comp_ctx->occupied = false;
- atomic_dec(&queue->outstanding_cmds);
- }
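- /* Look up the completion context for @command_id. With @capture set, the
-  * context is claimed for a new command: it is marked occupied and the
-  * outstanding command counter is incremented; the caller must later
-  * release it via comp_ctxt_release().
-  */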
- static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
- u16 command_id, bool capture)
- {
- if (unlikely(!queue->comp_ctx)) {
- pr_err("Completion context is NULL\n");
- return NULL;
- }
- if (unlikely(command_id >= queue->q_depth)) {
- pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, queue->q_depth);
- return NULL;
- }
- if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
- pr_err("Completion context is occupied\n");
- return NULL;
- }
- if (capture) {
- atomic_inc(&queue->outstanding_cmds);
- queue->comp_ctx[command_id].occupied = true;
- }
- return &queue->comp_ctx[command_id];
- }
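- /* Submit one admin command (the caller holds q_lock). The flow: reject
-  * the command if the number of outstanding commands reaches q_depth,
-  * stamp the descriptor with the current phase bit and command id, copy
-  * it into the SQ tail slot, advance the tail (flipping the phase on
-  * wrap-around) and ring the doorbell. For example, with q_depth = 32 the
-  * phase flips whenever (tail & 31) wraps back to 0.
-  */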
- static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
- {
- struct ena_comp_ctx *comp_ctx;
- u16 tail_masked, cmd_id;
- u16 queue_size_mask;
- u16 cnt;
- queue_size_mask = admin_queue->q_depth - 1;
- tail_masked = admin_queue->sq.tail & queue_size_mask;
- /* In case of queue FULL */
- cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
- if (cnt >= admin_queue->q_depth) {
- pr_debug("admin queue is full.\n");
- admin_queue->stats.out_of_space++;
- return ERR_PTR(-ENOSPC);
- }
- cmd_id = admin_queue->curr_cmd_id;
- cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
- ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
- cmd->aq_common_descriptor.command_id |= cmd_id &
- ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
- comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
- if (unlikely(!comp_ctx))
- return ERR_PTR(-EINVAL);
- comp_ctx->status = ENA_CMD_SUBMITTED;
- comp_ctx->comp_size = (u32)comp_size_in_bytes;
- comp_ctx->user_cqe = comp;
- comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
- reinit_completion(&comp_ctx->wait_event);
- memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
- admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
- queue_size_mask;
- admin_queue->sq.tail++;
- admin_queue->stats.submitted_cmd++;
- if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
- admin_queue->sq.phase = !admin_queue->sq.phase;
- writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
- return comp_ctx;
- }
- static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
- {
- size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
- struct ena_comp_ctx *comp_ctx;
- u16 i;
- queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
- if (unlikely(!queue->comp_ctx)) {
- pr_err("memory allocation failed\n");
- return -ENOMEM;
- }
- for (i = 0; i < queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(queue, i, false);
- if (comp_ctx)
- init_completion(&comp_ctx->wait_event);
- }
- return 0;
- }
- static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
- {
- unsigned long flags = 0;
- struct ena_comp_ctx *comp_ctx;
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- if (unlikely(!admin_queue->running_state)) {
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- return ERR_PTR(-ENODEV);
- }
- comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
- cmd_size_in_bytes,
- comp,
- comp_size_in_bytes);
- if (IS_ERR(comp_ctx))
- admin_queue->running_state = false;
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- return comp_ctx;
- }
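- /* Allocate the IO SQ descriptor space. For host-memory queues the
-  * allocation is first attempted on the queue's preferred NUMA node (by
-  * temporarily overriding the device node) and retried without the
-  * preference on failure. For device-memory (LLQ) queues, bounce buffers
-  * are allocated instead and the first bounce buffer is primed.
-  */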
- static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
- struct ena_com_create_io_ctx *ctx,
- struct ena_com_io_sq *io_sq)
- {
- size_t size;
- int dev_node = 0;
- memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
- io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
- io_sq->desc_entry_size =
- (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
- sizeof(struct ena_eth_io_tx_desc) :
- sizeof(struct ena_eth_io_rx_desc);
- size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
- if (!io_sq->desc_addr.virt_addr) {
- io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
- }
- if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
- return -ENOMEM;
- }
- }
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* Allocate bounce buffers */
- io_sq->bounce_buf_ctrl.buffer_size =
- ena_dev->llq_info.desc_list_entry_size;
- io_sq->bounce_buf_ctrl.buffers_num =
- ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
- io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
- io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
- if (!io_sq->bounce_buf_ctrl.base_buffer)
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("bounce buffer memory allocation failed\n");
- return -ENOMEM;
- }
- memcpy(&io_sq->llq_info, &ena_dev->llq_info,
- sizeof(io_sq->llq_info));
- /* Initialize the first bounce buffer */
- io_sq->llq_buf_ctrl.curr_bounce_buf =
- ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
- memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
- 0x0, io_sq->llq_info.desc_list_entry_size);
- io_sq->llq_buf_ctrl.descs_left_in_line =
- io_sq->llq_info.descs_num_before_header;
- if (io_sq->llq_info.max_entries_in_tx_burst > 0)
- io_sq->entries_in_tx_burst_left =
- io_sq->llq_info.max_entries_in_tx_burst;
- }
- io_sq->tail = 0;
- io_sq->next_to_comp = 0;
- io_sq->phase = 1;
- return 0;
- }
- static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
- struct ena_com_create_io_ctx *ctx,
- struct ena_com_io_cq *io_cq)
- {
- size_t size;
- int prev_node = 0;
- memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
- /* Use the basic completion descriptor for Rx */
- io_cq->cdesc_entry_size_in_bytes =
- (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
- sizeof(struct ena_eth_io_tx_cdesc) :
- sizeof(struct ena_eth_io_rx_cdesc_base);
- size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
- if (!io_cq->cdesc_addr.virt_addr) {
- io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
- GFP_KERNEL);
- }
- if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
- return -ENOMEM;
- }
- io_cq->phase = 1;
- io_cq->head = 0;
- return 0;
- }
- static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_acq_entry *cqe)
- {
- struct ena_comp_ctx *comp_ctx;
- u16 cmd_id;
- cmd_id = cqe->acq_common_descriptor.command &
- ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
- comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
- if (unlikely(!comp_ctx)) {
- pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
- admin_queue->running_state = false;
- return;
- }
- comp_ctx->status = ENA_CMD_COMPLETED;
- comp_ctx->comp_status = cqe->acq_common_descriptor.status;
- if (comp_ctx->user_cqe)
- memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
- if (!admin_queue->polling)
- complete(&comp_ctx->wait_event);
- }
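- /* Reap all pending admin completions. An entry belongs to the current
-  * pass while its phase bit matches the expected phase; the expected
-  * phase is inverted each time the head wraps around the queue, so stale
-  * entries from the previous lap are never mistaken for new ones.
-  */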
- static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
- {
- struct ena_admin_acq_entry *cqe = NULL;
- u16 comp_num = 0;
- u16 head_masked;
- u8 phase;
- head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
- phase = admin_queue->cq.phase;
- cqe = &admin_queue->cq.entries[head_masked];
- /* Go over all the completions */
- while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
- ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
- /* Do not read the rest of the completion entry before the
- * phase bit has been validated
- */
- dma_rmb();
- ena_com_handle_single_admin_completion(admin_queue, cqe);
- head_masked++;
- comp_num++;
- if (unlikely(head_masked == admin_queue->q_depth)) {
- head_masked = 0;
- phase = !phase;
- }
- cqe = &admin_queue->cq.entries[head_masked];
- }
- admin_queue->cq.head += comp_num;
- admin_queue->cq.phase = phase;
- admin_queue->sq.head += comp_num;
- admin_queue->stats.completed_cmd += comp_num;
- }
- static int ena_com_comp_status_to_errno(u8 comp_status)
- {
- if (unlikely(comp_status != 0))
- pr_err("admin command failed[%u]\n", comp_status);
- if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
- return -EINVAL;
- switch (comp_status) {
- case ENA_ADMIN_SUCCESS:
- return 0;
- case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
- return -ENOMEM;
- case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return -EOPNOTSUPP;
- case ENA_ADMIN_BAD_OPCODE:
- case ENA_ADMIN_MALFORMED_REQUEST:
- case ENA_ADMIN_ILLEGAL_PARAMETER:
- case ENA_ADMIN_UNKNOWN_ERROR:
- return -EINVAL;
- }
- return 0;
- }
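- /* Polling-mode wait: repeatedly reap completions under q_lock until the
-  * command leaves the SUBMITTED state, sleeping ENA_POLL_MS between
-  * iterations, and give up with -ETIME once completion_timeout expires.
-  */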
- static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
- {
- unsigned long flags = 0;
- unsigned long timeout;
- int ret;
- timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
- while (1) {
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- if (comp_ctx->status != ENA_CMD_SUBMITTED)
- break;
- if (time_is_before_jiffies(timeout)) {
- pr_err("Wait for completion (polling) timeout\n");
- /* The ENA device didn't return any completion */
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- admin_queue->stats.no_completion++;
- admin_queue->running_state = false;
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ret = -ETIME;
- goto err;
- }
- msleep(ENA_POLL_MS);
- }
- if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- pr_err("Command was aborted\n");
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- admin_queue->stats.aborted_cmd++;
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ret = -ENODEV;
- goto err;
- }
- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
- comp_ctx->status);
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
- err:
- comp_ctxt_release(admin_queue, comp_ctx);
- return ret;
- }
- /**
- * Set the LLQ configurations of the firmware
- *
- * The driver provides only the enabled feature values to the device,
- * which, in turn, checks whether they are supported.
- */
- static int ena_com_set_llq(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
- int ret;
- memset(&cmd, 0x0, sizeof(cmd));
- admin_queue = &ena_dev->admin_queue;
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
- cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
- cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
- cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
- cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret))
- pr_err("Failed to set LLQ configurations: %d\n", ret);
- return ret;
- }
- static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_llq_desc *llq_features,
- struct ena_llq_configurations *llq_default_cfg)
- {
- struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
- u16 supported_feat;
- int rc;
- memset(llq_info, 0, sizeof(*llq_info));
- supported_feat = llq_features->header_location_ctrl_supported;
- if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
- llq_info->header_location_ctrl =
- llq_default_cfg->llq_header_location;
- } else {
- pr_err("Invalid header location control, supported: 0x%x\n",
- supported_feat);
- return -EINVAL;
- }
- if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
- supported_feat = llq_features->descriptors_stride_ctrl_supported;
- if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
- llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
- } else {
- if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
- llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
- } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
- llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
- } else {
- pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
- supported_feat);
- return -EINVAL;
- }
- pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl, supported_feat,
- llq_info->desc_stride_ctrl);
- }
- } else {
- llq_info->desc_stride_ctrl = 0;
- }
- supported_feat = llq_features->entry_size_ctrl_supported;
- if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
- llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
- llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
- } else {
- if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
- llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
- llq_info->desc_list_entry_size = 128;
- } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
- llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
- llq_info->desc_list_entry_size = 192;
- } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
- llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
- llq_info->desc_list_entry_size = 256;
- } else {
- pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
- return -EINVAL;
- }
- pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_ring_entry_size, supported_feat,
- llq_info->desc_list_entry_size);
- }
- if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
- /* The desc list entry size should be a whole multiple of 8.
- * This requirement comes from __iowrite64_copy()
- */
- pr_err("illegal entry size %d\n",
- llq_info->desc_list_entry_size);
- return -EINVAL;
- }
- if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
- llq_info->descs_per_entry = llq_info->desc_list_entry_size /
- sizeof(struct ena_eth_io_tx_desc);
- else
- llq_info->descs_per_entry = 1;
- supported_feat = llq_features->desc_num_before_header_supported;
- if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
- llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
- } else {
- if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
- llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
- } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
- llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
- } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
- llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
- } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
- llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
- } else {
- pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
- supported_feat);
- return -EINVAL;
- }
- pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
- }
- llq_info->max_entries_in_tx_burst =
- (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
- rc = ena_com_set_llq(ena_dev);
- if (rc)
- pr_err("Cannot set LLQ configuration: %d\n", rc);
- return rc;
- }
- static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
- {
- unsigned long flags = 0;
- int ret;
- wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(
- admin_queue->completion_timeout));
- /* In case the command wasn't completed, find out the root cause.
-  * There are two kinds of errors:
-  * 1) No completion (timeout reached).
-  * 2) There is a completion, but the driver didn't receive the MSI-X
-  *    interrupt for it.
-  */
- if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- admin_queue->stats.no_completion++;
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- if (comp_ctx->status == ENA_CMD_COMPLETED) {
- pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
- } else {
- pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
- comp_ctx->cmd_opcode, comp_ctx->status);
- }
- /* Check if the queue shifted to polling mode. This happens when a
-  * completion arrives without an interrupt and auto-polling is enabled;
-  * in that case, continue normal execution.
-  */
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
- }
- }
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
- err:
- comp_ctxt_release(admin_queue, comp_ctx);
- return ret;
- }
- /* This method reads a hardware device register by posting a write to the
-  * device and waiting for its response.
-  * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
-  */
- static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
- {
- struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
- mmio_read->read_resp;
- u32 mmio_read_reg, ret, i;
- unsigned long flags = 0;
- u32 timeout = mmio_read->reg_read_to;
- might_sleep();
- if (timeout == 0)
- timeout = ENA_REG_READ_TIMEOUT;
- /* If readless is disabled, perform regular read */
- if (!mmio_read->readless_supported)
- return readl(ena_dev->reg_bar + offset);
- spin_lock_irqsave(&mmio_read->lock, flags);
- mmio_read->seq_num++;
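- /* Pre-set the response req_id to seq_num + 0xDEAD, a value that cannot
-  * equal the sequence number polled for below, so a stale response from
-  * an earlier read is not mistaken for this one (the apparent intent of
-  * the 0xDEAD offset).
-  */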
- read_resp->req_id = mmio_read->seq_num + 0xDEAD;
- mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
- ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
- mmio_read_reg |= mmio_read->seq_num &
- ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
- writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- for (i = 0; i < timeout; i++) {
- if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
- break;
- udelay(1);
- }
- if (unlikely(i == timeout)) {
- pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
- ret = ENA_MMIO_READ_TIMEOUT;
- goto err;
- }
- if (read_resp->reg_off != offset) {
- pr_err("Read failure: wrong offset provided\n");
- ret = ENA_MMIO_READ_TIMEOUT;
- } else {
- ret = read_resp->reg_val;
- }
- err:
- spin_unlock_irqrestore(&mmio_read->lock, flags);
- return ret;
- }
- /* There are two ways to wait for a completion.
-  * Polling mode - poll until the completion is available.
-  * Async mode - sleep on the wait queue until the completion is ready
-  * (or the timeout expires).
-  * In async mode the IRQ handler is expected to call
-  * ena_com_handle_admin_completion() to mark the completions.
-  */
- static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
- {
- if (admin_queue->polling)
- return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
- admin_queue);
- return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
- admin_queue);
- }
- static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
- struct ena_com_io_sq *io_sq)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
- struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
- u8 direction;
- int ret;
- memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
- if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
- direction = ENA_ADMIN_SQ_DIRECTION_TX;
- else
- direction = ENA_ADMIN_SQ_DIRECTION_RX;
- destroy_cmd.sq.sq_identity |= (direction <<
- ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
- ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
- destroy_cmd.sq.sq_idx = io_sq->idx;
- destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
- if (unlikely(ret && (ret != -ENODEV)))
- pr_err("failed to destroy io sq error: %d\n", ret);
- return ret;
- }
- static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
- struct ena_com_io_sq *io_sq,
- struct ena_com_io_cq *io_cq)
- {
- size_t size;
- if (io_cq->cdesc_addr.virt_addr) {
- size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr);
- io_cq->cdesc_addr.virt_addr = NULL;
- }
- if (io_sq->desc_addr.virt_addr) {
- size = io_sq->desc_entry_size * io_sq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr);
- io_sq->desc_addr.virt_addr = NULL;
- }
- if (io_sq->bounce_buf_ctrl.base_buffer) {
- devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
- io_sq->bounce_buf_ctrl.base_buffer = NULL;
- }
- }
- static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
- u16 exp_state)
- {
- u32 val, i;
- /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
- timeout = (timeout * 100) / ENA_POLL_MS;
- for (i = 0; i < timeout; i++) {
- val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
- if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
- return -ETIME;
- }
- if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
- exp_state)
- return 0;
- msleep(ENA_POLL_MS);
- }
- return -ETIME;
- }
- static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
- enum ena_admin_aq_feature_id feature_id)
- {
- u32 feature_mask = 1 << feature_id;
- /* Device attributes are always supported */
- if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
- !(ena_dev->supported_features & feature_mask))
- return false;
- return true;
- }
- static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
- struct ena_admin_get_feat_resp *get_resp,
- enum ena_admin_aq_feature_id feature_id,
- dma_addr_t control_buf_dma_addr,
- u32 control_buff_size,
- u8 feature_ver)
- {
- struct ena_com_admin_queue *admin_queue;
- struct ena_admin_get_feat_cmd get_cmd;
- int ret;
- if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- pr_debug("Feature %d isn't supported\n", feature_id);
- return -EOPNOTSUPP;
- }
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- admin_queue = &ena_dev->admin_queue;
- get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
- if (control_buff_size)
- get_cmd.aq_common_descriptor.flags =
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- else
- get_cmd.aq_common_descriptor.flags = 0;
- ret = ena_com_mem_addr_set(ena_dev,
- &get_cmd.control_buffer.address,
- control_buf_dma_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- get_cmd.control_buffer.length = control_buff_size;
- get_cmd.feat_common.feature_version = feature_ver;
- get_cmd.feat_common.feature_id = feature_id;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)
- &get_cmd,
- sizeof(get_cmd),
- (struct ena_admin_acq_entry *)
- get_resp,
- sizeof(*get_resp));
- if (unlikely(ret))
- pr_err("Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
- return ret;
- }
- static int ena_com_get_feature(struct ena_com_dev *ena_dev,
- struct ena_admin_get_feat_resp *get_resp,
- enum ena_admin_aq_feature_id feature_id,
- u8 feature_ver)
- {
- return ena_com_get_feature_ex(ena_dev,
- get_resp,
- feature_id,
- 0,
- 0,
- feature_ver);
- }
- static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
- {
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- (ena_dev->rss).hash_key;
- netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
- /* The key is stored in the device as an array of u32, and the API
-  * requires it to be passed in the same format. Thus keys_num is the
-  * key size in bytes divided by sizeof(u32).
-  */
- hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
- }
- int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
- {
- return ena_dev->rss.hash_func;
- }
- static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- rss->hash_key =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
- if (unlikely(!rss->hash_key))
- return -ENOMEM;
- return 0;
- }
- static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- if (rss->hash_key)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- rss->hash_key, rss->hash_key_dma_addr);
- rss->hash_key = NULL;
- }
- static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- rss->hash_ctrl =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
- if (unlikely(!rss->hash_ctrl))
- return -ENOMEM;
- return 0;
- }
- static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- if (rss->hash_ctrl)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
- rss->hash_ctrl = NULL;
- }
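- /* Two copies of the RSS indirection table are kept: rss_ind_tbl is the
-  * DMA-coherent, device-format table (one ena_admin_rss_ind_table_entry
-  * per slot) and host_rss_ind_tbl is a plain host array of u16 queue ids
-  * used as the driver-side view. Both have 1 << log_size entries.
-  */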
- static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
- u16 log_size)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- size_t tbl_size;
- int ret;
- ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
- if (unlikely(ret))
- return ret;
- if ((get_resp.u.ind_table.min_size > log_size) ||
- (get_resp.u.ind_table.max_size < log_size)) {
- pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
- 1 << log_size, 1 << get_resp.u.ind_table.min_size,
- 1 << get_resp.u.ind_table.max_size);
- return -EINVAL;
- }
- tbl_size = (1ULL << log_size) *
- sizeof(struct ena_admin_rss_ind_table_entry);
- rss->rss_ind_tbl =
- dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
- if (unlikely(!rss->rss_ind_tbl))
- goto mem_err1;
- tbl_size = (1ULL << log_size) * sizeof(u16);
- rss->host_rss_ind_tbl =
- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
- if (unlikely(!rss->host_rss_ind_tbl))
- goto mem_err2;
- rss->tbl_log_size = log_size;
- return 0;
- mem_err2:
- tbl_size = (1ULL << log_size) *
- sizeof(struct ena_admin_rss_ind_table_entry);
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
- rss->rss_ind_tbl = NULL;
- mem_err1:
- rss->tbl_log_size = 0;
- return -ENOMEM;
- }
- static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- size_t tbl_size = (1ULL << rss->tbl_log_size) *
- sizeof(struct ena_admin_rss_ind_table_entry);
- if (rss->rss_ind_tbl)
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
- rss->rss_ind_tbl = NULL;
- if (rss->host_rss_ind_tbl)
- devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
- rss->host_rss_ind_tbl = NULL;
- }
- static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
- struct ena_com_io_sq *io_sq, u16 cq_idx)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_admin_aq_create_sq_cmd create_cmd;
- struct ena_admin_acq_create_sq_resp_desc cmd_completion;
- u8 direction;
- int ret;
- memset(&create_cmd, 0x0, sizeof(create_cmd));
- create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
- if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
- direction = ENA_ADMIN_SQ_DIRECTION_TX;
- else
- direction = ENA_ADMIN_SQ_DIRECTION_RX;
- create_cmd.sq_identity |= (direction <<
- ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
- create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
- create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
- ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
- create_cmd.sq_caps_3 |=
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
- create_cmd.cq_idx = cq_idx;
- create_cmd.sq_depth = io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- ret = ena_com_mem_addr_set(ena_dev,
- &create_cmd.sq_ba,
- io_sq->desc_addr.phys_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- }
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
- if (unlikely(ret)) {
- pr_err("Failed to create IO SQ. error: %d\n", ret);
- return ret;
- }
- io_sq->idx = cmd_completion.sq_idx;
- io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- (uintptr_t)cmd_completion.sq_doorbell_offset);
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
- + cmd_completion.llq_headers_offset);
- io_sq->desc_addr.pbuf_dev_addr =
- (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
- cmd_completion.llq_descriptors_offset);
- }
- pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
- return ret;
- }
- static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_com_io_sq *io_sq;
- u16 qid;
- int i;
- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
- qid = rss->host_rss_ind_tbl[i];
- if (qid >= ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
- io_sq = &ena_dev->io_sq_queues[qid];
- if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
- return -EINVAL;
- rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
- }
- return 0;
- }
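- /* Rebuild the host view of the indirection table from the device table:
-  * first build a reverse map from device SQ index to host queue id, then
-  * translate each device cq_idx entry back to its host queue id.
-  */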
- static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
- {
- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
- struct ena_rss *rss = &ena_dev->rss;
- u8 idx;
- u16 i;
- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
- if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
- if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
- }
- return 0;
- }
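- /* Rescale the stored interrupt moderation intervals when the device
-  * reports a new delay resolution, keeping the absolute time constant:
-  * new = old * prev_resolution / new_resolution. For example, an Rx
-  * interval of 6 stored at a 1 usec resolution (~6 usec) becomes
-  * 6 * 1 / 2 = 3 when the resolution changes to 2 usec.
-  */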
- static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
- u16 intr_delay_resolution)
- {
- /* Initial value of intr_delay_resolution might be 0 */
- u16 prev_intr_delay_resolution =
- ena_dev->intr_delay_resolution ?
- ena_dev->intr_delay_resolution :
- ENA_DEFAULT_INTR_DELAY_RESOLUTION;
- if (!intr_delay_resolution) {
- pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
- intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
- }
- /* update Rx */
- ena_dev->intr_moder_rx_interval =
- ena_dev->intr_moder_rx_interval *
- prev_intr_delay_resolution /
- intr_delay_resolution;
- /* update Tx */
- ena_dev->intr_moder_tx_interval =
- ena_dev->intr_moder_tx_interval *
- prev_intr_delay_resolution /
- intr_delay_resolution;
- ena_dev->intr_delay_resolution = intr_delay_resolution;
- }
- /*****************************************************************************/
- /******************************* API ******************************/
- /*****************************************************************************/
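- /* Synchronously execute one admin command: submit it and block until the
-  * completion is reaped (by polling or by the MSI-X handler, depending on
-  * the queue mode). Callers in this file cast their specific command and
-  * response structs to the generic entry types; a sketch of the pattern
-  * used by, e.g., ena_com_create_io_cq() below:
-  *
-  *	struct ena_admin_aq_create_cq_cmd create_cmd = { ... };
-  *	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
-  *
-  *	ret = ena_com_execute_admin_command(admin_queue,
-  *		(struct ena_admin_aq_entry *)&create_cmd, sizeof(create_cmd),
-  *		(struct ena_admin_acq_entry *)&cmd_completion,
-  *		sizeof(cmd_completion));
-  */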
- int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size,
- struct ena_admin_acq_entry *comp,
- size_t comp_size)
- {
- struct ena_comp_ctx *comp_ctx;
- int ret;
- comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
- comp, comp_size);
- if (IS_ERR(comp_ctx)) {
- if (comp_ctx == ERR_PTR(-ENODEV))
- pr_debug("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
- else
- pr_err("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
- return PTR_ERR(comp_ctx);
- }
- ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
- if (unlikely(ret)) {
- if (admin_queue->running_state)
- pr_err("Failed to process command. ret = %d\n", ret);
- else
- pr_debug("Failed to process command. ret = %d\n", ret);
- }
- return ret;
- }
- int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
- struct ena_com_io_cq *io_cq)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_admin_aq_create_cq_cmd create_cmd;
- struct ena_admin_acq_create_cq_resp_desc cmd_completion;
- int ret;
- memset(&create_cmd, 0x0, sizeof(create_cmd));
- create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
- create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
- ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
- create_cmd.cq_caps_1 |=
- ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
- create_cmd.msix_vector = io_cq->msix_vector;
- create_cmd.cq_depth = io_cq->q_depth;
- ret = ena_com_mem_addr_set(ena_dev,
- &create_cmd.cq_ba,
- io_cq->cdesc_addr.phys_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
- if (unlikely(ret)) {
- pr_err("Failed to create IO CQ. error: %d\n", ret);
- return ret;
- }
- io_cq->idx = cmd_completion.cq_idx;
- io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_interrupt_unmask_register_offset);
- if (cmd_completion.cq_head_db_register_offset)
- io_cq->cq_head_db_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_head_db_register_offset);
- if (cmd_completion.numa_node_register_offset)
- io_cq->numa_node_cfg_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.numa_node_register_offset);
- pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
- return ret;
- }
- int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
- struct ena_com_io_sq **io_sq,
- struct ena_com_io_cq **io_cq)
- {
- if (qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Invalid queue number %d but the max is %d\n", qid,
- ENA_TOTAL_NUM_QUEUES);
- return -EINVAL;
- }
- *io_sq = &ena_dev->io_sq_queues[qid];
- *io_cq = &ena_dev->io_cq_queues[qid];
- return 0;
- }
- void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_comp_ctx *comp_ctx;
- u16 i;
- if (!admin_queue->comp_ctx)
- return;
- for (i = 0; i < admin_queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(admin_queue, i, false);
- if (unlikely(!comp_ctx))
- break;
- comp_ctx->status = ENA_CMD_ABORTED;
- complete(&comp_ctx->wait_event);
- }
- }
- void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags = 0;
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- msleep(ENA_POLL_MS);
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- }
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- }
- int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
- struct ena_com_io_cq *io_cq)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
- struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
- int ret;
- memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
- destroy_cmd.cq_idx = io_cq->idx;
- destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
- if (unlikely(ret && (ret != -ENODEV)))
- pr_err("Failed to destroy IO CQ. error: %d\n", ret);
- return ret;
- }
- bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
- {
- return ena_dev->admin_queue.running_state;
- }
- void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags = 0;
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- ena_dev->admin_queue.running_state = state;
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- }
- void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
- {
- u16 depth = ena_dev->aenq.q_depth;
- WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
- /* Init head_db to mark that all entries in the queue
- * are initially available
- */
- writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
- }
- int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
- {
- struct ena_com_admin_queue *admin_queue;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- struct ena_admin_get_feat_resp get_resp;
- int ret;
- ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
- if (ret) {
- pr_info("Can't get aenq configuration\n");
- return ret;
- }
- if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
- get_resp.u.aenq.supported_groups, groups_flag);
- return -EOPNOTSUPP;
- }
- memset(&cmd, 0x0, sizeof(cmd));
- admin_queue = &ena_dev->admin_queue;
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.aq_common_descriptor.flags = 0;
- cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
- cmd.u.aenq.enabled_groups = groups_flag;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret))
- pr_err("Failed to config AENQ ret: %d\n", ret);
- return ret;
- }
- int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
- {
- u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- int width;
- if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
- return -ETIME;
- }
- width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
- ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
- pr_debug("ENA dma width: %d\n", width);
- if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- pr_err("DMA width illegal value: %d\n", width);
- return -EINVAL;
- }
- ena_dev->dma_addr_bits = width;
- return width;
- }
- int ena_com_validate_version(struct ena_com_dev *ena_dev)
- {
- u32 ver;
- u32 ctrl_ver;
- u32 ctrl_ver_masked;
- /* Make sure the ENA version and the controller version are at least
- * the versions the driver expects
- */
- ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
- ctrl_ver = ena_com_reg_bar_read32(ena_dev,
- ENA_REGS_CONTROLLER_VERSION_OFF);
- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
- pr_err("Reg read timeout occurred\n");
- return -ETIME;
- }
- pr_info("ena device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
- ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- pr_info("ena controller version: %d.%d.%d implementation version %d\n",
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
- ctrl_ver_masked =
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
- /* Validate the ctrl version without the implementation ID */
- if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
- pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
- return -1;
- }
- return 0;
- }
- void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_com_admin_cq *cq = &admin_queue->cq;
- struct ena_com_admin_sq *sq = &admin_queue->sq;
- struct ena_com_aenq *aenq = &ena_dev->aenq;
- u16 size;
- if (admin_queue->comp_ctx)
- devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
- admin_queue->comp_ctx = NULL;
- size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- if (sq->entries)
- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
- sq->dma_addr);
- sq->entries = NULL;
- size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- if (cq->entries)
- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
- cq->dma_addr);
- cq->entries = NULL;
- size = ADMIN_AENQ_SIZE(aenq->q_depth);
- if (ena_dev->aenq.entries)
- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
- aenq->dma_addr);
- aenq->entries = NULL;
- }
- void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
- {
- u32 mask_value = 0;
- if (polling)
- mask_value = ENA_REGS_ADMIN_INTR_MASK;
- writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
- ena_dev->admin_queue.polling = polling;
- }
- void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling)
- {
- ena_dev->admin_queue.auto_polling = polling;
- }
- int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
- {
- struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- spin_lock_init(&mmio_read->lock);
- mmio_read->read_resp =
- dma_zalloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
- if (unlikely(!mmio_read->read_resp))
- goto err;
- ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
- mmio_read->read_resp->req_id = 0x0;
- mmio_read->seq_num = 0x0;
- mmio_read->readless_supported = true;
- return 0;
- err:
- return -ENOMEM;
- }
- void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
- {
- struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- mmio_read->readless_supported = readless_supported;
- }
- void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
- {
- struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
- writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
- mmio_read->read_resp = NULL;
- }
- void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
- {
- struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- u32 addr_low, addr_high;
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
- writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
- writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- }
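- /* Note on the readless-read mechanism served by the two registers
- * above: instead of reading a register directly over MMIO, the host
- * posts a request id plus register offset and the device DMA-writes
- * the value into mmio_read->read_resp, whose address these registers
- * hold. This is also why the response address must be re-programmed
- * after every device reset.
- */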
- int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
- int ret;
- dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
- if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
- return -ETIME;
- }
- if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- pr_err("Device isn't ready, abort com init\n");
- return -ENODEV;
- }
- admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
- admin_queue->q_dmadev = ena_dev->dmadev;
- admin_queue->polling = false;
- admin_queue->curr_cmd_id = 0;
- atomic_set(&admin_queue->outstanding_cmds, 0);
- spin_lock_init(&admin_queue->q_lock);
- ret = ena_com_init_comp_ctxt(admin_queue);
- if (ret)
- goto error;
- ret = ena_com_admin_init_sq(admin_queue);
- if (ret)
- goto error;
- ret = ena_com_admin_init_cq(admin_queue);
- if (ret)
- goto error;
- admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- ENA_REGS_AQ_DB_OFF);
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
- writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
- writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
- writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
- writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
- aq_caps = 0;
- aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
- aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
- ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
- acq_caps = 0;
- acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
- acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
- ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
- writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
- writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
- ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
- if (ret)
- goto error;
- admin_queue->running_state = true;
- return 0;
- error:
- ena_com_admin_destroy(ena_dev);
- return ret;
- }
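- /* Bring-up order sketch (an assumption mirroring a typical probe
- * flow; error handling omitted for brevity):
- *
- * rc = ena_com_mmio_reg_read_request_init(ena_dev);
- * rc = ena_com_validate_version(ena_dev);
- * rc = ena_com_admin_init(ena_dev, &aenq_handlers);
- * rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
- */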
- int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
- struct ena_com_create_io_ctx *ctx)
- {
- struct ena_com_io_sq *io_sq;
- struct ena_com_io_cq *io_cq;
- int ret;
- if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
- ctx->qid, ENA_TOTAL_NUM_QUEUES);
- return -EINVAL;
- }
- io_sq = &ena_dev->io_sq_queues[ctx->qid];
- io_cq = &ena_dev->io_cq_queues[ctx->qid];
- memset(io_sq, 0x0, sizeof(*io_sq));
- memset(io_cq, 0x0, sizeof(*io_cq));
- /* Init CQ */
- io_cq->q_depth = ctx->queue_size;
- io_cq->direction = ctx->direction;
- io_cq->qid = ctx->qid;
- io_cq->msix_vector = ctx->msix_vector;
- io_sq->q_depth = ctx->queue_size;
- io_sq->direction = ctx->direction;
- io_sq->qid = ctx->qid;
- io_sq->mem_queue_type = ctx->mem_queue_type;
- if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
- /* header length is limited to 8 bits */
- io_sq->tx_max_header_size =
- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
- ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
- if (ret)
- goto error;
- ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
- if (ret)
- goto error;
- ret = ena_com_create_io_cq(ena_dev, io_cq);
- if (ret)
- goto error;
- ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
- if (ret)
- goto destroy_io_cq;
- return 0;
- destroy_io_cq:
- ena_com_destroy_io_cq(ena_dev, io_cq);
- error:
- ena_com_io_queue_free(ena_dev, io_sq, io_cq);
- return ret;
- }
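- /* Caller sketch (illustrative): the create context is filled per
- * queue before calling ena_com_create_io_queue(), e.g. for TX:
- *
- * struct ena_com_create_io_ctx ctx = {
- * .direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
- * .qid = qid,
- * .mem_queue_type = ena_dev->tx_mem_queue_type,
- * .msix_vector = msix_vector,
- * .queue_size = queue_size,
- * };
- * rc = ena_com_create_io_queue(ena_dev, &ctx);
- *
- * qid, msix_vector and queue_size are placeholders here.
- */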
- void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
- {
- struct ena_com_io_sq *io_sq;
- struct ena_com_io_cq *io_cq;
- if (qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
- ENA_TOTAL_NUM_QUEUES);
- return;
- }
- io_sq = &ena_dev->io_sq_queues[qid];
- io_cq = &ena_dev->io_cq_queues[qid];
- ena_com_destroy_io_sq(ena_dev, io_sq);
- ena_com_destroy_io_cq(ena_dev, io_cq);
- ena_com_io_queue_free(ena_dev, io_sq, io_cq);
- }
- int ena_com_get_link_params(struct ena_com_dev *ena_dev,
- struct ena_admin_get_feat_resp *resp)
- {
- return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
- }
- int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
- {
- struct ena_admin_get_feat_resp get_resp;
- int rc;
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
- if (rc)
- return rc;
- memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
- sizeof(get_resp.u.dev_attr));
- ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
- if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_MAX_QUEUES_EXT,
- ENA_FEATURE_MAX_QUEUE_EXT_VER);
- if (rc)
- return rc;
- if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
- return -EINVAL;
- memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
- sizeof(get_resp.u.max_queue_ext));
- ena_dev->tx_max_header_size =
- get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
- } else {
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_MAX_QUEUES_NUM, 0);
- /* Check the return code before consuming get_resp */
- if (rc)
- return rc;
- memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
- sizeof(get_resp.u.max_queue));
- ena_dev->tx_max_header_size =
- get_resp.u.max_queue.max_header_size;
- }
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_AENQ_CONFIG, 0);
- if (rc)
- return rc;
- memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
- sizeof(get_resp.u.aenq));
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
- if (rc)
- return rc;
- memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
- sizeof(get_resp.u.offload));
- /* Driver hints isn't a mandatory admin command, so if the command
- * isn't supported, set the driver hints to 0
- */
- rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
- if (!rc)
- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
- sizeof(get_resp.u.hw_hints));
- else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->hw_hints, 0x0,
- sizeof(get_feat_ctx->hw_hints));
- else
- return rc;
- rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
- if (!rc)
- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
- sizeof(get_resp.u.llq));
- else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
- else
- return rc;
- return 0;
- }
- void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
- {
- ena_com_handle_admin_completion(&ena_dev->admin_queue);
- }
- /* ena_com_get_specific_aenq_cb:
- * return the handler that is relevant to the specific event group
- */
- static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
- u16 group)
- {
- struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
- if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
- return aenq_handlers->handlers[group];
- return aenq_handlers->unimplemented_handler;
- }
- /* ena_com_aenq_intr_handler:
- * handles incoming AENQ events: pops events from the queue and
- * applies the handler registered for each event's group
- */
- void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
- {
- struct ena_admin_aenq_entry *aenq_e;
- struct ena_admin_aenq_common_desc *aenq_common;
- struct ena_com_aenq *aenq = &dev->aenq;
- unsigned long long timestamp;
- ena_aenq_handler handler_cb;
- u16 masked_head, processed = 0;
- u8 phase;
- masked_head = aenq->head & (aenq->q_depth - 1);
- phase = aenq->phase;
- aenq_e = &aenq->entries[masked_head]; /* Get first entry */
- aenq_common = &aenq_e->aenq_common_desc;
- /* Go over all the events */
- while ((READ_ONCE(aenq_common->flags) &
- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
- /* Make sure the phase bit (ownership) is as expected before
- * reading the rest of the descriptor.
- */
- dma_rmb();
- timestamp =
- (unsigned long long)aenq_common->timestamp_low |
- ((unsigned long long)aenq_common->timestamp_high << 32);
- pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom, timestamp);
- /* Handle specific event*/
- handler_cb = ena_com_get_specific_aenq_cb(dev,
- aenq_common->group);
- handler_cb(data, aenq_e); /* call the actual event handler*/
- /* Get next event entry */
- masked_head++;
- processed++;
- if (unlikely(masked_head == aenq->q_depth)) {
- masked_head = 0;
- phase = !phase;
- }
- aenq_e = &aenq->entries[masked_head];
- aenq_common = &aenq_e->aenq_common_desc;
- }
- aenq->head += processed;
- aenq->phase = phase;
- /* Don't update aenq doorbell if there weren't any processed events */
- if (!processed)
- return;
- /* write the aenq doorbell after all AENQ descriptors were read */
- mb();
- writel_relaxed((u32)aenq->head,
- dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
- mmiowb();
- }
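- /* Worked example of the phase bit above: with q_depth = 4 and an
- * initial expected phase of 1, the device writes entries 0..3 with
- * phase = 1. When masked_head wraps back to 0 the expected phase
- * flips to 0, so the stale phase = 1 descriptors from the previous
- * pass no longer match and the loop stops until the device overwrites
- * them (now with phase = 0).
- */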
- int ena_com_dev_reset(struct ena_com_dev *ena_dev,
- enum ena_regs_reset_reason_types reset_reason)
- {
- u32 stat, timeout, cap, reset_val;
- int rc;
- stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
- cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
- (cap == ENA_MMIO_READ_TIMEOUT))) {
- pr_err("Reg read32 timeout occurred\n");
- return -ETIME;
- }
- if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- pr_err("Device isn't ready, can't reset device\n");
- return -EINVAL;
- }
- timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
- ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
- if (timeout == 0) {
- pr_err("Invalid timeout value\n");
- return -EINVAL;
- }
- /* start reset */
- reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
- reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
- ENA_REGS_DEV_CTL_RESET_REASON_MASK;
- writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
- /* Write again the MMIO read request address */
- ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
- rc = wait_for_reset_state(ena_dev, timeout,
- ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
- if (rc != 0) {
- pr_err("Reset indication didn't turn on\n");
- return rc;
- }
- /* reset done */
- writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
- rc = wait_for_reset_state(ena_dev, timeout, 0);
- if (rc != 0) {
- pr_err("Reset indication didn't turn off\n");
- return rc;
- }
- timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
- ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
- if (timeout)
- /* the resolution of timeout reg is 100ms */
- ena_dev->admin_queue.completion_timeout = timeout * 100000;
- else
- ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
- return 0;
- }
- static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_com_stats_ctx *ctx,
- enum ena_admin_get_stats_type type)
- {
- struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
- struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
- struct ena_com_admin_queue *admin_queue;
- int ret;
- admin_queue = &ena_dev->admin_queue;
- get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
- get_cmd->aq_common_descriptor.flags = 0;
- get_cmd->type = type;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
- if (unlikely(ret))
- pr_err("Failed to get stats. error: %d\n", ret);
- return ret;
- }
- int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats)
- {
- struct ena_com_stats_ctx ctx;
- int ret;
- memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
- if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.basic_stats,
- sizeof(ctx.get_resp.basic_stats));
- return ret;
- }
- int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
- {
- struct ena_com_admin_queue *admin_queue;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- int ret;
- if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return -EOPNOTSUPP;
- }
- memset(&cmd, 0x0, sizeof(cmd));
- admin_queue = &ena_dev->admin_queue;
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.aq_common_descriptor.flags = 0;
- cmd.feat_common.feature_id = ENA_ADMIN_MTU;
- cmd.u.mtu.mtu = mtu;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret))
- pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
- return ret;
- }
- int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload)
- {
- int ret;
- struct ena_admin_get_feat_resp resp;
- ret = ena_com_get_feature(ena_dev, &resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
- if (unlikely(ret)) {
- pr_err("Failed to get offload capabilities %d\n", ret);
- return ret;
- }
- memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
- return 0;
- }
- int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- struct ena_admin_get_feat_resp get_resp;
- int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
- return -EOPNOTSUPP;
- }
- /* Validate hash function is supported */
- ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION, 0);
- if (unlikely(ret))
- return ret;
- if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- pr_err("Func hash %d isn't supported by device, abort\n",
- rss->hash_func);
- return -EOPNOTSUPP;
- }
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.aq_common_descriptor.flags =
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
- cmd.u.flow_hash_func.init_val = rss->hash_init_val;
- cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
- ret = ena_com_mem_addr_set(ena_dev,
- &cmd.control_buffer.address,
- rss->hash_key_dma_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- cmd.control_buffer.length = sizeof(*rss->hash_key);
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret)) {
- pr_err("Failed to set hash function %d. error: %d\n",
- rss->hash_func, ret);
- return -EINVAL;
- }
- return 0;
- }
- int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions func,
- const u8 *key, u16 key_len, u32 init_val)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
- int rc;
- /* Make sure the key size is a multiple of DWORDs (32-bit words) */
- if (unlikely(key_len & 0x3))
- return -EINVAL;
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION,
- rss->hash_key_dma_addr,
- sizeof(*rss->hash_key), 0);
- if (unlikely(rc))
- return rc;
- if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
- pr_err("Flow hash function %d isn't supported\n", func);
- return -EOPNOTSUPP;
- }
- switch (func) {
- case ENA_ADMIN_TOEPLITZ:
- if (key) {
- if (key_len != sizeof(hash_key->key)) {
- pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
- }
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
- }
- break;
- case ENA_ADMIN_CRC32:
- rss->hash_init_val = init_val;
- break;
- default:
- pr_err("Invalid hash function (%d)\n", func);
- return -EINVAL;
- }
- rss->hash_func = func;
- rc = ena_com_set_hash_function(ena_dev);
- /* Restore the old function */
- if (unlikely(rc))
- ena_com_get_hash_function(ena_dev, NULL, NULL);
- return rc;
- }
- int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
- int rc;
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION,
- rss->hash_key_dma_addr,
- sizeof(*rss->hash_key), 0);
- if (unlikely(rc))
- return rc;
- /* ffs() returns 1 in case the lsb is set */
- rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
- if (rss->hash_func)
- rss->hash_func--;
- if (func)
- *func = rss->hash_func;
- if (key)
- memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
- return 0;
- }
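- /* Worked example for the ffs() decode above: selected_func is a
- * one-hot mask of 1 << hash_func, so if only bit N is set, ffs()
- * returns N + 1 and the decrement recovers hash_func = N. A value of
- * 0 (nothing selected) leaves rss->hash_func as 0.
- */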
- int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
- enum ena_admin_flow_hash_proto proto,
- u16 *fields)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- int rc;
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_INPUT,
- rss->hash_ctrl_dma_addr,
- sizeof(*rss->hash_ctrl), 0);
- if (unlikely(rc))
- return rc;
- if (fields)
- *fields = rss->hash_ctrl->selected_fields[proto].fields;
- return 0;
- }
- int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_INPUT)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_INPUT);
- return -EOPNOTSUPP;
- }
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.aq_common_descriptor.flags =
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
- cmd.u.flow_hash_input.enabled_input_sort =
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
- ret = ena_com_mem_addr_set(ena_dev,
- &cmd.control_buffer.address,
- rss->hash_ctrl_dma_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- cmd.control_buffer.length = sizeof(*hash_ctrl);
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret))
- pr_err("Failed to set hash input. error: %d\n", ret);
- return ret;
- }
- int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl =
- rss->hash_ctrl;
- u16 available_fields = 0;
- int rc, i;
- /* Get the supported hash input */
- rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
- if (unlikely(rc))
- return rc;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
- ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
- for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
- available_fields = hash_ctrl->selected_fields[i].fields &
- hash_ctrl->supported_fields[i].fields;
- if (available_fields != hash_ctrl->selected_fields[i].fields) {
- pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
- i, hash_ctrl->supported_fields[i].fields,
- hash_ctrl->selected_fields[i].fields);
- return -EOPNOTSUPP;
- }
- }
- rc = ena_com_set_hash_ctrl(ena_dev);
- /* In case of failure, restore the old hash ctrl */
- if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, 0, NULL);
- return rc;
- }
- int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
- enum ena_admin_flow_hash_proto proto,
- u16 hash_fields)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
- u16 supported_fields;
- int rc;
- if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- pr_err("Invalid proto num (%u)\n", proto);
- return -EINVAL;
- }
- /* Get the ctrl table */
- rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
- if (unlikely(rc))
- return rc;
- /* Make sure all the fields are supported */
- supported_fields = hash_ctrl->supported_fields[proto].fields;
- if ((hash_fields & supported_fields) != hash_fields) {
- pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
- proto, hash_fields, supported_fields);
- }
- hash_ctrl->selected_fields[proto].fields = hash_fields;
- rc = ena_com_set_hash_ctrl(ena_dev);
- /* In case of failure, restore the old hash ctrl */
- if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, 0, NULL);
- return rc;
- }
- int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
- u16 entry_idx, u16 entry_value)
- {
- struct ena_rss *rss = &ena_dev->rss;
- if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
- return -EINVAL;
- if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
- return -EINVAL;
- rss->host_rss_ind_tbl[entry_idx] = entry_value;
- return 0;
- }
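- /* Filling sketch (illustrative): the host table is commonly populated
- * by spreading the RX queues round-robin over all slots before
- * pushing it with ena_com_indirect_table_set(). tbl_size
- * (1 << indr_tbl_log_size) and num_rx_queues are placeholders:
- *
- * for (i = 0; i < tbl_size; i++)
- * rc = ena_com_indirect_table_fill_entry(ena_dev, i,
- * i % num_rx_queues);
- */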
- int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
- {
- struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return -EOPNOTSUPP;
- }
- ret = ena_com_ind_tbl_convert_to_device(ena_dev);
- if (ret) {
- pr_err("Failed to convert host indirection table to device table\n");
- return ret;
- }
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.aq_common_descriptor.flags =
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
- cmd.u.ind_table.size = rss->tbl_log_size;
- cmd.u.ind_table.inline_index = 0xFFFFFFFF;
- ret = ena_com_mem_addr_set(ena_dev,
- &cmd.control_buffer.address,
- rss->rss_ind_tbl_dma_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
- sizeof(struct ena_admin_rss_ind_table_entry);
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret))
- pr_err("Failed to set indirect table. error: %d\n", ret);
- return ret;
- }
- int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
- {
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- u32 tbl_size;
- int i, rc;
- tbl_size = (1ULL << rss->tbl_log_size) *
- sizeof(struct ena_admin_rss_ind_table_entry);
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
- rss->rss_ind_tbl_dma_addr,
- tbl_size, 0);
- if (unlikely(rc))
- return rc;
- if (!ind_tbl)
- return 0;
- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
- if (unlikely(rc))
- return rc;
- for (i = 0; i < (1 << rss->tbl_log_size); i++)
- ind_tbl[i] = rss->host_rss_ind_tbl[i];
- return 0;
- }
- int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
- {
- int rc;
- memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
- rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
- if (unlikely(rc))
- goto err_indr_tbl;
- rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc))
- goto err_hash_key;
- ena_com_hash_key_fill_default_key(ena_dev);
- rc = ena_com_hash_ctrl_init(ena_dev);
- if (unlikely(rc))
- goto err_hash_ctrl;
- return 0;
- err_hash_ctrl:
- ena_com_hash_key_destroy(ena_dev);
- err_hash_key:
- ena_com_indirect_table_destroy(ena_dev);
- err_indr_tbl:
- return rc;
- }
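- /* Configuration order sketch (an assumption based on the calls in
- * this file): after ena_com_rss_init() the table and key exist only
- * on the host; they reach the device via the set calls:
- *
- * rc = ena_com_rss_init(ena_dev, log_size);
- * (fill entries with ena_com_indirect_table_fill_entry())
- * rc = ena_com_indirect_table_set(ena_dev);
- * rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
- * NULL, 0, init_val);
- * rc = ena_com_set_default_hash_ctrl(ena_dev);
- */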
- void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
- {
- ena_com_indirect_table_destroy(ena_dev);
- ena_com_hash_key_destroy(ena_dev);
- ena_com_hash_ctrl_destroy(ena_dev);
- memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
- }
- int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
- {
- struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->host_info =
- dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
- if (unlikely(!host_attr->host_info))
- return -ENOMEM;
- host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
- (ENA_COMMON_SPEC_VERSION_MINOR));
- return 0;
- }
- int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
- u32 debug_area_size)
- {
- struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->debug_area_virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr, GFP_KERNEL);
- if (unlikely(!host_attr->debug_area_virt_addr)) {
- host_attr->debug_area_size = 0;
- return -ENOMEM;
- }
- host_attr->debug_area_size = debug_area_size;
- return 0;
- }
- void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
- {
- struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- if (host_attr->host_info) {
- dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
- host_attr->host_info_dma_addr);
- host_attr->host_info = NULL;
- }
- }
- void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
- {
- struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- if (host_attr->debug_area_virt_addr) {
- dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr);
- host_attr->debug_area_virt_addr = NULL;
- }
- }
- int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
- {
- struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- struct ena_com_admin_queue *admin_queue;
- struct ena_admin_set_feat_cmd cmd;
- struct ena_admin_set_feat_resp resp;
- int ret;
- /* Host attribute config is called before ena_com_get_dev_attr_feat
- * so ena_com can't check if the feature is supported.
- */
- memset(&cmd, 0x0, sizeof(cmd));
- admin_queue = &ena_dev->admin_queue;
- cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
- cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
- ret = ena_com_mem_addr_set(ena_dev,
- &cmd.u.host_attr.debug_ba,
- host_attr->debug_area_dma_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- ret = ena_com_mem_addr_set(ena_dev,
- &cmd.u.host_attr.os_info_ba,
- host_attr->host_info_dma_addr);
- if (unlikely(ret)) {
- pr_err("memory address set failed\n");
- return ret;
- }
- cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)&cmd,
- sizeof(cmd),
- (struct ena_admin_acq_entry *)&resp,
- sizeof(resp));
- if (unlikely(ret))
- pr_err("Failed to set host attributes: %d\n", ret);
- return ret;
- }
- /* Interrupt moderation */
- bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
- {
- return ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_INTERRUPT_MODERATION);
- }
- static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
- u32 intr_delay_resolution,
- u32 *intr_moder_interval)
- {
- if (!intr_delay_resolution) {
- pr_err("Illegal interrupt delay granularity value\n");
- return -EFAULT;
- }
- *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
- return 0;
- }
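- /* Worked example (illustrative numbers): with a device delay
- * resolution of 2us, a requested 64us coalescing interval is stored
- * as 64 / 2 = 32 device units. The real resolution comes from the
- * interrupt-moderation feature response handled below.
- */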
- int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs)
- {
- return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
- ena_dev->intr_delay_resolution,
- &ena_dev->intr_moder_tx_interval);
- }
- int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs)
- {
- return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
- ena_dev->intr_delay_resolution,
- &ena_dev->intr_moder_rx_interval);
- }
- int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
- {
- struct ena_admin_get_feat_resp get_resp;
- u16 delay_resolution;
- int rc;
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_INTERRUPT_MODERATION, 0);
- if (rc) {
- if (rc == -EOPNOTSUPP) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
- rc = 0;
- } else {
- pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
- }
- /* no moderation supported, disable adaptive support */
- ena_com_disable_adaptive_moderation(ena_dev);
- return rc;
- }
- /* Moderation is supported by the device; record its delay resolution */
- delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
- ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
- /* Disable adaptive moderation by default - can be enabled later */
- ena_com_disable_adaptive_moderation(ena_dev);
- return 0;
- }
- unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
- {
- return ena_dev->intr_moder_tx_interval;
- }
- unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
- {
- return ena_dev->intr_moder_rx_interval;
- }
- int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_llq_desc *llq_features,
- struct ena_llq_configurations *llq_default_cfg)
- {
- struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
- int rc;
- if (!llq_features->max_llq_num) {
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- return 0;
- }
- rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
- if (rc)
- return rc;
- ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
- (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
- if (unlikely(ena_dev->tx_max_header_size == 0)) {
- pr_err("the size of the LLQ entry is smaller than needed\n");
- return -EINVAL;
- }
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- return 0;
- }
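- /* Worked example for the header-size math above (illustrative
- * numbers, assuming sizeof(struct ena_eth_io_tx_desc) == 16): with a
- * 128-byte descriptor list entry and two descriptors placed before
- * the header, tx_max_header_size becomes 128 - 2 * 16 = 96 bytes.
- */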