- /* bnx2fc_io.c: QLogic NetXtreme II Linux FCoE offload driver.
- * IO manager and SCSI IO processing.
- *
- * Copyright (c) 2008 - 2013 Broadcom Corporation
- * Copyright (c) 2014, QLogic Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
- */
- #include "bnx2fc.h"
- #define RESERVE_FREE_LIST_INDEX num_possible_cpus()
- static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
- int bd_index);
- static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
- static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
- static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
- static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
- static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
- struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq);
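- /**
- * bnx2fc_cmd_timer_set - arm the IO timeout timer for a command
- *
- * @io_req: bnx2fc_cmd being timed
- * @timer_msec: timeout value in milliseconds
- *
- * Queues the delayed timeout work and takes an extra reference on the
- * io_req so it stays valid until the timer fires or is cancelled.
- */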
- void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
- unsigned int timer_msec)
- {
- struct bnx2fc_interface *interface = io_req->port->priv;
- if (queue_delayed_work(interface->timer_work_queue,
- &io_req->timeout_work,
- msecs_to_jiffies(timer_msec)))
- kref_get(&io_req->refcount);
- }
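- /**
- * bnx2fc_cmd_timeout - delayed-work handler for command timeouts
- *
- * @work: embedded timeout_work of the bnx2fc_cmd
- *
- * Handles pending RRQ issue, ABTS/ELS timeouts and IO timeouts depending
- * on the command type and flags, and drops the reference taken when the
- * timer was set.
- */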
- static void bnx2fc_cmd_timeout(struct work_struct *work)
- {
- struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
- timeout_work.work);
- struct fc_lport *lport;
- struct fc_rport_priv *rdata;
- u8 cmd_type = io_req->cmd_type;
- struct bnx2fc_rport *tgt = io_req->tgt;
- int logo_issued;
- int rc;
- BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
- "req_flags = %lx\n", cmd_type, io_req->req_flags);
- spin_lock_bh(&tgt->tgt_lock);
- if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
- clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
- /*
- * Ideally we should hold the io_req until RRQ completes,
- * and release io_req from timeout hold.
- */
- spin_unlock_bh(&tgt->tgt_lock);
- bnx2fc_send_rrq(io_req);
- return;
- }
- if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
- BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
- goto done;
- }
- switch (cmd_type) {
- case BNX2FC_SCSI_CMD:
- if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
- &io_req->req_flags)) {
- /* Handle eh_abort timeout */
- BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
- complete(&io_req->tm_done);
- } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
- &io_req->req_flags)) {
- /* Handle internally generated ABTS timeout */
- BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
- io_req->refcount.refcount.counter);
- if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
- &io_req->req_flags))) {
- lport = io_req->port->lport;
- rdata = io_req->tgt->rdata;
- logo_issued = test_and_set_bit(
- BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- /* Explicitly logo the target */
- if (!logo_issued) {
- BNX2FC_IO_DBG(io_req, "Explicit "
- "logo - tgt flags = 0x%lx\n",
- tgt->flags);
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- }
- return;
- }
- } else {
- /* Handle IO timeout */
- BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
- if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
- &io_req->req_flags)) {
- BNX2FC_IO_DBG(io_req, "IO completed before "
- "timer expiry\n");
- goto done;
- }
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
- &io_req->req_flags)) {
- rc = bnx2fc_initiate_abts(io_req);
- if (rc == SUCCESS)
- goto done;
- /*
- * Explicitly logo the target if
- * abts initiation fails
- */
- lport = io_req->port->lport;
- rdata = io_req->tgt->rdata;
- logo_issued = test_and_set_bit(
- BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- if (!logo_issued) {
- BNX2FC_IO_DBG(io_req, "Explicit "
- "logo - tgt flags = 0x%lx\n",
- tgt->flags);
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- }
- return;
- } else {
- BNX2FC_IO_DBG(io_req, "IO already in "
- "ABTS processing\n");
- }
- }
- break;
- case BNX2FC_ELS:
- if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
- BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
- if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
- &io_req->req_flags)) {
- lport = io_req->port->lport;
- rdata = io_req->tgt->rdata;
- logo_issued = test_and_set_bit(
- BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- /* Explicitly logo the target */
- if (!logo_issued) {
- BNX2FC_IO_DBG(io_req, "Explicit logo "
- "(els)\n");
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- }
- return;
- }
- } else {
- /*
- * Handle ELS timeout.
- * tgt_lock is used to sync compl path and timeout
- * path. If els compl path is processing this IO, we
- * have nothing to do here, just release the timer hold
- */
- BNX2FC_IO_DBG(io_req, "ELS timed out\n");
- if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
- &io_req->req_flags))
- goto done;
- /* Indicate the cb_func that this ELS is timed out */
- set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
- if ((io_req->cb_func) && (io_req->cb_arg)) {
- io_req->cb_func(io_req->cb_arg);
- io_req->cb_arg = NULL;
- }
- }
- break;
- default:
- printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
- cmd_type);
- break;
- }
- done:
- /* release the cmd that was held when timer was set */
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- }
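- /**
- * bnx2fc_scsi_done - error a SCSI command back to the midlayer
- *
- * @io_req: bnx2fc_cmd being completed
- * @err_code: host byte to report in sc_cmd->result
- *
- * Used to complete a command outside the normal completion path (abort,
- * cleanup, flush); only BNX2FC_SCSI_CMD requests are completed here.
- */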
- static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
- {
- /* Called with host lock held */
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- /*
- * active_cmd_queue may have other command types as well,
- * and during flush operation, we want to error back only
- * scsi commands.
- */
- if (io_req->cmd_type != BNX2FC_SCSI_CMD)
- return;
- BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
- if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
- /* Do not call scsi done for this IO */
- return;
- }
- bnx2fc_unmap_sg_list(io_req);
- io_req->sc_cmd = NULL;
- if (!sc_cmd) {
- printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
- "IO(0x%x) already cleaned up\n",
- io_req->xid);
- return;
- }
- sc_cmd->result = err_code << 16;
- BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
- sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
- sc_cmd->allowed);
- scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
- }
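- /**
- * bnx2fc_cmd_mgr_alloc - allocate the per-hba command manager
- *
- * @hba: adapter structure
- *
- * Pre-allocates bnx2fc_cmd structures on per-CPU free lists (plus one
- * reserved list for slow-path requests) and a DMA-able BD table for
- * each command.
- */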
- struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
- {
- struct bnx2fc_cmd_mgr *cmgr;
- struct io_bdt *bdt_info;
- struct bnx2fc_cmd *io_req;
- size_t len;
- u32 mem_size;
- u16 xid;
- int i;
- int num_ios, num_pri_ios;
- size_t bd_tbl_sz;
- int arr_sz = num_possible_cpus() + 1;
- u16 min_xid = BNX2FC_MIN_XID;
- u16 max_xid = hba->max_xid;
- if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
- printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
- "and max_xid 0x%x\n", min_xid, max_xid);
- return NULL;
- }
- BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
- num_ios = max_xid - min_xid + 1;
- len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
- len += sizeof(struct bnx2fc_cmd_mgr);
- cmgr = kzalloc(len, GFP_KERNEL);
- if (!cmgr) {
- printk(KERN_ERR PFX "failed to alloc cmgr\n");
- return NULL;
- }
- cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
- arr_sz, GFP_KERNEL);
- if (!cmgr->free_list) {
- printk(KERN_ERR PFX "failed to alloc free_list\n");
- goto mem_err;
- }
- cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
- arr_sz, GFP_KERNEL);
- if (!cmgr->free_list_lock) {
- printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
- kfree(cmgr->free_list);
- cmgr->free_list = NULL;
- goto mem_err;
- }
- cmgr->hba = hba;
- cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
- for (i = 0; i < arr_sz; i++) {
- INIT_LIST_HEAD(&cmgr->free_list[i]);
- spin_lock_init(&cmgr->free_list_lock[i]);
- }
- /*
- * Pre-allocated pool of bnx2fc_cmds.
- * Last entry in the free list array is the free list
- * of slow path requests.
- */
- xid = BNX2FC_MIN_XID;
- num_pri_ios = num_ios - hba->elstm_xids;
- for (i = 0; i < num_ios; i++) {
- io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
- if (!io_req) {
- printk(KERN_ERR PFX "failed to alloc io_req\n");
- goto mem_err;
- }
- INIT_LIST_HEAD(&io_req->link);
- INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
- io_req->xid = xid++;
- if (i < num_pri_ios)
- list_add_tail(&io_req->link,
- &cmgr->free_list[io_req->xid %
- num_possible_cpus()]);
- else
- list_add_tail(&io_req->link,
- &cmgr->free_list[num_possible_cpus()]);
- io_req++;
- }
- /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
- mem_size = num_ios * sizeof(struct io_bdt *);
- cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
- if (!cmgr->io_bdt_pool) {
- printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
- goto mem_err;
- }
- mem_size = sizeof(struct io_bdt);
- for (i = 0; i < num_ios; i++) {
- cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
- if (!cmgr->io_bdt_pool[i]) {
- printk(KERN_ERR PFX "failed to alloc "
- "io_bdt_pool[%d]\n", i);
- goto mem_err;
- }
- }
- /* Allocate and map fcoe_bdt_ctx structures */
- bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
- for (i = 0; i < num_ios; i++) {
- bdt_info = cmgr->io_bdt_pool[i];
- bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
- bd_tbl_sz,
- &bdt_info->bd_tbl_dma,
- GFP_KERNEL);
- if (!bdt_info->bd_tbl) {
- printk(KERN_ERR PFX "failed to alloc "
- "bdt_tbl[%d]\n", i);
- goto mem_err;
- }
- }
- return cmgr;
- mem_err:
- bnx2fc_cmd_mgr_free(cmgr);
- return NULL;
- }
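- /**
- * bnx2fc_cmd_mgr_free - free everything allocated by bnx2fc_cmd_mgr_alloc
- *
- * @cmgr: command manager to tear down
- */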
- void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
- {
- struct io_bdt *bdt_info;
- struct bnx2fc_hba *hba = cmgr->hba;
- size_t bd_tbl_sz;
- u16 min_xid = BNX2FC_MIN_XID;
- u16 max_xid = hba->max_xid;
- int num_ios;
- int i;
- num_ios = max_xid - min_xid + 1;
- /* Free fcoe_bdt_ctx structures */
- if (!cmgr->io_bdt_pool)
- goto free_cmd_pool;
- bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
- for (i = 0; i < num_ios; i++) {
- bdt_info = cmgr->io_bdt_pool[i];
- if (bdt_info->bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
- bdt_info->bd_tbl,
- bdt_info->bd_tbl_dma);
- bdt_info->bd_tbl = NULL;
- }
- }
- /* Destroy io_bdt pool */
- for (i = 0; i < num_ios; i++) {
- kfree(cmgr->io_bdt_pool[i]);
- cmgr->io_bdt_pool[i] = NULL;
- }
- kfree(cmgr->io_bdt_pool);
- cmgr->io_bdt_pool = NULL;
- free_cmd_pool:
- kfree(cmgr->free_list_lock);
- /* Destroy cmd pool */
- if (!cmgr->free_list)
- goto free_cmgr;
- for (i = 0; i < num_possible_cpus() + 1; i++) {
- struct bnx2fc_cmd *tmp, *io_req;
- list_for_each_entry_safe(io_req, tmp,
- &cmgr->free_list[i], link) {
- list_del(&io_req->link);
- kfree(io_req);
- }
- }
- kfree(cmgr->free_list);
- free_cmgr:
- /* Free command manager itself */
- kfree(cmgr);
- }
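- /**
- * bnx2fc_elstm_alloc - allocate a command for ELS/TM/slow-path use
- *
- * @tgt: rport the request is issued to
- * @type: BNX2FC_ELS, BNX2FC_TASK_MGMT_CMD, etc.
- *
- * Allocates from the reserved free list and fails if the target is out
- * of SQ entries or already has too many active IOs.
- */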
- struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
- {
- struct fcoe_port *port = tgt->port;
- struct bnx2fc_interface *interface = port->priv;
- struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
- struct bnx2fc_cmd *io_req;
- struct list_head *listp;
- struct io_bdt *bd_tbl;
- int index = RESERVE_FREE_LIST_INDEX;
- u32 free_sqes;
- u32 max_sqes;
- u16 xid;
- max_sqes = tgt->max_sqes;
- switch (type) {
- case BNX2FC_TASK_MGMT_CMD:
- max_sqes = BNX2FC_TM_MAX_SQES;
- break;
- case BNX2FC_ELS:
- max_sqes = BNX2FC_ELS_MAX_SQES;
- break;
- default:
- break;
- }
- /*
- * NOTE: Free list insertions and deletions are protected with
- * cmgr lock
- */
- spin_lock_bh(&cmd_mgr->free_list_lock[index]);
- free_sqes = atomic_read(&tgt->free_sqes);
- if ((list_empty(&(cmd_mgr->free_list[index]))) ||
- (tgt->num_active_ios.counter >= max_sqes) ||
- (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
- BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
- "ios(%d):sqes(%d)\n",
- tgt->num_active_ios.counter, tgt->max_sqes);
- if (list_empty(&(cmd_mgr->free_list[index])))
- printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
- spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- return NULL;
- }
- listp = (struct list_head *)
- cmd_mgr->free_list[index].next;
- list_del_init(listp);
- io_req = (struct bnx2fc_cmd *) listp;
- xid = io_req->xid;
- cmd_mgr->cmds[xid] = io_req;
- atomic_inc(&tgt->num_active_ios);
- atomic_dec(&tgt->free_sqes);
- spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- INIT_LIST_HEAD(&io_req->link);
- io_req->port = port;
- io_req->cmd_mgr = cmd_mgr;
- io_req->req_flags = 0;
- io_req->cmd_type = type;
- /* Bind io_bdt for this io_req */
- /* Have a static link between io_req and io_bdt_pool */
- bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
- bd_tbl->io_req = io_req;
- /* Hold the io_req against deletion */
- kref_init(&io_req->refcount);
- return io_req;
- }
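- /**
- * bnx2fc_cmd_alloc - allocate a command for a SCSI IO
- *
- * @tgt: rport the IO is issued to
- *
- * Allocates from the current CPU's free list; returns NULL when the
- * list is empty or the target is out of SQ entries.
- */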
- struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
- {
- struct fcoe_port *port = tgt->port;
- struct bnx2fc_interface *interface = port->priv;
- struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
- struct bnx2fc_cmd *io_req;
- struct list_head *listp;
- struct io_bdt *bd_tbl;
- u32 free_sqes;
- u32 max_sqes;
- u16 xid;
- int index = get_cpu();
- max_sqes = BNX2FC_SCSI_MAX_SQES;
- /*
- * NOTE: Free list insertions and deletions are protected with
- * cmgr lock
- */
- spin_lock_bh(&cmd_mgr->free_list_lock[index]);
- free_sqes = atomic_read(&tgt->free_sqes);
- if ((list_empty(&cmd_mgr->free_list[index])) ||
- (tgt->num_active_ios.counter >= max_sqes) ||
- (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
- spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- put_cpu();
- return NULL;
- }
- listp = (struct list_head *)
- cmd_mgr->free_list[index].next;
- list_del_init(listp);
- io_req = (struct bnx2fc_cmd *) listp;
- xid = io_req->xid;
- cmd_mgr->cmds[xid] = io_req;
- atomic_inc(&tgt->num_active_ios);
- atomic_dec(&tgt->free_sqes);
- spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- put_cpu();
- INIT_LIST_HEAD(&io_req->link);
- io_req->port = port;
- io_req->cmd_mgr = cmd_mgr;
- io_req->req_flags = 0;
- /* Bind io_bdt for this io_req */
- /* Have a static link between io_req and io_bdt_pool */
- bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
- bd_tbl->io_req = io_req;
- /* Hold the io_req against deletion */
- kref_init(&io_req->refcount);
- return io_req;
- }
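- /**
- * bnx2fc_cmd_release - kref release handler for a bnx2fc_cmd
- *
- * @ref: refcount embedded in the bnx2fc_cmd
- *
- * Returns the command to its free list and, for non-SCSI commands,
- * frees the middle-path resources.
- */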
- void bnx2fc_cmd_release(struct kref *ref)
- {
- struct bnx2fc_cmd *io_req = container_of(ref,
- struct bnx2fc_cmd, refcount);
- struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
- int index;
- if (io_req->cmd_type == BNX2FC_SCSI_CMD)
- index = io_req->xid % num_possible_cpus();
- else
- index = RESERVE_FREE_LIST_INDEX;
- spin_lock_bh(&cmd_mgr->free_list_lock[index]);
- if (io_req->cmd_type != BNX2FC_SCSI_CMD)
- bnx2fc_free_mp_resc(io_req);
- cmd_mgr->cmds[io_req->xid] = NULL;
- /* Delete IO from retire queue */
- list_del_init(&io_req->link);
- /* Add it to the free list */
- list_add(&io_req->link,
- &cmd_mgr->free_list[index]);
- atomic_dec(&io_req->tgt->num_active_ios);
- spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- }
- static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
- {
- struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
- struct bnx2fc_interface *interface = io_req->port->priv;
- struct bnx2fc_hba *hba = interface->hba;
- size_t sz = sizeof(struct fcoe_bd_ctx);
- /* clear tm flags */
- mp_req->tm_flags = 0;
- if (mp_req->mp_req_bd) {
- dma_free_coherent(&hba->pcidev->dev, sz,
- mp_req->mp_req_bd,
- mp_req->mp_req_bd_dma);
- mp_req->mp_req_bd = NULL;
- }
- if (mp_req->mp_resp_bd) {
- dma_free_coherent(&hba->pcidev->dev, sz,
- mp_req->mp_resp_bd,
- mp_req->mp_resp_bd_dma);
- mp_req->mp_resp_bd = NULL;
- }
- if (mp_req->req_buf) {
- dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
- mp_req->req_buf,
- mp_req->req_buf_dma);
- mp_req->req_buf = NULL;
- }
- if (mp_req->resp_buf) {
- dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
- mp_req->resp_buf,
- mp_req->resp_buf_dma);
- mp_req->resp_buf = NULL;
- }
- }
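- /**
- * bnx2fc_init_mp_req - set up buffers for a middle-path (TM/ELS) request
- *
- * @io_req: command to initialize
- *
- * Allocates DMA-coherent request/response buffers and their BD
- * descriptors; returns FAILED and frees partial allocations on error.
- */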
- int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
- {
- struct bnx2fc_mp_req *mp_req;
- struct fcoe_bd_ctx *mp_req_bd;
- struct fcoe_bd_ctx *mp_resp_bd;
- struct bnx2fc_interface *interface = io_req->port->priv;
- struct bnx2fc_hba *hba = interface->hba;
- dma_addr_t addr;
- size_t sz;
- mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
- memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
- mp_req->req_len = sizeof(struct fcp_cmnd);
- io_req->data_xfer_len = mp_req->req_len;
- mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
- &mp_req->req_buf_dma,
- GFP_ATOMIC);
- if (!mp_req->req_buf) {
- printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
- bnx2fc_free_mp_resc(io_req);
- return FAILED;
- }
- mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
- &mp_req->resp_buf_dma,
- GFP_ATOMIC);
- if (!mp_req->resp_buf) {
- printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
- bnx2fc_free_mp_resc(io_req);
- return FAILED;
- }
- memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
- memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
- /* Allocate and map mp_req_bd and mp_resp_bd */
- sz = sizeof(struct fcoe_bd_ctx);
- mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
- &mp_req->mp_req_bd_dma,
- GFP_ATOMIC);
- if (!mp_req->mp_req_bd) {
- printk(KERN_ERR PFX "unable to alloc MP req bd\n");
- bnx2fc_free_mp_resc(io_req);
- return FAILED;
- }
- mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
- &mp_req->mp_resp_bd_dma,
- GFP_ATOMIC);
- if (!mp_req->mp_resp_bd) {
- printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
- bnx2fc_free_mp_resc(io_req);
- return FAILED;
- }
- /* Fill bd table */
- addr = mp_req->req_buf_dma;
- mp_req_bd = mp_req->mp_req_bd;
- mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
- mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_req_bd->buf_len = CNIC_PAGE_SIZE;
- mp_req_bd->flags = 0;
- /*
- * MP buffer is either a task mgmt command or an ELS.
- * So the assumption is that it consumes a single bd
- * entry in the bd table
- */
- mp_resp_bd = mp_req->mp_resp_bd;
- addr = mp_req->resp_buf_dma;
- mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
- mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
- mp_resp_bd->flags = 0;
- return SUCCESS;
- }
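- /**
- * bnx2fc_initiate_tmf - send a task management command and wait for it
- *
- * @sc_cmd: SCSI command the TMF is issued on behalf of
- * @tm_flags: FCP_TMF_LUN_RESET or FCP_TMF_TGT_RESET
- *
- * Builds the FCP_CMND as a middle-path request, posts it to the SQ and
- * waits on tm_done for completion or timeout.
- */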
- static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
- {
- struct fc_lport *lport;
- struct fc_rport *rport;
- struct fc_rport_libfc_priv *rp;
- struct fcoe_port *port;
- struct bnx2fc_interface *interface;
- struct bnx2fc_rport *tgt;
- struct bnx2fc_cmd *io_req;
- struct bnx2fc_mp_req *tm_req;
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
- struct Scsi_Host *host = sc_cmd->device->host;
- struct fc_frame_header *fc_hdr;
- struct fcp_cmnd *fcp_cmnd;
- int task_idx, index;
- int rc = SUCCESS;
- u16 xid;
- u32 sid, did;
- unsigned long start = jiffies;
- lport = shost_priv(host);
- rport = starget_to_rport(scsi_target(sc_cmd->device));
- port = lport_priv(lport);
- interface = port->priv;
- if (rport == NULL) {
- printk(KERN_ERR PFX "device_reset: rport is NULL\n");
- rc = FAILED;
- goto tmf_err;
- }
- rp = rport->dd_data;
- rc = fc_block_scsi_eh(sc_cmd);
- if (rc)
- return rc;
- if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
- printk(KERN_ERR PFX "device_reset: link is not ready\n");
- rc = FAILED;
- goto tmf_err;
- }
- /* rport and tgt are allocated together, so tgt should be non-NULL */
- tgt = (struct bnx2fc_rport *)&rp[1];
- if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
- printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
- rc = FAILED;
- goto tmf_err;
- }
- retry_tmf:
- io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
- if (!io_req) {
- if (time_after(jiffies, start + HZ)) {
- printk(KERN_ERR PFX "tmf: Failed TMF\n");
- rc = FAILED;
- goto tmf_err;
- }
- msleep(20);
- goto retry_tmf;
- }
- /* Initialize rest of io_req fields */
- io_req->sc_cmd = sc_cmd;
- io_req->port = port;
- io_req->tgt = tgt;
- tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
- rc = bnx2fc_init_mp_req(io_req);
- if (rc == FAILED) {
- printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
- spin_lock_bh(&tgt->tgt_lock);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- goto tmf_err;
- }
- /* Set TM flags */
- io_req->io_req_flags = 0;
- tm_req->tm_flags = tm_flags;
- /* Fill FCP_CMND */
- bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
- fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
- memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
- fcp_cmnd->fc_dl = 0;
- /* Fill FC header */
- fc_hdr = &(tm_req->req_fc_hdr);
- sid = tgt->sid;
- did = rport->port_id;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
- FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
- /* Obtain exchange id */
- xid = io_req->xid;
- BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
- task_idx = xid/BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- /* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[task_idx];
- task = &(task_page[index]);
- bnx2fc_init_mp_task(io_req, task);
- sc_cmd->SCp.ptr = (char *)io_req;
- /* Obtain free SQ entry */
- spin_lock_bh(&tgt->tgt_lock);
- bnx2fc_add_2_sq(tgt, xid);
- /* Enqueue the io_req to active_tm_queue */
- io_req->on_tmf_queue = 1;
- list_add_tail(&io_req->link, &tgt->active_tm_queue);
- init_completion(&io_req->tm_done);
- io_req->wait_for_comp = 1;
- /* Ring doorbell */
- bnx2fc_ring_doorbell(tgt);
- spin_unlock_bh(&tgt->tgt_lock);
- rc = wait_for_completion_timeout(&io_req->tm_done,
- BNX2FC_TM_TIMEOUT * HZ);
- spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
- if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
- set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
- if (io_req->on_tmf_queue) {
- list_del_init(&io_req->link);
- io_req->on_tmf_queue = 0;
- }
- io_req->wait_for_comp = 1;
- bnx2fc_initiate_cleanup(io_req);
- spin_unlock_bh(&tgt->tgt_lock);
- rc = wait_for_completion_timeout(&io_req->tm_done,
- BNX2FC_FW_TIMEOUT);
- spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
- if (!rc)
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- }
- spin_unlock_bh(&tgt->tgt_lock);
- if (!rc) {
- BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
- rc = FAILED;
- } else {
- BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
- rc = SUCCESS;
- }
- tmf_err:
- return rc;
- }
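- /**
- * bnx2fc_initiate_abts - post an ABTS for an outstanding exchange
- *
- * @io_req: exchange to abort
- *
- * Called with tgt_lock held. Starts a 2 * R_A_TOV timer on the original
- * exchange, as the CQE is posted against the original IO request.
- */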
- int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
- {
- struct fc_lport *lport;
- struct bnx2fc_rport *tgt = io_req->tgt;
- struct fc_rport *rport = tgt->rport;
- struct fc_rport_priv *rdata = tgt->rdata;
- struct bnx2fc_interface *interface;
- struct fcoe_port *port;
- struct bnx2fc_cmd *abts_io_req;
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
- struct fc_frame_header *fc_hdr;
- struct bnx2fc_mp_req *abts_req;
- int task_idx, index;
- u32 sid, did;
- u16 xid;
- int rc = SUCCESS;
- u32 r_a_tov = rdata->r_a_tov;
- /* called with tgt_lock held */
- BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
- port = io_req->port;
- interface = port->priv;
- lport = port->lport;
- if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
- printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
- rc = FAILED;
- goto abts_err;
- }
- if (rport == NULL) {
- printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
- rc = FAILED;
- goto abts_err;
- }
- if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
- printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
- rc = FAILED;
- goto abts_err;
- }
- abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
- if (!abts_io_req) {
- printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
- rc = FAILED;
- goto abts_err;
- }
- /* Initialize rest of io_req fields */
- abts_io_req->sc_cmd = NULL;
- abts_io_req->port = port;
- abts_io_req->tgt = tgt;
- abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
- abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
- memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
- /* Fill FC header */
- fc_hdr = &(abts_req->req_fc_hdr);
- /* Obtain oxid and rxid for the original exchange to be aborted */
- fc_hdr->fh_ox_id = htons(io_req->xid);
- fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
- sid = tgt->sid;
- did = rport->port_id;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
- FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
- xid = abts_io_req->xid;
- BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
- task_idx = xid/BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- /* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[task_idx];
- task = &(task_page[index]);
- bnx2fc_init_mp_task(abts_io_req, task);
- /*
- * ABTS task is a temporary task that will be cleaned up
- * irrespective of ABTS response. We need to start the timer
- * for the original exchange, as the CQE is posted for the original
- * IO request.
- *
- * Timer for ABTS is started only when it is originated by a
- * TM request. For the ABTS issued as part of ULP timeout,
- * scsi-ml maintains the timers.
- */
- /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
- bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
- /* Obtain free SQ entry */
- bnx2fc_add_2_sq(tgt, xid);
- /* Ring doorbell */
- bnx2fc_ring_doorbell(tgt);
- abts_err:
- return rc;
- }
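- /**
- * bnx2fc_initiate_seq_cleanup - post a sequence cleanup task
- *
- * @orig_io_req: exchange whose sequence is being cleaned up
- * @offset: relative offset to clean up from
- * @r_ctl: R_CTL of the frame to recover, later passed on to the SRR
- *
- * Takes a reference on the original exchange, which is dropped in
- * bnx2fc_process_seq_cleanup_compl().
- */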
- int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
- enum fc_rctl r_ctl)
- {
- struct fc_lport *lport;
- struct bnx2fc_rport *tgt = orig_io_req->tgt;
- struct bnx2fc_interface *interface;
- struct fcoe_port *port;
- struct bnx2fc_cmd *seq_clnp_req;
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
- struct bnx2fc_els_cb_arg *cb_arg = NULL;
- int task_idx, index;
- u16 xid;
- int rc = 0;
- BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
- orig_io_req->xid);
- kref_get(&orig_io_req->refcount);
- port = orig_io_req->port;
- interface = port->priv;
- lport = port->lport;
- cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
- if (!cb_arg) {
- printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
- rc = -ENOMEM;
- goto cleanup_err;
- }
- seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
- if (!seq_clnp_req) {
- printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
- rc = -ENOMEM;
- kfree(cb_arg);
- goto cleanup_err;
- }
- /* Initialize rest of io_req fields */
- seq_clnp_req->sc_cmd = NULL;
- seq_clnp_req->port = port;
- seq_clnp_req->tgt = tgt;
- seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
- xid = seq_clnp_req->xid;
- task_idx = xid/BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- /* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[task_idx];
- task = &(task_page[index]);
- cb_arg->aborted_io_req = orig_io_req;
- cb_arg->io_req = seq_clnp_req;
- cb_arg->r_ctl = r_ctl;
- cb_arg->offset = offset;
- seq_clnp_req->cb_arg = cb_arg;
- printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
- bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
- /* Obtain free SQ entry */
- bnx2fc_add_2_sq(tgt, xid);
- /* Ring doorbell */
- bnx2fc_ring_doorbell(tgt);
- cleanup_err:
- return rc;
- }
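- /**
- * bnx2fc_initiate_cleanup - post a firmware cleanup task for an exchange
- *
- * @io_req: exchange to clean up
- *
- * Called with tgt_lock held.
- */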
- int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
- {
- struct fc_lport *lport;
- struct bnx2fc_rport *tgt = io_req->tgt;
- struct bnx2fc_interface *interface;
- struct fcoe_port *port;
- struct bnx2fc_cmd *cleanup_io_req;
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
- int task_idx, index;
- u16 xid, orig_xid;
- int rc = 0;
- /* ASSUMPTION: called with tgt_lock held */
- BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
- port = io_req->port;
- interface = port->priv;
- lport = port->lport;
- cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
- if (!cleanup_io_req) {
- printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
- rc = -1;
- goto cleanup_err;
- }
- /* Initialize rest of io_req fields */
- cleanup_io_req->sc_cmd = NULL;
- cleanup_io_req->port = port;
- cleanup_io_req->tgt = tgt;
- cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
- xid = cleanup_io_req->xid;
- task_idx = xid/BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- /* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[task_idx];
- task = &(task_page[index]);
- orig_xid = io_req->xid;
- BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
- bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
- /* Obtain free SQ entry */
- bnx2fc_add_2_sq(tgt, xid);
- /* Ring doorbell */
- bnx2fc_ring_doorbell(tgt);
- cleanup_err:
- return rc;
- }
- /**
- * bnx2fc_eh_target_reset: Reset a target
- *
- * @sc_cmd: SCSI command
- *
- * Set from SCSI host template to send task mgmt command to the target
- * and wait for the response
- */
- int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
- {
- return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
- }
- /**
- * bnx2fc_eh_device_reset - Reset a single LUN
- *
- * @sc_cmd: SCSI command
- *
- * Set from SCSI host template to send task mgmt command to the target
- * and wait for the response
- */
- int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
- {
- return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
- }
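- /**
- * bnx2fc_expl_logo - explicitly log out the rport after an abort failure
- *
- * @lport: local port
- * @io_req: command that triggered the logout
- *
- * Cleans up the exchange, logs the rport off (if not already done) and
- * waits for the session to become ready again, up to
- * BNX2FC_RELOGIN_WAIT_CNT polls.
- */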
- int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
- {
- struct bnx2fc_rport *tgt = io_req->tgt;
- struct fc_rport_priv *rdata = tgt->rdata;
- int logo_issued;
- int rc = SUCCESS;
- int wait_cnt = 0;
- BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
- tgt->flags);
- logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- io_req->wait_for_comp = 1;
- bnx2fc_initiate_cleanup(io_req);
- spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
- io_req->wait_for_comp = 0;
- /*
- * release the reference taken in eh_abort to allow the
- * target to re-login after flushing IOs
- */
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- if (!logo_issued) {
- clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- do {
- msleep(BNX2FC_RELOGIN_WAIT_TIME);
- if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
- rc = FAILED;
- break;
- }
- } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
- }
- spin_lock_bh(&tgt->tgt_lock);
- return rc;
- }
- /**
- * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
- * SCSI command
- *
- * @sc_cmd: SCSI_ML command pointer
- *
- * SCSI abort request handler
- */
- int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
- {
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
- struct bnx2fc_cmd *io_req;
- struct fc_lport *lport;
- struct bnx2fc_rport *tgt;
- int rc = FAILED;
- rc = fc_block_scsi_eh(sc_cmd);
- if (rc)
- return rc;
- lport = shost_priv(sc_cmd->device->host);
- if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
- printk(KERN_ERR PFX "eh_abort: link not ready\n");
- return rc;
- }
- tgt = (struct bnx2fc_rport *)&rp[1];
- BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
- spin_lock_bh(&tgt->tgt_lock);
- io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
- if (!io_req) {
- /* Command might have just completed */
- printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
- spin_unlock_bh(&tgt->tgt_lock);
- return SUCCESS;
- }
- BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
- io_req->refcount.refcount.counter);
- /* Hold IO request across abort processing */
- kref_get(&io_req->refcount);
- BUG_ON(tgt != io_req->tgt);
- /* Remove the io_req from the active_q. */
- /*
- * Task Mgmt functions (LUN RESET & TGT RESET) will not
- * issue an ABTS on this particular IO req, as the
- * io_req is no longer in the active_q.
- */
- if (tgt->flush_in_prog) {
- printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
- "flush in progress\n", io_req->xid);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- return SUCCESS;
- }
- if (io_req->on_active_queue == 0) {
- printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
- "not on active_q\n", io_req->xid);
- /*
- * This condition can happen only due to the FW bug,
- * where we do not receive cleanup response from
- * the FW. Handle this case gracefully by erroring
- * back the IO request to SCSI-ml
- */
- bnx2fc_scsi_done(io_req, DID_ABORT);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- return SUCCESS;
- }
- /*
- * Only eh_abort processing will remove the IO from
- * active_cmd_q before processing the request. this is
- * done to avoid race conditions between IOs aborted
- * as part of task management completion and eh_abort
- * processing
- */
- list_del_init(&io_req->link);
- io_req->on_active_queue = 0;
- /* Move IO req to retire queue */
- list_add_tail(&io_req->link, &tgt->io_retire_queue);
- init_completion(&io_req->tm_done);
- if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
- printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
- "already in abts processing\n", io_req->xid);
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- rc = bnx2fc_expl_logo(lport, io_req);
- /*
- * This only occurs when a task abort was requested while ABTS
- * is in progress. Setting the IO_CLEANUP flag will skip the
- * RRQ process in the case when the FW-generated SCSI_CMD cmpl
- * was a result of the ABTS request rather than the CLEANUP
- * request.
- */
- set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
- goto out;
- }
- /* Cancel the current timer running on this io_req */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
- io_req->wait_for_comp = 1;
- rc = bnx2fc_initiate_abts(io_req);
- if (rc == FAILED) {
- bnx2fc_initiate_cleanup(io_req);
- spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
- spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
- goto done;
- }
- spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
- spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
- if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
- BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
- rc = SUCCESS;
- } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
- &io_req->req_flags))) {
- /* Let the scsi-ml try to recover this command */
- printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
- io_req->xid);
- rc = bnx2fc_expl_logo(lport, io_req);
- goto out;
- } else {
- /*
- * We come here even when there was a race condition
- * between timeout and abts completion, and abts
- * completion happens just in time.
- */
- BNX2FC_IO_DBG(io_req, "abort succeeded\n");
- rc = SUCCESS;
- bnx2fc_scsi_done(io_req, DID_ABORT);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- }
- done:
- /* release the reference taken in eh_abort */
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- out:
- spin_unlock_bh(&tgt->tgt_lock);
- return rc;
- }
- void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
- struct fcoe_task_ctx_entry *task,
- u8 rx_state)
- {
- struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
- struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
- u32 offset = cb_arg->offset;
- enum fc_rctl r_ctl = cb_arg->r_ctl;
- int rc = 0;
- struct bnx2fc_rport *tgt = orig_io_req->tgt;
- BNX2FC_IO_DBG(orig_io_req, "Entered process_seq_cleanup_compl xid = 0x%x, "
- "cmd_type = %d\n",
- seq_clnp_req->xid, seq_clnp_req->cmd_type);
- if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
- printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
- seq_clnp_req->xid);
- goto free_cb_arg;
- }
- spin_unlock_bh(&tgt->tgt_lock);
- rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
- spin_lock_bh(&tgt->tgt_lock);
- if (rc)
- printk(KERN_ERR PFX "clnup_compl: Unable to send SRR, "
- "IO will abort\n");
- seq_clnp_req->cb_arg = NULL;
- kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
- free_cb_arg:
- kfree(cb_arg);
- return;
- }
- void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task,
- u8 num_rq)
- {
- BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
- "refcnt = %d, cmd_type = %d\n",
- io_req->refcount.refcount.counter, io_req->cmd_type);
- bnx2fc_scsi_done(io_req, DID_ERROR);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- if (io_req->wait_for_comp)
- complete(&io_req->tm_done);
- }
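- /**
- * bnx2fc_process_abts_compl - handle the completion of an ABTS request
- *
- * @io_req: original exchange the ABTS was issued for
- * @task: task context carrying the ABTS response
- * @num_rq: number of RQ entries posted
- *
- * Decides whether an RRQ is needed (BA_ACC) and retires the OX_ID after
- * R_A_TOV, or completes the IO back to the waiter/midlayer.
- */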
- void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task,
- u8 num_rq)
- {
- u32 r_ctl;
- u32 r_a_tov = FC_DEF_R_A_TOV;
- u8 issue_rrq = 0;
- struct bnx2fc_rport *tgt = io_req->tgt;
- BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
- "refcnt = %d, cmd_type = %d\n",
- io_req->xid,
- io_req->refcount.refcount.counter, io_req->cmd_type);
- if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
- &io_req->req_flags)) {
- BNX2FC_IO_DBG(io_req, "Timer context finished processing"
- " this io\n");
- return;
- }
- /* Do not issue RRQ as this IO is already cleaned up */
- if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
- &io_req->req_flags))
- goto io_compl;
- /*
- * For ABTS issued due to SCSI eh_abort_handler, timeout
- * values are maintained by scsi-ml itself. Cancel timeout
- * in case ABTS issued as part of task management function
- * or due to FW error.
- */
- if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
- switch (r_ctl) {
- case FC_RCTL_BA_ACC:
- /*
- * Don't release this cmd yet. It will be released
- * after we get the RRQ response.
- */
- BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
- issue_rrq = 1;
- break;
- case FC_RCTL_BA_RJT:
- BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
- break;
- default:
- printk(KERN_ERR PFX "Unknown ABTS response\n");
- break;
- }
- if (issue_rrq) {
- BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
- set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
- }
- set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
- bnx2fc_cmd_timer_set(io_req, r_a_tov);
- io_compl:
- if (io_req->wait_for_comp) {
- if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
- &io_req->req_flags))
- complete(&io_req->tm_done);
- } else {
- /*
- * We end up here when ABTS is issued as
- * in asynchronous context, i.e., as part
- * of task management completion, or
- * when FW error is received or when the
- * ABTS is issued when the IO is timed
- * out.
- */
- if (io_req->on_active_queue) {
- list_del_init(&io_req->link);
- io_req->on_active_queue = 0;
- /* Move IO req to retire queue */
- list_add_tail(&io_req->link, &tgt->io_retire_queue);
- }
- bnx2fc_scsi_done(io_req, DID_ERROR);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- }
- }
- static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
- {
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- struct bnx2fc_rport *tgt = io_req->tgt;
- struct bnx2fc_cmd *cmd, *tmp;
- u64 tm_lun = sc_cmd->device->lun;
- u64 lun;
- int rc = 0;
- /* called with tgt_lock held */
- BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
- /*
- * Walk thru the active_ios queue and ABORT the IO
- * that matches with the LUN that was reset
- */
- list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
- BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
- lun = cmd->sc_cmd->device->lun;
- if (lun == tm_lun) {
- /* Initiate ABTS on this cmd */
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
- &cmd->req_flags)) {
- /* cancel the IO timeout */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release);
- /* timer hold */
- rc = bnx2fc_initiate_abts(cmd);
- /* abts shouldn't fail in this context */
- WARN_ON(rc != SUCCESS);
- } else
- printk(KERN_ERR PFX "lun_rst: abts already in"
- " progress for this IO 0x%x\n",
- cmd->xid);
- }
- }
- }
- static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
- {
- struct bnx2fc_rport *tgt = io_req->tgt;
- struct bnx2fc_cmd *cmd, *tmp;
- int rc = 0;
- /* called with tgt_lock held */
- BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
- /*
- * Walk thru the active_ios queue and ABORT the IO
- * that matches with the LUN that was reset
- */
- list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
- BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
- /* Initiate ABTS */
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
- &cmd->req_flags)) {
- /* cancel the IO timeout */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* timer hold */
- rc = bnx2fc_initiate_abts(cmd);
- /* abts shouldn't fail in this context */
- WARN_ON(rc != SUCCESS);
- } else
- printk(KERN_ERR PFX "tgt_rst: abts already in progress"
- " for this IO 0x%x\n", cmd->xid);
- }
- }
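- /**
- * bnx2fc_process_tm_compl - handle task management completion
- *
- * @io_req: TM request that completed
- * @task: task context carrying the MP response
- * @num_rq: number of RQ entries holding FCP_RSP_INFO/sense data
- *
- * Parses the FCP_RSP, aborts the affected IOs on a successful LUN/target
- * reset and completes the originating SCSI command.
- */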
- void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task, u8 num_rq)
- {
- struct bnx2fc_mp_req *tm_req;
- struct fc_frame_header *fc_hdr;
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- u64 *hdr;
- u64 *temp_hdr;
- void *rsp_buf;
- /* Called with tgt_lock held */
- BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
- if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
- set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
- else {
- /* TM has already timed out and we got
- * delayed completion. Ignore completion
- * processing.
- */
- return;
- }
- tm_req = &(io_req->mp_req);
- fc_hdr = &(tm_req->resp_fc_hdr);
- hdr = (u64 *)fc_hdr;
- temp_hdr = (u64 *)
- &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
- hdr[0] = cpu_to_be64(temp_hdr[0]);
- hdr[1] = cpu_to_be64(temp_hdr[1]);
- hdr[2] = cpu_to_be64(temp_hdr[2]);
- tm_req->resp_len =
- task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
- rsp_buf = tm_req->resp_buf;
- if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
- bnx2fc_parse_fcp_rsp(io_req,
- (struct fcoe_fcp_rsp_payload *)
- rsp_buf, num_rq);
- if (io_req->fcp_rsp_code == 0) {
- /* TM successful */
- if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
- bnx2fc_lun_reset_cmpl(io_req);
- else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
- bnx2fc_tgt_reset_cmpl(io_req);
- }
- } else {
- printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
- fc_hdr->fh_r_ctl);
- }
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
- return;
- }
- switch (io_req->fcp_status) {
- case FC_GOOD:
- if (io_req->cdb_status == 0) {
- /* Good IO completion */
- sc_cmd->result = DID_OK << 16;
- } else {
- /* Transport status is good, SCSI status not good */
- sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
- }
- if (io_req->fcp_resid)
- scsi_set_resid(sc_cmd, io_req->fcp_resid);
- break;
- default:
- BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
- io_req->fcp_status);
- break;
- }
- sc_cmd = io_req->sc_cmd;
- io_req->sc_cmd = NULL;
- /* check if the io_req exists in tgt's tmf_q */
- if (io_req->on_tmf_queue) {
- list_del_init(&io_req->link);
- io_req->on_tmf_queue = 0;
- } else {
- printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
- return;
- }
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- if (io_req->wait_for_comp) {
- BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
- complete(&io_req->tm_done);
- }
- }
- static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
- int bd_index)
- {
- struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
- int frag_size, sg_frags;
- sg_frags = 0;
- while (sg_len) {
- if (sg_len >= BNX2FC_BD_SPLIT_SZ)
- frag_size = BNX2FC_BD_SPLIT_SZ;
- else
- frag_size = sg_len;
- bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
- bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
- bd[bd_index + sg_frags].buf_len = (u16)frag_size;
- bd[bd_index + sg_frags].flags = 0;
- addr += (u64) frag_size;
- sg_frags++;
- sg_len -= frag_size;
- }
- return sg_frags;
- }
- static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
- {
- struct bnx2fc_interface *interface = io_req->port->priv;
- struct bnx2fc_hba *hba = interface->hba;
- struct scsi_cmnd *sc = io_req->sc_cmd;
- struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
- struct scatterlist *sg;
- int byte_count = 0;
- int sg_count = 0;
- int bd_count = 0;
- int sg_frags;
- unsigned int sg_len;
- u64 addr;
- int i;
- /*
- * Use dma_map_sg directly to ensure we're using the correct
- * dev struct off of pcidev.
- */
- sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
- scsi_sg_count(sc), sc->sc_data_direction);
- scsi_for_each_sg(sc, sg, sg_count, i) {
- sg_len = sg_dma_len(sg);
- addr = sg_dma_address(sg);
- if (sg_len > BNX2FC_MAX_BD_LEN) {
- sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
- bd_count);
- } else {
- sg_frags = 1;
- bd[bd_count].buf_addr_lo = addr & 0xffffffff;
- bd[bd_count].buf_addr_hi = addr >> 32;
- bd[bd_count].buf_len = (u16)sg_len;
- bd[bd_count].flags = 0;
- }
- bd_count += sg_frags;
- byte_count += sg_len;
- }
- if (byte_count != scsi_bufflen(sc))
- printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
- "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
- io_req->xid);
- return bd_count;
- }
- static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
- {
- struct scsi_cmnd *sc = io_req->sc_cmd;
- struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
- int bd_count;
- if (scsi_sg_count(sc)) {
- bd_count = bnx2fc_map_sg(io_req);
- if (bd_count == 0)
- return -ENOMEM;
- } else {
- bd_count = 0;
- bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
- bd[0].buf_len = bd[0].flags = 0;
- }
- io_req->bd_tbl->bd_valid = bd_count;
- return 0;
- }
- static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
- {
- struct scsi_cmnd *sc = io_req->sc_cmd;
- struct bnx2fc_interface *interface = io_req->port->priv;
- struct bnx2fc_hba *hba = interface->hba;
- /*
- * Use dma_unmap_sg directly to ensure we're using the correct
- * dev struct off of pcidev.
- */
- if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
- dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
- scsi_sg_count(sc), sc->sc_data_direction);
- io_req->bd_tbl->bd_valid = 0;
- }
- }
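- /**
- * bnx2fc_build_fcp_cmnd - fill an FCP_CMND payload from the scsi_cmnd
- *
- * @io_req: command the payload belongs to
- * @fcp_cmnd: payload buffer to fill
- */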
- void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
- struct fcp_cmnd *fcp_cmnd)
- {
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
- fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
- memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
- fcp_cmnd->fc_cmdref = 0;
- fcp_cmnd->fc_pri_ta = 0;
- fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
- fcp_cmnd->fc_flags = io_req->io_req_flags;
- fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
- }
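- /**
- * bnx2fc_parse_fcp_rsp - parse the FCP_RSP and fetch sense data
- *
- * @io_req: command the response belongs to
- * @fcp_rsp: response payload from the task context
- * @num_rq: number of RQ entries holding FCP_RSP_INFO/sense data
- */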
- static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
- struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq)
- {
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- struct bnx2fc_rport *tgt = io_req->tgt;
- u8 rsp_flags = fcp_rsp->fcp_flags.flags;
- u32 rq_buff_len = 0;
- int i;
- unsigned char *rq_data;
- unsigned char *dummy;
- int fcp_sns_len = 0;
- int fcp_rsp_len = 0;
- io_req->fcp_status = FC_GOOD;
- io_req->fcp_resid = fcp_rsp->fcp_resid;
- io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
- /* Fetch fcp_rsp_info and fcp_sns_info if available */
- if (num_rq) {
- /*
- * We do not anticipate num_rq > 1, as the Linux-defined
- * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
- * a single 256-byte RQ buffer is good enough to hold this.
- */
- if (rsp_flags &
- FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
- fcp_rsp_len = rq_buff_len
- = fcp_rsp->fcp_rsp_len;
- }
- if (rsp_flags &
- FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
- fcp_sns_len = fcp_rsp->fcp_sns_len;
- rq_buff_len += fcp_rsp->fcp_sns_len;
- }
- io_req->fcp_rsp_len = fcp_rsp_len;
- io_req->fcp_sns_len = fcp_sns_len;
- if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
- /* Invalid sense length. */
- printk(KERN_ERR PFX "invalid sns length %d\n",
- rq_buff_len);
- /* reset rq_buff_len */
- rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
- }
- rq_data = bnx2fc_get_next_rqe(tgt, 1);
- if (num_rq > 1) {
- /* We do not need extra sense data */
- for (i = 1; i < num_rq; i++)
- dummy = bnx2fc_get_next_rqe(tgt, 1);
- }
- /* fetch fcp_rsp_code */
- if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
- /* Only for task management function */
- io_req->fcp_rsp_code = rq_data[3];
- printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
- io_req->fcp_rsp_code);
- }
- /* fetch sense data */
- rq_data += fcp_rsp_len;
- if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
- printk(KERN_ERR PFX "Truncating sense buffer\n");
- fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
- }
- memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- if (fcp_sns_len)
- memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
- /* return RQ entries */
- for (i = 0; i < num_rq; i++)
- bnx2fc_return_rqe(tgt, 1);
- }
- }
- /**
- * bnx2fc_queuecommand - Queuecommand function of the scsi template
- *
- * @host: The Scsi_Host the command was issued to
- * @sc_cmd: struct scsi_cmnd to be executed
- *
- * This is the IO strategy routine, called by SCSI-ML
- **/
- int bnx2fc_queuecommand(struct Scsi_Host *host,
- struct scsi_cmnd *sc_cmd)
- {
- struct fc_lport *lport = shost_priv(host);
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
- struct bnx2fc_rport *tgt;
- struct bnx2fc_cmd *io_req;
- int rc = 0;
- int rval;
- rval = fc_remote_port_chkready(rport);
- if (rval) {
- sc_cmd->result = rval;
- sc_cmd->scsi_done(sc_cmd);
- return 0;
- }
- if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto exit_qcmd;
- }
- /* rport and tgt are allocated together, so tgt should be non-NULL */
- tgt = (struct bnx2fc_rport *)&rp[1];
- if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
- /*
- * Session is not offloaded yet. Let SCSI-ml retry
- * the command.
- */
- rc = SCSI_MLQUEUE_TARGET_BUSY;
- goto exit_qcmd;
- }
- if (tgt->retry_delay_timestamp) {
- if (time_after(jiffies, tgt->retry_delay_timestamp)) {
- tgt->retry_delay_timestamp = 0;
- } else {
- /* If retry_delay timer is active, flow off the ML */
- rc = SCSI_MLQUEUE_TARGET_BUSY;
- goto exit_qcmd;
- }
- }
- spin_lock_bh(&tgt->tgt_lock);
- io_req = bnx2fc_cmd_alloc(tgt);
- if (!io_req) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto exit_qcmd_tgtlock;
- }
- io_req->sc_cmd = sc_cmd;
- if (bnx2fc_post_io_req(tgt, io_req)) {
- printk(KERN_ERR PFX "Unable to post io_req\n");
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto exit_qcmd_tgtlock;
- }
- exit_qcmd_tgtlock:
- spin_unlock_bh(&tgt->tgt_lock);
- exit_qcmd:
- return rc;
- }
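- /**
- * bnx2fc_process_scsi_cmd_compl - handle SCSI command completion
- *
- * @io_req: command that completed
- * @task: task context carrying the FCP_RSP
- * @num_rq: number of RQ entries holding additional response data
- *
- * Called with tgt_lock held; parses the FCP_RSP, moves the IO to the
- * retire queue and completes it back to the SCSI midlayer.
- */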
- void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task,
- u8 num_rq)
- {
- struct fcoe_fcp_rsp_payload *fcp_rsp;
- struct bnx2fc_rport *tgt = io_req->tgt;
- struct scsi_cmnd *sc_cmd;
- struct Scsi_Host *host;
- /* scsi_cmd_cmpl is called with tgt lock held */
- if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
- /* we will not receive ABTS response for this IO */
- BNX2FC_IO_DBG(io_req, "Timer context finished processing "
- "this scsi cmd\n");
- }
- /* Cancel the timeout_work, as we received IO completion */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- sc_cmd = io_req->sc_cmd;
- if (sc_cmd == NULL) {
- printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
- return;
- }
- /* Fetch fcp_rsp from task context and perform cmd completion */
- fcp_rsp = (struct fcoe_fcp_rsp_payload *)
- &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
- /* parse fcp_rsp and obtain sense data from RQ if available */
- bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
- host = sc_cmd->device->host;
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "SCp.ptr is NULL\n");
- return;
- }
- if (io_req->on_active_queue) {
- list_del_init(&io_req->link);
- io_req->on_active_queue = 0;
- /* Move IO req to retire queue */
- list_add_tail(&io_req->link, &tgt->io_retire_queue);
- } else {
- /* This should not happen, but could have been pulled
- * by bnx2fc_flush_active_ios(), or during a race
- * between command abort and (late) completion.
- */
- BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
- if (io_req->wait_for_comp)
- if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
- &io_req->req_flags))
- complete(&io_req->tm_done);
- }
- bnx2fc_unmap_sg_list(io_req);
- io_req->sc_cmd = NULL;
- switch (io_req->fcp_status) {
- case FC_GOOD:
- if (io_req->cdb_status == 0) {
- /* Good IO completion */
- sc_cmd->result = DID_OK << 16;
- } else {
- /* Transport status is good, SCSI status not good */
- BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
- " fcp_resid = 0x%x\n",
- io_req->cdb_status, io_req->fcp_resid);
- sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
- if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
- io_req->cdb_status == SAM_STAT_BUSY) {
- /*
- * Set the jiffies + retry_delay_timer * 100ms
- * for the rport/tgt.
- */
- tgt->retry_delay_timestamp = jiffies +
- fcp_rsp->retry_delay_timer * HZ / 10;
- }
- }
- if (io_req->fcp_resid)
- scsi_set_resid(sc_cmd, io_req->fcp_resid);
- break;
- default:
- printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
- io_req->fcp_status);
- break;
- }
- sc_cmd->SCp.ptr = NULL;
- sc_cmd->scsi_done(sc_cmd);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- }
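- /**
- * bnx2fc_post_io_req - post a SCSI IO to the firmware
- *
- * @tgt: rport the IO is issued to
- * @io_req: command to post
- *
- * Called with tgt_lock held; builds the BD list and task context, adds
- * the request to the SQ and rings the doorbell.
- */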
- int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
- struct bnx2fc_cmd *io_req)
- {
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- struct fcoe_port *port = tgt->port;
- struct bnx2fc_interface *interface = port->priv;
- struct bnx2fc_hba *hba = interface->hba;
- struct fc_lport *lport = port->lport;
- struct fc_stats *stats;
- int task_idx, index;
- u16 xid;
- /* bnx2fc_post_io_req() is called with the tgt_lock held */
- /* Initialize rest of io_req fields */
- io_req->cmd_type = BNX2FC_SCSI_CMD;
- io_req->port = port;
- io_req->tgt = tgt;
- io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
- stats = per_cpu_ptr(lport->stats, get_cpu());
- if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- io_req->io_req_flags = BNX2FC_READ;
- stats->InputRequests++;
- stats->InputBytes += io_req->data_xfer_len;
- } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
- io_req->io_req_flags = BNX2FC_WRITE;
- stats->OutputRequests++;
- stats->OutputBytes += io_req->data_xfer_len;
- } else {
- io_req->io_req_flags = 0;
- stats->ControlRequests++;
- }
- put_cpu();
- xid = io_req->xid;
- /* Build buffer descriptor list for firmware from sg list */
- if (bnx2fc_build_bd_list_from_sg(io_req)) {
- printk(KERN_ERR PFX "BD list creation failed\n");
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- return -EAGAIN;
- }
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- /* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
- task = &(task_page[index]);
- bnx2fc_init_task(io_req, task);
- if (tgt->flush_in_prog) {
- printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- return -EAGAIN;
- }
- if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
- printk(KERN_ERR PFX "Session not ready...post_io\n");
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- return -EAGAIN;
- }
- /* Time IO req */
- if (tgt->io_timeout)
- bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
- /* Obtain free SQ entry */
- bnx2fc_add_2_sq(tgt, xid);
- /* Enqueue the io_req to active_cmd_queue */
- io_req->on_active_queue = 1;
- /* move io_req from pending_queue to active_queue */
- list_add_tail(&io_req->link, &tgt->active_cmd_queue);
- /* Ring doorbell */
- bnx2fc_ring_doorbell(tgt);
- return 0;
- }