csio_isr.c

/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

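/*
 * csio_nondata_isr() - MSIX ISR for the non-data vector.
 * @irq: Interrupt number.
 * @dev_id: HW module.
 *
 * Handles slow path interrupts and mailbox completions, and schedules
 * the event worker if firmware event processing is pending.
 */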
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        int rv;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        spin_lock_irqsave(&hw->lock, flags);
        csio_hw_slow_intr_handler(hw);
        rv = csio_mb_isr_handler(hw);

        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the common handler for FW events. It is shared between the
 * MSIX and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
        int rv;
        unsigned long flags;

        rv = csio_fwevtq_handler(hw);

        spin_lock_irqsave(&hw->lock, flags);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: HW module.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_fwevt_handler(hw);

        return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: Private data, unused here.
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *priv)
{
        csio_fwevt_handler(hw);

} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: List head onto which completed I/O requests are chained.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
                       struct csio_fl_dma_buf *flb, void *cbfn_q)
{
        struct csio_ioreq *ioreq;
        uint8_t *scsiwr;
        uint8_t subop;
        void *cmnd;
        unsigned long flags;

        ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
        if (likely(ioreq)) {
                if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
                        subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
                                        ((struct fw_scsi_abrt_cls_wr *)
                                            scsiwr)->sub_opcode_to_chk_all_io);

                        csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
                                 subop ? "Close" : "Abort",
                                 ioreq, ioreq->wr_status);

                        spin_lock_irqsave(&hw->lock, flags);
                        if (subop)
                                csio_scsi_closed(ioreq,
                                                 (struct list_head *)cbfn_q);
                        else
                                csio_scsi_aborted(ioreq,
                                                  (struct list_head *)cbfn_q);
                        /*
                         * We call scsi_done for I/Os that driver thinks aborts
                         * have timed out. If there is a race caused by FW
                         * completing abort at the exact same time that the
                         * driver has detected the abort timeout, the following
                         * check prevents calling of scsi_done twice for the
                         * same command: once from the eh_abort_handler, another
                         * from csio_scsi_isr_handler(). This also avoids the
                         * need to check if csio_scsi_cmnd(req) is NULL in the
                         * fast path.
                         */
                        cmnd = csio_scsi_cmnd(ioreq);
                        if (unlikely(cmnd == NULL))
                                list_del_init(&ioreq->sm.sm_list);

                        spin_unlock_irqrestore(&hw->lock, flags);

                        if (unlikely(cmnd == NULL))
                                csio_put_scsi_ioreq_lock(hw,
                                                csio_hw_to_scsim(hw), ioreq);
                } else {
                        spin_lock_irqsave(&hw->lock, flags);
                        csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
                        spin_unlock_irqrestore(&hw->lock, flags);
                }
        }
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
 * by calling csio_wr_process_iq_idx. If there are completions on the
 * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
 * Once done, add these completions onto the freelist.
 * This routine is shared between the MSIX and INTx handlers.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
        struct csio_hw *hw = (struct csio_hw *)iq->owner;
        LIST_HEAD(cbfn_q);
        struct list_head *tmp;
        struct csio_scsim *scm;
        struct csio_ioreq *ioreq;
        int isr_completions = 0;

        scm = csio_hw_to_scsim(hw);

        if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
                                        &cbfn_q) != 0))
                return IRQ_NONE;

        /* Call back the completion routines */
        list_for_each(tmp, &cbfn_q) {
                ioreq = (struct csio_ioreq *)tmp;
                isr_completions++;
                ioreq->io_cbfn(hw, ioreq);
                /* Release ddp buffer if used for this req */
                if (unlikely(ioreq->dcopy))
                        csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
                                                    ioreq->nsge);
        }

        if (isr_completions) {
                /* Return the ioreqs back to ioreq->freelist */
                csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
                                              isr_completions);
        }

        return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: Ingress queue pointer.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
        struct csio_q *iq = (struct csio_q *) dev_id;
        struct csio_hw *hw;

        if (unlikely(!iq))
                return IRQ_NONE;

        hw = (struct csio_hw *)iq->owner;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_scsi_isr_handler(iq);

        return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: Ingress queue pointer.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                       struct csio_fl_dma_buf *flb, void *priv)
{
        struct csio_q *iq = priv;

        csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: HW module.
 *
 * Handles slow path interrupts, the forwarded-interrupt ingress queue
 * and mailbox completions on the shared INTx/MSI line.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        struct csio_q *intx_q = NULL;
        int rv;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        /* Disable the interrupt for this PCI function. */
        if (hw->intr_mode == CSIO_IM_INTX)
                csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

        /*
         * The read in the following function will flush the
         * above write.
         */
        if (csio_hw_slow_intr_handler(hw))
                ret = IRQ_HANDLED;

        /* Get the INTx Forward interrupt IQ. */
        intx_q = csio_get_q(hw, hw->intr_iq_idx);

        CSIO_DB_ASSERT(intx_q);

        /* IQ handler is not possible for intx_q, hence pass in NULL */
        if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
                ret = IRQ_HANDLED;

        spin_lock_irqsave(&hw->lock, flags);
        rv = csio_mb_isr_handler(hw);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

        return ret;
}

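/*
 * csio_add_msix_desc() - Fill in descriptions for the MSIX vectors.
 * @hw: HW module.
 *
 * Names the non-data, FW event and per-SCSI-queue vectors after the
 * PCI bus/device/function of the adapter, so that they can be told
 * apart in /proc/interrupts.
 */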
static void
csio_add_msix_desc(struct csio_hw *hw)
{
        int i;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        int k = CSIO_EXTRA_VECS;
        int len = sizeof(entryp->desc) - 1;
        int cnt = hw->num_sqsets + k;

        /* Non-data vector */
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

        entryp++;
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
        entryp++;

        /* Name SCSI vecs */
        for (i = k; i < cnt; i++, entryp++) {
                memset(entryp->desc, 0, len + 1);
                snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
                         CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
                         CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
        }
}

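/*
 * csio_request_irqs() - Register the driver's interrupt handlers.
 * @hw: HW module.
 *
 * In MSIX mode, requests one IRQ each for the non-data and FW event
 * vectors and one per SCSI queue set; in MSI/INTx mode, registers
 * csio_fcoe_isr() on the single line. On failure, any IRQs already
 * requested are freed.
 */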
int
csio_request_irqs(struct csio_hw *hw)
{
        int rv, i, j, k = 0;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        struct csio_scsi_cpu_info *info;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
                                 (hw->intr_mode == CSIO_IM_MSI) ?
                                                        0 : IRQF_SHARED,
                                 KBUILD_MODNAME, hw);
                if (rv) {
                        if (hw->intr_mode == CSIO_IM_MSI)
                                pci_disable_msi(hw->pdev);
                        csio_err(hw, "Failed to allocate interrupt line.\n");
                        return -EINVAL;
                }

                goto out;
        }

        /* Add the MSIX vector descriptions */
        csio_add_msix_desc(hw);

        rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         entryp[k].vector, rv);
                goto err;
        }

        entryp[k++].dev_id = (void *)hw;

        rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         entryp[k].vector, rv);
                goto err;
        }

        entryp[k++].dev_id = (void *)hw;

        /* Allocate IRQs for SCSI */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];
                for (j = 0; j < info->max_cpus; j++, k++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];
                        struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

                        rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
                                         entryp[k].desc, q);
                        if (rv) {
                                csio_err(hw,
                                      "IRQ request failed for vec %d err:%d\n",
                                      entryp[k].vector, rv);
                                goto err;
                        }

                        entryp[k].dev_id = (void *)q;

                } /* for all scsi cpus */
        } /* for all ports */

out:
        hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;

        return 0;

err:
        for (i = 0; i < k; i++) {
                entryp = &hw->msix_entries[i];
                free_irq(entryp->vector, entryp->dev_id);
        }

        pci_disable_msix(hw->pdev);

        return -EINVAL;
}

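/*
 * csio_disable_msix() - Disable MSIX, optionally freeing the IRQs first.
 * @hw: HW module.
 * @free: If true, free the requested IRQs before disabling MSIX.
 */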
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
        int i;
        struct csio_msix_entries *entryp;
        int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

        if (free) {
                for (i = 0; i < cnt; i++) {
                        entryp = &hw->msix_entries[i];
                        free_irq(entryp->vector, entryp->dev_id);
                }
        }
        pci_disable_msix(hw->pdev);
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
        int i;
        struct csio_scsi_cpu_info *info;

        while (cnt < hw->num_sqsets) {
                for (i = 0; i < hw->num_pports; i++) {
                        info = &hw->scsi_cpu_info[i];
                        if (info->max_cpus > 1) {
                                info->max_cpus--;
                                hw->num_sqsets--;
                                if (hw->num_sqsets <= cnt)
                                        break;
                        }
                }
        }

        csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

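/*
 * csio_enable_msix() - Allocate MSIX vectors and distribute them.
 * @hw: HW module.
 *
 * Requests between (num_pports + CSIO_EXTRA_VECS) and
 * (num_sqsets + CSIO_EXTRA_VECS) vectors, reducing the SCSI queue
 * sets if fewer vectors are granted, and then assigns the vectors
 * to the non-data, mailbox, FW event and SCSI ingress queues.
 */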
static int
csio_enable_msix(struct csio_hw *hw)
{
        int i, j, k, n, min, cnt;
        struct csio_msix_entries *entryp;
        struct msix_entry *entries;
        int extra = CSIO_EXTRA_VECS;
        struct csio_scsi_cpu_info *info;

        min = hw->num_pports + extra;
        cnt = hw->num_sqsets + extra;

        /* Max vectors required based on #niqs configured in fw */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
                cnt = min_t(uint8_t, hw->cfg_niq, cnt);

        entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < cnt; i++)
                entries[i].entry = (uint16_t)i;

        csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

        cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
        if (cnt < 0) {
                kfree(entries);
                return cnt;
        }

        if (cnt < (hw->num_sqsets + extra)) {
                csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
                csio_reduce_sqsets(hw, cnt - extra);
        }

        /* Save off vectors */
        for (i = 0; i < cnt; i++) {
                entryp = &hw->msix_entries[i];
                entryp->vector = entries[i].vector;
        }

        /* Distribute vectors */
        k = 0;
        csio_set_nondata_intr_idx(hw, entries[k].entry);
        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
        csio_set_fwevt_intr_idx(hw, entries[k++].entry);

        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        n = (j % info->max_cpus) + k;
                        hw->sqset[i][j].intr_idx = entries[n].entry;
                }

                k += info->max_cpus;
        }

        kfree(entries);
        return 0;
}

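/*
 * csio_intr_enable() - Select and enable the interrupt mode.
 * @hw: HW module.
 *
 * Tries MSIX first (if csio_msi == 2), then MSI (if csio_msi == 1),
 * and finally falls back to INTx.
 */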
void
csio_intr_enable(struct csio_hw *hw)
{
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

        /* Try MSIX, then MSI or fall back to INTx */
        if ((csio_msi == 2) && !csio_enable_msix(hw))
                hw->intr_mode = CSIO_IM_MSIX;
        else {
                /* Max iqs required based on #niqs configured in fw */
                if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
                        !csio_is_hw_master(hw)) {
                        int extra = CSIO_EXTRA_MSI_IQS;

                        if (hw->cfg_niq < (hw->num_sqsets + extra)) {
                                csio_dbg(hw, "Reducing sqsets to %d\n",
                                         hw->cfg_niq - extra);
                                csio_reduce_sqsets(hw, hw->cfg_niq - extra);
                        }
                }

                if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
                        hw->intr_mode = CSIO_IM_MSI;
                else
                        hw->intr_mode = CSIO_IM_INTX;
        }

        csio_dbg(hw, "Using %s interrupt mode.\n",
                 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
                 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

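/*
 * csio_intr_disable() - Disable interrupts for the current mode.
 * @hw: HW module.
 * @free: If true, free the IRQ line(s) before disabling MSIX/MSI.
 */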
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
        csio_hw_intr_disable(hw);

        switch (hw->intr_mode) {
        case CSIO_IM_MSIX:
                csio_disable_msix(hw, free);
                break;
        case CSIO_IM_MSI:
                if (free)
                        free_irq(hw->pdev->irq, hw);
                pci_disable_msi(hw->pdev);
                break;
        case CSIO_IM_INTX:
                if (free)
                        free_irq(hw->pdev->irq, hw);
                break;
        default:
                break;
        }
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}