cxgb4_uld.c

/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

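/* Grab a free entry from the ULD MSI-X bitmap under its lock; returns the
 * bitmap index, or -ENOSPC if every vector is already in use.
 */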
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}
	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

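/* Return an MSI-X bitmap entry allocated by get_msix_idx_from_bmap(). */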
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

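/* Allocate the hardware ingress queues (and their free lists) for a ULD,
 * spreading them across the adapter's ports.  On failure, every queue that
 * was already set up is freed again.
 */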
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			if (bmap_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

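/* Set up the SGE ingress queues for a ULD and, for RDMA, tell the firmware
 * to route control queue completions to the new response queues.
 */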
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = alloc_uld_rxqs(adap, rxq_info, lro);

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

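/* Free 'n' ULD ingress queues starting at 'q', including their free lists. */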
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

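/* Release all ingress queues of a ULD.  For RDMA, the control queue
 * completion routing set up in setup_sge_queues_uld() is reset first.
 */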
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

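/* Work out how many ingress and concentrator queues a ULD gets (always a
 * multiple of the number of ports) and allocate the software state for them.
 */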
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

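/* Free the software queue state allocated by cfg_queues_uld(). */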
static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

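/* Request an MSI-X interrupt for every ULD ingress queue, unwinding the
 * already-requested vectors if any request fails.
 */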
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

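/* Release the MSI-X vectors and bitmap entries held by a ULD's queues. */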
static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

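/* Build readable names ("<port0>-<uld><idx>") for the ULD MSI-X vectors. */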
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

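/* Enable NAPI and arm interrupts on a response queue; quiesce_rx() below is
 * the counterpart used when the queue is being torn down.
 */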
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

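/* Free the offload transmit queues of a ULD: stop the restart tasklets,
 * release the hardware egress queues and drop any packets still queued in
 * software.
 */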
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

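/* Allocate the offload transmit queues for a ULD, spread evenly across the
 * adapter's ports.
 */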
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

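/* Drop a reference to the shared ULD transmit queues and free them once the
 * last user is gone.
 */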
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

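/* Create (or take another reference to) the transmit queues for a ULD type.
 * Offload transmit queues are shared, so an existing set is reused if present.
 */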
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;

	i = min_t(int, uld_info->ntxq, num_online_cpus());
	txq_info->ntxq = roundup(i, adap->params.nports);

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}

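/* Allocate the per-adapter ULD bookkeeping: the uld_info array and the
 * rx/tx queue info pointer tables.
 */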
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

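/* Shut down every ULD type that is still attached to this adapter. */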
void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

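/* Fill in the cxgb4_lld_info block describing this adapter's resources and
 * capabilities, which is handed to a ULD when it attaches.
 */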
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

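/* Hand the lower-level driver info to a ULD's ->add() callback and record the
 * handle it returns; notify the ULD right away if the adapter is already up.
 */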
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type. Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	unsigned int adap_idx = 0;
	struct adapter *adap;
	int ret = 0;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		ret = uld_attach(adap, type);
		if (ret)
			goto free_txq;
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);