cxgb4_uld.c

/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
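
/*
 * Claim a free MSI-X index from the per-adapter ULD bitmap.
 * Returns the index on success or -ENOSPC if the map is full.
 */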
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}
	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}
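
/* Return a previously claimed MSI-X index to the ULD bitmap. */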
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
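
/*
 * Allocate the ULD's ingress queues (and, past nrxq, its concentrator
 * queues), spreading them across the adapter's ports and binding each one
 * to an MSI-X vector when MSI-X is in use.  On failure, every queue
 * allocated so far is freed again.
 */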
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}
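
/*
 * Set up the SGE queues for a ULD: allocate the MSI-X lookup table when
 * MSI-X is in use, allocate the rx queues, and for RDMA ask the firmware
 * to route control queue completions to the per-port RDMA response queues.
 */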
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}
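
/* Free the first n ULD rx queues, including their free lists. */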
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}
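
/*
 * Tear down all SGE queues owned by a ULD.  For RDMA, the control queue
 * completion routing programmed in setup_sge_queues_uld() is cleared first.
 */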
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
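
/*
 * Size and allocate the rx queue bookkeeping for a ULD.  The number of
 * ingress and concentrator queues is derived from the ULD's request, the
 * number of online CPUs, the port count and the per-ULD queue budget, and
 * the response queues are then given their default parameters.
 */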
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}
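
/* Release the rx queue bookkeeping allocated by cfg_queues_uld(). */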
static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
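
/*
 * Request an MSI-X interrupt for every ULD rx queue.  On failure, the IRQs
 * and bitmap entries acquired so far are released again.
 */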
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}
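
/* Free the MSI-X interrupts and bitmap entries held by a ULD's rx queues. */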
static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}
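
/*
 * Name each ULD MSI-X vector after the first port's netdev, the ULD name
 * and the queue index ("%s-%s%d").
 */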
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}
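
/* Enable NAPI (if the queue has a handler) and arm the response queue's GTS. */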
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler) {
		cxgb_busy_poll_init_lock(q);
		napi_enable(&q->napi);
	}
	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}
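
/* Disable NAPI on a response queue and wait for any busy-poll user to let go. */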
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler) {
		napi_disable(&q->napi);
		local_bh_disable();
		while (!cxgb_poll_lock_napi(q))
			mdelay(1);
		local_bh_enable();
	}
}
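
/* Apply enable_rx()/quiesce_rx() to every rx queue owned by a ULD. */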
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
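
/* Publish the ULD's response queue and concentrator queue IDs in the LLD info. */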
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}
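
/*
 * Allocate the per-adapter ULD state: the adap->uld array and the table of
 * per-ULD rx queue info pointers.  t4_uld_mem_free() releases both.
 */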
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	return 0;
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}
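
/*
 * Quiesce, release IRQs and free the SGE queues of every ULD that is
 * currently attached to the adapter.
 */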
void t4_uld_clean_up(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info;
	unsigned int i;

	if (!adap->uld)
		return;
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;
		rxq_info = adap->sge.uld_rxq_info[i];
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, i);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, i);
		free_sge_queues_uld(adap, i);
		free_queues_uld(adap, i);
	}
}
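
/*
 * Fill the cxgb4_lld_info structure handed to a ULD at attach time with the
 * adapter resources and parameters it needs.
 */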
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}
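
/*
 * Hand the LLD info to the ULD's add() callback and, if the adapter is
 * already up, notify the ULD of the CXGB4_STATE_UP transition.
 */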
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	int ret = 0;
	unsigned int adap_idx = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		adap->uld[type] = *p;
		uld_attach(adap, type);
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
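
/*
 * Illustrative sketch only (not part of the driver): a ULD typically fills a
 * static struct cxgb4_uld_info and registers it from its module init code,
 * along the lines of
 *
 *	static struct cxgb4_uld_info example_uld_info = {
 *		.name		= "example",		hypothetical ULD name
 *		.nrxq		= MAX_OFLD_QSETS,	rx queues requested
 *		.rxq_size	= 1024,			response queue size
 *		.ciq		= true,			also allocate concentrator IQs
 *		.lro		= false,
 *		.add		= example_add,		returns the ULD handle
 *		.rx_handler	= example_rx_handler,
 *		.state_change	= example_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_RDMA, &example_uld_info);
 *
 * The "example_" identifiers and values are hypothetical; only cxgb4_uld_info
 * members referenced elsewhere in this file are shown.
 */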
/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);