i40e_fcoe.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/if_ether.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include <uapi/linux/dcbnl.h>

#include "i40e.h"
#include "i40e_fcoe.h"
/**
 * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
 * @sof: the FCoE start of frame delimiter
 **/
static inline bool i40e_fcoe_sof_is_class2(u8 sof)
{
	return (sof == FC_SOF_I2) || (sof == FC_SOF_N2);
}

/**
 * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF
 * @sof: the FCoE start of frame delimiter
 **/
static inline bool i40e_fcoe_sof_is_class3(u8 sof)
{
	return (sof == FC_SOF_I3) || (sof == FC_SOF_N3);
}

/**
 * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW
 * @sof: the input SOF value from the frame
 **/
static inline bool i40e_fcoe_sof_is_supported(u8 sof)
{
	return i40e_fcoe_sof_is_class2(sof) ||
	       i40e_fcoe_sof_is_class3(sof);
}

/**
 * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame
 * @skb: the frame whose SOF is to be pulled from
 * @sof: where the pulled SOF value is stored
 **/
static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof)
{
	*sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;

	if (!i40e_fcoe_sof_is_supported(*sof))
		return -EINVAL;
	return 0;
}

/**
 * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW
 * @eof: the input EOF value from the frame
 **/
static inline bool i40e_fcoe_eof_is_supported(u8 eof)
{
	return (eof == FC_EOF_N) || (eof == FC_EOF_T) ||
	       (eof == FC_EOF_NI) || (eof == FC_EOF_A);
}

/**
 * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame
 * @skb: the frame whose EOF is to be pulled from
 * @eof: where the pulled EOF value is stored
 **/
static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
{
	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, eof, 1);

	if (!i40e_fcoe_eof_is_supported(*eof))
		return -EINVAL;
	return 0;
}

/**
 * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming
 * @eof: the input eof value from the frame
 *
 * The FC EOF is converted to the value understood by HW for descriptor
 * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
 * first, as that already checks for all supported valid eof values.
 **/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
	switch (eof) {
	case FC_EOF_N:
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N;
	case FC_EOF_T:
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T;
	case FC_EOF_NI:
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI;
	case FC_EOF_A:
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
	default:
		/* Supported valid eof shall be already checked by
		 * calling i40e_fcoe_eof_is_supported() first,
		 * therefore this default case shall never hit.
		 */
		WARN_ON(1);
		return -EINVAL;
	}
}
/**
 * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid
 * @xid: the exchange id
 **/
static inline bool i40e_fcoe_xid_is_valid(u16 xid)
{
	return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX);
}

/**
 * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated
 * @pf: pointer to PF
 * @ddp: sw DDP context
 *
 * Unmap the scatter-gather list associated with the given SW DDP context
 **/
static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf,
				       struct i40e_fcoe_ddp *ddp)
{
	if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags))
		return;

	if (ddp->sgl) {
		dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
		ddp->sgl = NULL;
		ddp->sgc = 0;
	}

	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}
}

/**
 * i40e_fcoe_ddp_clear - clear the given SW DDP context
 * @ddp: SW DDP context
 **/
static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp)
{
	memset(ddp, 0, sizeof(struct i40e_fcoe_ddp));
	ddp->xid = FC_XID_UNKNOWN;
	ddp->flags = __I40E_FCOE_DDP_NONE;
}

/**
 * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE
 * @id: the prog id for the programming status Rx descriptor write-back
 **/
static inline bool i40e_fcoe_progid_is_fcoe(u8 id)
{
	return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
	       (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS);
}

/**
 * i40e_fcoe_fc_get_xid - get xid from the frame header
 * @fh: the fc frame header
 *
 * If the received frame belongs to an exchange this end originated
 * (EX_CTX set by the responder), the ox_id identifies the exchange
 * locally; otherwise the rx_id assigned by this end as responder does.
 *
 * Returns ox_id if exchange originator, rx_id if responder
 **/
static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh)
{
	u32 f_ctl = ntoh24(fh->fh_f_ctl);

	return (f_ctl & FC_FC_EX_CTX) ?
		be16_to_cpu(fh->fh_ox_id) :
		be16_to_cpu(fh->fh_rx_id);
}

/**
 * i40e_fcoe_fc_frame_header - get fc frame header from skb
 * @skb: packet
 *
 * This checks if there is a VLAN header and returns the data
 * pointer to the start of the fc_frame_header.
 *
 * Returns pointer to the fc_frame_header
 **/
static inline struct fc_frame_header *i40e_fcoe_fc_frame_header(
	struct sk_buff *skb)
{
	void *fh = skb->data + sizeof(struct fcoe_hdr);

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh += sizeof(struct vlan_hdr);

	return (struct fc_frame_header *)fh;
}

/**
 * i40e_fcoe_ddp_put - release the DDP context for a given exchange id
 * @netdev: the corresponding net_device
 * @xid: the exchange id whose corresponding DDP context will be released
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 **/
static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	int len = 0;
	struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid];

	if (!fcoe || !ddp)
		goto out;

	if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags))
		len = ddp->len;
	i40e_fcoe_ddp_unmap(pf, ddp);
out:
	return len;
}
/**
 * i40e_init_pf_fcoe - sets up the HW for FCoE
 * @pf: pointer to PF
 **/
void i40e_init_pf_fcoe(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	pf->num_fcoe_qps = 0;
	pf->fcoe_hmc_cntx_num = 0;
	pf->fcoe_hmc_filt_num = 0;

	if (!pf->hw.func_caps.fcoe) {
		dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
		return;
	}

	if (!pf->hw.func_caps.dcb) {
		dev_warn(&pf->pdev->dev,
			 "Hardware is not DCB capable, not enabling FCoE.\n");
		return;
	}
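
	/* The hash enable (HENA) flags form a 64-bit PCTYPE bitmap split
	 * across PFQF_HENA(0) and PFQF_HENA(1); the FCoE PCTYPEs all have
	 * bit indices above 31, hence the second register and the "- 32"
	 * bit offset below.
	 */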
	/* enable FCoE hash filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1));
	val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
	val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
	val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val);

	/* enable flag */
	pf->flags |= I40E_FLAG_FCOE_ENABLED;
	pf->num_fcoe_qps = I40E_DEFAULT_FCOE;

	/* Reserve 4K DDP contexts and 20K filter size for FCoE */
	pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
				I40E_DMA_CNTX_BASE_SIZE;
	pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
				BIT(I40E_HASH_FILTER_SIZE_16K) *
				I40E_HASH_FILTER_BASE_SIZE;

	/* FCoE object: max 16K filter buckets and 4K DMA contexts */
	pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K;
	pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;

	/* Setup max frame with FCoE_MTU plus L2 overheads */
	val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL);
	val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
	val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
		<< I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
	i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val);

	dev_info(&pf->pdev->dev, "FCoE is supported.\n");
}
/**
 * i40e_get_fcoe_tc_map - Return TC map for FCoE APP
 * @pf: pointer to PF
 **/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 0;
	u8 tc, i;
	/* Get the FCoE APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
		    app.protocolid == ETH_P_FCOE) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	/* TC0 if there is no TC defined for FCoE APP TLV */
	enabled_tc = enabled_tc ? enabled_tc : 0x1;

	return enabled_tc;
}
/**
 * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI
 * @vsi: pointer to the associated VSI struct
 * @ctxt: pointer to the associated VSI context to be passed to HW
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
{
	struct i40e_aqc_vsi_properties_data *info = &ctxt->info;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 0;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
		dev_err(&pf->pdev->dev,
			"FCoE is not enabled for this device\n");
		return -EPERM;
	}

	/* initialize the hardware for FCoE */
	ctxt->pf_num = hw->pf_id;
	ctxt->vf_num = 0;
	ctxt->uplink_seid = vsi->uplink_seid;
	ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	ctxt->flags = I40E_AQ_VSI_TYPE_PF;

	/* FCoE VSI would need the following sections */
	info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);

	/* FCoE VSI does not need these sections */
	info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
					    I40E_AQ_VSI_PROP_VLAN_VALID |
					    I40E_AQ_VSI_PROP_CAS_PV_VALID |
					    I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
					    I40E_AQ_VSI_PROP_EGRESS_UP_VALID));

	if (i40e_is_vsi_uplink_mode_veb(vsi)) {
		info->valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		info->switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	enabled_tc = i40e_get_fcoe_tc_map(pf);
	i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);

	/* set up queue option section: only enable FCoE */
	info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA;

	return 0;
}
/**
 * i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable,
 * indicating the upper FCoE protocol stack is ready to use FCoE
 * offload features.
 * @netdev: pointer to the netdev that FCoE is created on
 *
 * Called with RTNL held.
 *
 * Returns 0 on success
 **/
int i40e_fcoe_enable(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
		netdev_err(netdev, "HW does not support FCoE.\n");
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_FCOE) {
		netdev_err(netdev, "interface does not support FCoE.\n");
		return -EBUSY;
	}

	atomic_inc(&fcoe->refcnt);

	return 0;
}

/**
 * i40e_fcoe_disable - disables FCoE for upper FCoE protocol stack.
 * @netdev: pointer to the netdev that FCoE is created on
 *
 * Returns 0 on success
 **/
int i40e_fcoe_disable(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
		netdev_err(netdev, "device does not support FCoE\n");
		return -ENODEV;
	}
	if (vsi->type != I40E_VSI_FCOE)
		return -EBUSY;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		return -EINVAL;

	netdev_info(netdev, "FCoE disabled\n");

	return 0;
}
/**
 * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
 * @fcoe: the FCoE sw object
 * @dev: the device that the pool is associated with
 * @cpu: the cpu for this pool
 **/
static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe,
				    struct device *dev,
				    unsigned int cpu)
{
	struct i40e_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	if (!ddp_pool->pool) {
		dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu);
		return;
	}
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}

/**
 * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
 * @fcoe: the FCoE sw object
 * @dev: the device that the pool is associated with
 * @cpu: the cpu for this pool
 *
 * Returns 0 on success or non-zero on failure
 **/
static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct i40e_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	if (ddp_pool && ddp_pool->pool) {
		dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu);
		return 0;
	}
	snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu);
	pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX,
			       I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE);
	if (!pool) {
		dev_err(dev, "dma_pool_create %s failed\n", pool_name);
		return -ENOMEM;
	}
	ddp_pool->pool = pool;
	return 0;
}
/**
 * i40e_fcoe_free_ddp_resources - release FCoE DDP resources
 * @vsi: the vsi FCoE is associated with
 **/
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	int cpu, i;

	/* do nothing if not FCoE VSI */
	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
		i40e_fcoe_ddp_put(vsi->netdev, i);

	for_each_possible_cpu(cpu)
		i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu);

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;

	netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n",
		    vsi->id, vsi->seid);
}

/**
 * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources
 * @vsi: the VSI FCoE is associated with
 *
 * Returns 0 on success or non-zero on failure
 **/
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev = &pf->pdev->dev;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	unsigned int cpu;
	int i;

	if (vsi->type != I40E_VSI_FCOE)
		return -ENODEV;

	/* do nothing if DDP pools were already allocated */
	if (fcoe->ddp_pool)
		return -EEXIST;

	/* allocate per CPU memory to track DDP pools */
	fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool);
	if (!fcoe->ddp_pool) {
		dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n");
		return -ENOMEM;
	}

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu))
			continue;
		dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu);
		i40e_fcoe_free_ddp_resources(vsi);
		return -ENOMEM;
	}

	/* initialize the sw context */
	for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
		i40e_fcoe_ddp_clear(&fcoe->ddp[i]);

	netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n",
		    vsi->id, vsi->seid);

	return 0;
}
/**
 * i40e_fcoe_handle_status - check the Programming Status for FCoE
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor.
 * @prog_id: the id from the descriptor write-back, checked to be FCoE related
 *
 * Check if this is the Rx Programming Status descriptor write-back for FCoE.
 * This is used to verify if the context/filter programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct i40e_fcoe_ddp *ddp;
	u32 error;
	u16 xid;
	u64 qw;

	/* we only care for FCoE here */
	if (!i40e_fcoe_progid_is_fcoe(prog_id))
		return;
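
	/* I40E_FCOE_DDP_MAX is a power of two, so masking the fcoe_param
	 * field with (I40E_FCOE_DDP_MAX - 1) recovers the xid index of
	 * the DDP context this write-back refers to.
	 */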
	xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
	      (I40E_FCOE_DDP_MAX - 1);

	if (!i40e_fcoe_xid_is_valid(xid))
		return;

	ddp = &fcoe->ddp[xid];
	WARN_ON(xid != ddp->xid);

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	/* DDP context programming status: failure or success */
	if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) {
		if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) {
			dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n",
				xid, ddp->xid);
			ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT;
		}
		if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) {
			dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n",
				xid, ddp->xid);
			ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT;
		}
	}

	/* DDP context invalidation status: failure or success */
	if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) {
		if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) {
			dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n",
				xid, ddp->xid);
			ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT;
		}
		/* clear the flag so we can retry invalidation */
		clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags);
	}

	/* unmap DMA */
	i40e_fcoe_ddp_unmap(pf, ddp);
	i40e_fcoe_ddp_clear(ddp);
}
/**
 * i40e_fcoe_handle_offload - check ddp status and mark it done
 * @rx_ring: the Rx ring this frame was received on
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 **/
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct fc_frame_header *fh = NULL;
	struct i40e_fcoe_ddp *ddp = NULL;
	u32 status, fltstat;
	u32 error, fcerr;
	int rc = -EINVAL;
	u16 ptype;
	u16 xid;
	u64 qw;

	/* check this rxd is for programming status */
	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	/* packet descriptor, check packet type */
	ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	if (!i40e_rx_is_fcoe(ptype))
		goto out_no_ddp;

	error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
	fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) &
		I40E_RX_DESC_FCOE_ERROR_MASK;

	/* check stateless offload error */
	if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) {
		dev_err(&pf->pdev->dev, "Protocol Error\n");
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* check hw status on ddp */
	status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
	fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
		  I40E_RX_DESC_FLTSTAT_FCMASK;

	/* now we are ready to check DDP */
	fh = i40e_fcoe_fc_frame_header(skb);
	xid = i40e_fcoe_fc_get_xid(fh);
	if (!i40e_fcoe_xid_is_valid(xid))
		goto out_no_ddp;

	/* non DDP normal receive, return to the protocol stack */
	if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH)
		goto out_no_ddp;

	/* do we have a sw ddp context setup ? */
	ddp = &fcoe->ddp[xid];
	if (!ddp->sgl)
		goto out_no_ddp;

	/* fetch xid from hw rxd wb, which should match up the sw ctxt */
	xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id);
	if (ddp->xid != xid) {
		dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n",
			ddp->xid, xid);
		goto out_put_ddp;
	}

	/* the same exchange has already errored out */
	if (ddp->fcerr) {
		dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcerr 0x%x\n",
			xid, ddp->fcerr, fcerr);
		goto out_put_ddp;
	}

	/* fcoe param is valid by now with correct DDPed length */
	ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param);
	ddp->fcerr = fcerr;
	/* header posting only, useful only for target mode and debugging */
	if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) {
		/* For target mode, we get header of the last packet but it
		 * does not have the FCoE trailer field, i.e., CRC and EOF
		 * Ordered Set since they are offloaded by the HW, so fill
		 * it up correspondingly to allow the packet to pass through
		 * to the upper protocol stack.
		 */
		u32 f_ctl = ntoh24(fh->fh_f_ctl);

		if ((f_ctl & FC_FC_END_SEQ) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
			struct fcoe_crc_eof *crc = NULL;

			crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
			crc->fcoe_eof = FC_EOF_T;
		} else {
			/* otherwise, drop the header only frame */
			rc = 0;
			goto out_no_ddp;
		}
	}

out_put_ddp:
	/* either we got RSP or we have an error, unmap DMA in both cases */
	i40e_fcoe_ddp_unmap(pf, ddp);
	if (ddp->len && !ddp->fcerr) {
		int pkts;

		rc = ddp->len;
		i40e_fcoe_ddp_clear(ddp);
		ddp->len = rc;
		pkts = DIV_ROUND_UP(rc, 2048);
		rx_ring->stats.bytes += rc;
		rx_ring->stats.packets += pkts;
		rx_ring->q_vector->rx.total_bytes += rc;
		rx_ring->q_vector->rx.total_packets += pkts;
		set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags);
	}

out_no_ddp:
	return rc;
}
/**
 * i40e_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: indicates this is a DDP request for target
 *
 * Returns : 1 for success and 0 for no DDP on this I/O
 **/
static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
			       struct scatterlist *sgl, unsigned int sgc,
			       int target_mode)
{
	static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_fcoe_ddp_pool *ddp_pool;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	unsigned int i, j, dmacount;
	struct i40e_fcoe_ddp *ddp;
	unsigned int firstoff = 0;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	struct scatterlist *sg;
	dma_addr_t addr = 0;
	unsigned int len;

	if (xid >= I40E_FCOE_DDP_MAX) {
		dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_NEEDS_RESTART, &pf->state)) {
		dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n",
			 xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
			 xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	i40e_fcoe_ddp_clear(ddp);

	if (!fcoe->ddp_pool) {
		dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid);
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n",
			 sgl, sgc);
		goto out_noddp_unmap;
	}

	/* alloc the udl from our ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		dev_info(&pf->pdev->dev,
			 "Failed to allocate ddp context, xid 0x%x\n", xid);
		goto out_noddp_unmap;
	}
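
	/* Populate the user descriptor list (UDL): each entry holds the
	 * DMA address of one DDP buffer chunk of at most bufflen bytes,
	 * which the HW walks in order when placing the DDPed payload.
	 */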
	j = 0;
	ddp->len = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ddp->len += len;
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) {
				dev_info(&pf->pdev->dev,
					 "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n",
					 xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min_t(unsigned int, (bufflen - thisoff), len);
			/* all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;

			/* all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len)) &&
			    ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	ddp->lastsize = thisoff + thislen;
	ddp->firstoff = firstoff;
	ddp->list_len = j;
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;
	ddp->xid = xid;
	if (target_mode)
		set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
	set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags);

	put_cpu();
	return 1; /* Success */

out_noddp_free:
	/* ddp->pool is not assigned until the success path above, so free
	 * against the per-cpu pool the udl was actually allocated from
	 */
	dma_pool_free(ddp_pool->pool, ddp->udl, ddp->udp);
	i40e_fcoe_ddp_clear(ddp);

out_noddp_unmap:
	dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);

out_noddp:
	put_cpu();
	return 0;
}
/**
 * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 **/
static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid,
			     struct scatterlist *sgl, unsigned int sgc)
{
	return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * i40e_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 **/
static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc)
{
	return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
 * i40e_fcoe_program_ddp - programs the HW DDP related descriptors
 * @tx_ring: transmit ring for this packet
 * @skb: the packet to be sent out
 * @ddp: the SW DDP context for this DDP
 * @sof: the SOF to indicate class of service
 *
 * Determine if it is READ/WRITE command, and finds out if there is
 * a matching SW DDP context for this command. DDP is applicable
 * only in case of READ if initiator or WRITE in case of
 * responder (via checking XFER_RDY).
 *
 * Note: caller checks sof and ddp sw context
 *
 * Returns : none
 **/
static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,
				  struct sk_buff *skb,
				  struct i40e_fcoe_ddp *ddp, u8 sof)
{
	struct i40e_fcoe_filter_context_desc *filter_desc = NULL;
	struct i40e_fcoe_queue_context_desc *queue_desc = NULL;
	struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u16 i = tx_ring->next_to_use;
	struct fc_frame_header *fh;
	u64 flags_rsvd_lanq = 0;
	bool target_mode;

	/* check if abort is still pending */
	if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) {
		dev_warn(&pf->pdev->dev,
			 "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n",
			 ddp->xid, ddp->flags);
		return;
	}

	/* set the flag to indicate this is programmed */
	if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) {
		dev_warn(&pf->pdev->dev,
			 "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n",
			 ddp->xid, ddp->flags);
		return;
	}
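
	/* DDP programming consumes three consecutive descriptors on the
	 * Tx ring: a DDP context, a queue context, and a filter context
	 * descriptor, all advanced through next_to_use below.
	 */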
	/* Prepare the DDP context descriptor */
	ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;

	ddp_desc->type_cmd_foff_lsize =
				cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX |
				((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K <<
				I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) |
				((u64)ddp->firstoff <<
				I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) |
				((u64)ddp->lastsize <<
				I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT));
	ddp_desc->rsvd = cpu_to_le64(0);

	/* target mode needs last packet in the sequence */
	target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
	if (target_mode)
		ddp_desc->type_cmd_foff_lsize |=
			cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH);

	/* Prepare queue_context descriptor */
	queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
	if (i == tx_ring->count)
		i = 0;
	queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp));
	queue_desc->flen_tph = cpu_to_le64(ddp->list_len |
				((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC |
				I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) <<
				I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT));

	/* Prepare filter_context_desc */
	filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;

	fh = (struct fc_frame_header *)skb_transport_header(skb);
	filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset));
	filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt));
	filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid <<
				I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT);

	flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP;
	flags_rsvd_lanq |= (u64)(target_mode ?
			I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP :
			I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT);

	flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ?
			I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 :
			I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3);

	flags_rsvd_lanq |= ((u64)skb->queue_mapping <<
				I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT);
	filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq);

	/* By this time, all offload related descriptors have been programmed */
	tx_ring->next_to_use = i;
}
/**
 * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort
 * @tx_ring: transmit ring for this packet
 * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS
 * @ddp: the SW DDP context for this DDP
 *
 * Programs the Tx context descriptor to do DDP invalidation.
 **/
static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring,
				     struct sk_buff *skb,
				     struct i40e_fcoe_ddp *ddp)
{
	struct i40e_tx_context_desc *context_desc;
	int i;

	if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags))
		return;
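
	/* __I40E_FCOE_DDP_ABORTED (set above) guards against queueing a
	 * duplicate invalidation; i40e_fcoe_handle_status() clears it
	 * again once HW reports the invalidation result.
	 */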
	i = tx_ring->next_to_use;
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;

	context_desc->tunneling_params = cpu_to_le32(0);
	context_desc->l2tag2 = cpu_to_le16(0);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(
		I40E_TX_DESC_DTYPE_FCOE_CTX |
		(I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL <<
		I40E_TXD_CTX_QW1_CMD_SHIFT) |
		(I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND <<
		I40E_TXD_CTX_QW1_CMD_SHIFT));
	tx_ring->next_to_use = i;
}
/**
 * i40e_fcoe_handle_ddp - check if we should set up or invalidate DDP
 * @tx_ring: transmit ring for this packet
 * @skb: the packet to be sent out
 * @sof: the SOF to indicate class of service
 *
 * Determine if it is ABTS/READ/XFER_RDY, and finds out if there is
 * a matching SW DDP context for this command. DDP is applicable
 * only in case of READ if initiator or WRITE in case of
 * responder (via checking XFER_RDY). In case this is an ABTS,
 * just invalidate the context.
 **/
static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring,
				 struct sk_buff *skb, u8 sof)
{
	struct i40e_pf *pf = tx_ring->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct fc_frame_header *fh;
	struct i40e_fcoe_ddp *ddp;
	u32 f_ctl;
	u8 r_ctl;
	u16 xid;

	fh = (struct fc_frame_header *)skb_transport_header(skb);
	f_ctl = ntoh24(fh->fh_f_ctl);
	r_ctl = fh->fh_r_ctl;
	ddp = NULL;

	if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) {
		/* exchange responder? if so, XFER_RDY for write */
		xid = ntohs(fh->fh_rx_id);
		if (i40e_fcoe_xid_is_valid(xid)) {
			ddp = &fcoe->ddp[xid];
			if ((ddp->xid == xid) &&
			    (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
				i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
		}
	} else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) {
		/* exchange originator, check READ cmd */
		xid = ntohs(fh->fh_ox_id);
		if (i40e_fcoe_xid_is_valid(xid)) {
			ddp = &fcoe->ddp[xid];
			if ((ddp->xid == xid) &&
			    (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
				i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
		}
	} else if (r_ctl == FC_RCTL_BA_ABTS) {
		/* exchange originator, check ABTS */
		xid = ntohs(fh->fh_ox_id);
		if (i40e_fcoe_xid_is_valid(xid)) {
			ddp = &fcoe->ddp[xid];
			if ((ddp->xid == xid) &&
			    (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
				i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp);
		}
	}
}
/**
 * i40e_fcoe_tso - set up FCoE TSO
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @tx_flags: collected send information
 * @hdr_len: the tso header length
 * @sof: the SOF to indicate class of service
 *
 * Note: must already have sof checked to be either class 2 or class 3 before
 * calling this function.
 *
 * Returns 1 to indicate sequence segmentation offload is properly setup
 * or returns 0 to indicate no tso is needed, otherwise returns error
 * code to drop the frame.
 **/
static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
			 struct sk_buff *skb,
			 u32 tx_flags, u8 *hdr_len, u8 sof)
{
	struct i40e_tx_context_desc *context_desc;
	u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
	struct fc_frame_header *fh;
	u64 cd_type_cmd_tso_mss;

	/* must match gso type as FCoE */
	if (!skb_is_gso(skb))
		return 0;

	/* is it the expected gso type for FCoE ? */
	if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
		netdev_err(skb->dev,
			   "wrong gso type %d:expecting SKB_GSO_FCOE\n",
			   skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* header and trailer are inserted by hw */
	*hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
		   sizeof(struct fcoe_crc_eof);

	/* check sof to decide a class 2 or 3 TSO */
	if (likely(i40e_fcoe_sof_is_class3(sof)))
		cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
	else
		cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2;

	/* param field valid? */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF;

	/* fill the field values */
	cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	cd_type_cmd_tso_mss =
		((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
		((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
		((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
		((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
	tx_ring->next_to_use++;
	if (tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	context_desc->tunneling_params = 0;
	context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK)
					   >> I40E_TX_FLAGS_VLAN_SHIFT);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);

	return 1;
}
/**
 * i40e_fcoe_tx_map - build the tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: the size of the packet header
 * @eof: the frame eof value
 *
 * Note, for FCoE, sof and eof are already checked
 **/
static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
			     struct sk_buff *skb,
			     struct i40e_tx_buffer *first,
			     u32 tx_flags, u8 hdr_len, u8 eof)
{
	u32 td_offset = 0;
	u32 td_cmd = 0;
	u32 maclen;

	/* insert CRC */
	td_cmd = I40E_TX_DESC_CMD_ICRC;

	/* setup MACLEN */
	maclen = skb_network_offset(skb);
	if (tx_flags & I40E_TX_FLAGS_SW_VLAN)
		maclen += sizeof(struct vlan_hdr);

	if (skb->protocol == htons(ETH_P_FCOE)) {
		/* for FCoE, maclen should exclude ether type */
		maclen -= 2;
		/* setup type as FCoE and EOF insertion */
		td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof));
		/* setup FCoELEN and FCLEN */
		td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) <<
				I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
				((sizeof(struct fc_frame_header) >> 2) <<
				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT));
		/* trim to exclude trailer */
		pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof));
	}

	/* MACLEN is ether header length in words not bytes */
	td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
}
/**
 * i40e_fcoe_set_skb_header - adjust skb header pointers for FIP/FCoE/FC
 * @skb: the skb to be adjusted
 *
 * Returns 0 if this skb is an FCoE/FIP or VLAN-carried FCoE/FIP frame,
 * after adjusting the skb header pointers correspondingly. Otherwise,
 * returns -EINVAL.
 **/
static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb);

		protocol = veth->h_vlan_encapsulated_proto;
		skb->mac_len += sizeof(struct vlan_hdr);
	}

	/* FCoE or FIP only */
	if ((protocol != htons(ETH_P_FIP)) &&
	    (protocol != htons(ETH_P_FCOE)))
		return -EINVAL;

	/* set header to L2 of FCoE/FIP */
	skb_set_network_header(skb, skb->mac_len);
	if (protocol == htons(ETH_P_FIP))
		return 0;

	/* set header to L3 of FC */
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
	return 0;
}
/**
 * i40e_fcoe_xmit_frame - transmit buffer
 * @skb: send buffer
 * @netdev: the fcoe netdev
 *
 * Returns 0 if sent, else an error code
 **/
static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
					struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(skb->dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
	struct i40e_tx_buffer *first;
	u32 tx_flags = 0;
	int fso, count;
	u8 hdr_len = 0;
	u8 sof = 0;
	u8 eof = 0;

	if (i40e_fcoe_set_skb_header(skb))
		goto out_drop;

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 * + 4 desc gap to avoid the cache line where head is,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* FIP is regular L2 traffic w/o offload */
	if (skb->protocol == htons(ETH_P_FIP))
		goto out_send;

	/* check sof and eof, only supports FC Class 2 or 3 */
	if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) {
		netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof);
		goto out_drop;
	}

	/* always do FCCRC for FCoE */
	tx_flags |= I40E_TX_FLAGS_FCCRC;

	/* check if we should do sequence offload */
	fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
	if (fso < 0)
		goto out_drop;
	else if (fso)
		tx_flags |= I40E_TX_FLAGS_FSO;
	else
		i40e_fcoe_handle_ddp(tx_ring, skb, sof);

out_send:
	/* send out the packet */
	i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof);

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns error as the operation is not permitted
 **/
static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
{
	netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n");
	return -EPERM;
}

/**
 * i40e_fcoe_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_fcoe_set_features(struct net_device *netdev,
				  netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	return 0;
}

static const struct net_device_ops i40e_fcoe_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_fcoe_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
	.ndo_setup_tc		= __i40e_setup_tc,

#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_start_xmit		= i40e_fcoe_xmit_frame,
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
	.ndo_fcoe_ddp_setup	= i40e_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= i40e_fcoe_ddp_put,
	.ndo_fcoe_ddp_target	= i40e_fcoe_ddp_target,
	.ndo_set_features	= i40e_fcoe_set_features,
};
/* fcoe network device type */
static struct device_type fcoe_netdev_type = {
	.name = "fcoe",
};

/**
 * i40e_fcoe_config_netdev - prepares the netdev for the FCoE VSI
 * @netdev: the associated FCoE netdev
 * @vsi: pointer to the associated VSI struct
 **/
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_pf *pf = vsi->back;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	netdev->features = (NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER);

	netdev->vlan_features = netdev->features;
	netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_RX |
				   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1;
	netdev->features |= NETIF_F_ALL_FCOE;
	netdev->vlan_features |= NETIF_F_ALL_FCOE;
	netdev->hw_features |= netdev->features;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	strlcpy(netdev->name, "fcoe%d", IFNAMSIZ - 1);
	netdev->mtu = FCOE_MTU;
	SET_NETDEV_DEV(netdev, &pf->pdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type);
	/* set different dev_port value 1 for FCoE netdev than the default
	 * zero dev_port value for PF netdev, this helps biosdevname user
	 * tool to differentiate them correctly while both attached to the
	 * same PCI function.
	 */
	netdev->dev_port = 1;
	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
	i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
	i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
	i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* use san mac */
	ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.san_addr);
	/* fcoe netdev ops */
	netdev->netdev_ops = &i40e_fcoe_netdev_ops;
}
/**
 * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
 * @pf: the PF that VSI is associated with
 **/
void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	u16 seid;
	int i;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
		return;

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		vsi = pf->vsi[i];
		if (vsi && vsi->type == I40E_VSI_FCOE) {
			dev_warn(&pf->pdev->dev,
				 "FCoE VSI already created\n");
			return;
		}
	}

	seid = pf->vsi[pf->lan_vsi]->seid;
	vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
	if (vsi) {
		dev_dbg(&pf->pdev->dev,
			"Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n",
			vsi->seid, vsi->id, vsi->uplink_seid, seid);
	} else {
		dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
	}
}