fnic_fcs.c
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);

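/*
 * fnic_handle_link() - work function for link-event notifications.
 * Reads the new link state and link-down count from the firmware,
 * compares them with the previous values, bumps the link failure
 * statistic where appropriate, and informs the FCoE controller of
 * link up/down transitions.
 */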
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		skb_pull(skb, sizeof(*eh));
		fcoe_ctlr_recv(&fnic->ctlr, skb);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;

	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (!compare_ether_addr(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (compare_ether_addr(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

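/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive completion handler.
 * Unmaps the receive buffer, decodes the completion descriptor (FCP or
 * Ethernet type), drops frames with FCS/CRC/encapsulation errors, and
 * queues good FC frames for fnic_handle_frame() to pass up to libFC.
 */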
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

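/*
 * fnic_rq_cmpl_handler_cont() - vnic_cq_service() callback that hands each
 * completed RQ descriptor to fnic_rq_cmpl_frame_recv().
 */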
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

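/*
 * fnic_rq_cmpl_handler() - service receive-queue completions, up to
 * rq_work_to_do per queue, replenishing RQ buffers as they are consumed.
 * Returns the total number of completions processed.
 */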
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}

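/*
 * fnic_free_rq_buf() - unmap and free an RQ buffer, e.g. when the
 * receive queue is cleaned up.
 */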
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       fnic->vlan_hw_insert, fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

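/*
 * fnic_wq_complete_frame_send() - transmit completion for one WQ buffer;
 * unmaps the DMA buffer and frees the sent frame.
 */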
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

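/*
 * fnic_wq_cmpl_handler_cont() - vnic_cq_service() callback that services
 * one work-queue completion under the corresponding WQ lock.
 */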
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

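/*
 * fnic_wq_cmpl_handler() - service transmit completions on the raw work
 * queues, up to work_to_do per queue; returns the number processed.
 */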
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

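/*
 * fnic_free_wq_buf() - unmap and free a WQ buffer, e.g. when the work
 * queue is cleaned up.
 */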
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}