fnic_fcs.c

/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
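
/**
 * fnic_handle_link() - handle a firmware link-change notification.
 * @work: the fnic link_work entry.
 *
 * Reads the current link state from the device, compares it against the
 * cached state, and drives the FCoE controller (and, on FIP-capable
 * firmware, FIP VLAN discovery) through the corresponding up/down
 * transition.
 */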
void fnic_handle_link(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, link_work);
        unsigned long flags;
        int old_link_status;
        u32 old_link_down_cnt;

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        old_link_down_cnt = fnic->link_down_cnt;
        old_link_status = fnic->link_status;
        fnic->link_status = vnic_dev_link_status(fnic->vdev);
        fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

        if (old_link_status == fnic->link_status) {
                if (!fnic->link_status) {
                        /* DOWN -> DOWN */
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        fnic_fc_trace_set_data(fnic->lport->host->host_no,
                                FNIC_FC_LE, "Link Status: DOWN->DOWN",
                                strlen("Link Status: DOWN->DOWN"));
                } else {
                        if (old_link_down_cnt != fnic->link_down_cnt) {
                                /* UP -> DOWN -> UP */
                                fnic->lport->host_stats.link_failure_count++;
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                fnic_fc_trace_set_data(
                                        fnic->lport->host->host_no,
                                        FNIC_FC_LE,
                                        "Link Status:UP_DOWN_UP",
                                        strlen("Link Status:UP_DOWN_UP"));
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                                        /* start FCoE VLAN discovery */
                                        fnic_fc_trace_set_data(
                                                fnic->lport->host->host_no,
                                                FNIC_FC_LE,
                                                "Link Status: UP_DOWN_UP_VLAN",
                                                strlen("Link Status: UP_DOWN_UP_VLAN"));
                                        fnic_fcoe_send_vlan_req(fnic);
                                        return;
                                }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
                        } else {
                                /* UP -> UP */
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                fnic_fc_trace_set_data(
                                        fnic->lport->host->host_no, FNIC_FC_LE,
                                        "Link Status: UP_UP",
                                        strlen("Link Status: UP_UP"));
                        }
                }
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        /* start FCoE VLAN discovery */
                        fnic_fc_trace_set_data(
                                fnic->lport->host->host_no,
                                FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
                                strlen("Link Status: DOWN_UP_VLAN"));
                        fnic_fcoe_send_vlan_req(fnic);
                        return;
                }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
                        "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
                /* UP -> DOWN */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
                fnic_fc_trace_set_data(
                        fnic->lport->host->host_no, FNIC_FC_LE,
                        "Link Status: UP_DOWN",
                        strlen("Link Status: UP_DOWN"));
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "deleting fip-timer during link-down\n");
                        del_timer_sync(&fnic->fip_timer);
                }
                fcoe_ctlr_link_down(&fnic->ctlr);
        }
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, frame_work);
        struct fc_lport *lp = fnic->lport;
        unsigned long flags;
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->frame_queue))) {

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                fp = (struct fc_frame *)skb;

                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fc_exch_recv(lp, fp);
        }
}
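
/**
 * fnic_fcoe_evlist_free() - free all queued fnic events.
 * @fnic: fnic instance.
 *
 * Empties fnic->evlist under fnic_lock without processing the events.
 */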
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
        struct fnic_event *fevt = NULL;
        struct fnic_event *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (list_empty(&fnic->evlist)) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
                list_del(&fevt->list);
                kfree(fevt);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
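
/**
 * fnic_handle_event() - process queued fnic events.
 * @work: the fnic event_work entry.
 *
 * Drains fnic->evlist, dispatching VLAN discovery and FCF discovery
 * requests. The list is left untouched while the fnic is in a
 * transitional state.
 */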
void fnic_handle_event(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, event_work);
        struct fnic_event *fevt = NULL;
        struct fnic_event *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (list_empty(&fnic->evlist)) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
                if (fnic->stop_rx_link_events) {
                        list_del(&fevt->list);
                        kfree(fevt);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }

                list_del(&fevt->list);
                switch (fevt->event) {
                case FNIC_EVT_START_VLAN_DISC:
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        fnic_fcoe_send_vlan_req(fnic);
                        spin_lock_irqsave(&fnic->fnic_lock, flags);
                        break;
                case FNIC_EVT_START_FCF_DISC:
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Start FCF Discovery\n");
                        fnic_fcoe_start_fcf_disc(fnic);
                        break;
                default:
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Unknown event 0x%x\n", fevt->event);
                        break;
                }
                kfree(fevt);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame is a reject.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is a FLOGI reject (LS_RJT), for example a
 * reject carrying an unsupported-command reason code and an
 * insufficient-resources ELS explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
                                           struct sk_buff *skb)
{
        struct fc_lport *lport = fip->lp;
        struct fip_header *fiph;
        struct fc_frame_header *fh = NULL;
        struct fip_desc *desc;
        struct fip_encaps *els;
        enum fip_desc_type els_dtype = 0;
        u16 op;
        u8 els_op;
        u8 sub;
        size_t els_len = 0;
        size_t rlen;
        size_t dlen = 0;

        if (skb_linearize(skb))
                return 0;

        if (skb->len < sizeof(*fiph))
                return 0;

        fiph = (struct fip_header *)skb->data;
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;

        if (op != FIP_OP_LS)
                return 0;

        if (sub != FIP_SC_REP)
                return 0;

        rlen = ntohs(fiph->fip_dl_len) * 4;
        if (rlen + sizeof(*fiph) > skb->len)
                return 0;

        desc = (struct fip_desc *)(fiph + 1);
        dlen = desc->fip_dlen * FIP_BPW;

        if (desc->fip_dtype == FIP_DT_FLOGI) {

                if (dlen < sizeof(*els) + sizeof(*fh) + 1)
                        return 0;

                els_len = dlen - sizeof(*els);
                els = (struct fip_encaps *)desc;
                fh = (struct fc_frame_header *)(els + 1);
                els_dtype = desc->fip_dtype;

                if (!fh)
                        return 0;

                /*
                 * The ELS command code, reason and explanation should be
                 * Reject, unsupported command and insufficient resource,
                 * respectively.
                 */
                els_op = *(u8 *)(fh + 1);
                if (els_op == ELS_LS_RJT) {
                        shost_printk(KERN_INFO, lport->host,
                                     "Flogi Request Rejected by Switch\n");
                        return 1;
                }
                shost_printk(KERN_INFO, lport->host,
                             "Flogi Request Accepted by Switch\n");
        }
        return 0;
}
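
/**
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Clears any previously discovered VLANs, builds a FIP VLAN request
 * addressed to the ALL-FCF-MACs group with our MAC and WWNN descriptors,
 * transmits it, and arms the FIP timer for a retry.
 */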
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct sk_buff *skb;
        char *eth_fr;
        int fr_len;
        struct fip_vlan *vlan;
        u64 vlan_tov;

        fnic_fcoe_reset_vlans(fnic);
        fnic->set_vlan(fnic, 0);
        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Sending VLAN request...\n");
        skb = dev_alloc_skb(sizeof(struct fip_vlan));
        if (!skb)
                return;

        fr_len = sizeof(*vlan);
        eth_fr = (char *)skb->data;
        vlan = (struct fip_vlan *)eth_fr;

        memset(vlan, 0, sizeof(*vlan));
        memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
        memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
        vlan->eth.h_proto = htons(ETH_P_FIP);

        vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
        vlan->fip.fip_op = htons(FIP_OP_VLAN);
        vlan->fip.fip_subcode = FIP_SC_VL_REQ;
        vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

        vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
        vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
        memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

        vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
        vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
        put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
        atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

        skb_put(skb, sizeof(*vlan));
        skb->protocol = htons(ETH_P_FIP);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);

        /* set a timer so that we can retry if there is no response */
        vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
        mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
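
/**
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN discovery response.
 * @fnic: fnic instance.
 * @skb: received frame with the FIP header at skb->data.
 *
 * Collects the VLAN descriptors into fnic->vlans, selects the first one,
 * and starts FCF solicitation on it. If no usable VLAN is found, the
 * request is retried from the FIP timer.
 */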
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fip_header *fiph;
        struct fip_desc *desc;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u16 vid;
        size_t rlen;
        size_t dlen;
        struct fcoe_vlan *vlan;
        u64 sol_time;
        unsigned long flags;

        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Received VLAN response...\n");

        fiph = (struct fip_header *)skb->data;

        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
                     ntohs(fiph->fip_op), fiph->fip_subcode);

        rlen = ntohs(fiph->fip_dl_len) * 4;
        fnic_fcoe_reset_vlans(fnic);
        spin_lock_irqsave(&fnic->vlans_lock, flags);
        desc = (struct fip_desc *)(fiph + 1);
        while (rlen > 0) {
                dlen = desc->fip_dlen * FIP_BPW;
                switch (desc->fip_dtype) {
                case FIP_DT_VLAN:
                        vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
                        shost_printk(KERN_INFO, fnic->lport->host,
                                     "process_vlan_resp: FIP VLAN %d\n", vid);
                        vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
                        if (!vlan) {
                                /* retry from timer */
                                spin_unlock_irqrestore(&fnic->vlans_lock,
                                                       flags);
                                goto out;
                        }
                        memset(vlan, 0, sizeof(struct fcoe_vlan));
                        vlan->vid = vid & 0x0fff;
                        vlan->state = FIP_VLAN_AVAIL;
                        list_add_tail(&vlan->list, &fnic->vlans);
                        break;
                }
                desc = (struct fip_desc *)((char *)desc + dlen);
                rlen -= dlen;
        }

        /* any VLAN descriptors present? */
        if (list_empty(&fnic->vlans)) {
                /* retry from timer */
                atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                             "No VLAN descriptors in FIP VLAN response\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                goto out;
        }

        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        fnic->set_vlan(fnic, vlan->vid);
        vlan->state = FIP_VLAN_SENT; /* sent now */
        vlan->sol_count++;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);

        /* start the solicitation */
        fcoe_ctlr_link_up(fip);

        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
        return;
}
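
/**
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the current VLAN.
 * @fnic: fnic instance.
 *
 * Programs the first discovered VLAN into the hardware, marks it as sent,
 * brings the FCoE controller link up to begin solicitation, and arms the
 * FIP timer.
 */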
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        u64 sol_time;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        fnic->set_vlan(fnic, vlan->vid);
        vlan->state = FIP_VLAN_SENT; /* sent now */
        vlan->sol_count = 1;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);

        /* start the solicitation */
        fcoe_ctlr_link_up(&fnic->ctlr);

        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
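
/**
 * fnic_fcoe_vlan_check() - validate the VLAN when an FCF advertisement arrives.
 * @fnic: fnic instance.
 * @flag: FIP flags from the received advertisement (currently unused).
 *
 * Returns 0 if the first VLAN in the list is usable, promoting it from
 * SENT to USED on first use; returns -EINVAL otherwise.
 */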
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
        unsigned long flags;
        struct fcoe_vlan *fvlan;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (list_empty(&fnic->vlans)) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return -EINVAL;
        }

        fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        if (fvlan->state == FIP_VLAN_USED) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return 0;
        }

        if (fvlan->state == FIP_VLAN_SENT) {
                fvlan->state = FIP_VLAN_USED;
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        return -EINVAL;
}
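
/**
 * fnic_event_enq() - queue an event for fnic_handle_event().
 * @fnic: fnic instance.
 * @ev: event to queue.
 *
 * Uses GFP_ATOMIC so it is safe in atomic context; the event is silently
 * dropped if the allocation fails.
 */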
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
        struct fnic_event *fevt;
        unsigned long flags;

        fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
        if (!fevt)
                return;

        fevt->fnic = fnic;
        fevt->event = ev;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        list_add_tail(&fevt->list, &fnic->evlist);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        schedule_work(&fnic->event_work);
}
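
/**
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 * @fnic: fnic instance.
 * @skb: received frame with the FIP header at skb->data.
 *
 * Consumes VLAN discovery responses and screens FCF advertisements
 * against the discovered VLAN list. Returns 0 when the frame was
 * consumed here, a negative value on a bad skb, and a positive value
 * otherwise, in which case the caller passes the frame on to libfcoe.
 */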
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
        struct fip_header *fiph;
        int ret = 1;
        u16 op;
        u8 sub;

        if (!skb || !(skb->data))
                return -1;

        if (skb_linearize(skb))
                goto drop;

        fiph = (struct fip_header *)skb->data;
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;

        if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
                goto drop;

        if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
                goto drop;

        if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
                if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
                        goto drop;
                /* pass it on to fcoe */
                ret = 1;
        } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
                /* set the vlan as used */
                fnic_fcoe_process_vlan_resp(fnic, skb);
                ret = 0;
        } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
                /* received CVL request, restart vlan disc */
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                /* pass it on to fcoe */
                ret = 1;
        }
drop:
        return ret;
}
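
/**
 * fnic_handle_fip_frame() - work handler for the FIP frame queue.
 * @work: the fnic fip_frame_work entry.
 *
 * Dequeues received FIP frames, lets fnic_fcoe_handle_fip_frame()
 * pre-process them, restarts VLAN discovery on FLOGI rejects, and hands
 * the remaining frames to libfcoe via fcoe_ctlr_recv().
 */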
void fnic_handle_fip_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        struct sk_buff *skb;
        struct ethhdr *eh;

        while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->fip_frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                eh = (struct ethhdr *)skb->data;
                if (eh->h_proto == htons(ETH_P_FIP)) {
                        skb_pull(skb, sizeof(*eh));
                        if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
                                dev_kfree_skb(skb);
                                continue;
                        }
                        /*
                         * If there are FLOGI rejects, clear all
                         * FCFs and restart from scratch.
                         */
                        if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
                                atomic64_inc(
                                        &fnic_stats->vlan_stats.flogi_rejects);
                                shost_printk(KERN_INFO, fnic->lport->host,
                                         "Trigger a Link down - VLAN Disc\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                /* start FCoE VLAN discovery */
                                fnic_fcoe_send_vlan_req(fnic);
                                dev_kfree_skb(skb);
                                continue;
                        }
                        fcoe_ctlr_recv(&fnic->ctlr, skb);
                        continue;
                }
        }
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
        struct fc_frame *fp;
        struct ethhdr *eh;
        struct fcoe_hdr *fcoe_hdr;
        struct fcoe_crc_eof *ft;

        /*
         * Undo VLAN encapsulation if present.
         */
        eh = (struct ethhdr *)skb->data;
        if (eh->h_proto == htons(ETH_P_8021Q)) {
                memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
                eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
                if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
                        printk(KERN_ERR "Dropped FIP frame, as firmware "
                                        "uses non-FIP mode, Enable FIP "
                                        "using UCSM\n");
                        goto drop;
                }
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
                skb_queue_tail(&fnic->fip_frame_queue, skb);
                queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
                goto drop;
        skb_set_network_header(skb, sizeof(*eh));
        skb_pull(skb, sizeof(*eh));

        fcoe_hdr = (struct fcoe_hdr *)skb->data;
        if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
                goto drop;

        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_sof(fp) = fcoe_hdr->fcoe_sof;
        skb_pull(skb, sizeof(struct fcoe_hdr));
        skb_reset_transport_header(skb);

        ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
        fr_eof(fp) = ft->fcoe_eof;
        skb_trim(skb, skb->len - sizeof(*ft));
        return 0;
drop:
        dev_kfree_skb_irq(skb);
        return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
        u8 *ctl = fnic->ctlr.ctl_src_addr;
        u8 *data = fnic->data_src_addr;

        if (is_zero_ether_addr(new))
                new = ctl;
        if (ether_addr_equal(data, new))
                return;
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
        if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
                vnic_dev_del_addr(fnic->vdev, data);
        memcpy(data, new, ETH_ALEN);
        if (!ether_addr_equal(new, ctl))
                vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
        struct fnic *fnic = lport_priv(lport);

        spin_lock_irq(&fnic->fnic_lock);
        fnic_update_mac_locked(fnic, new);
        spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lport);
        u8 *mac;
        int ret;

        FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
                     port_id, fp);

        /*
         * If we're clearing the FC_ID, change to use the ctl_src_addr.
         * Set ethernet mode to send FLOGI.
         */
        if (!port_id) {
                fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
                fnic_set_eth_mode(fnic);
                return;
        }

        if (fp) {
                mac = fr_cb(fp)->granted_mac;
                if (is_zero_ether_addr(mac)) {
                        /* non-FIP - FLOGI already accepted - ignore return */
                        fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
                }
                fnic_update_mac(lport, mac);
        }

        /* Change state to reflect transition to FC mode */
        spin_lock_irq(&fnic->fnic_lock);
        if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
                fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
        else {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unexpected fnic state %s while"
                             " processing flogi resp\n",
                             fnic_state_to_str(fnic->state));
                spin_unlock_irq(&fnic->fnic_lock);
                return;
        }
        spin_unlock_irq(&fnic->fnic_lock);

        /*
         * Send FLOGI registration to firmware to set up FC mode.
         * The new address will be set up when registration completes.
         */
        ret = fnic_flogi_reg_handler(fnic, port_id);

        if (ret < 0) {
                spin_lock_irq(&fnic->fnic_lock);
                if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
                        fnic->state = FNIC_IN_ETH_MODE;
                spin_unlock_irq(&fnic->fnic_lock);
        }
}
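
/**
 * fnic_rq_cmpl_frame_recv() - process one received-frame completion.
 * @rq: receive queue the buffer was posted on.
 * @cq_desc: completion descriptor.
 * @buf: receive buffer holding the frame.
 * @skipped: unused.
 * @opaque: unused.
 *
 * Unmaps and decodes the completed buffer (FCP or Ethernet completion
 * type), diverts Ethernet-type frames through fnic_import_rq_eth_pkt(),
 * checks FCS/CRC and FCoE encapsulation errors, and queues good FC
 * frames for fnic_handle_frame().
 */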
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
                                    *cq_desc, struct vnic_rq_buf *buf,
                                    int skipped __attribute__((unused)),
                                    void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        struct fc_frame *fp;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned int eth_hdrs_stripped;
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe = 0, fcoe_sof, fcoe_eof;
        u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
        u8 fcs_ok = 1, packet_error = 0;
        u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
        u32 rss_hash;
        u16 exchange_id, tmpl;
        u8 sof = 0;
        u8 eof = 0;
        u32 fcp_bytes_written = 0;
        unsigned long flags;

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);
        skb = buf->os_buf;
        fp = (struct fc_frame *)skb;
        buf->os_buf = NULL;

        cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
        if (type == CQ_DESC_TYPE_RQ_FCP) {
                cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
                                   &type, &color, &q_number, &completed_index,
                                   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
                                   &tmpl, &fcp_bytes_written, &sof, &eof,
                                   &ingress_port, &packet_error,
                                   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
                                   &vlan);
                eth_hdrs_stripped = 1;
                skb_trim(skb, fcp_bytes_written);
                fr_sof(fp) = sof;
                fr_eof(fp) = eof;

        } else if (type == CQ_DESC_TYPE_RQ_ENET) {
                cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                                    &type, &color, &q_number, &completed_index,
                                    &ingress_port, &fcoe, &eop, &sop,
                                    &rss_type, &csum_not_calc, &rss_hash,
                                    &bytes_written, &packet_error,
                                    &vlan_stripped, &vlan, &checksum,
                                    &fcoe_sof, &fcoe_fc_crc_ok,
                                    &fcoe_enc_error, &fcoe_eof,
                                    &tcp_udp_csum_ok, &udp, &tcp,
                                    &ipv4_csum_ok, &ipv6, &ipv4,
                                    &ipv4_fragment, &fcs_ok);
                eth_hdrs_stripped = 0;
                skb_trim(skb, bytes_written);
                if (!fcs_ok) {
                        atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "fcs error. dropping packet.\n");
                        goto drop;
                }
                if (fnic_import_rq_eth_pkt(fnic, skb))
                        return;

        } else {
                /* wrong CQ type */
                shost_printk(KERN_ERR, fnic->lport->host,
                             "fnic rq_cmpl wrong cq type x%x\n", type);
                goto drop;
        }

        if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
                atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "fnic rq_cmpl fcoe x%x fcsok x%x"
                             " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
                             " x%x\n",
                             fcoe, fcs_ok, packet_error,
                             fcoe_fc_crc_ok, fcoe_enc_error);
                goto drop;
        }

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                goto drop;
        }
        fr_dev(fp) = fnic->lport;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
                                    (char *)skb->data, skb->len)) != 0) {
                printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }

        skb_queue_tail(&fnic->frame_queue, skb);
        queue_work(fnic_event_queue, &fnic->frame_work);

        return;
drop:
        dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);

        vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
                        VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
                        NULL);
        return 0;
}
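
/**
 * fnic_rq_cmpl_handler() - service receive-queue completions.
 * @fnic: fnic instance.
 * @rq_work_to_do: completion budget per receive queue.
 *
 * Returns the total number of completions processed. Receive buffers
 * are replenished on any queue where work was done.
 */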
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
        unsigned int tot_rq_work_done = 0, cur_work_done;
        unsigned int i;
        int err;

        for (i = 0; i < fnic->rq_count; i++) {
                cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
                                                fnic_rq_cmpl_handler_cont,
                                                NULL);
                if (cur_work_done) {
                        err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
                        if (err)
                                shost_printk(KERN_ERR, fnic->lport->host,
                                             "fnic_alloc_rq_frame can't alloc"
                                             " frame\n");
                }
                tot_rq_work_done += cur_work_done;
        }

        return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        u16 len;
        dma_addr_t pa;

        len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
        skb = dev_alloc_skb(len);
        if (!skb) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unable to allocate RQ sk_buff\n");
                return -ENOMEM;
        }
        skb_reset_mac_header(skb);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        skb_put(skb, len);
        pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
        fnic_queue_rq_desc(rq, skb, pa, len);
        return 0;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(rq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
        struct fnic *fnic = fnic_from_ctlr(fip);
        struct vnic_wq *wq = &fnic->wq[0];
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        unsigned long flags;

        if (!fnic->vlan_hw_insert) {
                eth_hdr = (struct ethhdr *)skb_mac_header(skb);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
                                sizeof(*vlan_hdr) - sizeof(*eth_hdr));
                memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
        } else {
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
        }

        pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

        spin_lock_irqsave(&fnic->wq_lock[0], flags);
        if (!vnic_wq_desc_avail(wq)) {
                pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
                spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
                kfree_skb(skb);
                return;
        }

        fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
                               0 /* hw inserts cos value */,
                               fnic->vlan_id, 1);
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
        struct vnic_wq *wq = &fnic->wq[0];
        struct sk_buff *skb;
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        struct fcoe_hdr *fcoe_hdr;
        struct fc_frame_header *fh;
        u32 tot_len, eth_hdr_len;
        int ret = 0;
        unsigned long flags;

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
            fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
                return 0;

        if (!fnic->vlan_hw_insert) {
                eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr = (struct ethhdr *)vlan_hdr;
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
        } else {
                eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
                eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr->h_proto = htons(ETH_P_FCOE);
                fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
        }

        if (fnic->ctlr.map_dest)
                fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
        else
                memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
        memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

        tot_len = skb->len;
        BUG_ON(tot_len % 4);

        memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
        fcoe_hdr->fcoe_sof = fr_sof(fp);
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

        pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
                                    (char *)eth_hdr, tot_len)) != 0) {
                printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }

        spin_lock_irqsave(&fnic->wq_lock[0], flags);

        if (!vnic_wq_desc_avail(wq)) {
                pci_unmap_single(fnic->pdev, pa,
                                 tot_len, PCI_DMA_TODEVICE);
                ret = -1;
                goto fnic_send_frame_end;
        }

        fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
                           0 /* hw inserts cos value */,
                           fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

        if (ret)
                dev_kfree_skb_any(fp_skb(fp));

        return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lp);
        unsigned long flags;

        if (fnic->in_remove) {
                dev_kfree_skb(fp_skb(fp));
                return -1;
        }

        /*
         * Queue frame if in a transitional state.
         * This occurs while registering the Port_ID / MAC address after FLOGI.
         */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
                skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->tx_queue))) {
                fp = (struct fc_frame *)skb;
                fnic_send_frame(fnic, fp);
        }
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
        unsigned long flags;
        enum fnic_state old_state;
        int ret;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
        old_state = fnic->state;
        switch (old_state) {
        case FNIC_IN_FC_MODE:
        case FNIC_IN_ETH_TRANS_FC_MODE:
        default:
                fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                ret = fnic_fw_reset_handler(fnic);

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
                        goto again;
                if (ret)
                        fnic->state = old_state;
                break;

        case FNIC_IN_FC_TRANS_ETH_MODE:
        case FNIC_IN_ETH_MODE:
                break;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
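
/**
 * fnic_wq_complete_frame_send() - clean up a transmitted frame.
 * @wq: work queue the frame was sent on.
 * @cq_desc: completion descriptor.
 * @buf: work-queue buffer of the transmitted frame.
 * @opaque: unused.
 *
 * Unmaps the DMA buffer and frees the skb once the hardware is done
 * with it.
 */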
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
                                        struct cq_desc *cq_desc,
                                        struct vnic_wq_buf *buf, void *opaque)
{
        struct sk_buff *skb = buf->os_buf;
        struct fc_frame *fp = (struct fc_frame *)skb;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(fp_skb(fp));
        buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
        vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
                        fnic_wq_complete_frame_send, NULL);
        spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

        return 0;
}
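
/**
 * fnic_wq_cmpl_handler() - service transmit work-queue completions.
 * @fnic: fnic instance.
 * @work_to_do: completion budget per work queue.
 *
 * Returns the total number of completions processed.
 */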
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i;

        for (i = 0; i < fnic->raw_wq_count; i++) {
                wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
                                                work_to_do,
                                                fnic_wq_cmpl_handler_cont,
                                                NULL);
        }

        return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}
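
/**
 * fnic_fcoe_reset_vlans() - free the list of discovered VLANs.
 * @fnic: fnic instance.
 */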
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        struct fcoe_vlan *next;

        /*
         * indicate a link down to fcoe so that all fcfs are freed;
         * this might not be required since we did it before sending
         * the vlan discovery request
         */
        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (!list_empty(&fnic->vlans)) {
                list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
                        list_del(&vlan->list);
                        kfree(vlan);
                }
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
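
/**
 * fnic_handle_fip_timer() - service a FIP timeout.
 * @fnic: fnic instance.
 *
 * Restarts VLAN discovery when no VLANs have been learned, and
 * re-solicits on the current VLAN until FCOE_CTLR_MAX_SOL attempts are
 * exhausted, at which point the VLAN is dropped and the next one tried.
 */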
void fnic_handle_fip_timer(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u64 sol_time;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        if (fnic->ctlr.mode == FIP_ST_NON_FIP)
                return;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (list_empty(&fnic->vlans)) {
                /* no vlans available, try again */
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Start VLAN Discovery\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                return;
        }

        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        shost_printk(KERN_DEBUG, fnic->lport->host,
                     "fip_timer: vlan %d state %d sol_count %d\n",
                     vlan->vid, vlan->state, vlan->sol_count);
        switch (vlan->state) {
        case FIP_VLAN_USED:
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "FIP VLAN is selected for FC transaction\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                break;
        case FIP_VLAN_FAILED:
                /* if all vlans are in failed state, restart vlan disc */
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Start VLAN Discovery\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                break;
        case FIP_VLAN_SENT:
                if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
                        /*
                         * no response on this vlan, remove from the list.
                         * Try the next vlan
                         */
                        shost_printk(KERN_INFO, fnic->lport->host,
                                     "Dequeue this VLAN ID %d from list\n",
                                     vlan->vid);
                        list_del(&vlan->list);
                        kfree(vlan);
                        vlan = NULL;
                        if (list_empty(&fnic->vlans)) {
                                /* we exhausted all vlans, restart vlan disc */
                                spin_unlock_irqrestore(&fnic->vlans_lock,
                                                       flags);
                                shost_printk(KERN_INFO, fnic->lport->host,
                                             "fip_timer: vlan list empty, "
                                             "trigger vlan disc\n");
                                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                                return;
                        }
                        /* check the next vlan */
                        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
                                                list);
                        fnic->set_vlan(fnic, vlan->vid);
                        vlan->state = FIP_VLAN_SENT; /* sent now */
                }
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
                vlan->sol_count++;
                sol_time = jiffies +
                        msecs_to_jiffies(FCOE_CTLR_START_DELAY);
                mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
                break;
        }
}