fnic_fcs.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404
  1. /*
  2. * Copyright 2008 Cisco Systems, Inc. All rights reserved.
  3. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  4. *
  5. * This program is free software; you may redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; version 2 of the License.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. * SOFTWARE.
  17. */
  18. #include <linux/errno.h>
  19. #include <linux/pci.h>
  20. #include <linux/slab.h>
  21. #include <linux/skbuff.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/if_ether.h>
  25. #include <linux/if_vlan.h>
  26. #include <linux/workqueue.h>
  27. #include <scsi/fc/fc_fip.h>
  28. #include <scsi/fc/fc_els.h>
  29. #include <scsi/fc/fc_fcoe.h>
  30. #include <scsi/fc_frame.h>
  31. #include <scsi/libfc.h>
  32. #include "fnic_io.h"
  33. #include "fnic.h"
  34. #include "fnic_fip.h"
  35. #include "cq_enet_desc.h"
  36. #include "cq_exch_desc.h"
/* Destination MAC for FIP VLAN discovery: the ALL-FCF-MACs multicast group. */
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;

/* Workqueue servicing deferred FIP frame work (fip_frame_work). */
struct workqueue_struct *fnic_fip_queue;
/* Workqueue for fnic events; presumably services event_work - confirm users. */
struct workqueue_struct *fnic_event_queue;

/* Forward declarations for the FIP/FCoE helpers defined below. */
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
/*
 * fnic_handle_link() - link_work handler: process a link-state change.
 *
 * Refreshes the cached link status/down-count and port speed from the vNIC
 * device, then compares old vs. new status to drive the matching libfcoe
 * transition (link up, link down, or a down/up bounce detected via the
 * link_down_cnt counter).  When the firmware is FIP capable, a new link-up
 * first triggers FIP VLAN discovery instead of an immediate
 * fcoe_ctlr_link_up().  fnic_lock guards the cached state and is always
 * released before calling into the trace/libfcoe helpers.
 */
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* Device teardown in progress: ignore further link events. */
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Snapshot previous state, then re-read it from the hardware. */
	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	/* Mirror the negotiated port speed into the scsi_host attributes. */
	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/*
				 * UP -> DOWN -> UP: the link bounced while we
				 * were not looking; replay down then up.
				 * NOTE(review): the data string says
				 * "Link Status:..." but strlen() is taken on
				 * "Link_Status:..." - same length, likely a
				 * typo worth cleaning up.
				 */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status:UP_DOWN_UP",
					strlen("Link_Status:UP_DOWN_UP")
					);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN")
						);
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* Stop any pending FIP VLAN-discovery retry timer. */
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				"deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}
  162. /*
  163. * This function passes incoming fabric frames to libFC
  164. */
  165. void fnic_handle_frame(struct work_struct *work)
  166. {
  167. struct fnic *fnic = container_of(work, struct fnic, frame_work);
  168. struct fc_lport *lp = fnic->lport;
  169. unsigned long flags;
  170. struct sk_buff *skb;
  171. struct fc_frame *fp;
  172. while ((skb = skb_dequeue(&fnic->frame_queue))) {
  173. spin_lock_irqsave(&fnic->fnic_lock, flags);
  174. if (fnic->stop_rx_link_events) {
  175. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  176. dev_kfree_skb(skb);
  177. return;
  178. }
  179. fp = (struct fc_frame *)skb;
  180. /*
  181. * If we're in a transitional state, just re-queue and return.
  182. * The queue will be serviced when we get to a stable state.
  183. */
  184. if (fnic->state != FNIC_IN_FC_MODE &&
  185. fnic->state != FNIC_IN_ETH_MODE) {
  186. skb_queue_head(&fnic->frame_queue, skb);
  187. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  188. return;
  189. }
  190. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  191. fc_exch_recv(lp, fp);
  192. }
  193. }
  194. void fnic_fcoe_evlist_free(struct fnic *fnic)
  195. {
  196. struct fnic_event *fevt = NULL;
  197. struct fnic_event *next = NULL;
  198. unsigned long flags;
  199. spin_lock_irqsave(&fnic->fnic_lock, flags);
  200. if (list_empty(&fnic->evlist)) {
  201. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  202. return;
  203. }
  204. list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
  205. list_del(&fevt->list);
  206. kfree(fevt);
  207. }
  208. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  209. }
/*
 * fnic_handle_event() - event_work handler: service queued fnic events.
 *
 * Walks fnic->evlist under fnic_lock and dispatches each event.  The lock
 * is dropped around fnic_fcoe_send_vlan_req() because that path transmits
 * a frame and takes other locks.
 *
 * NOTE(review): the lock is released and re-acquired while iterating with
 * list_for_each_entry_safe(); if another context could free the cached
 * 'next' entry in that window the iteration would touch freed memory -
 * confirm that all other evlist manipulation is excluded while this work
 * item runs.
 */
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		/* Teardown in progress: discard this event and stop. */
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			/* VLAN discovery transmits a frame; do it unlocked. */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
  258. /**
  259. * Check if the Received FIP FLOGI frame is rejected
  260. * @fip: The FCoE controller that received the frame
  261. * @skb: The received FIP frame
  262. *
  263. * Returns non-zero if the frame is rejected with unsupported cmd with
  264. * insufficient resource els explanation.
  265. */
  266. static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
  267. struct sk_buff *skb)
  268. {
  269. struct fc_lport *lport = fip->lp;
  270. struct fip_header *fiph;
  271. struct fc_frame_header *fh = NULL;
  272. struct fip_desc *desc;
  273. struct fip_encaps *els;
  274. enum fip_desc_type els_dtype = 0;
  275. u16 op;
  276. u8 els_op;
  277. u8 sub;
  278. size_t els_len = 0;
  279. size_t rlen;
  280. size_t dlen = 0;
  281. if (skb_linearize(skb))
  282. return 0;
  283. if (skb->len < sizeof(*fiph))
  284. return 0;
  285. fiph = (struct fip_header *)skb->data;
  286. op = ntohs(fiph->fip_op);
  287. sub = fiph->fip_subcode;
  288. if (op != FIP_OP_LS)
  289. return 0;
  290. if (sub != FIP_SC_REP)
  291. return 0;
  292. rlen = ntohs(fiph->fip_dl_len) * 4;
  293. if (rlen + sizeof(*fiph) > skb->len)
  294. return 0;
  295. desc = (struct fip_desc *)(fiph + 1);
  296. dlen = desc->fip_dlen * FIP_BPW;
  297. if (desc->fip_dtype == FIP_DT_FLOGI) {
  298. if (dlen < sizeof(*els) + sizeof(*fh) + 1)
  299. return 0;
  300. els_len = dlen - sizeof(*els);
  301. els = (struct fip_encaps *)desc;
  302. fh = (struct fc_frame_header *)(els + 1);
  303. els_dtype = desc->fip_dtype;
  304. if (!fh)
  305. return 0;
  306. /*
  307. * ELS command code, reason and explanation should be = Reject,
  308. * unsupported command and insufficient resource
  309. */
  310. els_op = *(u8 *)(fh + 1);
  311. if (els_op == ELS_LS_RJT) {
  312. shost_printk(KERN_INFO, lport->host,
  313. "Flogi Request Rejected by Switch\n");
  314. return 1;
  315. }
  316. shost_printk(KERN_INFO, lport->host,
  317. "Flogi Request Accepted by Switch\n");
  318. }
  319. return 0;
  320. }
/*
 * fnic_fcoe_send_vlan_req() - broadcast a FIP VLAN discovery request.
 *
 * Clears any previously discovered VLANs, builds a FIP VLAN request frame
 * addressed to the ALL-FCF-MACs multicast group (MAC + node-name
 * descriptors), hands it to the FIP transmit hook, and arms fip_timer so
 * the request is retried if no response arrives within
 * FCOE_CTLR_FIPVLAN_TOV.
 */
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;	/* NOTE(review): set but never read below */
	struct fip_vlan *vlan;
	u64 vlan_tov;

	/* Restart discovery from a clean slate on the default VLAN. */
	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;	/* allocation failure: the fip_timer path retries */

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	/* Ethernet + FIP headers: VLAN request to the ALL-FCF-MACs group. */
	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	/* MAC descriptor identifying this ENode. */
	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	/* Node-name (WWNN) descriptor. */
	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
  365. static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
  366. {
  367. struct fcoe_ctlr *fip = &fnic->ctlr;
  368. struct fip_header *fiph;
  369. struct fip_desc *desc;
  370. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  371. u16 vid;
  372. size_t rlen;
  373. size_t dlen;
  374. struct fcoe_vlan *vlan;
  375. u64 sol_time;
  376. unsigned long flags;
  377. FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
  378. "Received VLAN response...\n");
  379. fiph = (struct fip_header *) skb->data;
  380. FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
  381. "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
  382. ntohs(fiph->fip_op), fiph->fip_subcode);
  383. rlen = ntohs(fiph->fip_dl_len) * 4;
  384. fnic_fcoe_reset_vlans(fnic);
  385. spin_lock_irqsave(&fnic->vlans_lock, flags);
  386. desc = (struct fip_desc *)(fiph + 1);
  387. while (rlen > 0) {
  388. dlen = desc->fip_dlen * FIP_BPW;
  389. switch (desc->fip_dtype) {
  390. case FIP_DT_VLAN:
  391. vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
  392. shost_printk(KERN_INFO, fnic->lport->host,
  393. "process_vlan_resp: FIP VLAN %d\n", vid);
  394. vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
  395. if (!vlan) {
  396. /* retry from timer */
  397. spin_unlock_irqrestore(&fnic->vlans_lock,
  398. flags);
  399. goto out;
  400. }
  401. vlan->vid = vid & 0x0fff;
  402. vlan->state = FIP_VLAN_AVAIL;
  403. list_add_tail(&vlan->list, &fnic->vlans);
  404. break;
  405. }
  406. desc = (struct fip_desc *)((char *)desc + dlen);
  407. rlen -= dlen;
  408. }
  409. /* any VLAN descriptors present ? */
  410. if (list_empty(&fnic->vlans)) {
  411. /* retry from timer */
  412. atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
  413. FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
  414. "No VLAN descriptors in FIP VLAN response\n");
  415. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  416. goto out;
  417. }
  418. vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
  419. fnic->set_vlan(fnic, vlan->vid);
  420. vlan->state = FIP_VLAN_SENT; /* sent now */
  421. vlan->sol_count++;
  422. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  423. /* start the solicitation */
  424. fcoe_ctlr_link_up(fip);
  425. sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
  426. mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
  427. out:
  428. return;
  429. }
/*
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the first known VLAN.
 *
 * Re-selects the first entry of fnic->vlans, marks it as solicited, brings
 * the FIP controller up, and arms fip_timer for the retry path.
 *
 * NOTE(review): assumes fnic->vlans is non-empty - list_first_entry() on an
 * empty list yields a bogus entry.  Confirm callers only trigger
 * FNIC_EVT_START_FCF_DISC after a successful VLAN discovery.
 */
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
  446. static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
  447. {
  448. unsigned long flags;
  449. struct fcoe_vlan *fvlan;
  450. spin_lock_irqsave(&fnic->vlans_lock, flags);
  451. if (list_empty(&fnic->vlans)) {
  452. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  453. return -EINVAL;
  454. }
  455. fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
  456. if (fvlan->state == FIP_VLAN_USED) {
  457. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  458. return 0;
  459. }
  460. if (fvlan->state == FIP_VLAN_SENT) {
  461. fvlan->state = FIP_VLAN_USED;
  462. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  463. return 0;
  464. }
  465. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  466. return -EINVAL;
  467. }
  468. static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
  469. {
  470. struct fnic_event *fevt;
  471. unsigned long flags;
  472. fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
  473. if (!fevt)
  474. return;
  475. fevt->fnic = fnic;
  476. fevt->event = ev;
  477. spin_lock_irqsave(&fnic->fnic_lock, flags);
  478. list_add_tail(&fevt->list, &fnic->evlist);
  479. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  480. schedule_work(&fnic->event_work);
  481. }
  482. static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
  483. {
  484. struct fip_header *fiph;
  485. int ret = 1;
  486. u16 op;
  487. u8 sub;
  488. if (!skb || !(skb->data))
  489. return -1;
  490. if (skb_linearize(skb))
  491. goto drop;
  492. fiph = (struct fip_header *)skb->data;
  493. op = ntohs(fiph->fip_op);
  494. sub = fiph->fip_subcode;
  495. if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
  496. goto drop;
  497. if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
  498. goto drop;
  499. if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
  500. if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
  501. goto drop;
  502. /* pass it on to fcoe */
  503. ret = 1;
  504. } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
  505. /* set the vlan as used */
  506. fnic_fcoe_process_vlan_resp(fnic, skb);
  507. ret = 0;
  508. } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
  509. /* received CVL request, restart vlan disc */
  510. fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
  511. /* pass it on to fcoe */
  512. ret = 1;
  513. }
  514. drop:
  515. return ret;
  516. }
  517. void fnic_handle_fip_frame(struct work_struct *work)
  518. {
  519. struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
  520. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  521. unsigned long flags;
  522. struct sk_buff *skb;
  523. struct ethhdr *eh;
  524. while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
  525. spin_lock_irqsave(&fnic->fnic_lock, flags);
  526. if (fnic->stop_rx_link_events) {
  527. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  528. dev_kfree_skb(skb);
  529. return;
  530. }
  531. /*
  532. * If we're in a transitional state, just re-queue and return.
  533. * The queue will be serviced when we get to a stable state.
  534. */
  535. if (fnic->state != FNIC_IN_FC_MODE &&
  536. fnic->state != FNIC_IN_ETH_MODE) {
  537. skb_queue_head(&fnic->fip_frame_queue, skb);
  538. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  539. return;
  540. }
  541. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  542. eh = (struct ethhdr *)skb->data;
  543. if (eh->h_proto == htons(ETH_P_FIP)) {
  544. skb_pull(skb, sizeof(*eh));
  545. if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
  546. dev_kfree_skb(skb);
  547. continue;
  548. }
  549. /*
  550. * If there's FLOGI rejects - clear all
  551. * fcf's & restart from scratch
  552. */
  553. if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
  554. atomic64_inc(
  555. &fnic_stats->vlan_stats.flogi_rejects);
  556. shost_printk(KERN_INFO, fnic->lport->host,
  557. "Trigger a Link down - VLAN Disc\n");
  558. fcoe_ctlr_link_down(&fnic->ctlr);
  559. /* start FCoE VLAN discovery */
  560. fnic_fcoe_send_vlan_req(fnic);
  561. dev_kfree_skb(skb);
  562. continue;
  563. }
  564. fcoe_ctlr_recv(&fnic->ctlr, skb);
  565. continue;
  566. }
  567. }
  568. }
/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 *
 * Returns 1 when the skb was consumed (queued for FIP processing),
 * 0 when it was converted in place into an fc_frame for the caller,
 * and -1 when it was dropped and freed here.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		/* Slide the MAC addresses over the 4-byte tag and strip it. */
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* FIP frames are deferred to the FIP workqueue. */
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}

	/* Anything that is neither FIP nor FCoE is dropped. */
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;

	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	/* Re-cast as an fc_frame; SOF comes from the FCoE header. */
	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	/*
	 * EOF comes from the CRC/EOF trailer at the end of the frame.
	 * NOTE(review): assumes skb->len >= sizeof(*ft) at this point,
	 * presumably guaranteed by the minimum frame size - confirm.
	 */
	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}
  624. /**
  625. * fnic_update_mac_locked() - set data MAC address and filters.
  626. * @fnic: fnic instance.
  627. * @new: newly-assigned FCoE MAC address.
  628. *
  629. * Called with the fnic lock held.
  630. */
  631. void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
  632. {
  633. u8 *ctl = fnic->ctlr.ctl_src_addr;
  634. u8 *data = fnic->data_src_addr;
  635. if (is_zero_ether_addr(new))
  636. new = ctl;
  637. if (ether_addr_equal(data, new))
  638. return;
  639. FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
  640. if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
  641. vnic_dev_del_addr(fnic->vdev, data);
  642. memcpy(data, new, ETH_ALEN);
  643. if (!ether_addr_equal(new, ctl))
  644. vnic_dev_add_addr(fnic->vdev, new);
  645. }
  646. /**
  647. * fnic_update_mac() - set data MAC address and filters.
  648. * @lport: local port.
  649. * @new: newly-assigned FCoE MAC address.
  650. */
  651. void fnic_update_mac(struct fc_lport *lport, u8 *new)
  652. {
  653. struct fnic *fnic = lport_priv(lport);
  654. spin_lock_irq(&fnic->fnic_lock);
  655. fnic_update_mac_locked(fnic, new);
  656. spin_unlock_irq(&fnic->fnic_lock);
  657. }
/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	/* Adopt the FCF-granted MAC (FPMA) carried in the FLOGI accept. */
	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	/* On failure, fall back to ETH mode so FLOGI can be retried. */
	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}
/*
 * fnic_rq_cmpl_frame_recv() - per-buffer receive completion handler.
 * @rq:      receive queue the buffer came from
 * @cq_desc: raw completion descriptor (FCP or ENET flavor)
 * @buf:     RQ buffer bookkeeping entry (holds the skb and DMA mapping)
 * @skipped: unused
 * @opaque:  unused
 *
 * Runs in interrupt context via vnic_rq_service().  Decodes the
 * completion descriptor, trims the skb to the byte count the hardware
 * actually wrote, and queues good frames on fnic->frame_queue for
 * fnic->frame_work to process later.  Error frames are counted and
 * dropped.
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	/* DMA is finished with this buffer; unmap before the CPU reads it. */
	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/*
		 * FCP descriptor: the hardware stripped the Ethernet/FCoE
		 * headers and reports SOF/EOF in the descriptor itself.
		 */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/*
		 * Plain Ethernet descriptor: the frame still carries its
		 * L2 headers (e.g. FIP frames); they are parsed below by
		 * fnic_import_rq_eth_pkt().
		 */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		/* Non-zero return means the skb was consumed (FIP path). */
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	/* Any hardware-reported frame error drops the frame here. */
	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
				    (char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	/* Defer frame processing to thread context (fnic->frame_work). */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
  817. static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
  818. struct cq_desc *cq_desc, u8 type,
  819. u16 q_number, u16 completed_index,
  820. void *opaque)
  821. {
  822. struct fnic *fnic = vnic_dev_priv(vdev);
  823. vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
  824. VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
  825. NULL);
  826. return 0;
  827. }
  828. int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
  829. {
  830. unsigned int tot_rq_work_done = 0, cur_work_done;
  831. unsigned int i;
  832. int err;
  833. for (i = 0; i < fnic->rq_count; i++) {
  834. cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
  835. fnic_rq_cmpl_handler_cont,
  836. NULL);
  837. if (cur_work_done) {
  838. err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
  839. if (err)
  840. shost_printk(KERN_ERR, fnic->lport->host,
  841. "fnic_alloc_rq_frame can't alloc"
  842. " frame\n");
  843. }
  844. tot_rq_work_done += cur_work_done;
  845. }
  846. return tot_rq_work_done;
  847. }
  848. /*
  849. * This function is called once at init time to allocate and fill RQ
  850. * buffers. Subsequently, it is called in the interrupt context after RQ
  851. * buffer processing to replenish the buffers in the RQ
  852. */
  853. int fnic_alloc_rq_frame(struct vnic_rq *rq)
  854. {
  855. struct fnic *fnic = vnic_dev_priv(rq->vdev);
  856. struct sk_buff *skb;
  857. u16 len;
  858. dma_addr_t pa;
  859. int r;
  860. len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
  861. skb = dev_alloc_skb(len);
  862. if (!skb) {
  863. FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
  864. "Unable to allocate RQ sk_buff\n");
  865. return -ENOMEM;
  866. }
  867. skb_reset_mac_header(skb);
  868. skb_reset_transport_header(skb);
  869. skb_reset_network_header(skb);
  870. skb_put(skb, len);
  871. pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
  872. if (pci_dma_mapping_error(fnic->pdev, pa)) {
  873. r = -ENOMEM;
  874. printk(KERN_ERR "PCI mapping failed with error %d\n", r);
  875. goto free_skb;
  876. }
  877. fnic_queue_rq_desc(rq, skb, pa, len);
  878. return 0;
  879. free_skb:
  880. kfree_skb(skb);
  881. return r;
  882. }
  883. void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
  884. {
  885. struct fc_frame *fp = buf->os_buf;
  886. struct fnic *fnic = vnic_dev_priv(rq->vdev);
  887. pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
  888. PCI_DMA_FROMDEVICE);
  889. dev_kfree_skb(fp_skb(fp));
  890. buf->os_buf = NULL;
  891. }
/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 *
 * If the hardware cannot insert the VLAN tag (!fnic->vlan_hw_insert),
 * an 802.1Q header is inserted in software before the frame is posted
 * to WQ 0.  On failure (DMA mapping error or no WQ descriptors
 * available) the frame is dropped and freed; there is no retry.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;
	int r;

	if (!fnic->vlan_hw_insert) {
		/*
		 * Software VLAN insertion: grow headroom by the size
		 * difference between a VLAN and plain Ethernet header,
		 * slide the two MAC addresses up, then fill in the tag.
		 */
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	r = pci_dma_mapping_error(fnic->pdev, pa);
	if (r) {
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	/* wq_lock[0] serializes posting against completion processing. */
	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;	/* ring full: drop the frame */

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
free_skb:
	kfree_skb(skb);
}
/*
 * fnic_send_frame() - encapsulate an FC frame in FCoE and post it to WQ 0.
 * @fnic: fnic device
 * @fp:   FC frame to send; consumed on both success and failure
 *
 * ELS requests may be diverted to the FIP controller via
 * fcoe_ctlr_els_send(), which takes ownership of the skb when it
 * returns non-zero.  Otherwise the Ethernet (optionally 802.1Q-tagged)
 * and FCoE headers are pushed in front of the FC frame and the result
 * is queued to the hardware.  Returns 0 on success (or when the FIP
 * controller consumed the frame), negative on failure; the skb is
 * freed on every error path.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	/*
	 * Give FIP a chance to intercept ELS requests (e.g. FLOGI);
	 * a non-zero return means fcoe_ctlr_els_send() took the skb.
	 */
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		/* Software VLAN tag: push VLAN-Ethernet + FCoE headers. */
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		/* Hardware inserts the tag: plain Ethernet + FCoE headers. */
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	/* map_dest: derive the MAC from the FC destination ID (pre-FLOGI). */
	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);	/* FC frames are word-aligned by construction */

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(fnic->pdev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				    (char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		/* Ring full: undo the mapping and fail; skb freed below. */
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
  1015. /*
  1016. * fnic_send
  1017. * Routine to send a raw frame
  1018. */
  1019. int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
  1020. {
  1021. struct fnic *fnic = lport_priv(lp);
  1022. unsigned long flags;
  1023. if (fnic->in_remove) {
  1024. dev_kfree_skb(fp_skb(fp));
  1025. return -1;
  1026. }
  1027. /*
  1028. * Queue frame if in a transitional state.
  1029. * This occurs while registering the Port_ID / MAC address after FLOGI.
  1030. */
  1031. spin_lock_irqsave(&fnic->fnic_lock, flags);
  1032. if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
  1033. skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
  1034. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1035. return 0;
  1036. }
  1037. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1038. return fnic_send_frame(fnic, fp);
  1039. }
  1040. /**
  1041. * fnic_flush_tx() - send queued frames.
  1042. * @fnic: fnic device
  1043. *
  1044. * Send frames that were waiting to go out in FC or Ethernet mode.
  1045. * Whenever changing modes we purge queued frames, so these frames should
  1046. * be queued for the stable mode that we're in, either FC or Ethernet.
  1047. *
  1048. * Called without fnic_lock held.
  1049. */
  1050. void fnic_flush_tx(struct fnic *fnic)
  1051. {
  1052. struct sk_buff *skb;
  1053. struct fc_frame *fp;
  1054. while ((skb = skb_dequeue(&fnic->tx_queue))) {
  1055. fp = (struct fc_frame *)skb;
  1056. fnic_send_frame(fnic, fp);
  1057. }
  1058. }
/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 *
 * Requests a firmware reset to move from FC (or transitioning-to-FC)
 * mode back to Ethernet mode.  fnic_lock must be dropped around the
 * reset request, so the state is re-checked after reacquiring it and
 * the transition is retried if something else changed it meanwhile.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		/* Must not hold fnic_lock across the firmware call. */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/*
		 * State moved while the lock was dropped: re-evaluate from
		 * the top instead of clobbering the newer state.
		 */
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		/* Reset request failed: roll back to the prior state. */
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		/* Already transitioning to, or already in, Ethernet mode. */
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
  1092. static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
  1093. struct cq_desc *cq_desc,
  1094. struct vnic_wq_buf *buf, void *opaque)
  1095. {
  1096. struct sk_buff *skb = buf->os_buf;
  1097. struct fc_frame *fp = (struct fc_frame *)skb;
  1098. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  1099. pci_unmap_single(fnic->pdev, buf->dma_addr,
  1100. buf->len, PCI_DMA_TODEVICE);
  1101. dev_kfree_skb_irq(fp_skb(fp));
  1102. buf->os_buf = NULL;
  1103. }
  1104. static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
  1105. struct cq_desc *cq_desc, u8 type,
  1106. u16 q_number, u16 completed_index,
  1107. void *opaque)
  1108. {
  1109. struct fnic *fnic = vnic_dev_priv(vdev);
  1110. unsigned long flags;
  1111. spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
  1112. vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
  1113. fnic_wq_complete_frame_send, NULL);
  1114. spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
  1115. return 0;
  1116. }
  1117. int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
  1118. {
  1119. unsigned int wq_work_done = 0;
  1120. unsigned int i;
  1121. for (i = 0; i < fnic->raw_wq_count; i++) {
  1122. wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
  1123. work_to_do,
  1124. fnic_wq_cmpl_handler_cont,
  1125. NULL);
  1126. }
  1127. return wq_work_done;
  1128. }
  1129. void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
  1130. {
  1131. struct fc_frame *fp = buf->os_buf;
  1132. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  1133. pci_unmap_single(fnic->pdev, buf->dma_addr,
  1134. buf->len, PCI_DMA_TODEVICE);
  1135. dev_kfree_skb(fp_skb(fp));
  1136. buf->os_buf = NULL;
  1137. }
  1138. void fnic_fcoe_reset_vlans(struct fnic *fnic)
  1139. {
  1140. unsigned long flags;
  1141. struct fcoe_vlan *vlan;
  1142. struct fcoe_vlan *next;
  1143. /*
  1144. * indicate a link down to fcoe so that all fcf's are free'd
  1145. * might not be required since we did this before sending vlan
  1146. * discovery request
  1147. */
  1148. spin_lock_irqsave(&fnic->vlans_lock, flags);
  1149. if (!list_empty(&fnic->vlans)) {
  1150. list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
  1151. list_del(&vlan->list);
  1152. kfree(vlan);
  1153. }
  1154. }
  1155. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  1156. }
/*
 * fnic_handle_fip_timer() - FIP VLAN discovery/solicitation timer handler.
 * @fnic: fnic device
 *
 * Inspects the first VLAN on fnic->vlans and, depending on its state,
 * either leaves it alone (USED), restarts VLAN discovery (FAILED or
 * list exhausted), or re-solicits and rearms the timer (SENT).
 *
 * Locking: vlans_lock is taken below and each switch arm is
 * responsible for releasing it on its own path.
 */
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	/* Nothing to do once link events are being suppressed (teardown). */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* VLAN discovery is a FIP concept; skip entirely in non-FIP mode. */
	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);

	/*
	 * NOTE(review): there is no default arm; a vlan->state other than
	 * the three handled below would fall out of the switch with
	 * vlans_lock still held — confirm those states cannot reach here.
	 */
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* Re-solicit this vlan and rearm the timer. */
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies
					(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}