  1. /* bnx2fc_fcoe.c: QLogic Linux FCoE offload driver.
  2. * This file contains the code that interacts with libfc, libfcoe,
  3. * cnic modules to create FCoE instances, send/receive non-offloaded
  4. * FIP/FCoE packets, listen to link events etc.
  5. *
  6. * Copyright (c) 2008-2013 Broadcom Corporation
  7. * Copyright (c) 2014-2016 QLogic Corporation
  8. * Copyright (c) 2016-2017 Cavium Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation.
  13. *
  14. * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  15. */
  16. #include "bnx2fc.h"
  17. static struct list_head adapter_list;
  18. static struct list_head if_list;
  19. static u32 adapter_count;
  20. static DEFINE_MUTEX(bnx2fc_dev_lock);
  21. DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
  22. #define DRV_MODULE_NAME "bnx2fc"
  23. #define DRV_MODULE_VERSION BNX2FC_VERSION
  24. #define DRV_MODULE_RELDATE "October 15, 2015"
  25. static char version[] =
  26. "QLogic FCoE Driver " DRV_MODULE_NAME \
  27. " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  28. MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
  29. MODULE_DESCRIPTION("QLogic FCoE Driver");
  30. MODULE_LICENSE("GPL");
  31. MODULE_VERSION(DRV_MODULE_VERSION);
  32. #define BNX2FC_MAX_QUEUE_DEPTH 256
  33. #define BNX2FC_MIN_QUEUE_DEPTH 32
  34. #define FCOE_WORD_TO_BYTE 4
  35. static struct scsi_transport_template *bnx2fc_transport_template;
  36. static struct scsi_transport_template *bnx2fc_vport_xport_template;
  37. struct workqueue_struct *bnx2fc_wq;
  38. /* bnx2fc needs only one instance of the fcoe_percpu_s structure.
  39. * The I/O threads are per CPU, but there is only one L2 receive thread.
  40. */
  41. struct fcoe_percpu_s bnx2fc_global;
  42. DEFINE_SPINLOCK(bnx2fc_global_lock);
  43. static struct cnic_ulp_ops bnx2fc_cnic_cb;
  44. static struct libfc_function_template bnx2fc_libfc_fcn_templ;
  45. static struct scsi_host_template bnx2fc_shost_template;
  46. static struct fc_function_template bnx2fc_transport_function;
  47. static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
  48. static struct fc_function_template bnx2fc_vport_xport_function;
  49. static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode);
  50. static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
  51. static int bnx2fc_destroy(struct net_device *net_device);
  52. static int bnx2fc_enable(struct net_device *netdev);
  53. static int bnx2fc_disable(struct net_device *netdev);
  54. /* fcoe_sysfs control interface handlers */
  55. static int bnx2fc_ctlr_alloc(struct net_device *netdev);
  56. static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
  57. static void bnx2fc_recv_frame(struct sk_buff *skb);
  58. static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
  59. static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
  60. static int bnx2fc_lport_config(struct fc_lport *lport);
  61. static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
  62. static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
  63. static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
  64. static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
  65. static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
  66. static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
  67. struct device *parent, int npiv);
  68. static void bnx2fc_destroy_work(struct work_struct *work);
  69. static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
  70. static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
  71. *phys_dev);
  72. static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface);
  73. static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
  74. static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
  75. static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
  76. static void bnx2fc_port_shutdown(struct fc_lport *lport);
  77. static void bnx2fc_stop(struct bnx2fc_interface *interface);
  78. static int __init bnx2fc_mod_init(void);
  79. static void __exit bnx2fc_mod_exit(void);
  80. unsigned int bnx2fc_debug_level;
  81. module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
  82. MODULE_PARM_DESC(debug_logging,
  83. "Option to enable extended logging,\n"
  84. "\t\tDefault is 0 - no logging.\n"
  85. "\t\t0x01 - SCSI cmd error, cleanup.\n"
  86. "\t\t0x02 - Session setup, cleanup, etc.\n"
  87. "\t\t0x04 - lport events, link, mtu, etc.\n"
  88. "\t\t0x08 - ELS logs.\n"
  89. "\t\t0x10 - fcoe L2 fame related logs.\n"
  90. "\t\t0xff - LOG all messages.");
  91. uint bnx2fc_devloss_tmo;
  92. module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
  93. MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
  94. "attached via bnx2fc.");
  95. uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
  96. module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
  97. MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
  98. "0xffff.");
  99. uint bnx2fc_queue_depth;
  100. module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
  101. MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
  102. "attached via bnx2fc.");
  103. uint bnx2fc_log_fka;
  104. module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
  105. MODULE_PARM_DESC(log_fka, " Print a message to the kernel log when fcoe "
  106. "initiates a FIP keep alive, if debug logging is enabled.");
  107. static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
  108. {
  109. return ((struct bnx2fc_interface *)
  110. ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
  111. }
  112. static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
  113. {
  114. struct fcoe_ctlr_device *ctlr_dev =
  115. fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
  116. struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
  117. struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
  118. fcf_dev->vlan_id = fcoe->vlan_id;
  119. }
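/**
 * bnx2fc_clean_rx_queue - free queued L2 frames belonging to an lport
 *
 * @lp: the fc_lport whose pending receive frames should be dropped
 *
 * Walks the global FCoE receive list under its lock and frees every skb
 * that was queued for @lp, typically while the port is being torn down.
 */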
  120. static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
  121. {
  122. struct fcoe_percpu_s *bg;
  123. struct fcoe_rcv_info *fr;
  124. struct sk_buff_head *list;
  125. struct sk_buff *skb, *next;
  126. struct sk_buff *head;
  127. bg = &bnx2fc_global;
  128. spin_lock_bh(&bg->fcoe_rx_list.lock);
  129. list = &bg->fcoe_rx_list;
  130. head = list->next;
  131. for (skb = head; skb != (struct sk_buff *)list;
  132. skb = next) {
  133. next = skb->next;
  134. fr = fcoe_dev_from_skb(skb);
  135. if (fr->fr_dev == lp) {
  136. __skb_unlink(skb, list);
  137. kfree_skb(skb);
  138. }
  139. }
  140. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  141. }
  142. int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  143. {
  144. int rc;
  145. spin_lock(&bnx2fc_global_lock);
  146. rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
  147. spin_unlock(&bnx2fc_global_lock);
  148. return rc;
  149. }
  150. static void bnx2fc_abort_io(struct fc_lport *lport)
  151. {
  152. /*
  153. * This function is a no-op for bnx2fc, but we do
  154. * not want to leave it NULL either, as libfc
  155. * would otherwise fall back to the default
  156. * handler, fc_fcp_abort_io.
  157. */
  158. }
  159. static void bnx2fc_cleanup(struct fc_lport *lport)
  160. {
  161. struct fcoe_port *port = lport_priv(lport);
  162. struct bnx2fc_interface *interface = port->priv;
  163. struct bnx2fc_hba *hba = interface->hba;
  164. struct bnx2fc_rport *tgt;
  165. int i;
  166. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  167. mutex_lock(&hba->hba_mutex);
  168. spin_lock_bh(&hba->hba_lock);
  169. for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
  170. tgt = hba->tgt_ofld_list[i];
  171. if (tgt) {
  172. /* Cleanup IOs belonging to requested vport */
  173. if (tgt->port == port) {
  174. spin_unlock_bh(&hba->hba_lock);
  175. BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
  176. bnx2fc_flush_active_ios(tgt);
  177. spin_lock_bh(&hba->hba_lock);
  178. }
  179. }
  180. }
  181. spin_unlock_bh(&hba->hba_lock);
  182. mutex_unlock(&hba->hba_mutex);
  183. }
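/**
 * bnx2fc_xmit_l2_frame - transmit an ELS/BLS frame for an offloaded session
 *
 * @tgt: offloaded rport the frame is addressed to
 * @fp: the fc_frame to transmit
 *
 * ADISC, LOGO and RLS requests are sent through the offload path; for an
 * ABTS only a debug message is logged here.  Frames that are neither ELS
 * requests nor an ABTS return -ENODEV so the caller falls back to the
 * non-offload (L2) transmit path.  Returns -ENOMEM if the offloaded send
 * itself fails.
 */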
  184. static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
  185. struct fc_frame *fp)
  186. {
  187. struct fc_rport_priv *rdata = tgt->rdata;
  188. struct fc_frame_header *fh;
  189. int rc = 0;
  190. fh = fc_frame_header_get(fp);
  191. BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
  192. "r_ctl = 0x%x\n", rdata->ids.port_id,
  193. ntohs(fh->fh_ox_id), fh->fh_r_ctl);
  194. if ((fh->fh_type == FC_TYPE_ELS) &&
  195. (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  196. switch (fc_frame_payload_op(fp)) {
  197. case ELS_ADISC:
  198. rc = bnx2fc_send_adisc(tgt, fp);
  199. break;
  200. case ELS_LOGO:
  201. rc = bnx2fc_send_logo(tgt, fp);
  202. break;
  203. case ELS_RLS:
  204. rc = bnx2fc_send_rls(tgt, fp);
  205. break;
  206. default:
  207. break;
  208. }
  209. } else if ((fh->fh_type == FC_TYPE_BLS) &&
  210. (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
  211. BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
  212. else {
  213. BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
  214. "rctl 0x%x thru non-offload path\n",
  215. fh->fh_type, fh->fh_r_ctl);
  216. return -ENODEV;
  217. }
  218. if (rc)
  219. return -ENOMEM;
  220. else
  221. return 0;
  222. }
  223. /**
  224. * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
  225. *
  226. * @lport: the associated local port
  227. * @fp: the fc_frame to be transmitted
  228. */
  229. static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
  230. {
  231. struct ethhdr *eh;
  232. struct fcoe_crc_eof *cp;
  233. struct sk_buff *skb;
  234. struct fc_frame_header *fh;
  235. struct bnx2fc_interface *interface;
  236. struct fcoe_ctlr *ctlr;
  237. struct bnx2fc_hba *hba;
  238. struct fcoe_port *port;
  239. struct fcoe_hdr *hp;
  240. struct bnx2fc_rport *tgt;
  241. struct fc_stats *stats;
  242. u8 sof, eof;
  243. u32 crc;
  244. unsigned int hlen, tlen, elen;
  245. int wlen, rc = 0;
  246. port = (struct fcoe_port *)lport_priv(lport);
  247. interface = port->priv;
  248. ctlr = bnx2fc_to_ctlr(interface);
  249. hba = interface->hba;
  250. fh = fc_frame_header_get(fp);
  251. skb = fp_skb(fp);
  252. if (!lport->link_up) {
  253. BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
  254. kfree_skb(skb);
  255. return 0;
  256. }
  257. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  258. if (!ctlr->sel_fcf) {
  259. BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
  260. kfree_skb(skb);
  261. return -EINVAL;
  262. }
  263. if (fcoe_ctlr_els_send(ctlr, lport, skb))
  264. return 0;
  265. }
  266. sof = fr_sof(fp);
  267. eof = fr_eof(fp);
  268. /*
  269. * Snoop the frame header to check whether the frame is for
  270. * an offloaded session.
  271. */
  272. /*
  273. * tgt_ofld_list access is synchronized using both the hba
  274. * mutex and the hba lock. At least one of them must be
  275. * held for read access.
  276. */
  277. spin_lock_bh(&hba->hba_lock);
  278. tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
  279. if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
  280. /* This frame is for offloaded session */
  281. BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
  282. "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
  283. spin_unlock_bh(&hba->hba_lock);
  284. rc = bnx2fc_xmit_l2_frame(tgt, fp);
  285. if (rc != -ENODEV) {
  286. kfree_skb(skb);
  287. return rc;
  288. }
  289. } else {
  290. spin_unlock_bh(&hba->hba_lock);
  291. }
  292. elen = sizeof(struct ethhdr);
  293. hlen = sizeof(struct fcoe_hdr);
  294. tlen = sizeof(struct fcoe_crc_eof);
  295. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  296. skb->ip_summed = CHECKSUM_NONE;
  297. crc = fcoe_fc_crc(fp);
  298. /* copy port crc and eof to the skb buff */
  299. if (skb_is_nonlinear(skb)) {
  300. skb_frag_t *frag;
  301. if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
  302. kfree_skb(skb);
  303. return -ENOMEM;
  304. }
  305. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  306. cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
  307. } else {
  308. cp = skb_put(skb, tlen);
  309. }
  310. memset(cp, 0, sizeof(*cp));
  311. cp->fcoe_eof = eof;
  312. cp->fcoe_crc32 = cpu_to_le32(~crc);
  313. if (skb_is_nonlinear(skb)) {
  314. kunmap_atomic(cp);
  315. cp = NULL;
  316. }
  317. /* adjust skb network/transport offsets to match mac/fcoe/port */
  318. skb_push(skb, elen + hlen);
  319. skb_reset_mac_header(skb);
  320. skb_reset_network_header(skb);
  321. skb->mac_len = elen;
  322. skb->protocol = htons(ETH_P_FCOE);
  323. skb->dev = interface->netdev;
  324. /* fill up mac and fcoe headers */
  325. eh = eth_hdr(skb);
  326. eh->h_proto = htons(ETH_P_FCOE);
  327. if (ctlr->map_dest)
  328. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  329. else
  330. /* insert GW address */
  331. memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
  332. if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
  333. memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
  334. else
  335. memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
  336. hp = (struct fcoe_hdr *)(eh + 1);
  337. memset(hp, 0, sizeof(*hp));
  338. if (FC_FCOE_VER)
  339. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  340. hp->fcoe_sof = sof;
  341. /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
  342. if (lport->seq_offload && fr_max_payload(fp)) {
  343. skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
  344. skb_shinfo(skb)->gso_size = fr_max_payload(fp);
  345. } else {
  346. skb_shinfo(skb)->gso_type = 0;
  347. skb_shinfo(skb)->gso_size = 0;
  348. }
  349. /* update tx stats */
  350. stats = per_cpu_ptr(lport->stats, get_cpu());
  351. stats->TxFrames++;
  352. stats->TxWords += wlen;
  353. put_cpu();
  354. /* send down to lld */
  355. fr_dev(fp) = lport;
  356. if (port->fcoe_pending_queue.qlen)
  357. fcoe_check_wait_queue(lport, skb);
  358. else if (fcoe_start_io(skb))
  359. fcoe_check_wait_queue(lport, skb);
  360. return 0;
  361. }
  362. /**
  363. * bnx2fc_rcv - bnx2fc's FCoE receive handler, called from NET_RX_SOFTIRQ
  364. *
  365. * @skb: the receive socket buffer
  366. * @dev: associated net device
  367. * @ptype: the &packet_type structure used to register this handler
  368. * @olddev: the original receive net device
  369. *
  370. * Queues the frame on the global receive list for the L2 receive thread.
  371. */
  372. static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
  373. struct packet_type *ptype, struct net_device *olddev)
  374. {
  375. struct fc_lport *lport;
  376. struct bnx2fc_interface *interface;
  377. struct fcoe_ctlr *ctlr;
  378. struct fc_frame_header *fh;
  379. struct fcoe_rcv_info *fr;
  380. struct fcoe_percpu_s *bg;
  381. struct sk_buff *tmp_skb;
  382. unsigned short oxid;
  383. interface = container_of(ptype, struct bnx2fc_interface,
  384. fcoe_packet_type);
  385. ctlr = bnx2fc_to_ctlr(interface);
  386. lport = ctlr->lp;
  387. if (unlikely(lport == NULL)) {
  388. printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
  389. goto err;
  390. }
  391. tmp_skb = skb_share_check(skb, GFP_ATOMIC);
  392. if (!tmp_skb)
  393. goto err;
  394. skb = tmp_skb;
  395. if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
  396. printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
  397. goto err;
  398. }
  399. /*
  400. * Check for minimum frame length, and make sure required FCoE
  401. * and FC headers are pulled into the linear data area.
  402. */
  403. if (unlikely((skb->len < FCOE_MIN_FRAME) ||
  404. !pskb_may_pull(skb, FCOE_HEADER_LEN)))
  405. goto err;
  406. skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
  407. fh = (struct fc_frame_header *) skb_transport_header(skb);
  408. oxid = ntohs(fh->fh_ox_id);
  409. fr = fcoe_dev_from_skb(skb);
  410. fr->fr_dev = lport;
  411. bg = &bnx2fc_global;
  412. spin_lock(&bg->fcoe_rx_list.lock);
  413. __skb_queue_tail(&bg->fcoe_rx_list, skb);
  414. if (bg->fcoe_rx_list.qlen == 1)
  415. wake_up_process(bg->kthread);
  416. spin_unlock(&bg->fcoe_rx_list.lock);
  417. return 0;
  418. err:
  419. kfree_skb(skb);
  420. return -1;
  421. }
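/**
 * bnx2fc_l2_rcv_thread - kthread that processes non-offloaded FCoE frames
 *
 * @arg: pointer to the global struct fcoe_percpu_s (bnx2fc_global)
 *
 * Sleeps until bnx2fc_rcv() queues an skb and wakes it, then drains the
 * global receive list and hands each frame to bnx2fc_recv_frame().
 */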
  422. static int bnx2fc_l2_rcv_thread(void *arg)
  423. {
  424. struct fcoe_percpu_s *bg = arg;
  425. struct sk_buff *skb;
  426. set_user_nice(current, MIN_NICE);
  427. set_current_state(TASK_INTERRUPTIBLE);
  428. while (!kthread_should_stop()) {
  429. schedule();
  430. spin_lock_bh(&bg->fcoe_rx_list.lock);
  431. while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
  432. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  433. bnx2fc_recv_frame(skb);
  434. spin_lock_bh(&bg->fcoe_rx_list.lock);
  435. }
  436. __set_current_state(TASK_INTERRUPTIBLE);
  437. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  438. }
  439. __set_current_state(TASK_RUNNING);
  440. return 0;
  441. }
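/**
 * bnx2fc_recv_frame - validate a received L2 frame and pass it to libfc
 *
 * @skb: the received socket buffer
 *
 * Strips the FCoE header and trailer, checks the destination MAC against
 * the FC d_id, drops FCP data, ABTS and non-FIP LOGO frames, verifies the
 * CRC, updates receive statistics and finally calls fc_exch_recv().
 */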
  442. static void bnx2fc_recv_frame(struct sk_buff *skb)
  443. {
  444. u32 fr_len;
  445. struct fc_lport *lport;
  446. struct fcoe_rcv_info *fr;
  447. struct fc_stats *stats;
  448. struct fc_frame_header *fh;
  449. struct fcoe_crc_eof crc_eof;
  450. struct fc_frame *fp;
  451. struct fc_lport *vn_port;
  452. struct fcoe_port *port, *phys_port;
  453. u8 *mac = NULL;
  454. u8 *dest_mac = NULL;
  455. struct fcoe_hdr *hp;
  456. struct bnx2fc_interface *interface;
  457. struct fcoe_ctlr *ctlr;
  458. fr = fcoe_dev_from_skb(skb);
  459. lport = fr->fr_dev;
  460. if (unlikely(lport == NULL)) {
  461. printk(KERN_ERR PFX "Invalid lport struct\n");
  462. kfree_skb(skb);
  463. return;
  464. }
  465. if (skb_is_nonlinear(skb))
  466. skb_linearize(skb);
  467. mac = eth_hdr(skb)->h_source;
  468. dest_mac = eth_hdr(skb)->h_dest;
  469. /* Pull the header */
  470. hp = (struct fcoe_hdr *) skb_network_header(skb);
  471. fh = (struct fc_frame_header *) skb_transport_header(skb);
  472. skb_pull(skb, sizeof(struct fcoe_hdr));
  473. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  474. fp = (struct fc_frame *)skb;
  475. fc_frame_init(fp);
  476. fr_dev(fp) = lport;
  477. fr_sof(fp) = hp->fcoe_sof;
  478. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  479. kfree_skb(skb);
  480. return;
  481. }
  482. fr_eof(fp) = crc_eof.fcoe_eof;
  483. fr_crc(fp) = crc_eof.fcoe_crc32;
  484. if (pskb_trim(skb, fr_len)) {
  485. kfree_skb(skb);
  486. return;
  487. }
  488. phys_port = lport_priv(lport);
  489. interface = phys_port->priv;
  490. ctlr = bnx2fc_to_ctlr(interface);
  491. fh = fc_frame_header_get(fp);
  492. if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
  493. BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n",
  494. dest_mac);
  495. kfree_skb(skb);
  496. return;
  497. }
  498. vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
  499. if (vn_port) {
  500. port = lport_priv(vn_port);
  501. if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
  502. BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
  503. kfree_skb(skb);
  504. return;
  505. }
  506. }
  507. if (ctlr->state) {
  508. if (!ether_addr_equal(mac, ctlr->dest_addr)) {
  509. BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n",
  510. mac, ctlr->dest_addr);
  511. kfree_skb(skb);
  512. return;
  513. }
  514. }
  515. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  516. fh->fh_type == FC_TYPE_FCP) {
  517. /* Drop FCP data. We don't handle it in the L2 path */
  518. kfree_skb(skb);
  519. return;
  520. }
  521. if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
  522. fh->fh_type == FC_TYPE_ELS) {
  523. switch (fc_frame_payload_op(fp)) {
  524. case ELS_LOGO:
  525. if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
  526. /* drop non-FIP LOGO */
  527. kfree_skb(skb);
  528. return;
  529. }
  530. break;
  531. }
  532. }
  533. if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
  534. /* Drop incoming ABTS */
  535. kfree_skb(skb);
  536. return;
  537. }
  538. /*
  539. * If the destination ID from the frame header does not match what we
  540. * have on record for lport and the search for a NPIV port came up
  541. * empty then this is not addressed to our port so simply drop it.
  542. */
  543. if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
  544. BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
  545. lport->port_id, ntoh24(fh->fh_d_id));
  546. kfree_skb(skb);
  547. return;
  548. }
  549. stats = per_cpu_ptr(lport->stats, smp_processor_id());
  550. stats->RxFrames++;
  551. stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
  552. if (le32_to_cpu(fr_crc(fp)) !=
  553. ~crc32(~0, skb->data, fr_len)) {
  554. if (stats->InvalidCRCCount < 5)
  555. printk(KERN_WARNING PFX "dropping frame with "
  556. "CRC error\n");
  557. stats->InvalidCRCCount++;
  558. kfree_skb(skb);
  559. return;
  560. }
  561. fc_exch_recv(lport, fp);
  562. }
  563. /**
  564. * bnx2fc_percpu_io_thread - per-CPU kthread that processes I/O completions
  565. *
  566. * @arg: pointer to this CPU's struct bnx2fc_percpu_s
  567. */
  568. static int bnx2fc_percpu_io_thread(void *arg)
  569. {
  570. struct bnx2fc_percpu_s *p = arg;
  571. struct bnx2fc_work *work, *tmp;
  572. LIST_HEAD(work_list);
  573. set_user_nice(current, MIN_NICE);
  574. set_current_state(TASK_INTERRUPTIBLE);
  575. while (!kthread_should_stop()) {
  576. schedule();
  577. spin_lock_bh(&p->fp_work_lock);
  578. while (!list_empty(&p->work_list)) {
  579. list_splice_init(&p->work_list, &work_list);
  580. spin_unlock_bh(&p->fp_work_lock);
  581. list_for_each_entry_safe(work, tmp, &work_list, list) {
  582. list_del_init(&work->list);
  583. bnx2fc_process_cq_compl(work->tgt, work->wqe);
  584. kfree(work);
  585. }
  586. spin_lock_bh(&p->fp_work_lock);
  587. }
  588. __set_current_state(TASK_INTERRUPTIBLE);
  589. spin_unlock_bh(&p->fp_work_lock);
  590. }
  591. __set_current_state(TASK_RUNNING);
  592. return 0;
  593. }
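/**
 * bnx2fc_get_host_stats - fc_host statistics callback
 *
 * @shost: the Scsi_Host to report statistics for
 *
 * Issues a statistics request to the firmware, waits up to two seconds
 * for the completion and folds the firmware counters into the statistics
 * maintained by libfc before returning them.
 */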
  594. static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
  595. {
  596. struct fc_host_statistics *bnx2fc_stats;
  597. struct fc_lport *lport = shost_priv(shost);
  598. struct fcoe_port *port = lport_priv(lport);
  599. struct bnx2fc_interface *interface = port->priv;
  600. struct bnx2fc_hba *hba = interface->hba;
  601. struct fcoe_statistics_params *fw_stats;
  602. int rc = 0;
  603. fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
  604. if (!fw_stats)
  605. return NULL;
  606. mutex_lock(&hba->hba_stats_mutex);
  607. bnx2fc_stats = fc_get_host_stats(shost);
  608. init_completion(&hba->stat_req_done);
  609. if (bnx2fc_send_stat_req(hba))
  610. goto unlock_stats_mutex;
  611. rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
  612. if (!rc) {
  613. BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
  614. goto unlock_stats_mutex;
  615. }
  616. BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
  617. bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
  618. BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
  619. bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
  620. BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
  621. bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
  622. BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
  623. bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
  624. BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
  625. bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
  626. bnx2fc_stats->dumped_frames = 0;
  627. bnx2fc_stats->lip_count = 0;
  628. bnx2fc_stats->nos_count = 0;
  629. bnx2fc_stats->loss_of_sync_count = 0;
  630. bnx2fc_stats->loss_of_signal_count = 0;
  631. bnx2fc_stats->prim_seq_protocol_err_count = 0;
  632. memcpy(&hba->prev_stats, hba->stats_buffer,
  633. sizeof(struct fcoe_statistics_params));
  634. unlock_stats_mutex:
  635. mutex_unlock(&hba->hba_stats_mutex);
  636. return bnx2fc_stats;
  637. }
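/**
 * bnx2fc_shost_config - configure and register the Scsi_Host
 *
 * @lport: the local port backing the host
 * @dev: parent device for scsi_add_host()
 *
 * Sets the command length, LUN/target limits and transport template,
 * registers the host with the SCSI midlayer and fills in the symbolic
 * name reported through the FC transport class.
 */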
  638. static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
  639. {
  640. struct fcoe_port *port = lport_priv(lport);
  641. struct bnx2fc_interface *interface = port->priv;
  642. struct bnx2fc_hba *hba = interface->hba;
  643. struct Scsi_Host *shost = lport->host;
  644. int rc = 0;
  645. shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
  646. shost->max_lun = bnx2fc_max_luns;
  647. shost->max_id = BNX2FC_MAX_FCP_TGT;
  648. shost->max_channel = 0;
  649. if (lport->vport)
  650. shost->transportt = bnx2fc_vport_xport_template;
  651. else
  652. shost->transportt = bnx2fc_transport_template;
  653. /* Add the new host to SCSI-ml */
  654. rc = scsi_add_host(lport->host, dev);
  655. if (rc) {
  656. printk(KERN_ERR PFX "Error on scsi_add_host\n");
  657. return rc;
  658. }
  659. if (!lport->vport)
  660. fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
  661. snprintf(fc_host_symbolic_name(lport->host), 256,
  662. "%s (QLogic %s) v%s over %s",
  663. BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
  664. interface->netdev->name);
  665. return 0;
  666. }
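/**
 * bnx2fc_link_ok - libfc link state callback
 *
 * @lport: the local port to check
 *
 * Returns 0 and clears ADAPTER_STATE_LINK_DOWN when the physical netdev
 * is up with carrier; otherwise sets the flag and returns -1.
 */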
  667. static int bnx2fc_link_ok(struct fc_lport *lport)
  668. {
  669. struct fcoe_port *port = lport_priv(lport);
  670. struct bnx2fc_interface *interface = port->priv;
  671. struct bnx2fc_hba *hba = interface->hba;
  672. struct net_device *dev = hba->phys_dev;
  673. int rc = 0;
  674. if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
  675. clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  676. else {
  677. set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  678. rc = -1;
  679. }
  680. return rc;
  681. }
  682. /**
  683. * bnx2fc_get_link_state - get network link state
  684. *
  685. * @hba: adapter instance pointer
  686. *
  687. * updates adapter structure flag based on netdev state
  688. */
  689. void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
  690. {
  691. if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
  692. set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  693. else
  694. clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  695. }
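/**
 * bnx2fc_net_config - configure netdev-related properties of the lport
 *
 * @lport: the local port to configure
 * @netdev: the net device associated with this FCoE interface
 *
 * Requires the get_pauseparam ethtool op, sets the maximum frame size,
 * initializes the pending transmit queue and its timer, and derives the
 * WWNN/WWPN either from the netdev (NETDEV_FCOE_WWNN/WWPN) or from the
 * FIP controller MAC address.
 */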
  696. static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
  697. {
  698. struct bnx2fc_hba *hba;
  699. struct bnx2fc_interface *interface;
  700. struct fcoe_ctlr *ctlr;
  701. struct fcoe_port *port;
  702. u64 wwnn, wwpn;
  703. port = lport_priv(lport);
  704. interface = port->priv;
  705. ctlr = bnx2fc_to_ctlr(interface);
  706. hba = interface->hba;
  707. /* require support for get_pauseparam ethtool op. */
  708. if (!hba->phys_dev->ethtool_ops ||
  709. !hba->phys_dev->ethtool_ops->get_pauseparam)
  710. return -EOPNOTSUPP;
  711. if (fc_set_mfs(lport, BNX2FC_MFS))
  712. return -EINVAL;
  713. skb_queue_head_init(&port->fcoe_pending_queue);
  714. port->fcoe_pending_queue_active = 0;
  715. timer_setup(&port->timer, fcoe_queue_timer, 0);
  716. fcoe_link_speed_update(lport);
  717. if (!lport->vport) {
  718. if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
  719. wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
  720. 1, 0);
  721. BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
  722. fc_set_wwnn(lport, wwnn);
  723. if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
  724. wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
  725. 2, 0);
  726. BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
  727. fc_set_wwpn(lport, wwpn);
  728. }
  729. return 0;
  730. }
  731. static void bnx2fc_destroy_timer(struct timer_list *t)
  732. {
  733. struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer);
  734. printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
  735. "Destroy compl not received!!\n");
  736. set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
  737. wake_up_interruptible(&hba->destroy_wait);
  738. }
  739. /**
  740. * bnx2fc_indicate_netevent - Generic netdev event handler
  741. *
  742. * @context: adapter structure pointer
  743. * @event: event type
  744. * @vlan_id: VLAN id associated with this event
  745. *
  746. * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
  747. * NETDEV_CHANGE_MTU events. NETDEV_UNREGISTER is handled only for VLANs.
  748. */
  749. static void bnx2fc_indicate_netevent(void *context, unsigned long event,
  750. u16 vlan_id)
  751. {
  752. struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
  753. struct fcoe_ctlr_device *cdev;
  754. struct fc_lport *lport;
  755. struct fc_lport *vport;
  756. struct bnx2fc_interface *interface, *tmp;
  757. struct fcoe_ctlr *ctlr;
  758. int wait_for_upload = 0;
  759. u32 link_possible = 1;
  760. if (vlan_id != 0 && event != NETDEV_UNREGISTER)
  761. return;
  762. switch (event) {
  763. case NETDEV_UP:
  764. if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
  765. printk(KERN_ERR "indicate_netevent: "\
  766. "hba is not UP!!\n");
  767. break;
  768. case NETDEV_DOWN:
  769. clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
  770. clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
  771. link_possible = 0;
  772. break;
  773. case NETDEV_GOING_DOWN:
  774. set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
  775. link_possible = 0;
  776. break;
  777. case NETDEV_CHANGE:
  778. break;
  779. case NETDEV_UNREGISTER:
  780. if (!vlan_id)
  781. return;
  782. mutex_lock(&bnx2fc_dev_lock);
  783. list_for_each_entry_safe(interface, tmp, &if_list, list) {
  784. if (interface->hba == hba &&
  785. interface->vlan_id == (vlan_id & VLAN_VID_MASK))
  786. __bnx2fc_destroy(interface);
  787. }
  788. mutex_unlock(&bnx2fc_dev_lock);
  789. /* Ensure ALL destroy work has been completed before return */
  790. flush_workqueue(bnx2fc_wq);
  791. return;
  792. default:
  793. return;
  794. }
  795. mutex_lock(&bnx2fc_dev_lock);
  796. list_for_each_entry(interface, &if_list, list) {
  797. if (interface->hba != hba)
  798. continue;
  799. ctlr = bnx2fc_to_ctlr(interface);
  800. lport = ctlr->lp;
  801. BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
  802. interface->netdev->name, event);
  803. fcoe_link_speed_update(lport);
  804. cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
  805. if (link_possible && !bnx2fc_link_ok(lport)) {
  806. switch (cdev->enabled) {
  807. case FCOE_CTLR_DISABLED:
  808. pr_info("Link up while interface is disabled.\n");
  809. break;
  810. case FCOE_CTLR_ENABLED:
  811. case FCOE_CTLR_UNUSED:
  812. /* Reset max recv frame size to default */
  813. fc_set_mfs(lport, BNX2FC_MFS);
  814. /*
  815. * ctlr link up will only be handled during
  816. * enable to avoid sending discovery
  817. * solicitation on a stale vlan
  818. */
  819. if (interface->enabled)
  820. fcoe_ctlr_link_up(ctlr);
  821. }
  822. } else if (fcoe_ctlr_link_down(ctlr)) {
  823. switch (cdev->enabled) {
  824. case FCOE_CTLR_DISABLED:
  825. pr_info("Link down while interface is disabled.\n");
  826. break;
  827. case FCOE_CTLR_ENABLED:
  828. case FCOE_CTLR_UNUSED:
  829. mutex_lock(&lport->lp_mutex);
  830. list_for_each_entry(vport, &lport->vports, list)
  831. fc_host_port_type(vport->host) =
  832. FC_PORTTYPE_UNKNOWN;
  833. mutex_unlock(&lport->lp_mutex);
  834. fc_host_port_type(lport->host) =
  835. FC_PORTTYPE_UNKNOWN;
  836. per_cpu_ptr(lport->stats,
  837. get_cpu())->LinkFailureCount++;
  838. put_cpu();
  839. fcoe_clean_pending_queue(lport);
  840. wait_for_upload = 1;
  841. }
  842. }
  843. }
  844. mutex_unlock(&bnx2fc_dev_lock);
  845. if (wait_for_upload) {
  846. clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
  847. init_waitqueue_head(&hba->shutdown_wait);
  848. BNX2FC_MISC_DBG("indicate_netevent "
  849. "num_ofld_sess = %d\n",
  850. hba->num_ofld_sess);
  851. hba->wait_for_link_down = 1;
  852. wait_event_interruptible(hba->shutdown_wait,
  853. (hba->num_ofld_sess == 0));
  854. BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
  855. hba->num_ofld_sess);
  856. hba->wait_for_link_down = 0;
  857. if (signal_pending(current))
  858. flush_signals(current);
  859. }
  860. }
  861. static int bnx2fc_libfc_config(struct fc_lport *lport)
  862. {
  863. /* Set the function pointers set by bnx2fc driver */
  864. memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
  865. sizeof(struct libfc_function_template));
  866. fc_elsct_init(lport);
  867. fc_exch_init(lport);
  868. fc_disc_init(lport);
  869. fc_disc_config(lport, lport);
  870. return 0;
  871. }
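/**
 * bnx2fc_em_config - allocate the libfc exchange manager for L2 traffic
 *
 * @lport: the local port
 * @hba: adapter instance
 *
 * Non-offloaded exchanges use XIDs above hba->max_xid; the size of the
 * range depends on the number of CPUs in the system.
 */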
  872. static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
  873. {
  874. int fcoe_min_xid, fcoe_max_xid;
  875. fcoe_min_xid = hba->max_xid + 1;
  876. if (nr_cpu_ids <= 2)
  877. fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
  878. else
  879. fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
  880. if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid,
  881. fcoe_max_xid, NULL)) {
  882. printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
  883. return -ENOMEM;
  884. }
  885. return 0;
  886. }
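/**
 * bnx2fc_lport_config - set the libfc defaults for a new lport
 *
 * @lport: the local port to configure
 *
 * Initializes retry counts, E_D_TOV/R_A_TOV, FCP service parameters,
 * NPIV support and the RNID general topology data, allocates the lport
 * statistics and finishes with fc_lport_config().
 */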
  887. static int bnx2fc_lport_config(struct fc_lport *lport)
  888. {
  889. lport->link_up = 0;
  890. lport->qfull = 0;
  891. lport->max_retry_count = BNX2FC_MAX_RETRY_CNT;
  892. lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT;
  893. lport->e_d_tov = 2 * 1000;
  894. lport->r_a_tov = 10 * 1000;
  895. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  896. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  897. lport->does_npiv = 1;
  898. memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
  899. lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
  900. /* alloc stats structure */
  901. if (fc_lport_init_stats(lport))
  902. return -ENOMEM;
  903. /* Finish fc_lport configuration */
  904. fc_lport_config(lport);
  905. return 0;
  906. }
  907. /**
  908. * bnx2fc_fip_recv - handle a received FIP frame.
  909. *
  910. * @skb: the received skb
  911. * @dev: associated &net_device
  912. * @ptype: the &packet_type structure which was used to register this handler.
  913. * @orig_dev: original receive &net_device, in case @dev is a bond.
  914. *
  915. * Returns: 0 for success
  916. */
  917. static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
  918. struct packet_type *ptype,
  919. struct net_device *orig_dev)
  920. {
  921. struct bnx2fc_interface *interface;
  922. struct fcoe_ctlr *ctlr;
  923. interface = container_of(ptype, struct bnx2fc_interface,
  924. fip_packet_type);
  925. ctlr = bnx2fc_to_ctlr(interface);
  926. fcoe_ctlr_recv(ctlr, skb);
  927. return 0;
  928. }
  929. /**
  930. * bnx2fc_update_src_mac - Update the FCoE data source MAC address.
  931. *
  932. * @lport: libfc local port
  933. * @addr: assigned unicast MAC address to use as the FCoE source address
  934. *
  935. * Stores the address in the fcoe_port so it is used as the source MAC
  936. * for outgoing FCoE frames. bnx2fc does not program additional Ethernet
  937. * MAC filters here.
  938. */
  939. static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
  940. {
  941. struct fcoe_port *port = lport_priv(lport);
  942. memcpy(port->data_src_addr, addr, ETH_ALEN);
  943. }
  944. /**
  945. * bnx2fc_get_src_mac - return the ethernet source address for an lport
  946. *
  947. * @lport: libfc port
  948. */
  949. static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
  950. {
  951. struct fcoe_port *port;
  952. port = (struct fcoe_port *)lport_priv(lport);
  953. return port->data_src_addr;
  954. }
  955. /**
  956. * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
  957. *
  958. * @fip: FCoE controller.
  959. * @skb: FIP Packet.
  960. */
  961. static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  962. {
  963. struct fip_header *fiph;
  964. struct ethhdr *eth_hdr;
  965. u16 op;
  966. u8 sub;
  967. fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
  968. eth_hdr = (struct ethhdr *)skb_mac_header(skb);
  969. op = ntohs(fiph->fip_op);
  970. sub = fiph->fip_subcode;
  971. if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka)
  972. BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n",
  973. eth_hdr->h_source, eth_hdr->h_dest);
  974. skb->dev = bnx2fc_from_ctlr(fip)->netdev;
  975. dev_queue_xmit(skb);
  976. }
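/**
 * bnx2fc_vport_create - create an NPIV port on an existing interface
 *
 * @vport: the fc_vport being created by the FC transport class
 * @disabled: true if the vport should start in the disabled state
 *
 * Validates the WWPN, requires firmware initialization to be complete,
 * allocates an lport for the vport and either leaves it disabled or
 * starts the fabric login.
 */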
  977. static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
  978. {
  979. struct Scsi_Host *shost = vport_to_shost(vport);
  980. struct fc_lport *n_port = shost_priv(shost);
  981. struct fcoe_port *port = lport_priv(n_port);
  982. struct bnx2fc_interface *interface = port->priv;
  983. struct net_device *netdev = interface->netdev;
  984. struct fc_lport *vn_port;
  985. int rc;
  986. char buf[32];
  987. rc = fcoe_validate_vport_create(vport);
  988. if (rc) {
  989. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  990. printk(KERN_ERR PFX "Failed to create vport, "
  991. "WWPN (0x%s) already exists\n",
  992. buf);
  993. return rc;
  994. }
  995. if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
  996. printk(KERN_ERR PFX "vn ports cannot be created on"
  997. "this interface\n");
  998. return -EIO;
  999. }
  1000. rtnl_lock();
  1001. mutex_lock(&bnx2fc_dev_lock);
  1002. vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
  1003. mutex_unlock(&bnx2fc_dev_lock);
  1004. rtnl_unlock();
  1005. if (!vn_port) {
  1006. printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
  1007. netdev->name);
  1008. return -EIO;
  1009. }
  1010. if (bnx2fc_devloss_tmo)
  1011. fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo;
  1012. if (disabled) {
  1013. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1014. } else {
  1015. vn_port->boot_time = jiffies;
  1016. fc_lport_init(vn_port);
  1017. fc_fabric_login(vn_port);
  1018. fc_vport_setlink(vn_port);
  1019. }
  1020. return 0;
  1021. }
  1022. static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport)
  1023. {
  1024. struct bnx2fc_lport *blport, *tmp;
  1025. spin_lock_bh(&hba->hba_lock);
  1026. list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
  1027. if (blport->lport == lport) {
  1028. list_del(&blport->list);
  1029. kfree(blport);
  1030. }
  1031. }
  1032. spin_unlock_bh(&hba->hba_lock);
  1033. }
  1034. static int bnx2fc_vport_destroy(struct fc_vport *vport)
  1035. {
  1036. struct Scsi_Host *shost = vport_to_shost(vport);
  1037. struct fc_lport *n_port = shost_priv(shost);
  1038. struct fc_lport *vn_port = vport->dd_data;
  1039. struct fcoe_port *port = lport_priv(vn_port);
  1040. struct bnx2fc_interface *interface = port->priv;
  1041. struct fc_lport *v_port;
  1042. bool found = false;
  1043. mutex_lock(&n_port->lp_mutex);
  1044. list_for_each_entry(v_port, &n_port->vports, list)
  1045. if (v_port->vport == vport) {
  1046. found = true;
  1047. break;
  1048. }
  1049. if (!found) {
  1050. mutex_unlock(&n_port->lp_mutex);
  1051. return -ENOENT;
  1052. }
  1053. list_del(&vn_port->list);
  1054. mutex_unlock(&n_port->lp_mutex);
  1055. bnx2fc_free_vport(interface->hba, port->lport);
  1056. bnx2fc_port_shutdown(port->lport);
  1057. bnx2fc_interface_put(interface);
  1058. queue_work(bnx2fc_wq, &port->destroy_work);
  1059. return 0;
  1060. }
  1061. static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
  1062. {
  1063. struct fc_lport *lport = vport->dd_data;
  1064. if (disable) {
  1065. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1066. fc_fabric_logoff(lport);
  1067. } else {
  1068. lport->boot_time = jiffies;
  1069. fc_fabric_login(lport);
  1070. fc_vport_setlink(lport);
  1071. }
  1072. return 0;
  1073. }
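/**
 * bnx2fc_interface_setup - pick the SAN MAC and register packet handlers
 *
 * @interface: the bnx2fc interface being set up
 *
 * Selects the NETDEV_HW_ADDR_T_SAN address of the physical device as the
 * FIP controller source MAC and registers the FIP and FCoE packet types
 * on the (possibly VLAN) net device.  Returns -ENODEV if no SAN MAC is
 * found.
 */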
  1074. static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
  1075. {
  1076. struct net_device *netdev = interface->netdev;
  1077. struct net_device *physdev = interface->hba->phys_dev;
  1078. struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
  1079. struct netdev_hw_addr *ha;
  1080. int sel_san_mac = 0;
  1081. /* setup Source MAC Address */
  1082. rcu_read_lock();
  1083. for_each_dev_addr(physdev, ha) {
  1084. BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
  1085. ha->type);
  1086. printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
  1087. ha->addr[1], ha->addr[2], ha->addr[3],
  1088. ha->addr[4], ha->addr[5]);
  1089. if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
  1090. (is_valid_ether_addr(ha->addr))) {
  1091. memcpy(ctlr->ctl_src_addr, ha->addr,
  1092. ETH_ALEN);
  1093. sel_san_mac = 1;
  1094. BNX2FC_MISC_DBG("Found SAN MAC\n");
  1095. }
  1096. }
  1097. rcu_read_unlock();
  1098. if (!sel_san_mac)
  1099. return -ENODEV;
  1100. interface->fip_packet_type.func = bnx2fc_fip_recv;
  1101. interface->fip_packet_type.type = htons(ETH_P_FIP);
  1102. interface->fip_packet_type.dev = netdev;
  1103. dev_add_pack(&interface->fip_packet_type);
  1104. interface->fcoe_packet_type.func = bnx2fc_rcv;
  1105. interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
  1106. interface->fcoe_packet_type.dev = netdev;
  1107. dev_add_pack(&interface->fcoe_packet_type);
  1108. return 0;
  1109. }
  1110. static int bnx2fc_attach_transport(void)
  1111. {
  1112. bnx2fc_transport_template =
  1113. fc_attach_transport(&bnx2fc_transport_function);
  1114. if (bnx2fc_transport_template == NULL) {
  1115. printk(KERN_ERR PFX "Failed to attach FC transport\n");
  1116. return -ENODEV;
  1117. }
  1118. bnx2fc_vport_xport_template =
  1119. fc_attach_transport(&bnx2fc_vport_xport_function);
  1120. if (bnx2fc_vport_xport_template == NULL) {
  1121. printk(KERN_ERR PFX
  1122. "Failed to attach FC transport for vport\n");
  1123. fc_release_transport(bnx2fc_transport_template);
  1124. bnx2fc_transport_template = NULL;
  1125. return -ENODEV;
  1126. }
  1127. return 0;
  1128. }
  1129. static void bnx2fc_release_transport(void)
  1130. {
  1131. fc_release_transport(bnx2fc_transport_template);
  1132. fc_release_transport(bnx2fc_vport_xport_template);
  1133. bnx2fc_transport_template = NULL;
  1134. bnx2fc_vport_xport_template = NULL;
  1135. }
  1136. static void bnx2fc_interface_release(struct kref *kref)
  1137. {
  1138. struct fcoe_ctlr_device *ctlr_dev;
  1139. struct bnx2fc_interface *interface;
  1140. struct fcoe_ctlr *ctlr;
  1141. struct net_device *netdev;
  1142. interface = container_of(kref, struct bnx2fc_interface, kref);
  1143. BNX2FC_MISC_DBG("Interface is being released\n");
  1144. ctlr = bnx2fc_to_ctlr(interface);
  1145. ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
  1146. netdev = interface->netdev;
  1147. /* tear-down FIP controller */
  1148. if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
  1149. fcoe_ctlr_destroy(ctlr);
  1150. fcoe_ctlr_device_delete(ctlr_dev);
  1151. dev_put(netdev);
  1152. module_put(THIS_MODULE);
  1153. }
  1154. static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
  1155. {
  1156. kref_get(&interface->kref);
  1157. }
  1158. static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
  1159. {
  1160. kref_put(&interface->kref, bnx2fc_interface_release);
  1161. }
  1162. static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
  1163. {
  1164. /* Free the command manager */
  1165. if (hba->cmd_mgr) {
  1166. bnx2fc_cmd_mgr_free(hba->cmd_mgr);
  1167. hba->cmd_mgr = NULL;
  1168. }
  1169. kfree(hba->tgt_ofld_list);
  1170. bnx2fc_unbind_pcidev(hba);
  1171. kfree(hba);
  1172. }
  1173. /**
  1174. * bnx2fc_hba_create - create a new bnx2fc hba
  1175. *
  1176. * @cnic: pointer to cnic device
  1177. *
  1178. * Creates a new FCoE hba on the given device.
  1179. *
  1180. */
  1181. static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
  1182. {
  1183. struct bnx2fc_hba *hba;
  1184. struct fcoe_capabilities *fcoe_cap;
  1185. int rc;
  1186. hba = kzalloc(sizeof(*hba), GFP_KERNEL);
  1187. if (!hba) {
  1188. printk(KERN_ERR PFX "Unable to allocate hba structure\n");
  1189. return NULL;
  1190. }
  1191. spin_lock_init(&hba->hba_lock);
  1192. mutex_init(&hba->hba_mutex);
  1193. mutex_init(&hba->hba_stats_mutex);
  1194. hba->cnic = cnic;
  1195. hba->max_tasks = cnic->max_fcoe_exchanges;
  1196. hba->elstm_xids = (hba->max_tasks / 2);
  1197. hba->max_outstanding_cmds = hba->elstm_xids;
  1198. hba->max_xid = (hba->max_tasks - 1);
  1199. rc = bnx2fc_bind_pcidev(hba);
  1200. if (rc) {
  1201. printk(KERN_ERR PFX "create_adapter: bind error\n");
  1202. goto bind_err;
  1203. }
  1204. hba->phys_dev = cnic->netdev;
  1205. hba->next_conn_id = 0;
  1206. hba->tgt_ofld_list =
  1207. kcalloc(BNX2FC_NUM_MAX_SESS, sizeof(struct bnx2fc_rport *),
  1208. GFP_KERNEL);
  1209. if (!hba->tgt_ofld_list) {
  1210. printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
  1211. goto tgtofld_err;
  1212. }
  1213. hba->num_ofld_sess = 0;
  1214. hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
  1215. if (!hba->cmd_mgr) {
  1216. printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
  1217. goto cmgr_err;
  1218. }
  1219. fcoe_cap = &hba->fcoe_cap;
  1220. fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
  1221. FCOE_IOS_PER_CONNECTION_SHIFT;
  1222. fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
  1223. FCOE_LOGINS_PER_PORT_SHIFT;
  1224. fcoe_cap->capability2 = hba->max_outstanding_cmds <<
  1225. FCOE_NUMBER_OF_EXCHANGES_SHIFT;
  1226. fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
  1227. FCOE_NPIV_WWN_PER_PORT_SHIFT;
  1228. fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
  1229. FCOE_TARGETS_SUPPORTED_SHIFT;
  1230. fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
  1231. FCOE_OUTSTANDING_COMMANDS_SHIFT;
  1232. fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
  1233. init_waitqueue_head(&hba->shutdown_wait);
  1234. init_waitqueue_head(&hba->destroy_wait);
  1235. INIT_LIST_HEAD(&hba->vports);
  1236. return hba;
  1237. cmgr_err:
  1238. kfree(hba->tgt_ofld_list);
  1239. tgtofld_err:
  1240. bnx2fc_unbind_pcidev(hba);
  1241. bind_err:
  1242. kfree(hba);
  1243. return NULL;
  1244. }
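/*
 * Exchange-ID sizing in bnx2fc_hba_create() above, worked through with an
 * illustrative (not guaranteed) value of cnic->max_fcoe_exchanges == 4096:
 * max_tasks = 4096, elstm_xids = 2048, max_outstanding_cmds = 2048 and
 * max_xid = 4095.  bnx2fc_if_create() later copies max_outstanding_cmds
 * into the SCSI host template's can_queue.
 */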
  1245. static struct bnx2fc_interface *
  1246. bnx2fc_interface_create(struct bnx2fc_hba *hba,
  1247. struct net_device *netdev,
  1248. enum fip_mode fip_mode)
  1249. {
  1250. struct fcoe_ctlr_device *ctlr_dev;
  1251. struct bnx2fc_interface *interface;
  1252. struct fcoe_ctlr *ctlr;
  1253. int size;
  1254. int rc = 0;
  1255. size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
  1256. ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
  1257. size);
  1258. if (!ctlr_dev) {
  1259. printk(KERN_ERR PFX "Unable to allocate interface structure\n");
  1260. return NULL;
  1261. }
  1262. ctlr = fcoe_ctlr_device_priv(ctlr_dev);
  1263. ctlr->cdev = ctlr_dev;
  1264. interface = fcoe_ctlr_priv(ctlr);
  1265. dev_hold(netdev);
  1266. kref_init(&interface->kref);
  1267. interface->hba = hba;
  1268. interface->netdev = netdev;
  1269. /* Initialize FIP */
  1270. fcoe_ctlr_init(ctlr, fip_mode);
  1271. ctlr->send = bnx2fc_fip_send;
  1272. ctlr->update_mac = bnx2fc_update_src_mac;
  1273. ctlr->get_src_addr = bnx2fc_get_src_mac;
  1274. set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
  1275. rc = bnx2fc_interface_setup(interface);
  1276. if (!rc)
  1277. return interface;
  1278. fcoe_ctlr_destroy(ctlr);
  1279. dev_put(netdev);
  1280. fcoe_ctlr_device_delete(ctlr_dev);
  1281. return NULL;
  1282. }
/**
 * bnx2fc_if_create - Create FCoE instance on a given interface
 *
 * @interface: FCoE interface to create a local port on
 * @parent: Device pointer to be the parent in sysfs for the SCSI host
 * @npiv: Indicates whether the port is a vport (NPIV port) or not
 *
 * Creates an fc_lport instance and a Scsi_Host instance and configures them.
 *
 * Returns: Allocated fc_lport on success, NULL on failure
 */
  1294. static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
  1295. struct device *parent, int npiv)
  1296. {
  1297. struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
  1298. struct fc_lport *lport, *n_port;
  1299. struct fcoe_port *port;
  1300. struct Scsi_Host *shost;
  1301. struct fc_vport *vport = dev_to_vport(parent);
  1302. struct bnx2fc_lport *blport;
  1303. struct bnx2fc_hba *hba = interface->hba;
  1304. int rc = 0;
  1305. blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
  1306. if (!blport) {
  1307. BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
  1308. return NULL;
  1309. }
  1310. /* Allocate Scsi_Host structure */
  1311. bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
  1312. if (!npiv)
  1313. lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
  1314. else
  1315. lport = libfc_vport_create(vport, sizeof(*port));
  1316. if (!lport) {
  1317. printk(KERN_ERR PFX "could not allocate scsi host structure\n");
  1318. goto free_blport;
  1319. }
  1320. shost = lport->host;
  1321. port = lport_priv(lport);
  1322. port->lport = lport;
  1323. port->priv = interface;
  1324. port->get_netdev = bnx2fc_netdev;
  1325. INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
  1326. /* Configure fcoe_port */
  1327. rc = bnx2fc_lport_config(lport);
  1328. if (rc)
  1329. goto lp_config_err;
  1330. if (npiv) {
  1331. printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
  1332. vport->node_name, vport->port_name);
  1333. fc_set_wwnn(lport, vport->node_name);
  1334. fc_set_wwpn(lport, vport->port_name);
  1335. }
  1336. /* Configure netdev and networking properties of the lport */
  1337. rc = bnx2fc_net_config(lport, interface->netdev);
  1338. if (rc) {
  1339. printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
  1340. goto lp_config_err;
  1341. }
  1342. rc = bnx2fc_shost_config(lport, parent);
  1343. if (rc) {
  1344. printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
  1345. interface->netdev->name);
  1346. goto lp_config_err;
  1347. }
  1348. /* Initialize the libfc library */
  1349. rc = bnx2fc_libfc_config(lport);
  1350. if (rc) {
  1351. printk(KERN_ERR PFX "Couldn't configure libfc\n");
  1352. goto shost_err;
  1353. }
  1354. fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
  1355. if (bnx2fc_devloss_tmo)
  1356. fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo;
  1357. /* Allocate exchange manager */
  1358. if (!npiv)
  1359. rc = bnx2fc_em_config(lport, hba);
  1360. else {
  1361. shost = vport_to_shost(vport);
  1362. n_port = shost_priv(shost);
  1363. rc = fc_exch_mgr_list_clone(n_port, lport);
  1364. }
  1365. if (rc) {
  1366. printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
  1367. goto shost_err;
  1368. }
  1369. bnx2fc_interface_get(interface);
  1370. spin_lock_bh(&hba->hba_lock);
  1371. blport->lport = lport;
  1372. list_add_tail(&blport->list, &hba->vports);
  1373. spin_unlock_bh(&hba->hba_lock);
  1374. return lport;
  1375. shost_err:
  1376. scsi_remove_host(shost);
  1377. lp_config_err:
  1378. scsi_host_put(lport->host);
  1379. free_blport:
  1380. kfree(blport);
  1381. return NULL;
  1382. }
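/*
 * Exchange-manager ownership in bnx2fc_if_create() above: only the physical
 * N_Port allocates its own exchange manager via bnx2fc_em_config(); NPIV
 * vports clone the parent lport's exchange manager list with
 * fc_exch_mgr_list_clone().  This is why the fc_exch_mgr_free() call in
 * bnx2fc_if_destroy() is effectively a no-op for vports.
 */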
  1383. static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
  1384. {
/* Don't listen for Ethernet packets anymore */
  1386. __dev_remove_pack(&interface->fcoe_packet_type);
  1387. __dev_remove_pack(&interface->fip_packet_type);
  1388. synchronize_net();
  1389. }
  1390. static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
  1391. {
  1392. struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
  1393. struct fc_lport *lport = ctlr->lp;
  1394. struct fcoe_port *port = lport_priv(lport);
  1395. struct bnx2fc_hba *hba = interface->hba;
  1396. /* Stop the transmit retry timer */
  1397. del_timer_sync(&port->timer);
  1398. /* Free existing transmit skbs */
  1399. fcoe_clean_pending_queue(lport);
  1400. bnx2fc_net_cleanup(interface);
  1401. bnx2fc_free_vport(hba, lport);
  1402. }
  1403. static void bnx2fc_if_destroy(struct fc_lport *lport)
  1404. {
  1405. /* Free queued packets for the receive thread */
  1406. bnx2fc_clean_rx_queue(lport);
  1407. /* Detach from scsi-ml */
  1408. fc_remove_host(lport->host);
  1409. scsi_remove_host(lport->host);
/*
 * Note that only the physical lport will have the exchange manager.
 * For vports, this call is a no-op.
 */
  1414. fc_exch_mgr_free(lport);
  1415. /* Free memory used by statistical counters */
  1416. fc_lport_free_stats(lport);
  1417. /* Release Scsi_Host */
  1418. scsi_host_put(lport->host);
  1419. }
  1420. static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
  1421. {
  1422. struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
  1423. struct fc_lport *lport = ctlr->lp;
  1424. struct fcoe_port *port = lport_priv(lport);
  1425. bnx2fc_interface_cleanup(interface);
  1426. bnx2fc_stop(interface);
  1427. list_del(&interface->list);
  1428. bnx2fc_interface_put(interface);
  1429. queue_work(bnx2fc_wq, &port->destroy_work);
  1430. }
/**
 * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
 *
 * @netdev: The net_device object of the Ethernet interface to be destroyed
 *
 * Called from sysfs via the fcoe transport.
 *
 * Returns: 0 for success
 */
  1441. static int bnx2fc_destroy(struct net_device *netdev)
  1442. {
  1443. struct bnx2fc_interface *interface = NULL;
  1444. struct workqueue_struct *timer_work_queue;
  1445. struct fcoe_ctlr *ctlr;
  1446. int rc = 0;
  1447. rtnl_lock();
  1448. mutex_lock(&bnx2fc_dev_lock);
  1449. interface = bnx2fc_interface_lookup(netdev);
  1450. ctlr = bnx2fc_to_ctlr(interface);
  1451. if (!interface || !ctlr->lp) {
  1452. rc = -ENODEV;
  1453. printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
  1454. goto netdev_err;
  1455. }
  1456. timer_work_queue = interface->timer_work_queue;
  1457. __bnx2fc_destroy(interface);
  1458. destroy_workqueue(timer_work_queue);
  1459. netdev_err:
  1460. mutex_unlock(&bnx2fc_dev_lock);
  1461. rtnl_unlock();
  1462. return rc;
  1463. }
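/*
 * bnx2fc_destroy() is wired into bnx2fc_transport.destroy below, so it is
 * normally reached from userspace through the libfcoe transport layer, for
 * example via the deprecated libfcoe 'destroy' module parameter (interface
 * name and path shown only as an illustration):
 *
 *   echo eth2.100 > /sys/module/libfcoe/parameters/destroy
 */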
  1464. static void bnx2fc_destroy_work(struct work_struct *work)
  1465. {
  1466. struct fcoe_port *port;
  1467. struct fc_lport *lport;
  1468. port = container_of(work, struct fcoe_port, destroy_work);
  1469. lport = port->lport;
  1470. BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
  1471. bnx2fc_if_destroy(lport);
  1472. }
  1473. static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
  1474. {
  1475. bnx2fc_free_fw_resc(hba);
  1476. bnx2fc_free_task_ctx(hba);
  1477. }
/**
 * bnx2fc_bind_adapter_devices - set up task context and firmware resources
 * for the adapter
 *
 * @hba: Adapter instance
 */
  1484. static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
  1485. {
  1486. if (bnx2fc_setup_task_ctx(hba))
  1487. goto mem_err;
  1488. if (bnx2fc_setup_fw_resc(hba))
  1489. goto mem_err;
  1490. return 0;
  1491. mem_err:
  1492. bnx2fc_unbind_adapter_devices(hba);
  1493. return -ENOMEM;
  1494. }
  1495. static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
  1496. {
  1497. struct cnic_dev *cnic;
  1498. struct pci_dev *pdev;
  1499. if (!hba->cnic) {
  1500. printk(KERN_ERR PFX "cnic is NULL\n");
  1501. return -ENODEV;
  1502. }
  1503. cnic = hba->cnic;
  1504. pdev = hba->pcidev = cnic->pcidev;
  1505. if (!hba->pcidev)
  1506. return -ENODEV;
  1507. switch (pdev->device) {
  1508. case PCI_DEVICE_ID_NX2_57710:
  1509. strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
  1510. break;
  1511. case PCI_DEVICE_ID_NX2_57711:
  1512. strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
  1513. break;
  1514. case PCI_DEVICE_ID_NX2_57712:
  1515. case PCI_DEVICE_ID_NX2_57712_MF:
  1516. case PCI_DEVICE_ID_NX2_57712_VF:
  1517. strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
  1518. break;
  1519. case PCI_DEVICE_ID_NX2_57800:
  1520. case PCI_DEVICE_ID_NX2_57800_MF:
  1521. case PCI_DEVICE_ID_NX2_57800_VF:
  1522. strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
  1523. break;
  1524. case PCI_DEVICE_ID_NX2_57810:
  1525. case PCI_DEVICE_ID_NX2_57810_MF:
  1526. case PCI_DEVICE_ID_NX2_57810_VF:
  1527. strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
  1528. break;
  1529. case PCI_DEVICE_ID_NX2_57840:
  1530. case PCI_DEVICE_ID_NX2_57840_MF:
  1531. case PCI_DEVICE_ID_NX2_57840_VF:
  1532. case PCI_DEVICE_ID_NX2_57840_2_20:
  1533. case PCI_DEVICE_ID_NX2_57840_4_10:
  1534. strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
  1535. break;
  1536. default:
  1537. pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
  1538. break;
  1539. }
  1540. pci_dev_get(hba->pcidev);
  1541. return 0;
  1542. }
  1543. static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
  1544. {
  1545. if (hba->pcidev) {
  1546. hba->chip_num[0] = '\0';
  1547. pci_dev_put(hba->pcidev);
  1548. }
  1549. hba->pcidev = NULL;
  1550. }
/**
 * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
 *
 * @handle: transport handle pointing to adapter structure
 */
  1556. static int bnx2fc_ulp_get_stats(void *handle)
  1557. {
  1558. struct bnx2fc_hba *hba = handle;
  1559. struct cnic_dev *cnic;
  1560. struct fcoe_stats_info *stats_addr;
  1561. if (!hba)
  1562. return -EINVAL;
  1563. cnic = hba->cnic;
  1564. stats_addr = &cnic->stats_addr->fcoe_stat;
  1565. if (!stats_addr)
  1566. return -EINVAL;
  1567. strncpy(stats_addr->version, BNX2FC_VERSION,
  1568. sizeof(stats_addr->version));
  1569. stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
  1570. stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
  1571. return 0;
  1572. }
/**
 * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
 *
 * @handle: transport handle pointing to adapter structure
 *
 * This function maps the adapter structure to the pcidev structure and
 * initiates the firmware handshake to enable/initialize on-chip FCoE
 * components.
 * This bnx2fc-cnic interface API callback is used after the following
 * conditions are met:
 * a) the underlying network interface is up (marked by the NETDEV_UP
 *    event from the netdev)
 * b) the bnx2fc adapter structure is registered.
 */
  1586. static void bnx2fc_ulp_start(void *handle)
  1587. {
  1588. struct bnx2fc_hba *hba = handle;
  1589. struct bnx2fc_interface *interface;
  1590. struct fcoe_ctlr *ctlr;
  1591. struct fc_lport *lport;
  1592. mutex_lock(&bnx2fc_dev_lock);
  1593. if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
  1594. bnx2fc_fw_init(hba);
  1595. BNX2FC_MISC_DBG("bnx2fc started.\n");
  1596. list_for_each_entry(interface, &if_list, list) {
  1597. if (interface->hba == hba) {
  1598. ctlr = bnx2fc_to_ctlr(interface);
  1599. lport = ctlr->lp;
  1600. /* Kick off Fabric discovery*/
  1601. printk(KERN_ERR PFX "ulp_init: start discovery\n");
  1602. lport->tt.frame_send = bnx2fc_xmit;
  1603. bnx2fc_start_disc(interface);
  1604. }
  1605. }
  1606. mutex_unlock(&bnx2fc_dev_lock);
  1607. }
  1608. static void bnx2fc_port_shutdown(struct fc_lport *lport)
  1609. {
  1610. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  1611. fc_fabric_logoff(lport);
  1612. fc_lport_destroy(lport);
  1613. }
  1614. static void bnx2fc_stop(struct bnx2fc_interface *interface)
  1615. {
  1616. struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
  1617. struct fc_lport *lport;
  1618. struct fc_lport *vport;
  1619. if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
  1620. return;
  1621. lport = ctlr->lp;
  1622. bnx2fc_port_shutdown(lport);
  1623. mutex_lock(&lport->lp_mutex);
  1624. list_for_each_entry(vport, &lport->vports, list)
  1625. fc_host_port_type(vport->host) =
  1626. FC_PORTTYPE_UNKNOWN;
  1627. mutex_unlock(&lport->lp_mutex);
  1628. fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
  1629. fcoe_ctlr_link_down(ctlr);
  1630. fcoe_clean_pending_queue(lport);
  1631. }
  1632. static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
  1633. {
  1634. #define BNX2FC_INIT_POLL_TIME (1000 / HZ)
  1635. int rc = -1;
  1636. int i = HZ;
  1637. rc = bnx2fc_bind_adapter_devices(hba);
  1638. if (rc) {
  1639. printk(KERN_ALERT PFX
  1640. "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
  1641. goto err_out;
  1642. }
  1643. rc = bnx2fc_send_fw_fcoe_init_msg(hba);
  1644. if (rc) {
  1645. printk(KERN_ALERT PFX
  1646. "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
  1647. goto err_unbind;
  1648. }
  1649. /*
  1650. * Wait until the adapter init message is complete, and adapter
  1651. * state is UP.
  1652. */
  1653. while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
  1654. msleep(BNX2FC_INIT_POLL_TIME);
  1655. if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
  1656. printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
  1657. "Ignoring...\n",
  1658. hba->cnic->netdev->name);
  1659. rc = -1;
  1660. goto err_unbind;
  1661. }
  1662. set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
  1663. return 0;
  1664. err_unbind:
  1665. bnx2fc_unbind_adapter_devices(hba);
  1666. err_out:
  1667. return rc;
  1668. }
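/*
 * The init wait in bnx2fc_fw_init() above is bounded to roughly one second:
 * the loop runs at most HZ iterations and each iteration sleeps
 * BNX2FC_INIT_POLL_TIME = (1000 / HZ) milliseconds, i.e. about
 * HZ * (1000 / HZ) ~= 1000 ms, ignoring msleep() rounding to jiffies.
 */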
  1669. static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
  1670. {
  1671. if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
  1672. if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
  1673. timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer,
  1674. 0);
  1675. hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
  1676. jiffies;
  1677. add_timer(&hba->destroy_timer);
  1678. wait_event_interruptible(hba->destroy_wait,
  1679. test_bit(BNX2FC_FLAG_DESTROY_CMPL,
  1680. &hba->flags));
  1681. clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
  1682. /* This should never happen */
  1683. if (signal_pending(current))
  1684. flush_signals(current);
  1685. del_timer_sync(&hba->destroy_timer);
  1686. }
  1687. bnx2fc_unbind_adapter_devices(hba);
  1688. }
  1689. }
/**
 * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
 *
 * @handle: transport handle pointing to adapter structure
 *
 * The driver checks whether the adapter is already in shutdown mode;
 * if not, it starts the shutdown process.
 */
  1698. static void bnx2fc_ulp_stop(void *handle)
  1699. {
  1700. struct bnx2fc_hba *hba = handle;
  1701. struct bnx2fc_interface *interface;
  1702. printk(KERN_ERR "ULP_STOP\n");
  1703. mutex_lock(&bnx2fc_dev_lock);
  1704. if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
  1705. goto exit;
  1706. list_for_each_entry(interface, &if_list, list) {
  1707. if (interface->hba == hba)
  1708. bnx2fc_stop(interface);
  1709. }
  1710. BUG_ON(hba->num_ofld_sess != 0);
  1711. mutex_lock(&hba->hba_mutex);
  1712. clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
  1713. clear_bit(ADAPTER_STATE_GOING_DOWN,
  1714. &hba->adapter_state);
  1715. clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
  1716. mutex_unlock(&hba->hba_mutex);
  1717. bnx2fc_fw_destroy(hba);
  1718. exit:
  1719. mutex_unlock(&bnx2fc_dev_lock);
  1720. }
  1721. static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
  1722. {
  1723. struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
  1724. struct fc_lport *lport;
  1725. int wait_cnt = 0;
  1726. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  1727. /* Kick off FIP/FLOGI */
  1728. if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
  1729. printk(KERN_ERR PFX "Init not done yet\n");
  1730. return;
  1731. }
  1732. lport = ctlr->lp;
  1733. BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
  1734. if (!bnx2fc_link_ok(lport) && interface->enabled) {
  1735. BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
  1736. fcoe_ctlr_link_up(ctlr);
  1737. fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
  1738. set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
  1739. }
  1740. /* wait for the FCF to be selected before issuing FLOGI */
  1741. while (!ctlr->sel_fcf) {
  1742. msleep(250);
  1743. /* give up after 3 secs */
  1744. if (++wait_cnt > 12)
  1745. break;
  1746. }
  1747. /* Reset max receive frame size to default */
  1748. if (fc_set_mfs(lport, BNX2FC_MFS))
  1749. return;
  1750. fc_lport_init(lport);
  1751. fc_fabric_login(lport);
  1752. }
/**
 * bnx2fc_ulp_init - Initialize an adapter instance
 *
 * @dev: cnic device handle
 *
 * Called from cnic_register_driver() context to initialize all
 * enumerated cnic devices. This routine allocates the adapter structure
 * and other device-specific resources.
 */
  1761. static void bnx2fc_ulp_init(struct cnic_dev *dev)
  1762. {
  1763. struct bnx2fc_hba *hba;
  1764. int rc = 0;
  1765. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  1766. /* bnx2fc works only when bnx2x is loaded */
  1767. if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
  1768. (dev->max_fcoe_conn == 0)) {
  1769. printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
  1770. " flags: %lx fcoe_conn: %d\n",
  1771. dev->netdev->name, dev->flags, dev->max_fcoe_conn);
  1772. return;
  1773. }
  1774. hba = bnx2fc_hba_create(dev);
  1775. if (!hba) {
  1776. printk(KERN_ERR PFX "hba initialization failed\n");
  1777. return;
  1778. }
  1779. pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name);
  1780. /* Add HBA to the adapter list */
  1781. mutex_lock(&bnx2fc_dev_lock);
  1782. list_add_tail(&hba->list, &adapter_list);
  1783. adapter_count++;
  1784. mutex_unlock(&bnx2fc_dev_lock);
  1785. dev->fcoe_cap = &hba->fcoe_cap;
  1786. clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
  1787. rc = dev->register_device(dev, CNIC_ULP_FCOE,
  1788. (void *) hba);
  1789. if (rc)
  1790. printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
  1791. else
  1792. set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
  1793. }
  1794. /* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
  1795. static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
  1796. {
  1797. struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
if (interface->enabled) {
  1799. if (!ctlr->lp) {
  1800. pr_err(PFX "__bnx2fc_disable: lport not found\n");
  1801. return -ENODEV;
  1802. } else {
  1803. interface->enabled = false;
  1804. fcoe_ctlr_link_down(ctlr);
  1805. fcoe_clean_pending_queue(ctlr->lp);
  1806. }
  1807. }
  1808. return 0;
  1809. }
/*
 * Deprecated: use the fcoe_sysfs 'enabled' attribute (bnx2fc_ctlr_enabled())
 * instead.
 */
  1813. static int bnx2fc_disable(struct net_device *netdev)
  1814. {
  1815. struct bnx2fc_interface *interface;
  1816. struct fcoe_ctlr *ctlr;
  1817. int rc = 0;
  1818. rtnl_lock();
  1819. mutex_lock(&bnx2fc_dev_lock);
  1820. interface = bnx2fc_interface_lookup(netdev);
  1821. ctlr = bnx2fc_to_ctlr(interface);
  1822. if (!interface) {
  1823. rc = -ENODEV;
  1824. pr_err(PFX "bnx2fc_disable: interface not found\n");
  1825. } else {
  1826. rc = __bnx2fc_disable(ctlr);
  1827. }
  1828. mutex_unlock(&bnx2fc_dev_lock);
  1829. rtnl_unlock();
  1830. return rc;
  1831. }
  1832. static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
  1833. struct cnic_fc_npiv_tbl *npiv_tbl)
  1834. {
  1835. struct fc_vport_identifiers vpid;
  1836. uint i, created = 0;
  1837. u64 wwnn = 0;
  1838. char wwpn_str[32];
  1839. char wwnn_str[32];
  1840. if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
  1841. BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
  1842. goto done;
  1843. }
  1844. /* Sanity check the first entry to make sure it's not 0 */
  1845. if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 &&
  1846. wwn_to_u64(npiv_tbl->wwpn[0]) == 0) {
  1847. BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n");
  1848. goto done;
  1849. }
  1850. vpid.roles = FC_PORT_ROLE_FCP_INITIATOR;
  1851. vpid.vport_type = FC_PORTTYPE_NPIV;
  1852. vpid.disable = false;
  1853. for (i = 0; i < npiv_tbl->count; i++) {
  1854. wwnn = wwn_to_u64(npiv_tbl->wwnn[i]);
  1855. if (wwnn == 0) {
/*
 * If we get a 0 element for the WWNN then assume the
 * WWNN should be the same as that of the physical port.
 */
  1860. wwnn = lport->wwnn;
  1861. }
  1862. vpid.node_name = wwnn;
  1863. vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
  1864. scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
  1865. "NPIV[%u]:%016llx-%016llx",
  1866. created, vpid.port_name, vpid.node_name);
  1867. fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str));
  1868. fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str));
  1869. BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str,
  1870. wwpn_str);
  1871. if (fc_vport_create(lport->host, 0, &vpid))
  1872. created++;
  1873. else
  1874. BNX2FC_HBA_DBG(lport, "Failed to create vport\n");
  1875. }
  1876. done:
  1877. return created;
  1878. }
  1879. static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
  1880. {
  1881. struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
  1882. struct bnx2fc_hba *hba;
  1883. struct cnic_fc_npiv_tbl *npiv_tbl;
  1884. struct fc_lport *lport;
if (!interface->enabled) {
  1886. if (!ctlr->lp) {
  1887. pr_err(PFX "__bnx2fc_enable: lport not found\n");
  1888. return -ENODEV;
  1889. } else if (!bnx2fc_link_ok(ctlr->lp)) {
  1890. fcoe_ctlr_link_up(ctlr);
  1891. interface->enabled = true;
  1892. }
  1893. }
  1894. /* Create static NPIV ports if any are contained in NVRAM */
  1895. hba = interface->hba;
  1896. lport = ctlr->lp;
  1897. if (!hba)
  1898. goto done;
  1899. if (!hba->cnic)
  1900. goto done;
  1901. if (!lport)
  1902. goto done;
  1903. if (!lport->host)
  1904. goto done;
  1905. if (!hba->cnic->get_fc_npiv_tbl)
  1906. goto done;
  1907. npiv_tbl = kzalloc(sizeof(struct cnic_fc_npiv_tbl), GFP_KERNEL);
  1908. if (!npiv_tbl)
  1909. goto done;
  1910. if (hba->cnic->get_fc_npiv_tbl(hba->cnic, npiv_tbl))
  1911. goto done_free;
  1912. bnx2fc_npiv_create_vports(lport, npiv_tbl);
  1913. done_free:
  1914. kfree(npiv_tbl);
  1915. done:
  1916. return 0;
  1917. }
/*
 * Deprecated: use the fcoe_sysfs 'enabled' attribute (bnx2fc_ctlr_enabled())
 * instead.
 */
  1921. static int bnx2fc_enable(struct net_device *netdev)
  1922. {
  1923. struct bnx2fc_interface *interface;
  1924. struct fcoe_ctlr *ctlr;
  1925. int rc = 0;
  1926. rtnl_lock();
  1927. mutex_lock(&bnx2fc_dev_lock);
  1928. interface = bnx2fc_interface_lookup(netdev);
  1929. ctlr = bnx2fc_to_ctlr(interface);
  1930. if (!interface) {
  1931. rc = -ENODEV;
  1932. pr_err(PFX "bnx2fc_enable: interface not found\n");
  1933. } else {
  1934. rc = __bnx2fc_enable(ctlr);
  1935. }
  1936. mutex_unlock(&bnx2fc_dev_lock);
  1937. rtnl_unlock();
  1938. return rc;
  1939. }
/**
 * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
 * @cdev: The FCoE Controller that is being enabled or disabled
 *
 * fcoe_sysfs will ensure that the state of 'enabled' has
 * changed, so no checking is necessary here. This routine simply
 * calls __bnx2fc_enable() or __bnx2fc_disable(), which also back the
 * deprecated bnx2fc_enable()/bnx2fc_disable() entry points. When those
 * deprecated routines are removed the functionality can be merged here.
 */
  1950. static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
  1951. {
  1952. struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
  1953. switch (cdev->enabled) {
  1954. case FCOE_CTLR_ENABLED:
  1955. return __bnx2fc_enable(ctlr);
  1956. case FCOE_CTLR_DISABLED:
  1957. return __bnx2fc_disable(ctlr);
  1958. case FCOE_CTLR_UNUSED:
  1959. default:
  1960. return -ENOTSUPP;
}
  1962. }
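/*
 * bnx2fc_ctlr_enabled() is reached through the fcoe_sysfs template below
 * (bnx2fc_fcoe_sysfs_templ.set_fcoe_ctlr_enabled), i.e. when userspace
 * toggles the controller's 'enabled' attribute, for example (the exact
 * sysfs path depends on the fcoe_sysfs layout and is shown only as an
 * illustration):
 *
 *   echo 1 > /sys/bus/fcoe/devices/ctlr_0/enabled
 */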
  1963. enum bnx2fc_create_link_state {
  1964. BNX2FC_CREATE_LINK_DOWN,
  1965. BNX2FC_CREATE_LINK_UP,
  1966. };
/**
 * _bnx2fc_create() - Create bnx2fc FCoE interface
 * @netdev: The net_device object of the Ethernet interface to create on
 * @fip_mode: The FIP mode for this creation
 * @link_state: The ctlr link state on creation
 *
 * Called either from the libfcoe 'create' module parameter
 * via bnx2fc_create() or from fcoe_sysfs's ctlr_create file.
 *
 * libfcoe's 'create' module parameter is deprecated, so some
 * consolidation of code can be done when that interface is
 * removed.
 *
 * Returns: 0 for success
 */
  1982. static int _bnx2fc_create(struct net_device *netdev,
  1983. enum fip_mode fip_mode,
  1984. enum bnx2fc_create_link_state link_state)
  1985. {
  1986. struct fcoe_ctlr_device *cdev;
  1987. struct fcoe_ctlr *ctlr;
  1988. struct bnx2fc_interface *interface;
  1989. struct bnx2fc_hba *hba;
  1990. struct net_device *phys_dev = netdev;
  1991. struct fc_lport *lport;
  1992. struct ethtool_drvinfo drvinfo;
  1993. int rc = 0;
  1994. int vlan_id = 0;
  1995. BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
  1996. if (fip_mode != FIP_MODE_FABRIC) {
  1997. printk(KERN_ERR "fip mode not FABRIC\n");
  1998. return -EIO;
  1999. }
  2000. rtnl_lock();
  2001. mutex_lock(&bnx2fc_dev_lock);
  2002. if (!try_module_get(THIS_MODULE)) {
  2003. rc = -EINVAL;
  2004. goto mod_err;
  2005. }
  2006. /* obtain physical netdev */
  2007. if (is_vlan_dev(netdev))
  2008. phys_dev = vlan_dev_real_dev(netdev);
  2009. /* verify if the physical device is a netxtreme2 device */
  2010. if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
  2011. memset(&drvinfo, 0, sizeof(drvinfo));
  2012. phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
  2013. if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
  2014. printk(KERN_ERR PFX "Not a netxtreme2 device\n");
  2015. rc = -EINVAL;
  2016. goto netdev_err;
  2017. }
  2018. } else {
  2019. printk(KERN_ERR PFX "unable to obtain drv_info\n");
  2020. rc = -EINVAL;
  2021. goto netdev_err;
  2022. }
  2023. /* obtain interface and initialize rest of the structure */
  2024. hba = bnx2fc_hba_lookup(phys_dev);
  2025. if (!hba) {
  2026. rc = -ENODEV;
  2027. printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
  2028. goto netdev_err;
  2029. }
  2030. if (bnx2fc_interface_lookup(netdev)) {
  2031. rc = -EEXIST;
  2032. goto netdev_err;
  2033. }
  2034. interface = bnx2fc_interface_create(hba, netdev, fip_mode);
  2035. if (!interface) {
  2036. printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
  2037. rc = -ENOMEM;
  2038. goto netdev_err;
  2039. }
  2040. if (is_vlan_dev(netdev)) {
  2041. vlan_id = vlan_dev_vlan_id(netdev);
  2042. interface->vlan_enabled = 1;
  2043. }
  2044. ctlr = bnx2fc_to_ctlr(interface);
  2045. cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
  2046. interface->vlan_id = vlan_id;
  2047. interface->tm_timeout = BNX2FC_TM_TIMEOUT;
  2048. interface->timer_work_queue =
  2049. create_singlethread_workqueue("bnx2fc_timer_wq");
  2050. if (!interface->timer_work_queue) {
  2051. printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
  2052. rc = -EINVAL;
  2053. goto ifput_err;
  2054. }
  2055. lport = bnx2fc_if_create(interface, &cdev->dev, 0);
  2056. if (!lport) {
  2057. printk(KERN_ERR PFX "Failed to create interface (%s)\n",
  2058. netdev->name);
  2059. rc = -EINVAL;
  2060. goto if_create_err;
  2061. }
  2062. /* Add interface to if_list */
  2063. list_add_tail(&interface->list, &if_list);
  2064. lport->boot_time = jiffies;
  2065. /* Make this master N_port */
  2066. ctlr->lp = lport;
  2067. if (link_state == BNX2FC_CREATE_LINK_UP)
  2068. cdev->enabled = FCOE_CTLR_ENABLED;
  2069. else
  2070. cdev->enabled = FCOE_CTLR_DISABLED;
  2071. if (link_state == BNX2FC_CREATE_LINK_UP &&
  2072. !bnx2fc_link_ok(lport)) {
  2073. fcoe_ctlr_link_up(ctlr);
  2074. fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
  2075. set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
  2076. }
  2077. BNX2FC_HBA_DBG(lport, "create: START DISC\n");
  2078. bnx2fc_start_disc(interface);
  2079. if (link_state == BNX2FC_CREATE_LINK_UP)
  2080. interface->enabled = true;
/*
 * Release the reference from kref_init in bnx2fc_interface_create; on
 * success the lport holds the reference taken in bnx2fc_if_create.
 */
  2085. bnx2fc_interface_put(interface);
  2086. /* put netdev that was held while calling dev_get_by_name */
  2087. mutex_unlock(&bnx2fc_dev_lock);
  2088. rtnl_unlock();
  2089. return 0;
  2090. if_create_err:
  2091. destroy_workqueue(interface->timer_work_queue);
  2092. ifput_err:
  2093. bnx2fc_net_cleanup(interface);
  2094. bnx2fc_interface_put(interface);
  2095. goto mod_err;
  2096. netdev_err:
  2097. module_put(THIS_MODULE);
  2098. mod_err:
  2099. mutex_unlock(&bnx2fc_dev_lock);
  2100. rtnl_unlock();
  2101. return rc;
  2102. }
/**
 * bnx2fc_create() - Create a bnx2fc interface
 * @netdev: The net_device object of the Ethernet interface to create on
 * @fip_mode: The FIP mode for this creation
 *
 * Called from the fcoe transport
 *
 * Returns: 0 for success
 */
  2112. static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode)
  2113. {
  2114. return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
  2115. }
/**
 * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
 * @netdev: The net_device to be used by the allocated FCoE Controller
 *
 * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
 * in a link_down state. This allows the user an opportunity to configure
 * the FCoE Controller from sysfs before enabling the FCoE Controller.
 *
 * Creating with this routine starts the FCoE Controller in Fabric
 * mode. The user can change to VN2VN or another mode before enabling.
 */
  2127. static int bnx2fc_ctlr_alloc(struct net_device *netdev)
  2128. {
  2129. return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
  2130. BNX2FC_CREATE_LINK_DOWN);
  2131. }
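/*
 * Typical fcoe_sysfs flow implied by the comment above (interface name and
 * sysfs paths are illustrative only):
 *
 *   echo eth2 > /sys/bus/fcoe/ctlr_create            # bnx2fc_ctlr_alloc(), link down
 *   ... configure the new ctlr_X from sysfs ...
 *   echo 1 > /sys/bus/fcoe/devices/ctlr_X/enabled    # bnx2fc_ctlr_enabled()
 */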
  2132. /**
  2133. * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
  2134. *
  2135. * @cnic: Pointer to cnic device instance
  2136. *
  2137. **/
  2138. static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
  2139. {
  2140. struct bnx2fc_hba *hba;
  2141. /* Called with bnx2fc_dev_lock held */
  2142. list_for_each_entry(hba, &adapter_list, list) {
  2143. if (hba->cnic == cnic)
  2144. return hba;
  2145. }
  2146. return NULL;
  2147. }
  2148. static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
  2149. *netdev)
  2150. {
  2151. struct bnx2fc_interface *interface;
  2152. /* Called with bnx2fc_dev_lock held */
  2153. list_for_each_entry(interface, &if_list, list) {
  2154. if (interface->netdev == netdev)
  2155. return interface;
  2156. }
  2157. return NULL;
  2158. }
  2159. static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
  2160. *phys_dev)
  2161. {
  2162. struct bnx2fc_hba *hba;
  2163. /* Called with bnx2fc_dev_lock held */
  2164. list_for_each_entry(hba, &adapter_list, list) {
  2165. if (hba->phys_dev == phys_dev)
  2166. return hba;
  2167. }
  2168. printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
  2169. return NULL;
  2170. }
/**
 * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
 *
 * @dev: cnic device handle
 */
  2176. static void bnx2fc_ulp_exit(struct cnic_dev *dev)
  2177. {
  2178. struct bnx2fc_hba *hba;
  2179. struct bnx2fc_interface *interface, *tmp;
  2180. BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
  2181. if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
  2182. printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
  2183. dev->netdev->name, dev->flags);
  2184. return;
  2185. }
  2186. mutex_lock(&bnx2fc_dev_lock);
  2187. hba = bnx2fc_find_hba_for_cnic(dev);
  2188. if (!hba) {
printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0x%p\n",
  2190. dev);
  2191. mutex_unlock(&bnx2fc_dev_lock);
  2192. return;
  2193. }
  2194. list_del_init(&hba->list);
  2195. adapter_count--;
  2196. list_for_each_entry_safe(interface, tmp, &if_list, list)
/* tear down any interfaces that belong to this hba */
  2198. if (interface->hba == hba)
  2199. __bnx2fc_destroy(interface);
  2200. mutex_unlock(&bnx2fc_dev_lock);
  2201. /* Ensure ALL destroy work has been completed before return */
  2202. flush_workqueue(bnx2fc_wq);
  2203. bnx2fc_ulp_stop(hba);
  2204. /* unregister cnic device */
  2205. if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
  2206. hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
  2207. bnx2fc_hba_destroy(hba);
  2208. }
  2209. static void bnx2fc_rport_terminate_io(struct fc_rport *rport)
  2210. {
  2211. /* This is a no-op */
  2212. }
/**
 * bnx2fc_fcoe_reset - Resets the FCoE lport associated with the shost
 *
 * @shost: shost the reset is issued from
 *
 * Returns: always 0
 */
  2220. static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
  2221. {
  2222. struct fc_lport *lport = shost_priv(shost);
  2223. fc_lport_reset(lport);
  2224. return 0;
  2225. }
  2226. static bool bnx2fc_match(struct net_device *netdev)
  2227. {
  2228. struct net_device *phys_dev = netdev;
  2229. mutex_lock(&bnx2fc_dev_lock);
  2230. if (is_vlan_dev(netdev))
  2231. phys_dev = vlan_dev_real_dev(netdev);
  2232. if (bnx2fc_hba_lookup(phys_dev)) {
  2233. mutex_unlock(&bnx2fc_dev_lock);
  2234. return true;
  2235. }
  2236. mutex_unlock(&bnx2fc_dev_lock);
  2237. return false;
  2238. }
  2239. static struct fcoe_transport bnx2fc_transport = {
  2240. .name = {"bnx2fc"},
  2241. .attached = false,
  2242. .list = LIST_HEAD_INIT(bnx2fc_transport.list),
  2243. .alloc = bnx2fc_ctlr_alloc,
  2244. .match = bnx2fc_match,
  2245. .create = bnx2fc_create,
  2246. .destroy = bnx2fc_destroy,
  2247. .enable = bnx2fc_enable,
  2248. .disable = bnx2fc_disable,
  2249. };
  2250. /**
  2251. * bnx2fc_cpu_online - Create a receive thread for an online CPU
  2252. *
  2253. * @cpu: cpu index for the online cpu
  2254. */
  2255. static int bnx2fc_cpu_online(unsigned int cpu)
  2256. {
  2257. struct bnx2fc_percpu_s *p;
  2258. struct task_struct *thread;
  2259. p = &per_cpu(bnx2fc_percpu, cpu);
  2260. thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
  2261. (void *)p, cpu_to_node(cpu),
  2262. "bnx2fc_thread/%d", cpu);
  2263. if (IS_ERR(thread))
  2264. return PTR_ERR(thread);
  2265. /* bind thread to the cpu */
  2266. kthread_bind(thread, cpu);
  2267. p->iothread = thread;
  2268. wake_up_process(thread);
  2269. return 0;
  2270. }
  2271. static int bnx2fc_cpu_offline(unsigned int cpu)
  2272. {
  2273. struct bnx2fc_percpu_s *p;
  2274. struct task_struct *thread;
  2275. struct bnx2fc_work *work, *tmp;
  2276. BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
  2277. /* Prevent any new work from being queued for this CPU */
  2278. p = &per_cpu(bnx2fc_percpu, cpu);
  2279. spin_lock_bh(&p->fp_work_lock);
  2280. thread = p->iothread;
  2281. p->iothread = NULL;
  2282. /* Free all work in the list */
  2283. list_for_each_entry_safe(work, tmp, &p->work_list, list) {
  2284. list_del_init(&work->list);
  2285. bnx2fc_process_cq_compl(work->tgt, work->wqe);
  2286. kfree(work);
  2287. }
  2288. spin_unlock_bh(&p->fp_work_lock);
  2289. if (thread)
  2290. kthread_stop(thread);
  2291. return 0;
  2292. }
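/*
 * bnx2fc_cpu_online()/bnx2fc_cpu_offline() are registered as a dynamic CPU
 * hotplug state ("scsi/bnx2fc:online") in bnx2fc_mod_init() below and removed
 * again in bnx2fc_mod_exit(), so per-CPU I/O threads exist for every online
 * CPU and any pending per-CPU work is drained when a CPU goes offline.
 */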
  2293. static int bnx2fc_slave_configure(struct scsi_device *sdev)
  2294. {
  2295. if (!bnx2fc_queue_depth)
  2296. return 0;
  2297. scsi_change_queue_depth(sdev, bnx2fc_queue_depth);
  2298. return 0;
  2299. }
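/*
 * bnx2fc_queue_depth is presumably the module parameter declared earlier in
 * this file (not shown here); when non-zero it overrides the default per-LUN
 * queue depth at slave-configure time, e.g. (hypothetical usage):
 *
 *   modprobe bnx2fc bnx2fc_queue_depth=64
 */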
  2300. static enum cpuhp_state bnx2fc_online_state;
  2301. /**
  2302. * bnx2fc_mod_init - module init entry point
  2303. *
  2304. * Initialize driver wide global data structures, and register
  2305. * with cnic module
  2306. **/
  2307. static int __init bnx2fc_mod_init(void)
  2308. {
  2309. struct fcoe_percpu_s *bg;
  2310. struct task_struct *l2_thread;
  2311. int rc = 0;
  2312. unsigned int cpu = 0;
  2313. struct bnx2fc_percpu_s *p;
  2314. printk(KERN_INFO PFX "%s", version);
  2315. /* register as a fcoe transport */
  2316. rc = fcoe_transport_attach(&bnx2fc_transport);
  2317. if (rc) {
  2318. printk(KERN_ERR "failed to register an fcoe transport, check "
  2319. "if libfcoe is loaded\n");
  2320. goto out;
  2321. }
  2322. INIT_LIST_HEAD(&adapter_list);
  2323. INIT_LIST_HEAD(&if_list);
  2324. mutex_init(&bnx2fc_dev_lock);
  2325. adapter_count = 0;
  2326. /* Attach FC transport template */
  2327. rc = bnx2fc_attach_transport();
  2328. if (rc)
  2329. goto detach_ft;
  2330. bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
  2331. if (!bnx2fc_wq) {
  2332. rc = -ENOMEM;
  2333. goto release_bt;
  2334. }
  2335. bg = &bnx2fc_global;
  2336. skb_queue_head_init(&bg->fcoe_rx_list);
  2337. l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
  2338. (void *)bg,
  2339. "bnx2fc_l2_thread");
  2340. if (IS_ERR(l2_thread)) {
  2341. rc = PTR_ERR(l2_thread);
  2342. goto free_wq;
  2343. }
  2344. wake_up_process(l2_thread);
  2345. spin_lock_bh(&bg->fcoe_rx_list.lock);
  2346. bg->kthread = l2_thread;
  2347. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  2348. for_each_possible_cpu(cpu) {
  2349. p = &per_cpu(bnx2fc_percpu, cpu);
  2350. INIT_LIST_HEAD(&p->work_list);
  2351. spin_lock_init(&p->fp_work_lock);
  2352. }
  2353. rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
  2354. bnx2fc_cpu_online, bnx2fc_cpu_offline);
  2355. if (rc < 0)
  2356. goto stop_thread;
  2357. bnx2fc_online_state = rc;
  2358. cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
  2359. return 0;
  2360. stop_thread:
  2361. kthread_stop(l2_thread);
  2362. free_wq:
  2363. destroy_workqueue(bnx2fc_wq);
  2364. release_bt:
  2365. bnx2fc_release_transport();
  2366. detach_ft:
  2367. fcoe_transport_detach(&bnx2fc_transport);
  2368. out:
  2369. return rc;
  2370. }
  2371. static void __exit bnx2fc_mod_exit(void)
  2372. {
  2373. LIST_HEAD(to_be_deleted);
  2374. struct bnx2fc_hba *hba, *next;
  2375. struct fcoe_percpu_s *bg;
  2376. struct task_struct *l2_thread;
  2377. struct sk_buff *skb;
/*
 * NOTE: Since the cnic register_driver routine takes rtnl_lock,
 * it will have higher precedence than bnx2fc_dev_lock.
 * unregister_device() cannot be called with bnx2fc_dev_lock
 * held.
 */
  2384. mutex_lock(&bnx2fc_dev_lock);
  2385. list_splice_init(&adapter_list, &to_be_deleted);
  2386. adapter_count = 0;
  2387. mutex_unlock(&bnx2fc_dev_lock);
  2388. /* Unregister with cnic */
  2389. list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
  2390. list_del_init(&hba->list);
  2391. printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
  2392. hba);
  2393. bnx2fc_ulp_stop(hba);
  2394. /* unregister cnic device */
  2395. if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
  2396. &hba->reg_with_cnic))
  2397. hba->cnic->unregister_device(hba->cnic,
  2398. CNIC_ULP_FCOE);
  2399. bnx2fc_hba_destroy(hba);
  2400. }
  2401. cnic_unregister_driver(CNIC_ULP_FCOE);
  2402. /* Destroy global thread */
  2403. bg = &bnx2fc_global;
  2404. spin_lock_bh(&bg->fcoe_rx_list.lock);
  2405. l2_thread = bg->kthread;
  2406. bg->kthread = NULL;
  2407. while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
  2408. kfree_skb(skb);
  2409. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  2410. if (l2_thread)
  2411. kthread_stop(l2_thread);
  2412. cpuhp_remove_state(bnx2fc_online_state);
  2413. destroy_workqueue(bnx2fc_wq);
  2414. /*
  2415. * detach from scsi transport
  2416. * must happen after all destroys are done
  2417. */
  2418. bnx2fc_release_transport();
  2419. /* detach from fcoe transport */
  2420. fcoe_transport_detach(&bnx2fc_transport);
  2421. }
  2422. module_init(bnx2fc_mod_init);
  2423. module_exit(bnx2fc_mod_exit);
  2424. static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
  2425. .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
  2426. .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
  2427. .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
  2428. .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
  2429. .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
  2430. .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
  2431. .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
  2432. .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
  2433. .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
  2434. };
  2435. static struct fc_function_template bnx2fc_transport_function = {
  2436. .show_host_node_name = 1,
  2437. .show_host_port_name = 1,
  2438. .show_host_supported_classes = 1,
  2439. .show_host_supported_fc4s = 1,
  2440. .show_host_active_fc4s = 1,
  2441. .show_host_maxframe_size = 1,
  2442. .show_host_port_id = 1,
  2443. .show_host_supported_speeds = 1,
  2444. .get_host_speed = fc_get_host_speed,
  2445. .show_host_speed = 1,
  2446. .show_host_port_type = 1,
  2447. .get_host_port_state = fc_get_host_port_state,
  2448. .show_host_port_state = 1,
  2449. .show_host_symbolic_name = 1,
  2450. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  2451. sizeof(struct bnx2fc_rport)),
  2452. .show_rport_maxframe_size = 1,
  2453. .show_rport_supported_classes = 1,
  2454. .show_host_fabric_name = 1,
  2455. .show_starget_node_name = 1,
  2456. .show_starget_port_name = 1,
  2457. .show_starget_port_id = 1,
  2458. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  2459. .show_rport_dev_loss_tmo = 1,
  2460. .get_fc_host_stats = bnx2fc_get_host_stats,
  2461. .issue_fc_host_lip = bnx2fc_fcoe_reset,
  2462. .terminate_rport_io = bnx2fc_rport_terminate_io,
  2463. .vport_create = bnx2fc_vport_create,
  2464. .vport_delete = bnx2fc_vport_destroy,
  2465. .vport_disable = bnx2fc_vport_disable,
  2466. .bsg_request = fc_lport_bsg_request,
  2467. };
  2468. static struct fc_function_template bnx2fc_vport_xport_function = {
  2469. .show_host_node_name = 1,
  2470. .show_host_port_name = 1,
  2471. .show_host_supported_classes = 1,
  2472. .show_host_supported_fc4s = 1,
  2473. .show_host_active_fc4s = 1,
  2474. .show_host_maxframe_size = 1,
  2475. .show_host_port_id = 1,
  2476. .show_host_supported_speeds = 1,
  2477. .get_host_speed = fc_get_host_speed,
  2478. .show_host_speed = 1,
  2479. .show_host_port_type = 1,
  2480. .get_host_port_state = fc_get_host_port_state,
  2481. .show_host_port_state = 1,
  2482. .show_host_symbolic_name = 1,
  2483. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  2484. sizeof(struct bnx2fc_rport)),
  2485. .show_rport_maxframe_size = 1,
  2486. .show_rport_supported_classes = 1,
  2487. .show_host_fabric_name = 1,
  2488. .show_starget_node_name = 1,
  2489. .show_starget_port_name = 1,
  2490. .show_starget_port_id = 1,
  2491. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  2492. .show_rport_dev_loss_tmo = 1,
  2493. .get_fc_host_stats = fc_get_host_stats,
  2494. .issue_fc_host_lip = bnx2fc_fcoe_reset,
  2495. .terminate_rport_io = fc_rport_terminate_io,
  2496. .bsg_request = fc_lport_bsg_request,
  2497. };
  2498. /*
  2499. * Additional scsi_host attributes.
  2500. */
  2501. static ssize_t
  2502. bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr,
  2503. char *buf)
  2504. {
  2505. struct Scsi_Host *shost = class_to_shost(dev);
  2506. struct fc_lport *lport = shost_priv(shost);
  2507. struct fcoe_port *port = lport_priv(lport);
  2508. struct bnx2fc_interface *interface = port->priv;
  2509. sprintf(buf, "%u\n", interface->tm_timeout);
  2510. return strlen(buf);
  2511. }
  2512. static ssize_t
  2513. bnx2fc_tm_timeout_store(struct device *dev,
  2514. struct device_attribute *attr, const char *buf, size_t count)
  2515. {
  2516. struct Scsi_Host *shost = class_to_shost(dev);
  2517. struct fc_lport *lport = shost_priv(shost);
  2518. struct fcoe_port *port = lport_priv(lport);
  2519. struct bnx2fc_interface *interface = port->priv;
  2520. int rval, val;
  2521. rval = kstrtouint(buf, 10, &val);
  2522. if (rval)
  2523. return rval;
  2524. if (val > 255)
  2525. return -ERANGE;
  2526. interface->tm_timeout = (u8)val;
  2527. return strlen(buf);
  2528. }
  2529. static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show,
  2530. bnx2fc_tm_timeout_store);
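/*
 * tm_timeout is exported through bnx2fc_host_attrs below, so it appears as a
 * per-host SCSI attribute; the store routine above clamps the value to 0-255.
 * Illustrative usage (host number is an example):
 *
 *   echo 30 > /sys/class/scsi_host/host7/tm_timeout
 */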
  2531. static struct device_attribute *bnx2fc_host_attrs[] = {
  2532. &dev_attr_tm_timeout,
  2533. NULL,
  2534. };
  2535. /**
  2536. * scsi_host_template structure used while registering with SCSI-ml
  2537. */
  2538. static struct scsi_host_template bnx2fc_shost_template = {
  2539. .module = THIS_MODULE,
  2540. .name = "QLogic Offload FCoE Initiator",
  2541. .queuecommand = bnx2fc_queuecommand,
  2542. .eh_timed_out = fc_eh_timed_out,
  2543. .eh_abort_handler = bnx2fc_eh_abort, /* abts */
  2544. .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
  2545. .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
  2546. .eh_host_reset_handler = fc_eh_host_reset,
  2547. .slave_alloc = fc_slave_alloc,
  2548. .change_queue_depth = scsi_change_queue_depth,
  2549. .this_id = -1,
  2550. .cmd_per_lun = 3,
  2551. .use_clustering = ENABLE_CLUSTERING,
  2552. .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
  2553. .max_sectors = 1024,
  2554. .track_queue_depth = 1,
  2555. .slave_configure = bnx2fc_slave_configure,
  2556. .shost_attrs = bnx2fc_host_attrs,
  2557. };
  2558. static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
  2559. .frame_send = bnx2fc_xmit,
  2560. .elsct_send = bnx2fc_elsct_send,
  2561. .fcp_abort_io = bnx2fc_abort_io,
  2562. .fcp_cleanup = bnx2fc_cleanup,
  2563. .get_lesb = fcoe_get_lesb,
  2564. .rport_event_callback = bnx2fc_rport_event_handler,
  2565. };
/**
 * bnx2fc_cnic_cb - global template of the bnx2fc-cnic driver interface
 * structure carrying the callback function pointers
 */
  2570. static struct cnic_ulp_ops bnx2fc_cnic_cb = {
  2571. .owner = THIS_MODULE,
  2572. .cnic_init = bnx2fc_ulp_init,
  2573. .cnic_exit = bnx2fc_ulp_exit,
  2574. .cnic_start = bnx2fc_ulp_start,
  2575. .cnic_stop = bnx2fc_ulp_stop,
  2576. .indicate_kcqes = bnx2fc_indicate_kcqe,
  2577. .indicate_netevent = bnx2fc_indicate_netevent,
  2578. .cnic_get_stats = bnx2fc_ulp_get_stats,
  2579. };