qla3xxx.c

/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE phyDevice;
	const u32 phyIdOUI;
	const u16 phyIdModel;
	const char *name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}
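
/*
 * Hedged note on the semaphore protocol above: the register appears to
 * use the masked-write convention common on this hardware, where the
 * upper 16 bits of the written value act as a write-enable mask for the
 * corresponding lower 16 bits (hence writing sem_mask | sem_bits, then
 * testing the read-back against sem_mask >> 16 to see whether our bits
 * actually stuck, i.e. whether we won the semaphore).  ql_sem_spinlock()
 * retries once per second for ~3 seconds; ql_sem_lock() below is the
 * one-shot, non-sleeping variant, and ql_sem_unlock() releases by
 * writing the mask with the bit field cleared.
 */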
static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}
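
/*
 * Note on paged register access: the NP bits of ispControlStatus select
 * which register page the per-port window exposes.  ql_set_register_page()
 * caches the selection in qdev->current_page so the page0/page1/page2
 * accessors below only flip pages when the cached value differs; callers
 * must hold hw_lock so the cached page stays in sync with the hardware.
 */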
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
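
/*
 * The interrupt mask register appears to follow the same masked-write
 * convention as the semaphore register: ql_disable_interrupts() places
 * ISP_IMR_ENABLE_INT only in the upper (write-enable) half, clearing the
 * enable bit, while ql_enable_interrupts() opens the whole mask
 * (0xff << 16) and sets the enable bit itself.
 */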
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
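
/*
 * The large-buffer free list above is a singly linked list threaded
 * through ql_rcv_buf_cb->next with head/tail pointers and a count.  A
 * minimal sketch of the intended producer/consumer pairing (illustrative
 * only, not driver code):
 *
 *	struct ql_rcv_buf_cb *cb = ql_get_from_lrg_buf_free_list(qdev);
 *	if (cb) {
 *		// ... post cb's buffer to the hardware ...
 *		// later, when the hardware has consumed it:
 *		ql_release_to_lrg_buf_free_list(qdev, cb);
 *	}
 *
 * lrg_buf_skb_check counts entries queued without a backing skb (the
 * allocation or DMA mapping failed); such entries remain queued and are
 * retried later rather than dropped.
 */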
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);

	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);

	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
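
/*
 * A complete FM93C56A read is four bit-banged phases on the serial port
 * interface register: raise chip select (fm93c56a_select), clock out the
 * READ opcode and address MSB-first (fm93c56a_cmd), clock in dataBits
 * result bits (fm93c56a_datain), then drop chip select
 * (fm93c56a_deselect).  Every EEPROM clock is a CLK_RISE/CLK_FALL write
 * pair with the DO line held at the desired level, issued through
 * ql_write_nvram_reg(), which adds a 1us settle delay per write.
 */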
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;

	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}

	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}
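
/*
 * NVRAM integrity convention used above: the EEPROM image is written so
 * that its 16-bit words sum to zero modulo 2^16.  Any nonzero total
 * means the image is corrupt and ql_get_nvram_params() fails with -1;
 * on success it returns the (necessarily zero) checksum.
 */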
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}
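
/*
 * "Scan mode" has the MAC continuously poll the PHY status register in
 * hardware.  The manual MII accessors below therefore bracket each
 * access with ql_mii_disable_scan_mode()/ql_mii_enable_scan_mode(); the
 * _ex variants use the returned scanWasEnabled flag to restore scanning
 * only if it had been running, while the plain variants re-enable it
 * unconditionally.
 */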
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}
static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}
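
/*
 * PHY identification: the IEEE OUI is split across the two MII ID
 * registers, so getPhyType() rebuilds it as
 *
 *	oui   = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
 *	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 *
 * and matches the pair against the PHY_DEVICES table (e.g. OUI 0x0003f1
 * with model 0xb is the Vitesse VSC8211).  0xffff in either register
 * means no PHY responded at that address.
 */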
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using,
		   based on the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}
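
/*
 * Hedged note on the detection above: an MII read of all ones (0xffff)
 * is the classic "nothing responded" pattern, so PHY_Setup() treats it
 * as a hint that an Agere PHY is listening at an alternate address and
 * retries at MII_AGERE_ADDR_1/2 (selected by mac_index).  If an ET1011C
 * is then identified, phyAgereSpecificInit() applies the errata
 * workarounds and rewrites the PHY address.
 */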
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
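
/*
 * All five ql_mac_cfg_* helpers above share one pattern: each MAC config
 * bit is written together with its image in the upper 16 bits, so
 * "enable" writes (bit | bit << 16) and "disable" writes only
 * (bit << 16), leaving every other bit of the config register untouched.
 * A minimal sketch of a combined helper (hypothetical, not in the
 * driver):
 *
 *	static void ql_mac_cfg_bit(struct ql3_adapter *qdev, u32 bit,
 *				   u32 enable)
 *	{
 *		struct ql3xxx_port_registers __iomem *port_regs =
 *			qdev->mem_map_registers;
 *		u32 value = (bit << 16) | (enable ? bit : 0);
 *
 *		ql_write_page0_reg(qdev, qdev->mac_index ?
 *				   &port_regs->mac1ConfigReg :
 *				   &port_regs->mac0ConfigReg, value);
 *	}
 */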
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}
/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
  1272. static void ql_link_state_machine_work(struct work_struct *work)
  1273. {
  1274. struct ql3_adapter *qdev =
  1275. container_of(work, struct ql3_adapter, link_state_work.work);
  1276. u32 curr_link_state;
  1277. unsigned long hw_flags;
  1278. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1279. curr_link_state = ql_get_link_state(qdev);
  1280. if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
  1281. netif_info(qdev, link, qdev->ndev,
  1282. "Reset in progress, skip processing link state\n");
  1283. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1284. /* Restart timer on 2 second interval. */
  1285. mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
  1286. return;
  1287. }
  1288. switch (qdev->port_link_state) {
  1289. default:
  1290. if (test_bit(QL_LINK_MASTER, &qdev->flags))
  1291. ql_port_start(qdev);
  1292. qdev->port_link_state = LS_DOWN;
  1293. /* Fall Through */
  1294. case LS_DOWN:
  1295. if (curr_link_state == LS_UP) {
  1296. netif_info(qdev, link, qdev->ndev, "Link is up\n");
  1297. if (ql_is_auto_neg_complete(qdev))
  1298. ql_finish_auto_neg(qdev);
  1299. if (qdev->port_link_state == LS_UP)
  1300. ql_link_down_detect_clear(qdev);
  1301. qdev->port_link_state = LS_UP;
  1302. }
  1303. break;
  1304. case LS_UP:
  1305. /*
  1306. * See if the link is currently down or went down and came
  1307. * back up
  1308. */
  1309. if (curr_link_state == LS_DOWN) {
  1310. netif_info(qdev, link, qdev->ndev, "Link is down\n");
  1311. qdev->port_link_state = LS_DOWN;
  1312. }
  1313. if (ql_link_down_detect(qdev))
  1314. qdev->port_link_state = LS_DOWN;
  1315. break;
  1316. }
  1317. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1318. /* Restart timer on 2 second interval. */
  1319. mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
  1320. }
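/*
 * Summary of the state machine above: from any unknown state the port
 * is (re)started and forced to LS_DOWN; from LS_DOWN a link-up
 * indication finishes auto-negotiation and moves to LS_UP; from LS_UP
 * a link-down indication (current, or latched via ql_link_down_detect())
 * drops back to LS_DOWN.  Every pass re-arms the adapter timer, so the
 * work item runs periodically for the life of the interface.
 */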
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_link_ksettings(struct net_device *ndev,
				 struct ethtool_link_ksettings *cmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		cmd->base.port = PORT_FIBRE;
	} else {
		cmd->base.port = PORT_TP;
		cmd->base.phy_address = qdev->PHYAddr;
	}
	advertising = ql_supported_modes(qdev);
	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
	cmd->base.speed = ql_get_speed(qdev);
	cmd->base.duplex = ql_get_full_dup(qdev);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
	.get_link_ksettings = ql_get_link_ksettings,
};
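/*
 * For reference: these ops back the standard ethtool(8) interface,
 * e.g. "ethtool ethX" lands in ql_get_link_ksettings(), "ethtool -a
 * ethX" in ql_get_pauseparam(), and "ethtool -s ethX msglvl N" in
 * ql_set_msglevel().
 */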
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);
				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel_relaxed(qdev->small_buf_q_producer_index,
			       &port_regs->CommonRegs.rxSmallQProducerIndex);
		mmiowb();
	}
}
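/*
 * Note on the arithmetic above: the producer index advances one queue
 * entry for every 8 buffers the chip has released, and the >= 16
 * threshold batches releases so the producer-index register is written
 * at most once per pass rather than once per buffer.
 */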
/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}
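/*
 * Each large buffer queue entry carries 8 address elements (the inner
 * loop above), which is why a refill requires lrg_buf_free_count >= 8:
 * an entry is only handed back to the chip once all 8 of its slots
 * point at freshly recycled buffers.
 */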
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");
		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);
		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	napi_gro_receive(&qdev->napi, skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
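/*
 * Concretely: on a 3022 a single received frame consumes one small
 * buffer (the inbound address list) plus two large buffers, and only
 * the second large buffer's skb travels up the stack; on a 3032 the
 * same frame consumes one small buffer and one large buffer.
 */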
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb_checksum_none_assert(skb2);
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			netdev_err(ndev,
				   "%s: Bad checksum for this %s packet, checksum = %x\n",
				   __func__,
				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				    "TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	napi_gro_receive(&qdev->napi, skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < budget)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 4032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			work_done++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			work_done++;
			break;

		default: {
			u32 *tmp = (u32 *)net_rsp;
			netdev_err(ndev,
				   "Hit default case, not handled!\n"
				   "	dropping the packet, opcode = %x\n"
				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				   net_rsp->opcode,
				   (unsigned long int)tmp[0],
				   (unsigned long int)tmp[1],
				   (unsigned long int)tmp[2],
				   (unsigned long int)tmp[3]);
		}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}
	}

	return work_done;
}
static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int work_done;

	work_done = ql_tx_rx_clean(qdev, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&qdev->hw_lock, flags);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, flags);

		ql_enable_interrupts(qdev);
	}
	return work_done;
}
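/*
 * Standard NAPI completion pattern: when a poll consumes less than its
 * budget, napi_complete_done() takes the device out of polled mode,
 * and only then are the receive queues replenished, the consumer index
 * published to the chip, and hardware interrupts re-enabled.
 */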
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			netdev_warn(ndev,
				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
				    var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			netdev_err(ndev,
				   "Another function issued a reset to the chip. ISR value = %x\n",
				   value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi)))
			__napi_schedule(&qdev->napi);
	} else
		return IRQ_NONE;

	return IRQ_RETVAL(handled);
}
/*
 * Get the total number of segments needed for the given number of fragments.
 * This is necessary because outbound address lists (OAL) will be used when
 * more than two frags are given.  Each address list has 5 addr/len pairs.
 * The 5th pair in each OAL is used to point to the next OAL if more frags
 * are coming.  That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	if (frags <= 2)
		return frags + 1;
	else if (frags <= 6)
		return frags + 2;
	else if (frags <= 10)
		return frags + 3;
	else if (frags <= 14)
		return frags + 4;
	else if (frags <= 18)
		return frags + 5;
	return -1;
}
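/*
 * Worked example of the mapping above: a 7-fragment skb produces 8
 * data segments (skb head + 7 frags).  The IOCB holds two of them
 * plus a pointer to the first OAL, the first OAL holds four more plus
 * a pointer to a second OAL, and the second OAL holds the last two:
 * 8 data + 2 pointer entries = 10 segments = frags + 3.
 */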
static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
			OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
			OB_3032MAC_IOCB_REQ_IC;
	}
}
/*
 * Map the buffers for this transmit.
 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
			   err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
		return NETDEV_TX_OK;
	}
	oal = tx_cb->oal;
	for (completed_segs = 0;
	     completed_segs < frag_cnt;
	     completed_segs++, seg++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
		oal_entry++;
		/*
		 * Check for continuation requirements.
		 * It's strange but necessary.
		 * Continuation entry points to outbound address list.
		 */
		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			map = pci_map_single(qdev->pdev, oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping outbound address list with error: %d\n",
					   err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
						     OAL_CONT_ENTRY);
			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			dma_unmap_len_set(&tx_cb->map[seg], maplen,
					  sizeof(struct oal));
			oal_entry = (struct oal_entry *)oal;
			oal++;
			seg++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netdev_err(qdev->ndev,
				   "PCI mapping frags failed with error: %d\n",
				   err);
			goto map_error;
		}

		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
		oal_entry->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
		dma_unmap_len_set(&tx_cb->map[seg], maplen,
				  skb_frag_size(frag));
	}
	/* Terminate the last segment. */
	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
	return NETDEV_TX_OK;

map_error:
	/* A PCI mapping failed and now we will need to back out.
	 * We need to traverse through the oals and associated pages which
	 * have been mapped and now we must unmap them to clean up properly.
	 */
	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;

		/*
		 * Check for continuation requirements.
		 * It's strange but necessary.
		 */
		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
					 dma_unmap_len(&tx_cb->map[seg], maplen),
					 PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
			       dma_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
			       struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2))
		return NETDEV_TX_BUSY;

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	tx_cb->seg_count = ql_get_seg_count(qdev,
					    skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	wmb();
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
		     "tx queued, slot %d, len %d\n",
		     qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	/* The barrier is required to ensure the request and response queue
	 * addr writes reach the registers.
	 */
	wmb();

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		netdev_err(qdev->ndev, "reqQ failed\n");
		return -ENOMEM;
	}

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		netdev_err(qdev->ndev, "rspQ allocation failed\n");
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}
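/*
 * The "addr & (size - 1)" checks above reject allocations that are not
 * naturally aligned to the queue size; note that this test is only
 * meaningful when the size is a power of two.
 */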
static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}
static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
				      sizeof(struct ql_rcv_buf_cb),
				      GFP_KERNEL);
	if (qdev->lrg_buf == NULL)
		return -ENOMEM;

	qdev->lrg_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->lrg_buf_q_alloc_size,
				     &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		netdev_err(qdev->ndev, "lBufQ failed\n");
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->small_buf_q_alloc_size,
				     &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}

static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}
	kfree(qdev->lrg_buf);
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and carve it up for
	 * all of the small buffers. */
	qdev->small_buf_total_size =
		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
		 QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->small_buf_total_size,
				     &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
			cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
			cpu_to_le32(qdev->small_buf_phy_addr_low +
				    (i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}

static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}
static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(lrg_buf_cb, mapaddr),
					 dma_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}

static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}
static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	dma_addr_t map;
	int err;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));

		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			netdev_err(qdev->ndev,
				   "large buff alloc failed for %d bytes at index %d\n",
				   qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb->index = i;
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb_irq(skb);
				ql_free_large_buffers(qdev);
				return -ENOMEM;
			}

			lrg_buf_cb->skb = skb;
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}
static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		kfree(tx_cb->oal);
		tx_cb->oal = NULL;
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -ENOMEM;
	}
	return 0;
}
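/*
 * The 512-byte kmalloc above provides the per-transmit OAL scratch
 * space, presumably sized to cover the worst case: ql_send_map()
 * chains at most four OALs for a maximally fragmented skb, which at
 * five entries per OAL fits comfortably within 512 bytes.
 */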
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
		return -ENOMEM;
	}
	qdev->num_large_buffers =
		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
			MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
			LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
			qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
			qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;
err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}
static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
		(void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
  2551. static int ql_adapter_initialize(struct ql3_adapter *qdev)
  2552. {
  2553. u32 value;
  2554. struct ql3xxx_port_registers __iomem *port_regs =
  2555. qdev->mem_map_registers;
  2556. __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
  2557. struct ql3xxx_host_memory_registers __iomem *hmem_regs =
  2558. (void __iomem *)port_regs;
  2559. u32 delay = 10;
  2560. int status = 0;
  2561. if (ql_mii_setup(qdev))
  2562. return -1;
  2563. /* Bring out PHY out of reset */
  2564. ql_write_common_reg(qdev, spir,
  2565. (ISP_SERIAL_PORT_IF_WE |
  2566. (ISP_SERIAL_PORT_IF_WE << 16)));
  2567. /* Give the PHY time to come out of reset. */
  2568. mdelay(100);
  2569. qdev->port_link_state = LS_DOWN;
  2570. netif_carrier_off(qdev->ndev);
  2571. /* V2 chip fix for ARS-39168. */
  2572. ql_write_common_reg(qdev, spir,
  2573. (ISP_SERIAL_PORT_IF_SDE |
  2574. (ISP_SERIAL_PORT_IF_SDE << 16)));
  2575. /* Request Queue Registers */
  2576. *((u32 *)(qdev->preq_consumer_index)) = 0;
  2577. atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
  2578. qdev->req_producer_index = 0;
  2579. ql_write_page1_reg(qdev,
  2580. &hmem_regs->reqConsumerIndexAddrHigh,
  2581. qdev->req_consumer_index_phy_addr_high);
  2582. ql_write_page1_reg(qdev,
  2583. &hmem_regs->reqConsumerIndexAddrLow,
  2584. qdev->req_consumer_index_phy_addr_low);
  2585. ql_write_page1_reg(qdev,
  2586. &hmem_regs->reqBaseAddrHigh,
  2587. MS_64BITS(qdev->req_q_phy_addr));
  2588. ql_write_page1_reg(qdev,
  2589. &hmem_regs->reqBaseAddrLow,
  2590. LS_64BITS(qdev->req_q_phy_addr));
  2591. ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
  2592. /* Response Queue Registers */
  2593. *((__le16 *) (qdev->prsp_producer_index)) = 0;
  2594. qdev->rsp_consumer_index = 0;
  2595. qdev->rsp_current = qdev->rsp_q_virt_addr;
  2596. ql_write_page1_reg(qdev,
  2597. &hmem_regs->rspProducerIndexAddrHigh,
  2598. qdev->rsp_producer_index_phy_addr_high);
  2599. ql_write_page1_reg(qdev,
  2600. &hmem_regs->rspProducerIndexAddrLow,
  2601. qdev->rsp_producer_index_phy_addr_low);
  2602. ql_write_page1_reg(qdev,
  2603. &hmem_regs->rspBaseAddrHigh,
  2604. MS_64BITS(qdev->rsp_q_phy_addr));
  2605. ql_write_page1_reg(qdev,
  2606. &hmem_regs->rspBaseAddrLow,
  2607. LS_64BITS(qdev->rsp_q_phy_addr));
  2608. ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
  2609. /* Large Buffer Queue */
  2610. ql_write_page1_reg(qdev,
  2611. &hmem_regs->rxLargeQBaseAddrHigh,
  2612. MS_64BITS(qdev->lrg_buf_q_phy_addr));
  2613. ql_write_page1_reg(qdev,
  2614. &hmem_regs->rxLargeQBaseAddrLow,
  2615. LS_64BITS(qdev->lrg_buf_q_phy_addr));
  2616. ql_write_page1_reg(qdev,
  2617. &hmem_regs->rxLargeQLength,
  2618. qdev->num_lbufq_entries);
  2619. ql_write_page1_reg(qdev,
  2620. &hmem_regs->rxLargeBufferLength,
  2621. qdev->lrg_buffer_len);
  2622. /* Small Buffer Queue */
  2623. ql_write_page1_reg(qdev,
  2624. &hmem_regs->rxSmallQBaseAddrHigh,
  2625. MS_64BITS(qdev->small_buf_q_phy_addr));
  2626. ql_write_page1_reg(qdev,
  2627. &hmem_regs->rxSmallQBaseAddrLow,
  2628. LS_64BITS(qdev->small_buf_q_phy_addr));
  2629. ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
  2630. ql_write_page1_reg(qdev,
  2631. &hmem_regs->rxSmallBufferLength,
  2632. QL_SMALL_BUFFER_SIZE);
  2633. qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
  2634. qdev->small_buf_release_cnt = 8;
  2635. qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
  2636. qdev->lrg_buf_release_cnt = 8;
  2637. qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
  2638. qdev->small_buf_index = 0;
  2639. qdev->lrg_buf_index = 0;
  2640. qdev->lrg_buf_free_count = 0;
  2641. qdev->lrg_buf_free_head = NULL;
  2642. qdev->lrg_buf_free_tail = NULL;
  2643. ql_write_common_reg(qdev,
  2644. &port_regs->CommonRegs.
  2645. rxSmallQProducerIndex,
  2646. qdev->small_buf_q_producer_index);
  2647. ql_write_common_reg(qdev,
  2648. &port_regs->CommonRegs.
  2649. rxLargeQProducerIndex,
  2650. qdev->lrg_buf_q_producer_index);
  2651. /*
  2652. * Find out if the chip has already been initialized. If it has, then
  2653. * we skip some of the initialization.
  2654. */
  2655. clear_bit(QL_LINK_MASTER, &qdev->flags);
  2656. value = ql_read_page0_reg(qdev, &port_regs->portStatus);
  2657. if ((value & PORT_STATUS_IC) == 0) {
  2658. /* Chip has not been configured yet, so let it rip. */
  2659. if (ql_init_misc_registers(qdev)) {
  2660. status = -1;
  2661. goto out;
  2662. }
  2663. value = qdev->nvram_data.tcpMaxWindowSize;
  2664. ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
  2665. value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
  2666. if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
  2667. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
  2668. * 2) << 13)) {
  2669. status = -1;
  2670. goto out;
  2671. }
  2672. ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
  2673. ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
  2674. (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
  2675. 16) | (INTERNAL_CHIP_SD |
  2676. INTERNAL_CHIP_WE)));
  2677. ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
  2678. }
  2679. if (qdev->mac_index)
  2680. ql_write_page0_reg(qdev,
  2681. &port_regs->mac1MaxFrameLengthReg,
  2682. qdev->max_frame_size);
  2683. else
  2684. ql_write_page0_reg(qdev,
  2685. &port_regs->mac0MaxFrameLengthReg,
  2686. qdev->max_frame_size);
  2687. if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  2688. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  2689. 2) << 7)) {
  2690. status = -1;
  2691. goto out;
  2692. }
  2693. PHY_Setup(qdev);
  2694. ql_init_scan_mode(qdev);
  2695. ql_get_phy_owner(qdev);
  2696. /* Load the MAC Configuration */
  2697. /* Program lower 32 bits of the MAC address */
  2698. ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
  2699. (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
  2700. ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
  2701. ((qdev->ndev->dev_addr[2] << 24)
  2702. | (qdev->ndev->dev_addr[3] << 16)
  2703. | (qdev->ndev->dev_addr[4] << 8)
  2704. | qdev->ndev->dev_addr[5]));
  2705. /* Program top 16 bits of the MAC address */
  2706. ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
  2707. ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
  2708. ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
  2709. ((qdev->ndev->dev_addr[0] << 8)
  2710. | qdev->ndev->dev_addr[1]));
  2711. /* Enable Primary MAC */
  2712. ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
  2713. ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
  2714. MAC_ADDR_INDIRECT_PTR_REG_PE));
  2715. /* Clear Primary and Secondary IP addresses */
  2716. ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
  2717. ((IP_ADDR_INDEX_REG_MASK << 16) |
  2718. (qdev->mac_index << 2)));
  2719. ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
  2720. ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
  2721. ((IP_ADDR_INDEX_REG_MASK << 16) |
  2722. ((qdev->mac_index << 2) + 1)));
  2723. ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
  2724. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  2725. /* Indicate Configuration Complete */
  2726. ql_write_page0_reg(qdev,
  2727. &port_regs->portControl,
  2728. ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		spin_unlock_irq(&qdev->hw_lock);
		msleep(500);
		spin_lock_irq(&qdev->hw_lock);
	} while (--delay);

	if (delay == 0) {
		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			 QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
			 PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}

/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/*
	 * Wait (up to 5 seconds) until the firmware tells us the Soft
	 * Reset is done.
	 */
	netdev_printk(KERN_DEBUG, qdev->ndev,
		      "Waiting up to 5 seconds for reset to complete\n");
	max_wait_time = 5;
	do {
		value = ql_read_common_reg(qdev,
					   &port_regs->CommonRegs.
					   ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "clearing RI after reset\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}
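
	/*
	 * If the soft reset did not complete (SR still set after the waits
	 * above), escalate to a Force Soft Reset and poll FSR the same way.
	 */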
	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done.
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}

static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
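
	/*
	 * The FN bits of ispControlStatus identify which function of the
	 * two-port device this instance is driving (network function 0 or
	 * 1); the SM bits of portStatus indicate whether that port is
	 * fitted with optical media.
	 */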
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "Invalid function number, ispControlStatus = 0x%x\n",
			      value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	netdev_info(ndev,
		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
		    DRV_NAME, qdev->index, qdev->chip_rev_id,
		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
		    qdev->pci_slot);
	netdev_info(ndev, "%s Interface\n",
		    test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	netdev_info(ndev, "Bus interface is %s %s\n",
		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
		    qdev->mem_map_registers);
	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);

	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
}

static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;
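
	/*
	 * Teardown runs roughly in the reverse order of ql_adapter_up():
	 * stop the TX queue and drop carrier, quiesce and free the IRQ,
	 * then kill the watchdog timer and NAPI before the optional chip
	 * reset and the release of DMA memory resources.
	 */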
	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			soft_reset = ql_adapter_reset(qdev);
			if (soft_reset) {
				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
					   qdev->index);
			}
			netdev_err(ndev,
				   "Releasing driver lock via chip reset\n");
		} else {
			netdev_err(ndev,
				   "Could not acquire driver lock to do reset!\n");
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}

static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		netdev_err(ndev, "Unable to allocate buffers\n");
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			netdev_err(ndev,
				   "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
			qdev->msi = 0;
		} else {
			netdev_info(ndev, "MSI Enabled...\n");
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			  irq_flags, ndev->name, ndev);
	if (err) {
		netdev_err(ndev,
			   "Failed to reserve interrupt %d - already in use\n",
			   qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (ql_wait_for_drvr_lock(qdev)) {
		err = ql_adapter_initialize(qdev);
		if (err) {
			netdev_err(ndev, "Unable to initialize adapter\n");
			goto err_init;
		}
		netdev_err(ndev, "Releasing driver lock\n");
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		netdev_err(ndev, "Could not acquire driver lock\n");
		err = -ENODEV;	/* don't report success when the lock was never taken */
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;
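
	/*
	 * Error unwinding mirrors the order of acquisition above: a failed
	 * initialization releases the driver semaphore, the lock-failure
	 * path drops hw_lock and frees the IRQ, and the IRQ path undoes
	 * the MSI enable.
	 */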
err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}

static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		netdev_err(qdev->ndev,
			   "Driver up/down cycle failed, closing device\n");
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}

static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
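
	/*
	 * Re-program the hardware address using the same indirect
	 * macAddrIndirectPtrReg/macAddrDataReg sequence used during
	 * adapter initialization, serialized by hw_lock.
	 */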
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	netdev_err(ndev, "Resetting...\n");
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}

static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	unsigned long hw_flags;
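
	/*
	 * Scheduled when a reset event has been flagged (QL_RESET_PER_SCSI
	 * or QL_RESET_START): reclaim any TX buffers still mapped for DMA,
	 * acknowledge the Network Reset Interrupt, then poll for the soft
	 * reset to clear before cycling the adapter.
	 */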
	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				netdev_printk(KERN_DEBUG, ndev,
					      "Freeing lost SKB\n");
				pci_unmap_single(qdev->pdev,
						 dma_unmap_addr(&tx_cb->map[0],
								mapaddr),
						 dma_unmap_len(&tx_cb->map[0],
							       maplen),
						 PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
						       dma_unmap_addr(&tx_cb->map[j],
								      mapaddr),
						       dma_unmap_len(&tx_cb->map[j],
								     maplen),
						       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		netdev_err(ndev, "Clearing NRI after reset\n");
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the soft reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				netdev_printk(KERN_DEBUG, ndev,
					      "reset completed\n");
				break;
			}

			if (value & ISP_CONTROL_RI) {
				netdev_printk(KERN_DEBUG, ndev,
					      "clearing NRI after reset\n");
				ql_write_common_reg(qdev,
						    &port_regs->
						    CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI <<
						      16) | ISP_CONTROL_RI));
			}

			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
			ssleep(1);
			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			netdev_err(ndev,
				   "Timed out waiting for reset to complete\n");
			netdev_err(ndev, "Do a reset\n");
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
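
	/*
	 * portStatus packs the board identification decoded below: the
	 * chip revision ID plus flags for a 64-bit data path and PCI-X
	 * bus mode.
	 */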
	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

static void ql3xxx_timer(struct timer_list *t)
{
	struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}

static const struct net_device_ops ql3xxx_netdev_ops = {
	.ndo_open		= ql3xxx_open,
	.ndo_start_xmit		= ql3xxx_send,
	.ndo_stop		= ql3xxx_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ql3xxx_set_mac_address,
	.ndo_tx_timeout		= ql3xxx_tx_timeout,
};

static int ql3xxx_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found;
	int uninitialized_var(pci_using_dac), err;

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);
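
	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit.  pci_using_dac
	 * records which mask succeeded so NETIF_F_HIGHDMA can be set on
	 * the netdev later in probe.
	 */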
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
	if (!qdev->mem_map_registers) {
		pr_err("%s: cannot map device registers\n", pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->netdev_ops = &ql3xxx_netdev_ops;
	ndev->ethtool_ops = &ql3xxx_ethtool_ops;
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
			 __func__, qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
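	/*
	 * Offset 0x4e is presumably the PCI-X command register (capability
	 * structure assumed at 0x4c on this part); 0x0036 raises the
	 * maximum memory read byte count.  The offset is hard-coded rather
	 * than discovered via the capability list.
	 */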
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		pr_err("%s: cannot register net device\n", pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */

	if (!cards_found) {
		pr_alert("%s\n", DRV_STRING);
		pr_alert("Driver name: %s, Version: %s\n",
			 DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return err;
}

static void ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = ql3xxx_remove,
};
module_pci_driver(ql3xxx_driver);