/* 3c59x.c */
/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
/*
	Written 1996-1999 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
	and the EtherLink XL 3c900 and 3c905 cards.

	Problem reports and questions should be directed to
	vortex@scyld.com

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403
*/

/*
 * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
 * as well as other drivers
 *
 * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
 * due to dead code elimination.  There will be some performance benefits from this due to
 * elimination of all the tests and reduced cache footprint.
 */
#define DRV_NAME "3c59x"

/* A few values that may be tweaked. */
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE 16
#define RX_RING_SIZE 32
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/

/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Received frames shorter than this are copied into a freshly allocated
   skbuff; longer ones are passed up in the original DMA buffer.
   Setting to > 1512 effectively disables this feature. */
#ifndef __arm__
static int rx_copybreak = 200;
#else
/* ARM systems perform better by disregarding the bus-master
   transfer capability of these cards. -- rmk */
static int rx_copybreak = 1513;
#endif
/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
static const int mtu = 1500;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;
/* Tx timeout interval (millisecs) */
static int watchdog = 5000;

/* Allow aggregation of Tx interrupts. Saves CPU load at the cost
 * of possible Tx stalls if the system is blocking interrupts
 * somewhere else. Undefine this to disable.
 */
#define tx_interrupt_mitigation 1

/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
/* NOTE(review): the macro aliases the symbol 'vortex_debug' to 'debug' —
   presumably so the module parameter is named "debug"; confirm against the
   module_param() declaration elsewhere in this file. */
#define vortex_debug debug
#ifdef VORTEX_DEBUG
static int vortex_debug = VORTEX_DEBUG;
#else
static int vortex_debug = 1;
#endif
  58. #include <linux/module.h>
  59. #include <linux/kernel.h>
  60. #include <linux/string.h>
  61. #include <linux/timer.h>
  62. #include <linux/errno.h>
  63. #include <linux/in.h>
  64. #include <linux/ioport.h>
  65. #include <linux/interrupt.h>
  66. #include <linux/pci.h>
  67. #include <linux/mii.h>
  68. #include <linux/init.h>
  69. #include <linux/netdevice.h>
  70. #include <linux/etherdevice.h>
  71. #include <linux/skbuff.h>
  72. #include <linux/ethtool.h>
  73. #include <linux/highmem.h>
  74. #include <linux/eisa.h>
  75. #include <linux/bitops.h>
  76. #include <linux/jiffies.h>
  77. #include <linux/gfp.h>
  78. #include <asm/irq.h> /* For nr_irqs only. */
  79. #include <asm/io.h>
  80. #include <linux/uaccess.h>
/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
   This is only in the support-all-kernels source code. */

/* Convert a relative interval in jiffies into an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

#include <linux/delay.h>
/* Version banner printed at probe time when debugging is enabled. */
static const char version[] =
DRV_NAME ": Donald Becker and others.\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
MODULE_LICENSE("GPL");

/* Operational parameters that usually are not changed. */

/* The Vortex size is twice that of the original EtherLinkIII series: the
   runtime register window, window 1, is now always mapped in.
   The Boomerang size is twice as large as the Vortex -- it has additional
   bus master control registers. */
#define VORTEX_TOTAL_SIZE 0x20
#define BOOMERANG_TOTAL_SIZE 0x40

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This only set with the original DP83840 on older 3c905 boards, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required;

/* Prefix for kernel log messages emitted by this driver. */
#define PFX DRV_NAME ": "
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the 3Com FastEtherLink and FastEtherLink
XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbs
versions of the FastEtherLink cards.  The supported product IDs are
  3c590, 3c592, 3c595, 3c597, 3c900, 3c905

The related ISA 3c515 is supported with a separate driver, 3c515.c, included
with the kernel source or available from
    cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.

The EEPROM settings for media type and forced-full-duplex are observed.
The EEPROM media type should be left at the default "autoselect" unless using
10base2 or AUI connections which cannot be reliably detected.

III. Driver operation

The 3c59x series use an interface that's very similar to the previous 3c5x9
series.  The primary interface is two programmed-I/O FIFOs, with an
alternate single-contiguous-region bus-master transfer (see next).

The 3c900 "Boomerang" series uses a full-bus-master interface with separate
lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
programmed-I/O interface that has been removed in 'B' and subsequent board
revisions.

One extension that is advertised in a very large font is that the adapters
are capable of being bus masters.  On the Vortex chip this capability was
only for a single contiguous region making it far less useful than the full
bus master capability.  There is a significant performance impact of taking
an extra interrupt or polling for the completion of each transfer, as well
as difficulty sharing the single transfer engine between the transmit and
receive threads.  Using DMA transfers is a win only with large blocks or
with the flawed versions of the Intel Orion motherboard PCI controller.

The Boomerang chip's full-bus-master interface is useful, and has the
currently-unused advantages over other similar chips that queued transmit
packets may be reordered and receive buffer groups are associated with a
single frame.

With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
Rather than a fixed intermediate receive buffer, this scheme allocates
full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
the copying breakpoint: it is chosen to trade-off the memory wasted by
passing the full-sized skbuff to the queue layer for all frames vs. the
copying cost of copying a frame to a correctly-sized skbuff.

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

IV. Notes

Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
3c590, 3c595, and 3c900 boards.
The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
the EISA version is called "Demon".  According to Terry these names come
from rides at the local amusement park.

The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
This driver only supports ethernet packets because of the skbuff allocation
limit of 4K.
*/
/* This table drives the PCI probe routines.  It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
*/
  164. enum pci_flags_bit {
  165. PCI_USES_MASTER=4,
  166. };
  167. enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
  168. EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
  169. HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
  170. INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
  171. EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
  172. EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
  173. enum vortex_chips {
  174. CH_3C590 = 0,
  175. CH_3C592,
  176. CH_3C597,
  177. CH_3C595_1,
  178. CH_3C595_2,
  179. CH_3C595_3,
  180. CH_3C900_1,
  181. CH_3C900_2,
  182. CH_3C900_3,
  183. CH_3C900_4,
  184. CH_3C900_5,
  185. CH_3C900B_FL,
  186. CH_3C905_1,
  187. CH_3C905_2,
  188. CH_3C905B_TX,
  189. CH_3C905B_1,
  190. CH_3C905B_2,
  191. CH_3C905B_FX,
  192. CH_3C905C,
  193. CH_3C9202,
  194. CH_3C980,
  195. CH_3C9805,
  196. CH_3CSOHO100_TX,
  197. CH_3C555,
  198. CH_3C556,
  199. CH_3C556B,
  200. CH_3C575,
  201. CH_3C575_1,
  202. CH_3CCFE575,
  203. CH_3CCFE575CT,
  204. CH_3CCFE656,
  205. CH_3CCFEM656,
  206. CH_3CCFEM656_1,
  207. CH_3C450,
  208. CH_3C920,
  209. CH_3C982A,
  210. CH_3C982B,
  211. CH_905BT4,
  212. CH_920B_EMB_WNM,
  213. };
/* note: this array directly indexed by above enums, and MUST
 * be kept in sync with both the enums above, and the PCI device
 * table below
 */
/* Per-chip description, indexed by enum vortex_chips. */
static struct vortex_chip_info {
	const char *name;	/* human-readable board name, printed at probe time */
	int flags;		/* pci_flags_bit values (PCI_USES_MASTER) */
	int drv_flags;		/* chip family + capability bits (IS_*, HAS_*, ...) */
	int io_size;		/* I/O region size in bytes; presumably matches the
				   *_TOTAL_SIZE values above — TODO confirm at probe */
} vortex_info_tbl[] = {
	{"3c590 Vortex 10Mbps",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100baseTx",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100baseT4",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100base-MII",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c900 Boomerang 10baseT",
	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
	{"3c900 Boomerang 10Mbps Combo",
	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
	{"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900 Cyclone 10Mbps Combo",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900B-FL Cyclone 10base-FL",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c905 Boomerang 100baseTx",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
	{"3c905 Boomerang 100baseT4",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
	{"3C905B-TX Fast Etherlink XL PCI",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c905B Cyclone 100baseTx",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c905B Cyclone 10/100/BNC",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c905B-FX Cyclone 100baseFx",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c905C Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
	{"3c980 Cyclone",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c980C Python-T",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3cSOHO100-TX Hurricane",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c555 Laptop Hurricane",
	 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
	{"3c556 Laptop Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
	 HAS_HWCKSM, 128, },
	{"3c556B Laptop Hurricane",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
	 WNO_XCVR_PWR|HAS_HWCKSM, 128, },
	{"3c575 [Megahertz] 10/100 LAN CardBus",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
	{"3c575 Boomerang CardBus",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
	{"3CCFE575BT Cyclone CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CCFE575CT Tornado CardBus",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
	 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
	{"3CCFE656 Cyclone CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CCFEM656B Cyclone+Winmodem CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
	 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
	{"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c920 Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c982 Hydra Dual Port A",
	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
	{"3c982 Hydra Dual Port B",
	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
	{"3c905B-T4",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c920B-EMB-WNM Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },

	{NULL,}, /* NULL terminated list. */
};
/* PCI IDs recognized by this driver.  The last (driver_data) column is an
   enum vortex_chips value indexing vortex_info_tbl[] — keep all three in
   sync.  Vendor 0x10B7 is 3Com. */
static const struct pci_device_id vortex_pci_tbl[] = {
	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },

	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },

	{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
/* Operational definitions.
   These are not used by other compilation units and thus are not
   exported in a ".h" file.

   First the windows.  There are eight register windows, with the command
   and status registers available in each.
   */
/* Command and status share register offset 0x0e: writes issue commands,
   reads return the general status. */
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e

/* The top five bits written to EL3_CMD are a command, the lower
   11 bits are the parameter, if applicable.
   Note that 11 parameters bits was fine for ethernet, but the new chip
   can handle FDDI length frames (~4500 octets) and now parameters count
   32-bit 'Dwords' rather than octets. */
  367. enum vortex_cmd {
  368. TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
  369. RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
  370. UpStall = 6<<11, UpUnstall = (6<<11)+1,
  371. DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
  372. RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
  373. FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
  374. SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
  375. SetTxThreshold = 18<<11, SetTxStart = 19<<11,
  376. StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
  377. StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
  378. /* The SetRxFilter command accepts the following classes: */
  379. enum RxFilter {
  380. RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
  381. /* Bits in the general status register. */
  382. enum vortex_status {
  383. IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
  384. TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
  385. IntReq = 0x0040, StatsFull = 0x0080,
  386. DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
  387. DMAInProgress = 1<<11, /* DMA controller is still busy.*/
  388. CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
  389. };
/* Register window 1 offsets, the window used in normal operation.
   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
	RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B,
	TxFree = 0x1C,			/* Remaining free bytes in Tx buffer. */
};
enum Window0 {
	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
	IntrStatus = 0x0E,		/* Valid in all windows. */
};
/* Opcodes for the Wn0EepromCmd register; OR with an eeprom_offset below. */
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations (word offsets). */
enum eeprom_offset {
	PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3,
	EtherLink3ID = 7, IFXcvrIO = 8, IRQLine = 9,
	NodeAddr01 = 10, NodeAddr23 = 11, NodeAddr45 = 12,
	DriverTune = 13, Checksum = 15};

enum Window2 {			/* Window 2. */
	Wn2_ResetOptions = 12,
};
enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config = 0, Wn3_MaxPktSize = 4, Wn3_MAC_Ctrl = 6, Wn3_Options = 8,
};
  419. #define BFEXT(value, offset, bitcount) \
  420. ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
  421. #define BFINS(lhs, rhs, offset, bitcount) \
  422. (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \
  423. (((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
  424. #define RAM_SIZE(v) BFEXT(v, 0, 3)
  425. #define RAM_WIDTH(v) BFEXT(v, 3, 1)
  426. #define RAM_SPEED(v) BFEXT(v, 4, 2)
  427. #define ROM_SIZE(v) BFEXT(v, 6, 2)
  428. #define RAM_SPLIT(v) BFEXT(v, 16, 2)
  429. #define XCVR(v) BFEXT(v, 20, 4)
  430. #define AUTOSELECT(v) BFEXT(v, 24, 1)
enum Window4 {			/* Window 4: Xcvr/media bits. */
	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt = 8, Wn4_Media = 10,
};
/* Bits in the Wn4_Media register. */
enum Win4_Media_bits {
	Media_SQE = 0x0008,	/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,	/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {			/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0, Wn7_VlanEtherType = 4, Wn7_MasterLen = 6,
	Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};
  449. /* The Rx and Tx descriptor lists.
  450. Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
   alignment constraint on tx_ring[] and rx_ring[]. */
#define LAST_FRAG	0x80000000	/* Last Addr/Len pair in descriptor. */
#define DN_COMPLETE	0x00010000	/* This packet has been downloaded */

/* Rx (upload) descriptor as seen by the NIC; all fields little-endian. */
struct boom_rx_desc {
	__le32 next;		/* Last entry points to 0.   */
	__le32 status;
	__le32 addr;		/* Up to 63 addr/len pairs possible. */
	__le32 length;		/* Set LAST_FRAG to indicate last pair. */
};

/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete = 0x00008000, RxDError = 0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr = 1<<25, TCPChksumErr = 1<<26, UDPChksumErr = 1<<27,
	IPChksumValid = 1<<29, TCPChksumValid = 1<<30, UDPChksumValid = 1<<31,
};
/* Scatter/gather Tx is only possible when the kernel supports paged skbs,
 * i.e. when MAX_SKB_FRAGS is defined. */
#ifdef MAX_SKB_FRAGS
#define DO_ZEROCOPY 1
#else
#define DO_ZEROCOPY 0
#endif

/* Tx (download) descriptor; little-endian, shared with the NIC. */
struct boom_tx_desc {
	__le32 next;		/* Last entry points to 0.   */
	__le32 status;		/* bits 0:12 length, others see below.  */
#if DO_ZEROCOPY
	struct {
		__le32 addr;
		__le32 length;
	} frag[1+MAX_SKB_FRAGS];	/* linear part + one slot per page fragment */
#else
	__le32 addr;
	__le32 length;
#endif
};

/* Values for the Tx status entry. */
enum tx_desc_status {
	CRCDisable = 0x2000, TxDComplete = 0x8000,
	AddIPChksum = 0x02000000, AddTCPChksum = 0x04000000, AddUDPChksum = 0x08000000,
	TxIntrUploaded = 0x80000000,	/* IRQ when in FIFO, but maybe not sent. */
};
/* Chip features we care about in vp->capabilities, read from the EEPROM. */
enum ChipCaps { CapBusMaster = 0x20, CapPwrMgmt = 0x2000 };

/* NIC-specific counters beyond the standard netdev stats; reported via
 * ethtool in the order given by ethtool_stats_keys[]. */
struct vortex_extra_stats {
	unsigned long tx_deferred;
	unsigned long tx_max_collisions;
	unsigned long tx_multiple_collisions;
	unsigned long tx_single_collisions;
	unsigned long rx_bad_ssd;
};
/* Per-adapter driver state, stored as netdev_priv() of the net_device. */
struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc* rx_ring;
	struct boom_tx_desc* tx_ring;
	dma_addr_t rx_ring_dma;		/* Bus address of rx_ring. */
	dma_addr_t tx_ring_dma;		/* Bus address of tx_ring. */
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int dirty_tx;		/* The ring entries to be free()ed. */
	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
	struct sk_buff *tx_skb;		/* Packet being eaten by bus master ctrl.  */
	dma_addr_t tx_skb_dma;		/* Allocated DMA address for bus master ctrl DMA. */

	/* PCI configuration space information. */
	struct device *gendev;		/* NULL for the Compaq workaround device. */
	void __iomem *ioaddr;		/* IO address space */
	void __iomem *cb_fn_base;	/* CardBus function status addr space. */

	/* Some values here only for performance evaluation and path-coverage */
	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
	int card_idx;

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;	/* Media selection timer. */
	int options;			/* User-settable misc. driver options. */
	unsigned int media_override:4,	/* Passed-in media type. */
		default_media:4,	/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1, autoselect:1,
		bus_master:1,		/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
		flow_ctrl:1,		/* Use 802.3x flow control (PAUSE only) */
		partner_flow_ctrl:1,	/* Partner supports flow control */
		has_nway:1,
		enable_wol:1,		/* Wake-on-LAN is enabled */
		pm_state_valid:1,	/* pci_dev->saved_config_space has sane contents */
		open:1,
		medialock:1,		/* Media type locked by user override. */
		large_frames:1,		/* accept large frames */
		handling_irq:1;		/* private in_irq indicator */
	/* {get|set}_wol operations are already serialized by rtnl.
	 * no additional locking is required for the enable_wol and acpi_set_WOL()
	 */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
	u16 available_media;		/* From Wn3_Options. */
	u16 capabilities, info1, info2;	/* Various, from EEPROM. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[2];		/* MII device addresses. */
	u16 deferred;			/* Resend these interrupts when we
					 * bale from the ISR */
	u16 io_size;			/* Size of PCI region (for release_region) */

	/* Serialises access to hardware other than MII and variables below.
	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
	spinlock_t lock;

	spinlock_t mii_lock;		/* Serialises access to MII */
	struct mii_if_info mii;		/* MII lib hooks/info */
	spinlock_t window_lock;		/* Serialises access to windowed regs */
	int window;			/* Register window */
};
  559. static void window_set(struct vortex_private *vp, int window)
  560. {
  561. if (window != vp->window) {
  562. iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
  563. vp->window = window;
  564. }
  565. }
/* Generate window_read{8,16,32}() and window_write{8,16,32}().  Each
 * helper takes window_lock, selects the requested register window (a
 * no-op if already current) and performs a single I/O access, making
 * windowed register access safe from any context. */
#define DEFINE_WINDOW_IO(size)						\
static u ## size							\
window_read ## size(struct vortex_private *vp, int window, int addr)	\
{									\
	unsigned long flags;						\
	u ## size ret;							\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	ret = ioread ## size(vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
	return ret;							\
}									\
static void								\
window_write ## size(struct vortex_private *vp, u ## size value,	\
		     int window, int addr)				\
{									\
	unsigned long flags;						\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	iowrite ## size(value, vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
}
DEFINE_WINDOW_IO(8)
DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
/* Map the generic device back to its bus-specific container.  Each macro
 * yields NULL when the device is not on that bus, and the VORTEX_* forms
 * also handle vp->gendev == NULL (the Compaq BIOS32 workaround device,
 * which is probed without a generic device). */
#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif

#define VORTEX_PCI(vp)							\
	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))

#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
#else
#define DEVICE_EISA(dev) NULL
#endif

#define VORTEX_EISA(vp)							\
	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
/* The action to take with a media selection timer tick.
   Note that we deviate from the 3Com order by checking 10base2 before AUI.
 */
enum xcvr_types {
	XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
	XCVR_100baseFx, XCVR_MII = 6, XCVR_NWAY = 8, XCVR_ExtMII = 9, XCVR_Default = 10,
};

/* One entry per xcvr_types value.  'next' chains the media-autoselect
 * order and 'wait' (in jiffies) is how long to wait before checking the
 * media status. */
static const struct media_table {
	char *name;
	unsigned int media_bits:16,	/* Bits to set in Wn4_Media register. */
		mask:8,			/* The transceiver-present bit in Wn3_Config.*/
		next:8;			/* The media type to try next. */
	int wait;			/* Time before we check media status. */
} media_tbl[] = {
	{ "10baseT",	Media_10TP, 0x08, XCVR_10base2,   (14*HZ)/10},
	{ "10Mbs AUI",	Media_SQE,  0x20, XCVR_Default,   (1*HZ)/10},
	{ "undefined",	0,	    0x80, XCVR_10baseT,   10000},
	{ "10base2",	0,	    0x10, XCVR_AUI,       (1*HZ)/10},
	{ "100baseTX",	Media_Lnk,  0x02, XCVR_100baseFx, (14*HZ)/10},
	{ "100baseFX",	Media_Lnk,  0x04, XCVR_MII,       (14*HZ)/10},
	{ "MII",	0,	    0x41, XCVR_10baseT,   3*HZ },
	{ "undefined",	0,	    0x01, XCVR_10baseT,   10000},
	{ "Autonegotiate", 0,	    0x41, XCVR_10baseT,   3*HZ},
	{ "MII-External", 0,	    0x41, XCVR_10baseT,   3*HZ },
	{ "Default",	0,	    0xFF, XCVR_10baseT,   10000},
};
  631. static struct {
  632. const char str[ETH_GSTRING_LEN];
  633. } ethtool_stats_keys[] = {
  634. { "tx_deferred" },
  635. { "tx_max_collisions" },
  636. { "tx_multiple_collisions" },
  637. { "tx_single_collisions" },
  638. { "rx_bad_ssd" },
  639. };
  640. /* number of ETHTOOL_GSTATS u64's */
  641. #define VORTEX_NUM_STATS 5
  642. static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
  643. int chip_idx, int card_idx);
  644. static int vortex_up(struct net_device *dev);
  645. static void vortex_down(struct net_device *dev, int final);
  646. static int vortex_open(struct net_device *dev);
  647. static void mdio_sync(struct vortex_private *vp, int bits);
  648. static int mdio_read(struct net_device *dev, int phy_id, int location);
  649. static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
  650. static void vortex_timer(struct timer_list *t);
  651. static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
  652. struct net_device *dev);
  653. static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
  654. struct net_device *dev);
  655. static int vortex_rx(struct net_device *dev);
  656. static int boomerang_rx(struct net_device *dev);
  657. static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
  658. static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
  659. static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
  660. static int vortex_close(struct net_device *dev);
  661. static void dump_tx_ring(struct net_device *dev);
  662. static void update_stats(void __iomem *ioaddr, struct net_device *dev);
  663. static struct net_device_stats *vortex_get_stats(struct net_device *dev);
  664. static void set_rx_mode(struct net_device *dev);
  665. #ifdef CONFIG_PCI
  666. static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
  667. #endif
  668. static void vortex_tx_timeout(struct net_device *dev);
  669. static void acpi_set_WOL(struct net_device *dev);
  670. static const struct ethtool_ops vortex_ethtool_ops;
  671. static void set_8021q_mode(struct net_device *dev, int enable);
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
/* Per-NIC settings; -1 means "unset, use the default / global value". */
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
/* Global fallbacks consulted when the corresponding per-NIC entry is unset. */
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

static int vortex_cards_found;	/* Total adapters successfully probed. */
  689. module_param(debug, int, 0);
  690. module_param(global_options, int, 0);
  691. module_param_array(options, int, NULL, 0);
  692. module_param(global_full_duplex, int, 0);
  693. module_param_array(full_duplex, int, NULL, 0);
  694. module_param_array(hw_checksums, int, NULL, 0);
  695. module_param_array(flow_ctrl, int, NULL, 0);
  696. module_param(global_enable_wol, int, 0);
  697. module_param_array(enable_wol, int, NULL, 0);
  698. module_param(rx_copybreak, int, 0);
  699. module_param(max_interrupt_work, int, 0);
  700. module_param_hw(compaq_ioaddr, int, ioport, 0);
  701. module_param_hw(compaq_irq, int, irq, 0);
  702. module_param(compaq_device_id, int, 0);
  703. module_param(watchdog, int, 0);
  704. module_param(global_use_mmio, int, 0);
  705. module_param_array(use_mmio, int, NULL, 0);
  706. MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
  707. MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
  708. MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
  709. MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
  710. MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
  711. MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
  712. MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
  713. MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
  714. MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
  715. MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
  716. MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
  717. MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
  718. MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
  719. MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
  720. MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
  721. MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
  722. MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler by hand when normal IRQ
 * delivery cannot be relied on (e.g. netconsole). */
static void poll_vortex(struct net_device *dev)
{
	vortex_boomerang_interrupt(dev->irq, dev);
}
#endif
  729. #ifdef CONFIG_PM
  730. static int vortex_suspend(struct device *dev)
  731. {
  732. struct pci_dev *pdev = to_pci_dev(dev);
  733. struct net_device *ndev = pci_get_drvdata(pdev);
  734. if (!ndev || !netif_running(ndev))
  735. return 0;
  736. netif_device_detach(ndev);
  737. vortex_down(ndev, 1);
  738. return 0;
  739. }
  740. static int vortex_resume(struct device *dev)
  741. {
  742. struct pci_dev *pdev = to_pci_dev(dev);
  743. struct net_device *ndev = pci_get_drvdata(pdev);
  744. int err;
  745. if (!ndev || !netif_running(ndev))
  746. return 0;
  747. err = vortex_up(ndev);
  748. if (err)
  749. return err;
  750. netif_device_attach(ndev);
  751. return 0;
  752. }
/* All suspend-like and resume-like PM transitions map onto the same
 * down/up pair. */
static const struct dev_pm_ops vortex_pm_ops = {
	.suspend = vortex_suspend,
	.resume = vortex_resume,
	.freeze = vortex_suspend,
	.thaw = vortex_resume,
	.poweroff = vortex_suspend,
	.restore = vortex_resume,
};

#define VORTEX_PM_OPS (&vortex_pm_ops)
#else /* !CONFIG_PM */
#define VORTEX_PM_OPS NULL
#endif /* !CONFIG_PM */
#ifdef CONFIG_EISA
/* EISA IDs for the 3c592/3c597 boards handled by this driver. */
static const struct eisa_device_id vortex_eisa_ids[] = {
	{ "TCM5920", CH_3C592 },
	{ "TCM5970", CH_3C597 },
	{ "" }				/* terminator */
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
/* Probe one EISA slot: claim and map its I/O region, then hand over to
 * the common vortex_probe1().  The IRQ number is read from the card's
 * register at offset 0xC88 (top four bits). */
static int vortex_eisa_probe(struct device *device)
{
	void __iomem *ioaddr;
	struct eisa_device *edev;

	edev = to_eisa_device(device);

	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
		return -EBUSY;

	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);

	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
			  edev->id.driver_data, vortex_cards_found)) {
		/* probe failed: give the region back */
		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
		return -ENODEV;
	}

	vortex_cards_found++;

	return 0;
}
/* Tear down an EISA adapter: unregister the netdev, reset the chip and
 * release its I/O region. */
static int vortex_eisa_remove(struct device *device)
{
	struct eisa_device *edev;
	struct net_device *dev;
	struct vortex_private *vp;
	void __iomem *ioaddr;

	edev = to_eisa_device(device);
	dev = eisa_get_drvdata(edev);

	if (!dev) {
		/* Should be impossible: only EISA-probed devices reach here. */
		pr_err("vortex_eisa_remove called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);
	ioaddr = vp->ioaddr;

	unregister_netdev(dev);
	/* Quiesce the hardware before releasing its resources. */
	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
	release_region(edev->base_addr, VORTEX_TOTAL_SIZE);

	free_netdev(dev);
	return 0;
}
/* Glue binding the probe/remove callbacks to the EISA bus driver. */
static struct eisa_driver vortex_eisa_driver = {
	.id_table = vortex_eisa_ids,
	.driver   = {
		.name    = "3c59x",
		.probe   = vortex_eisa_probe,
		.remove  = vortex_eisa_remove
	}
};
#endif /* CONFIG_EISA */
/* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init(void)
{
	int eisa_found = 0;
	int orig_cards_found = vortex_cards_found;

#ifdef CONFIG_EISA
	int err;

	err = eisa_driver_register (&vortex_eisa_driver);
	if (!err) {
		/*
		 * Because of the way the EISA bus is probed, we cannot assume
		 * any device has been found when we exit from
		 * eisa_driver_register (the bus root driver may not be
		 * initialized yet). So we blindly assume something was
		 * found, and let the sysfs magic happen...
		 */
		eisa_found = 1;
	}
#endif

	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
			      compaq_irq, compaq_device_id, vortex_cards_found++);
	}

	return vortex_cards_found - orig_cards_found + eisa_found;
}
/* returns count (>= 0), or negative on error */
/* PCI probe: enable the device, pick I/O vs memory-mapped access (BAR 0
 * vs BAR 1) from module parameters or the chip's default, map it and run
 * the common vortex_probe1().  Cleanup on failure unwinds via gotos in
 * reverse order of acquisition. */
static int vortex_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	int rc, unit, pci_bar;
	struct vortex_chip_info *vci;
	void __iomem *ioaddr;

	/* wake up and enable device */
	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0)
		goto out_disable;

	unit = vortex_cards_found;

	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
		/* Determine the default if the user didn't override us */
		vci = &vortex_info_tbl[ent->driver_data];
		pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
	} else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
		pci_bar = use_mmio[unit] ? 1 : 0;
	else
		pci_bar = global_use_mmio ? 1 : 0;

	ioaddr = pci_iomap(pdev, pci_bar, 0);
	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
		ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		rc = -ENOMEM;
		goto out_release;
	}

	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
			   ent->driver_data, unit);
	if (rc < 0)
		goto out_iounmap;

	vortex_cards_found++;
	goto out;

out_iounmap:
	pci_iounmap(pdev, ioaddr);
out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
out:
	return rc;
}
/* netdev ops for full-bus-master (Boomerang and later) NICs, which use
 * the descriptor-based boomerang_start_xmit().
 * NOTE(review): the name misspells "boomerang"; kept as-is since the
 * symbol may be referenced later in the file. */
static const struct net_device_ops boomrang_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= boomerang_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
/* netdev ops for the older Vortex-generation NICs; identical to
 * boomrang_netdev_ops except that transmission goes through
 * vortex_start_xmit(). */
static const struct net_device_ops vortex_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= vortex_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
  920. /*
  921. * Start up the PCI/EISA device which is described by *gendev.
  922. * Return 0 on success.
  923. *
  924. * NOTE: pdev can be NULL, for the case of a Compaq device
  925. */
  926. static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
  927. int chip_idx, int card_idx)
  928. {
  929. struct vortex_private *vp;
  930. int option;
  931. unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
  932. int i, step;
  933. struct net_device *dev;
  934. static int printed_version;
  935. int retval, print_info;
  936. struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
  937. const char *print_name = "3c59x";
  938. struct pci_dev *pdev = NULL;
  939. struct eisa_device *edev = NULL;
  940. if (!printed_version) {
  941. pr_info("%s", version);
  942. printed_version = 1;
  943. }
  944. if (gendev) {
  945. if ((pdev = DEVICE_PCI(gendev))) {
  946. print_name = pci_name(pdev);
  947. }
  948. if ((edev = DEVICE_EISA(gendev))) {
  949. print_name = dev_name(&edev->dev);
  950. }
  951. }
  952. dev = alloc_etherdev(sizeof(*vp));
  953. retval = -ENOMEM;
  954. if (!dev)
  955. goto out;
  956. SET_NETDEV_DEV(dev, gendev);
  957. vp = netdev_priv(dev);
  958. option = global_options;
  959. /* The lower four bits are the media type. */
  960. if (dev->mem_start) {
  961. /*
  962. * The 'options' param is passed in as the third arg to the
  963. * LILO 'ether=' argument for non-modular use
  964. */
  965. option = dev->mem_start;
  966. }
  967. else if (card_idx < MAX_UNITS) {
  968. if (options[card_idx] >= 0)
  969. option = options[card_idx];
  970. }
  971. if (option > 0) {
  972. if (option & 0x8000)
  973. vortex_debug = 7;
  974. if (option & 0x4000)
  975. vortex_debug = 2;
  976. if (option & 0x0400)
  977. vp->enable_wol = 1;
  978. }
  979. print_info = (vortex_debug > 1);
  980. if (print_info)
  981. pr_info("See Documentation/networking/vortex.txt\n");
  982. pr_info("%s: 3Com %s %s at %p.\n",
  983. print_name,
  984. pdev ? "PCI" : "EISA",
  985. vci->name,
  986. ioaddr);
  987. dev->base_addr = (unsigned long)ioaddr;
  988. dev->irq = irq;
  989. dev->mtu = mtu;
  990. vp->ioaddr = ioaddr;
  991. vp->large_frames = mtu > 1500;
  992. vp->drv_flags = vci->drv_flags;
  993. vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
  994. vp->io_size = vci->io_size;
  995. vp->card_idx = card_idx;
  996. vp->window = -1;
  997. /* module list only for Compaq device */
  998. if (gendev == NULL) {
  999. compaq_net_device = dev;
  1000. }
  1001. /* PCI-only startup logic */
  1002. if (pdev) {
  1003. /* enable bus-mastering if necessary */
  1004. if (vci->flags & PCI_USES_MASTER)
  1005. pci_set_master(pdev);
  1006. if (vci->drv_flags & IS_VORTEX) {
  1007. u8 pci_latency;
  1008. u8 new_latency = 248;
  1009. /* Check the PCI latency value. On the 3c590 series the latency timer
  1010. must be set to the maximum value to avoid data corruption that occurs
		   when the timer expires during a transfer. This bug exists in the Vortex
		   chip only. */
  1013. pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
  1014. if (pci_latency < new_latency) {
  1015. pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
  1016. print_name, pci_latency, new_latency);
  1017. pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
  1018. }
  1019. }
  1020. }
  1021. spin_lock_init(&vp->lock);
  1022. spin_lock_init(&vp->mii_lock);
  1023. spin_lock_init(&vp->window_lock);
  1024. vp->gendev = gendev;
  1025. vp->mii.dev = dev;
  1026. vp->mii.mdio_read = mdio_read;
  1027. vp->mii.mdio_write = mdio_write;
  1028. vp->mii.phy_id_mask = 0x1f;
  1029. vp->mii.reg_num_mask = 0x1f;
  1030. /* Makes sure rings are at least 16 byte aligned. */
  1031. vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
  1032. + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
  1033. &vp->rx_ring_dma, GFP_KERNEL);
  1034. retval = -ENOMEM;
  1035. if (!vp->rx_ring)
  1036. goto free_device;
  1037. vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
  1038. vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
  1039. /* if we are a PCI driver, we store info in pdev->driver_data
  1040. * instead of a module list */
  1041. if (pdev)
  1042. pci_set_drvdata(pdev, dev);
  1043. if (edev)
  1044. eisa_set_drvdata(edev, dev);
  1045. vp->media_override = 7;
  1046. if (option >= 0) {
  1047. vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
  1048. if (vp->media_override != 7)
  1049. vp->medialock = 1;
  1050. vp->full_duplex = (option & 0x200) ? 1 : 0;
  1051. vp->bus_master = (option & 16) ? 1 : 0;
  1052. }
  1053. if (global_full_duplex > 0)
  1054. vp->full_duplex = 1;
  1055. if (global_enable_wol > 0)
  1056. vp->enable_wol = 1;
  1057. if (card_idx < MAX_UNITS) {
  1058. if (full_duplex[card_idx] > 0)
  1059. vp->full_duplex = 1;
  1060. if (flow_ctrl[card_idx] > 0)
  1061. vp->flow_ctrl = 1;
  1062. if (enable_wol[card_idx] > 0)
  1063. vp->enable_wol = 1;
  1064. }
  1065. vp->mii.force_media = vp->full_duplex;
  1066. vp->options = option;
  1067. /* Read the station address from the EEPROM. */
  1068. {
  1069. int base;
  1070. if (vci->drv_flags & EEPROM_8BIT)
  1071. base = 0x230;
  1072. else if (vci->drv_flags & EEPROM_OFFSET)
  1073. base = EEPROM_Read + 0x30;
  1074. else
  1075. base = EEPROM_Read;
  1076. for (i = 0; i < 0x40; i++) {
  1077. int timer;
  1078. window_write16(vp, base + i, 0, Wn0EepromCmd);
  1079. /* Pause for at least 162 us. for the read to take place. */
  1080. for (timer = 10; timer >= 0; timer--) {
  1081. udelay(162);
  1082. if ((window_read16(vp, 0, Wn0EepromCmd) &
  1083. 0x8000) == 0)
  1084. break;
  1085. }
  1086. eeprom[i] = window_read16(vp, 0, Wn0EepromData);
  1087. }
  1088. }
  1089. for (i = 0; i < 0x18; i++)
  1090. checksum ^= eeprom[i];
  1091. checksum = (checksum ^ (checksum >> 8)) & 0xff;
  1092. if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */
  1093. while (i < 0x21)
  1094. checksum ^= eeprom[i++];
  1095. checksum = (checksum ^ (checksum >> 8)) & 0xff;
  1096. }
  1097. if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
  1098. pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
  1099. for (i = 0; i < 3; i++)
  1100. ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
  1101. if (print_info)
  1102. pr_cont(" %pM", dev->dev_addr);
  1103. /* Unfortunately an all zero eeprom passes the checksum and this
  1104. gets found in the wild in failure cases. Crypto is hard 8) */
  1105. if (!is_valid_ether_addr(dev->dev_addr)) {
  1106. retval = -EINVAL;
  1107. pr_err("*** EEPROM MAC address is invalid.\n");
  1108. goto free_ring; /* With every pack */
  1109. }
  1110. for (i = 0; i < 6; i++)
  1111. window_write8(vp, dev->dev_addr[i], 2, i);
  1112. if (print_info)
  1113. pr_cont(", IRQ %d\n", dev->irq);
  1114. /* Tell them about an invalid IRQ. */
  1115. if (dev->irq <= 0 || dev->irq >= nr_irqs)
  1116. pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
  1117. dev->irq);
  1118. step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
  1119. if (print_info) {
  1120. pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
  1121. eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
  1122. step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
  1123. }
  1124. if (pdev && vci->drv_flags & HAS_CB_FNS) {
  1125. unsigned short n;
  1126. vp->cb_fn_base = pci_iomap(pdev, 2, 0);
  1127. if (!vp->cb_fn_base) {
  1128. retval = -ENOMEM;
  1129. goto free_ring;
  1130. }
  1131. if (print_info) {
  1132. pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
  1133. print_name,
  1134. (unsigned long long)pci_resource_start(pdev, 2),
  1135. vp->cb_fn_base);
  1136. }
  1137. n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
  1138. if (vp->drv_flags & INVERT_LED_PWR)
  1139. n |= 0x10;
  1140. if (vp->drv_flags & INVERT_MII_PWR)
  1141. n |= 0x4000;
  1142. window_write16(vp, n, 2, Wn2_ResetOptions);
  1143. if (vp->drv_flags & WNO_XCVR_PWR) {
  1144. window_write16(vp, 0x0800, 0, 0);
  1145. }
  1146. }
  1147. /* Extract our information from the EEPROM data. */
  1148. vp->info1 = eeprom[13];
  1149. vp->info2 = eeprom[15];
  1150. vp->capabilities = eeprom[16];
  1151. if (vp->info1 & 0x8000) {
  1152. vp->full_duplex = 1;
  1153. if (print_info)
  1154. pr_info("Full duplex capable\n");
  1155. }
  1156. {
  1157. static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
  1158. unsigned int config;
  1159. vp->available_media = window_read16(vp, 3, Wn3_Options);
  1160. if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
  1161. vp->available_media = 0x40;
  1162. config = window_read32(vp, 3, Wn3_Config);
  1163. if (print_info) {
  1164. pr_debug(" Internal config register is %4.4x, transceivers %#x.\n",
  1165. config, window_read16(vp, 3, Wn3_Options));
  1166. pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
  1167. 8 << RAM_SIZE(config),
  1168. RAM_WIDTH(config) ? "word" : "byte",
  1169. ram_split[RAM_SPLIT(config)],
  1170. AUTOSELECT(config) ? "autoselect/" : "",
  1171. XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
  1172. media_tbl[XCVR(config)].name);
  1173. }
  1174. vp->default_media = XCVR(config);
  1175. if (vp->default_media == XCVR_NWAY)
  1176. vp->has_nway = 1;
  1177. vp->autoselect = AUTOSELECT(config);
  1178. }
  1179. if (vp->media_override != 7) {
  1180. pr_info("%s: Media override to transceiver type %d (%s).\n",
  1181. print_name, vp->media_override,
  1182. media_tbl[vp->media_override].name);
  1183. dev->if_port = vp->media_override;
  1184. } else
  1185. dev->if_port = vp->default_media;
  1186. if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
  1187. dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
  1188. int phy, phy_idx = 0;
  1189. mii_preamble_required++;
  1190. if (vp->drv_flags & EXTRA_PREAMBLE)
  1191. mii_preamble_required++;
  1192. mdio_sync(vp, 32);
  1193. mdio_read(dev, 24, MII_BMSR);
  1194. for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
  1195. int mii_status, phyx;
  1196. /*
  1197. * For the 3c905CX we look at index 24 first, because it bogusly
  1198. * reports an external PHY at all indices
  1199. */
  1200. if (phy == 0)
  1201. phyx = 24;
  1202. else if (phy <= 24)
  1203. phyx = phy - 1;
  1204. else
  1205. phyx = phy;
  1206. mii_status = mdio_read(dev, phyx, MII_BMSR);
  1207. if (mii_status && mii_status != 0xffff) {
  1208. vp->phys[phy_idx++] = phyx;
  1209. if (print_info) {
  1210. pr_info(" MII transceiver found at address %d, status %4x.\n",
  1211. phyx, mii_status);
  1212. }
  1213. if ((mii_status & 0x0040) == 0)
  1214. mii_preamble_required++;
  1215. }
  1216. }
  1217. mii_preamble_required--;
  1218. if (phy_idx == 0) {
  1219. pr_warn(" ***WARNING*** No MII transceivers found!\n");
  1220. vp->phys[0] = 24;
  1221. } else {
  1222. vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
  1223. if (vp->full_duplex) {
  1224. /* Only advertise the FD media types. */
  1225. vp->advertising &= ~0x02A0;
  1226. mdio_write(dev, vp->phys[0], 4, vp->advertising);
  1227. }
  1228. }
  1229. vp->mii.phy_id = vp->phys[0];
  1230. }
  1231. if (vp->capabilities & CapBusMaster) {
  1232. vp->full_bus_master_tx = 1;
  1233. if (print_info) {
  1234. pr_info(" Enabling bus-master transmits and %s receives.\n",
  1235. (vp->info2 & 1) ? "early" : "whole-frame" );
  1236. }
  1237. vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
  1238. vp->bus_master = 0; /* AKPM: vortex only */
  1239. }
  1240. /* The 3c59x-specific entries in the device structure. */
  1241. if (vp->full_bus_master_tx) {
  1242. dev->netdev_ops = &boomrang_netdev_ops;
  1243. /* Actually, it still should work with iommu. */
  1244. if (card_idx < MAX_UNITS &&
  1245. ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
  1246. hw_checksums[card_idx] == 1)) {
  1247. dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
  1248. }
  1249. } else
  1250. dev->netdev_ops = &vortex_netdev_ops;
  1251. if (print_info) {
  1252. pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
  1253. print_name,
  1254. (dev->features & NETIF_F_SG) ? "en":"dis",
  1255. (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
  1256. }
  1257. dev->ethtool_ops = &vortex_ethtool_ops;
  1258. dev->watchdog_timeo = (watchdog * HZ) / 1000;
  1259. if (pdev) {
  1260. vp->pm_state_valid = 1;
  1261. pci_save_state(pdev);
  1262. acpi_set_WOL(dev);
  1263. }
  1264. retval = register_netdev(dev);
  1265. if (retval == 0)
  1266. return 0;
  1267. free_ring:
  1268. dma_free_coherent(&pdev->dev,
  1269. sizeof(struct boom_rx_desc) * RX_RING_SIZE +
  1270. sizeof(struct boom_tx_desc) * TX_RING_SIZE,
  1271. vp->rx_ring, vp->rx_ring_dma);
  1272. free_device:
  1273. free_netdev(dev);
  1274. pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
  1275. out:
  1276. return retval;
  1277. }
  1278. static void
  1279. issue_and_wait(struct net_device *dev, int cmd)
  1280. {
  1281. struct vortex_private *vp = netdev_priv(dev);
  1282. void __iomem *ioaddr = vp->ioaddr;
  1283. int i;
  1284. iowrite16(cmd, ioaddr + EL3_CMD);
  1285. for (i = 0; i < 2000; i++) {
  1286. if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
  1287. return;
  1288. }
  1289. /* OK, that didn't work. Do it the slow way. One second */
  1290. for (i = 0; i < 100000; i++) {
  1291. if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
  1292. if (vortex_debug > 1)
  1293. pr_info("%s: command 0x%04x took %d usecs\n",
  1294. dev->name, cmd, i * 10);
  1295. return;
  1296. }
  1297. udelay(10);
  1298. }
  1299. pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
  1300. dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
  1301. }
  1302. static void
  1303. vortex_set_duplex(struct net_device *dev)
  1304. {
  1305. struct vortex_private *vp = netdev_priv(dev);
  1306. pr_info("%s: setting %s-duplex.\n",
  1307. dev->name, (vp->full_duplex) ? "full" : "half");
  1308. /* Set the full-duplex bit. */
  1309. window_write16(vp,
  1310. ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
  1311. (vp->large_frames ? 0x40 : 0) |
  1312. ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
  1313. 0x100 : 0),
  1314. 3, Wn3_MAC_Ctrl);
  1315. }
  1316. static void vortex_check_media(struct net_device *dev, unsigned int init)
  1317. {
  1318. struct vortex_private *vp = netdev_priv(dev);
  1319. unsigned int ok_to_print = 0;
  1320. if (vortex_debug > 3)
  1321. ok_to_print = 1;
  1322. if (mii_check_media(&vp->mii, ok_to_print, init)) {
  1323. vp->full_duplex = vp->mii.full_duplex;
  1324. vortex_set_duplex(dev);
  1325. } else if (init) {
  1326. vortex_set_duplex(dev);
  1327. }
  1328. }
/*
 * vortex_up - bring the interface fully up: power the PCI device back to
 * D0, select the active media port, program the station address, clear
 * the statistics counters and (re)arm the Rx/Tx engines and interrupt
 * masks.
 *
 * Called from vortex_open() and from error/recovery paths, so it must
 * tolerate running on an already-initialised card.
 *
 * Returns 0 on success, or a negative errno if the PCI device could not
 * be enabled.
 */
static int
vortex_up(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned int config;
	int i, mii_reg1, mii_reg5, err = 0;

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		err = pci_enable_device(VORTEX_PCI(vp));
		if (err) {
			pr_warn("%s: Could not enable device\n", dev->name);
			goto err_out;
		}
	}

	/* Before initializing select the active media port. */
	config = window_read32(vp, 3, Wn3_Config);

	if (vp->media_override != 7) {
		/* The user forced a transceiver via module options. */
		pr_info("%s: Media override to transceiver %d (%s).\n",
			dev->name, vp->media_override,
			media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect) {
		if (vp->has_nway) {
			if (vortex_debug > 1)
				pr_info("%s: using NWAY device table, not %d\n",
					dev->name, dev->if_port);
			dev->if_port = XCVR_NWAY;
		} else {
			/* Find first available media type, starting with 100baseTx. */
			dev->if_port = XCVR_100baseTx;
			while (! (vp->available_media & media_tbl[dev->if_port].mask))
				dev->if_port = media_tbl[dev->if_port].next;
			if (vortex_debug > 1)
				pr_info("%s: first available media type: %s\n",
					dev->name, media_tbl[dev->if_port].name);
		}
	} else {
		dev->if_port = vp->default_media;
		if (vortex_debug > 1)
			pr_info("%s: using default media %s\n",
				dev->name, media_tbl[dev->if_port].name);
	}

	/* Media-watch timer; first fires after this port's settle delay. */
	timer_setup(&vp->timer, vortex_timer, 0);
	mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));

	if (vortex_debug > 1)
		pr_debug("%s: Initial media type %s.\n",
			dev->name, media_tbl[dev->if_port].name);

	vp->full_duplex = vp->mii.force_media;

	/* Insert the chosen transceiver into bits 20-23 of InternalConfig. */
	config = BFINS(config, dev->if_port, 20, 4);
	if (vortex_debug > 6)
		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
	window_write32(vp, config, 3, Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		/* NOTE(review): mii_reg1 is read but never used afterwards —
		 * presumably kept for the side effect of latching/clearing
		 * BMSR status bits; confirm before removing. */
		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
		/* LPA bit 0x0400: link partner advertises pause frames. */
		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
		vp->mii.full_duplex = vp->full_duplex;

		vortex_check_media(dev, 1);
	}
	else
		vortex_set_duplex(dev);

	issue_and_wait(dev, TxReset);
	/*
	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
	 */
	issue_and_wait(dev, RxReset|0x04);

	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
			dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);
	for (; i < 12; i+=2)
		window_write16(vp, 0, 2, i);

	if (vp->cb_fn_base) {
		/* CardBus: re-apply the LED/MII power polarity quirks. */
		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
	}

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		iowrite16(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		/* Program this port's media bits, preserving the others. */
		window_write16(vp,
			       (window_read16(vp, 4, Wn4_Media) &
				~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
	for (i = 0; i < 10; i++)
		window_read8(vp, 6, i);
	window_read16(vp, 6, 10);
	window_read16(vp, 6, 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	window_read8(vp, 4, 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	window_write16(vp, 0x0040, 4, Wn4_NetDiag);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		iowrite32(0x0020, ioaddr + PktStatus);
		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) {		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Rx, Tx rings. */
		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
			vp->rx_ring[i].status = 0;
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = NULL;
		iowrite32(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	/* enable 802.1q tagged frames */
	set_8021q_mode(dev, 1);
	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
		iowrite32(0x8000, vp->cb_fn_base + 4);
	netif_start_queue (dev);
	netdev_reset_queue(dev);
err_out:
	return err;
}
/*
 * vortex_open - net_device open() callback.
 *
 * Reserves the (shared) IRQ, fills the Rx descriptor ring with freshly
 * allocated DMA-mapped skbuffs (bus-master variants only), then calls
 * vortex_up() to program the hardware.
 *
 * Returns 0 on success or a negative errno (IRQ unavailable, or the Rx
 * ring could not be fully populated).
 */
static int
vortex_open(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int i;
	int retval;
	dma_addr_t dma;

	/* Use the now-standard shared IRQ implementation. */
	if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
		pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
		goto err;
	}

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		if (vortex_debug > 2)
			pr_debug("%s: Filling in the Rx ring.\n", dev->name);
		for (i = 0; i < RX_RING_SIZE; i++) {
			struct sk_buff *skb;
			/* Link each descriptor to its successor; wrapped below. */
			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);

			skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
						 GFP_KERNEL);
			vp->rx_skbuff[i] = skb;
			if (skb == NULL)
				break;			/* Bad news!  */

			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
			dma = dma_map_single(vp->gendev, skb->data,
					     PKT_BUF_SZ, DMA_FROM_DEVICE);
			if (dma_mapping_error(vp->gendev, dma))
				break;
			vp->rx_ring[i].addr = cpu_to_le32(dma);
		}
		if (i != RX_RING_SIZE) {
			pr_emerg("%s: no memory for rx ring\n", dev->name);
			retval = -ENOMEM;
			goto err_free_skb;
		}
		/* Wrap the ring. */
		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
	}

	retval = vortex_up(dev);
	if (!retval)
		goto out;

err_free_skb:
	/*
	 * NOTE(review): this path frees the skbuffs but does not
	 * dma_unmap_single() the mappings already created above (and the
	 * skb whose mapping failed is freed without ever being mapped) —
	 * confirm whether the mapping leak on this failure path is
	 * acceptable.
	 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (vp->rx_skbuff[i]) {
			dev_kfree_skb(vp->rx_skbuff[i]);
			vp->rx_skbuff[i] = NULL;
		}
	}
	free_irq(dev->irq, dev);
err:
	if (vortex_debug > 1)
		pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
out:
	return retval;
}
/*
 * vortex_timer - periodic media-watch timer.
 *
 * Checks whether the currently selected transceiver has link.  If not
 * (and the media is not locked by a module option), steps through the
 * media table to the next port the card supports and reprograms
 * Wn4_Media and the transceiver-select field of Wn3_Config.  Re-arms
 * itself: 60s when the link is up, 5s while carrier is down or the
 * device is a bonding slave, or the new port's settle delay when
 * switching media.
 */
static void
vortex_timer(struct timer_list *t)
{
	struct vortex_private *vp = from_timer(vp, t, timer);
	struct net_device *dev = vp->mii.dev;
	void __iomem *ioaddr = vp->ioaddr;
	int next_tick = 60*HZ;
	int ok = 0;
	int media_status;

	if (vortex_debug > 2) {
		pr_debug("%s: Media selection timer tick happened, %s.\n",
			 dev->name, media_tbl[dev->if_port].name);
		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
	}

	media_status = window_read16(vp, 4, Wn4_Media);
	switch (dev->if_port) {
	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
		/* These ports report link directly via the link-beat bit. */
		if (media_status & Media_LnkBeat) {
			netif_carrier_on(dev);
			ok = 1;
			if (vortex_debug > 1)
				pr_debug("%s: Media %s has link beat, %x.\n",
					 dev->name, media_tbl[dev->if_port].name, media_status);
		} else {
			netif_carrier_off(dev);
			if (vortex_debug > 1) {
				pr_debug("%s: Media %s has no link beat, %x.\n",
					 dev->name, media_tbl[dev->if_port].name, media_status);
			}
		}
		break;
	case XCVR_MII: case XCVR_NWAY:
		{
			/* MII/NWAY: delegate link/duplex tracking to the helper. */
			ok = 1;
			vortex_check_media(dev, 0);
		}
		break;
	default:					/* Other media types handled by Tx timeouts. */
		if (vortex_debug > 1)
			pr_debug("%s: Media %s has no indication, %x.\n",
				 dev->name, media_tbl[dev->if_port].name, media_status);
		ok = 1;
	}

	/* Poll faster while carrier is down or when enslaved to a bond. */
	if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
		next_tick = 5*HZ;

	if (vp->medialock)
		goto leave_media_alone;

	if (!ok) {
		unsigned int config;

		spin_lock_irq(&vp->lock);

		/* Advance to the next media type this card actually has. */
		do {
			dev->if_port = media_tbl[dev->if_port].next;
		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
		if (dev->if_port == XCVR_Default) { /* Go back to default. */
			dev->if_port = vp->default_media;
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failing, using default %s port.\n",
					 dev->name, media_tbl[dev->if_port].name);
		} else {
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failed, now trying %s port.\n",
					 dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}

		/* Program the new media bits, keeping unrelated ones. */
		window_write16(vp,
			       (media_status & ~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);

		/* Then the transceiver-select field (bits 20-23). */
		config = window_read32(vp, 3, Wn3_Config);
		config = BFINS(config, dev->if_port, 20, 4);
		window_write32(vp, config, 3, Wn3_Config);

		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			 ioaddr + EL3_CMD);
		if (vortex_debug > 1)
			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */

		spin_unlock_irq(&vp->lock);
	}

leave_media_alone:
	if (vortex_debug > 2)
		pr_debug("%s: Media selection timer finished, %s.\n",
			 dev->name, media_tbl[dev->if_port].name);

	mod_timer(&vp->timer, RUN_AT(next_tick));
	/* Replay interrupts that the handler deferred while overloaded. */
	if (vp->deferred)
		iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
/*
 * vortex_tx_timeout - net watchdog callback: the Tx path has stalled.
 *
 * Dumps the diagnostic registers, handles the "interrupt posted but not
 * delivered" case by invoking the ISR directly, resets the transmitter,
 * and on bus-master chips restarts the download engine from the oldest
 * unacknowledged descriptor.
 */
static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, ioread8(ioaddr + TxStatus),
		   ioread16(ioaddr + EL3_STATUS));
	pr_err("  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
			window_read16(vp, 4, Wn4_NetDiag),
			window_read16(vp, 4, Wn4_Media),
			ioread32(ioaddr + PktStatus),
			window_read16(vp, 4, Wn4_FIFODiag));
	/* Slight code bloat to be user friendly. */
	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
		pr_err("%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
		pr_err("%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. */
		vortex_boomerang_interrupt(dev->irq, dev);
	}

	if (vortex_debug > 0)
		dump_tx_ring(dev);

	issue_and_wait(dev, TxReset);

	dev->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
		/* Restart the download engine from the oldest pending
		 * descriptor if it has gone idle. */
		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
				 ioaddr + DownListPtr);
		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
			netif_wake_queue (dev);
			netdev_reset_queue (dev);
		}
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		iowrite16(DownUnstall, ioaddr + EL3_CMD);
	} else {
		dev->stats.tx_dropped++;
		netif_wake_queue(dev);
		netdev_reset_queue(dev);
	}
	/* Issue Tx Enable */
	iowrite16(TxEnable, ioaddr + EL3_CMD);
	netif_trans_update(dev); /* prevent tx timeout */
}
  1672. /*
  1673. * Handle uncommon interrupt sources. This is a separate routine to minimize
  1674. * the cache impact.
  1675. */
/*
 * vortex_error - service the rare interrupt causes: Tx errors,
 * statistics overflow, dropped-interrupt recovery and PCI/FIFO host
 * errors.
 * @status: the EL3_STATUS bits that triggered this call.
 *
 * May reset the transmitter and, on a host error with bus-master Tx,
 * takes the whole interface down and back up.
 */
static void
vortex_error(struct net_device *dev, int status)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int do_tx_reset = 0, reset_mask = 0;
	unsigned char tx_status = 0;

	if (vortex_debug > 2) {
		pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
	}

	if (status & TxComplete) {			/* Really "TxError" for us. */
		tx_status = ioread8(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
		if (vortex_debug > 2 ||
		    (tx_status != 0x88 && vortex_debug > 0)) {
			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
				   dev->name, tx_status);
			if (tx_status == 0x82) {
				pr_err("Probably a duplex mismatch.  See "
						"Documentation/networking/vortex.txt\n");
			}
			dump_tx_ring(dev);
		}
		/* Attribute the error bits to the matching counters. */
		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
		iowrite8(0, ioaddr + TxStatus);
		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
			do_tx_reset = 1;
		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET))  {	/* maxCollisions */
			do_tx_reset = 1;
			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
		} else {				/* Merely re-enable the transmitter. */
			iowrite16(TxEnable, ioaddr + EL3_CMD);
		}
	}

	if (status & RxEarly)				/* Rx early is unused. */
		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);

	if (status & StatsFull) {			/* Empty statistics. */
		static int DoneDidThat;
		if (vortex_debug > 4)
			pr_debug("%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0  &&
			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
			pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n",
				dev->name);
			iowrite16(SetIntrEnb |
				  (window_read16(vp, 5, 10) & ~StatsFull),
				  ioaddr + EL3_CMD);
			vp->intr_enable &= ~StatsFull;
			DoneDidThat++;
		}
	}
	if (status & IntReq) {		/* Restore all interrupt sources.  */
		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	}
	if (status & HostError) {
		u16 fifo_diag;
		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
			   dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			int bus_status = ioread32(ioaddr + PktStatus);
			/* 0x80000000 PCI master abort. */
			/* 0x40000000 PCI target abort. */
			if (vortex_debug)
				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);

			/* In this case, blow the card away */
			/* Must not enter D3 or we can't legally issue the reset! */
			vortex_down(dev, 0);
			issue_and_wait(dev, TotalReset | 0xff);
			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			/* Reset Rx fifo and upload logic */
			issue_and_wait(dev, RxReset|0x07);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			/* enable 802.1q VLAN tagged frames */
			set_8021q_mode(dev, 1);
			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}

	if (do_tx_reset) {
		issue_and_wait(dev, TxReset|reset_mask);
		iowrite16(TxEnable, ioaddr + EL3_CMD);
		if (!vp->full_bus_master_tx)
			netif_wake_queue(dev);
	}
}
/*
 * vortex_start_xmit - ndo_start_xmit for the non-descriptor (vortex)
 * chips: the packet is either pushed into the Tx FIFO via programmed
 * I/O or handed to the single-packet bus-master DMA engine.
 *
 * Always returns NETDEV_TX_OK; on a DMA mapping failure the skb is
 * dropped and counted in tx_dropped.
 */
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int skblen = skb->len;

	/* Put out the doubleword header... */
	iowrite32(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;	/* DMA length is dword-rounded. */
		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
						DMA_TO_DEVICE);
		if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		spin_lock_irq(&vp->window_lock);
		window_set(vp, 7);
		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
		iowrite16(len, ioaddr + Wn7_MasterLen);
		spin_unlock_irq(&vp->window_lock);
		vp->tx_skb = skb;
		skb_tx_timestamp(skb);
		iowrite16(StartDMADown, ioaddr + EL3_CMD);
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		skb_tx_timestamp(skb);
		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_consume_skb_any (skb);
		if (ioread16(ioaddr + TxFree) > 1536) {
			netif_start_queue (dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. */
			netif_stop_queue(dev);
			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
		}
	}

	netdev_sent_queue(dev, skblen);

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;

		/* Pop and handle up to 31 queued per-packet Tx statuses. */
		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
				if (vortex_debug > 2)
					pr_debug("%s: Tx error, status %2.2x.\n",
						 dev->name, tx_status);
				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				iowrite16(TxEnable, ioaddr + EL3_CMD);
			}
			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}
  1835. static netdev_tx_t
  1836. boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1837. {
  1838. struct vortex_private *vp = netdev_priv(dev);
  1839. void __iomem *ioaddr = vp->ioaddr;
  1840. /* Calculate the next Tx descriptor entry. */
  1841. int entry = vp->cur_tx % TX_RING_SIZE;
  1842. int skblen = skb->len;
  1843. struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
  1844. unsigned long flags;
  1845. dma_addr_t dma_addr;
  1846. if (vortex_debug > 6) {
  1847. pr_debug("boomerang_start_xmit()\n");
  1848. pr_debug("%s: Trying to send a packet, Tx index %d.\n",
  1849. dev->name, vp->cur_tx);
  1850. }
  1851. /*
  1852. * We can't allow a recursion from our interrupt handler back into the
  1853. * tx routine, as they take the same spin lock, and that causes
  1854. * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
  1855. * a bit
  1856. */
  1857. if (vp->handling_irq)
  1858. return NETDEV_TX_BUSY;
  1859. if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
  1860. if (vortex_debug > 0)
  1861. pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n",
  1862. dev->name);
  1863. netif_stop_queue(dev);
  1864. return NETDEV_TX_BUSY;
  1865. }
  1866. vp->tx_skbuff[entry] = skb;
  1867. vp->tx_ring[entry].next = 0;
  1868. #if DO_ZEROCOPY
  1869. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1870. vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
  1871. else
  1872. vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
  1873. if (!skb_shinfo(skb)->nr_frags) {
  1874. dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
  1875. DMA_TO_DEVICE);
  1876. if (dma_mapping_error(vp->gendev, dma_addr))
  1877. goto out_dma_err;
  1878. vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
  1879. vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
  1880. } else {
  1881. int i;
  1882. dma_addr = dma_map_single(vp->gendev, skb->data,
  1883. skb_headlen(skb), DMA_TO_DEVICE);
  1884. if (dma_mapping_error(vp->gendev, dma_addr))
  1885. goto out_dma_err;
  1886. vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
  1887. vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
  1888. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1889. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1890. dma_addr = skb_frag_dma_map(vp->gendev, frag,
  1891. 0,
  1892. frag->size,
  1893. DMA_TO_DEVICE);
  1894. if (dma_mapping_error(vp->gendev, dma_addr)) {
  1895. for(i = i-1; i >= 0; i--)
  1896. dma_unmap_page(vp->gendev,
  1897. le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
  1898. le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
  1899. DMA_TO_DEVICE);
  1900. dma_unmap_single(vp->gendev,
  1901. le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
  1902. le32_to_cpu(vp->tx_ring[entry].frag[0].length),
  1903. DMA_TO_DEVICE);
  1904. goto out_dma_err;
  1905. }
  1906. vp->tx_ring[entry].frag[i+1].addr =
  1907. cpu_to_le32(dma_addr);
  1908. if (i == skb_shinfo(skb)->nr_frags-1)
  1909. vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
  1910. else
  1911. vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
  1912. }
  1913. }
  1914. #else
  1915. dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
  1916. if (dma_mapping_error(vp->gendev, dma_addr))
  1917. goto out_dma_err;
  1918. vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
  1919. vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
  1920. vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
  1921. #endif
  1922. spin_lock_irqsave(&vp->lock, flags);
  1923. /* Wait for the stall to complete. */
  1924. issue_and_wait(dev, DownStall);
  1925. prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
  1926. if (ioread32(ioaddr + DownListPtr) == 0) {
  1927. iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
  1928. vp->queued_packet++;
  1929. }
  1930. vp->cur_tx++;
  1931. netdev_sent_queue(dev, skblen);
  1932. if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
  1933. netif_stop_queue (dev);
  1934. } else { /* Clear previous interrupt enable. */
  1935. #if defined(tx_interrupt_mitigation)
  1936. /* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
  1937. * were selected, this would corrupt DN_COMPLETE. No?
  1938. */
  1939. prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
  1940. #endif
  1941. }
  1942. skb_tx_timestamp(skb);
  1943. iowrite16(DownUnstall, ioaddr + EL3_CMD);
  1944. spin_unlock_irqrestore(&vp->lock, flags);
  1945. out:
  1946. return NETDEV_TX_OK;
  1947. out_dma_err:
  1948. dev_err(vp->gendev, "Error mapping dma buffer\n");
  1949. goto out;
  1950. }
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */

/*
 * This is the ISR for the vortex series chips.
 * full_bus_master_tx == 0 && full_bus_master_rx == 0
 *
 * Called with vp->lock held (see vortex_boomerang_interrupt()).  Services
 * RxComplete, TxAvailable and single-packet Tx DMA completion in a loop
 * bounded by max_interrupt_work; if the budget runs out, the remaining
 * interrupt sources are masked and left for vp->timer to re-enable.
 */
static irqreturn_t
_vortex_interrupt(int irq, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* event budget for this invocation */
	int handled = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	ioaddr = vp->ioaddr;
	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("vortex_interrupt(). status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs cause this */
	handled = 1;

	if (status & IntReq) {
		/* Merge in the bits a previous overloaded pass deferred. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
		goto handler_exit;

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			 dev->name, status, ioread8(ioaddr + Timer));

	spin_lock(&vp->window_lock);
	window_set(vp, 7);

	do {
		if (vortex_debug > 5)
			pr_debug("%s: In interrupt loop, status %4.4x.\n",
				 dev->name, status);

		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				pr_debug(" TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue (dev);
		}

		if (status & DMADone) {
			/* Bit 0x1000 in MasterStatus signals Tx DMA done. */
			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
				dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
				pkts_compl++;
				bytes_compl += vp->tx_skb->len;
				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
				if (ioread16(ioaddr + TxFree) > 1536) {
					/*
					 * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
					 * insufficient FIFO room, the TxAvailable test will succeed and call
					 * netif_wake_queue()
					 */
					netif_wake_queue(dev);
				} else { /* Interrupt when FIFO has room for max-sized packet. */
					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
					netif_stop_queue(dev);
				}
			}
		}
		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			if (status & RxEarly)
				vortex_rx(dev);
			/* NOTE(review): window_lock is dropped here, presumably
			 * because vortex_error() moves the register window;
			 * window 7 is reselected below — confirm. */
			spin_unlock(&vp->window_lock);
			vortex_error(dev, status);
			spin_lock(&vp->window_lock);
			window_set(vp, 7);
		}

		if (--work_done < 0) {
			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					  ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	spin_unlock(&vp->window_lock);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			 dev->name, status);
handler_exit:
	return IRQ_RETVAL(handled);
}
/*
 * This is the ISR for the boomerang series chips.
 * full_bus_master_tx == 1 && full_bus_master_rx == 1
 *
 * Called with vp->lock held (see vortex_boomerang_interrupt()).  Handles
 * UpComplete (Rx DMA) and DownComplete (Tx DMA) events, reclaiming
 * finished skbs from the Tx descriptor ring.  Work is bounded by
 * max_interrupt_work; leftover sources are masked and deferred to
 * vp->timer.
 */
static irqreturn_t
_boomerang_interrupt(int irq, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;
	int handled = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	ioaddr = vp->ioaddr;

	/* Flag that the ISR is active; cleared on every exit path below.
	 * NOTE(review): presumably checked by the xmit path to avoid racing
	 * on the ring — confirm against the transmit routine. */
	vp->handling_irq = 1;

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
	handled = 1;

	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
		goto handler_exit;
	}

	if (status & IntReq) {
		/* Merge in the bits a previous overloaded pass deferred. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			 dev->name, status, ioread8(ioaddr + Timer));

	do {
		if (vortex_debug > 5)
			pr_debug("%s: In interrupt loop, status %4.4x.\n",
				 dev->name, status);

		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
			/* Reclaim every descriptor the NIC has finished with. */
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				/* DownListPtr still pointing at this entry means
				 * the NIC has not consumed it yet. */
				if (ioread32(ioaddr + DownListPtr) ==
				    vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;	/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;	/* It still hasn't been processed. */
#endif
				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
					int i;
					/* frag[0] maps the linear part; the rest map pages. */
					dma_unmap_single(vp->gendev,
						le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
						le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
						DMA_TO_DEVICE);
					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
						dma_unmap_page(vp->gendev,
							le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
							le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
							DMA_TO_DEVICE);
#else
					dma_unmap_single(vp->gendev,
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
#endif
					pkts_compl++;
					bytes_compl += skb->len;
					dev_kfree_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
				}
				/* dev->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			/* Ring has free slots again — restart the queue. */
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					  ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);
	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			 dev->name, status);
handler_exit:
	vp->handling_irq = 0;
	return IRQ_RETVAL(handled);
}
  2173. static irqreturn_t
  2174. vortex_boomerang_interrupt(int irq, void *dev_id)
  2175. {
  2176. struct net_device *dev = dev_id;
  2177. struct vortex_private *vp = netdev_priv(dev);
  2178. unsigned long flags;
  2179. irqreturn_t ret;
  2180. spin_lock_irqsave(&vp->lock, flags);
  2181. if (vp->full_bus_master_rx)
  2182. ret = _boomerang_interrupt(dev->irq, dev);
  2183. else
  2184. ret = _vortex_interrupt(dev->irq, dev);
  2185. spin_unlock_irqrestore(&vp->lock, flags);
  2186. return ret;
  2187. }
/*
 * PIO receive path for vortex-generation chips: pop packets out of the
 * Rx FIFO one at a time, optionally using the single-channel bus-master
 * DMA engine when it is idle.  Always returns 0.
 */
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			 ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	/* RxStatus > 0 means a complete packet is waiting (sign bit clear). */
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					 pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
				    ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* Bus-master DMA the packet out of the FIFO. */
					dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
									pkt_len, DMA_FROM_DEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					/* NOTE(review): unbounded busy-wait for DMA completion. */
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
					dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
				} else {
					/* Programmed I/O copy, rounded up to a word multiple. */
					ioread32_rep(ioaddr + RX_FIFO,
						     skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					  dev->name, pkt_len);
			dev->stats.rx_dropped++;
		}
		/* Error or allocation failure: drop the packet from the FIFO. */
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}
/*
 * Bus-master receive path: walk the Rx descriptor ring starting at
 * cur_rx, handing completed buffers to the stack.  Short packets are
 * copied into a fresh skb (rx_copybreak); large ones are passed up
 * zero-copy and replaced with a newly mapped buffer.  Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
	int rx_status;
	int rx_work_limit = RX_RING_SIZE;	/* at most one full ring per call */

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			/* Error bits live in the high half of the status word. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb, *newskb;
			dma_addr_t newdma;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					 pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* Sync the DMA buffer to the CPU, copy, then hand
				 * the buffer back to the device for reuse. */
				dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				skb_put_data(skb, vp->rx_skbuff[entry]->data,
					     pkt_len);
				dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
				vp->rx_copy++;
			} else {
				/* Pre-allocate the replacement skb.  If it or its
				 * mapping fails then recycle the buffer that's already
				 * in place
				 */
				newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
				if (!newskb) {
					dev->stats.rx_dropped++;
					goto clear_complete;
				}
				newdma = dma_map_single(vp->gendev, newskb->data,
							PKT_BUF_SZ, DMA_FROM_DEVICE);
				if (dma_mapping_error(vp->gendev, newdma)) {
					dev->stats.rx_dropped++;
					consume_skb(newskb);
					goto clear_complete;
				}

				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = newskb;
				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
				skb_put(skb, pkt_len);
				dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
				    (csum_bits == (IPChksumValid | TCPChksumValid) ||
				     csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
		}

clear_complete:
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		iowrite16(UpUnstall, ioaddr + EL3_CMD);
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	return 0;
}
/*
 * Quiesce the device: stop the queue and timer, disable statistics,
 * Rx and Tx, and tear down the DMA list pointers.  When @final_down
 * is set on a PCI device, the PCI state is saved and WOL is armed.
 */
static void
vortex_down(struct net_device *dev, int final_down)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	netdev_reset_queue(dev);
	netif_stop_queue(dev);

	del_timer_sync(&vp->timer);

	/* Turn off statistics ASAP.  We update dev->stats below. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	iowrite16(RxDisable, ioaddr + EL3_CMD);
	iowrite16(TxDisable, ioaddr + EL3_CMD);

	/* Disable receiving 802.1q tagged frames */
	set_8021q_mode(dev, 0);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		iowrite16(StopCoax, ioaddr + EL3_CMD);

	/* Mask all interrupt sources. */
	iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	/* Harvest the final hardware counters into dev->stats. */
	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx)
		iowrite32(0, ioaddr + UpListPtr);
	if (vp->full_bus_master_tx)
		iowrite32(0, ioaddr + DownListPtr);

	if (final_down && VORTEX_PCI(vp)) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
}
/*
 * ndo_stop handler: bring the interface down, release the IRQ, and
 * unmap/free every skb still held by the Rx and Tx descriptor rings.
 * Always returns 0.
 */
static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;

	if (netif_device_present(dev))
		vortex_down(dev, 1);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			 dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
		pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
			 " tx_queued %d Rx pre-checksummed %d.\n",
			 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
	}

#if DO_ZEROCOPY
	/* Warn when the hardware computed checksums we never consumed. */
	if (vp->rx_csumhits &&
	    (vp->drv_flags & HAS_HWCKSM) == 0 &&
	    (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
		pr_warn("%s supports hardware checksums, and we're not using them!\n",
			dev->name);
	}
#endif

	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = NULL;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (vp->tx_skbuff[i]) {
				struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
				int k;

				/* Unmap the linear fragment plus one per page frag. */
				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
						dma_unmap_single(vp->gendev,
								 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
								 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
								 DMA_TO_DEVICE);
#else
				dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
#endif
				dev_kfree_skb(skb);
				vp->tx_skbuff[i] = NULL;
			}
		}
	}

	return 0;
}
/*
 * Debug helper: print the Tx ring state (descriptor addresses, lengths,
 * statuses) for bus-master chips.  Stalls the download engine while
 * reading and unstalls it afterwards unless it was already stalled.
 */
static void
dump_tx_ring(struct net_device *dev)
{
	if (vortex_debug > 0) {
		struct vortex_private *vp = netdev_priv(dev);
		void __iomem *ioaddr = vp->ioaddr;

		if (vp->full_bus_master_tx) {
			int i;
			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */

			pr_err("  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
			       vp->full_bus_master_tx,
			       vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
			       vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
			pr_err("  Transmit list %8.8x vs. %p.\n",
			       ioread32(ioaddr + DownListPtr),
			       &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
			issue_and_wait(dev, DownStall);
			for (i = 0; i < TX_RING_SIZE; i++) {
				unsigned int length;

#if DO_ZEROCOPY
				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
				length = le32_to_cpu(vp->tx_ring[i].length);
#endif
				pr_err("  %d: @%p  length %8.8x status %8.8x\n",
				       i, &vp->tx_ring[i], length,
				       le32_to_cpu(vp->tx_ring[i].status));
			}
			/* Only unstall if we were the ones who stalled it. */
			if (!stalled)
				iowrite16(DownUnstall, ioaddr + EL3_CMD);
		}
	}
}
  2457. static struct net_device_stats *vortex_get_stats(struct net_device *dev)
  2458. {
  2459. struct vortex_private *vp = netdev_priv(dev);
  2460. void __iomem *ioaddr = vp->ioaddr;
  2461. unsigned long flags;
  2462. if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
  2463. spin_lock_irqsave (&vp->lock, flags);
  2464. update_stats(ioaddr, dev);
  2465. spin_unlock_irqrestore (&vp->lock, flags);
  2466. }
  2467. return &dev->stats;
  2468. }
/*
	Update statistics.
	Unlike with the EL3 we need not worry about interrupts changing
	the window setting from underneath us, but we must still guard
	against a race condition with a StatsUpdate interrupt updating the
	table.  This is done by checking that the ASM (!) code generated uses
	atomic updates with '+='.
	*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything (window 6,
	 * byte-wide counters that clear on read). */
	dev->stats.tx_carrier_errors		+= window_read8(vp, 6, 0);
	dev->stats.tx_heartbeat_errors		+= window_read8(vp, 6, 1);
	dev->stats.tx_window_errors		+= window_read8(vp, 6, 4);
	dev->stats.rx_fifo_errors		+= window_read8(vp, 6, 5);
	dev->stats.tx_packets			+= window_read8(vp, 6, 6);
	/* NOTE(review): register 9 appears to carry overflow bits of the
	 * tx_packets counter — confirm against the chip documentation. */
	dev->stats.tx_packets			+= (window_read8(vp, 6, 9) &
						    0x30) << 4;
	/* Rx packets	*/ window_read8(vp, 6, 7);   /* Must read to clear */
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	dev->stats.rx_bytes 			+= window_read16(vp, 6, 10);
	dev->stats.tx_bytes 			+= window_read16(vp, 6, 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions	+= window_read8(vp, 6, 2);
	vp->xstats.tx_single_collisions		+= window_read8(vp, 6, 3);
	vp->xstats.tx_deferred			+= window_read8(vp, 6, 8);
	vp->xstats.rx_bad_ssd			+= window_read8(vp, 4, 12);

	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;

	{
		/* Window 4 reg 13 carries the upper nibbles of the byte counters. */
		u8 up = window_read8(vp, 4, 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}
}
  2508. static int vortex_nway_reset(struct net_device *dev)
  2509. {
  2510. struct vortex_private *vp = netdev_priv(dev);
  2511. return mii_nway_restart(&vp->mii);
  2512. }
  2513. static int vortex_get_link_ksettings(struct net_device *dev,
  2514. struct ethtool_link_ksettings *cmd)
  2515. {
  2516. struct vortex_private *vp = netdev_priv(dev);
  2517. mii_ethtool_get_link_ksettings(&vp->mii, cmd);
  2518. return 0;
  2519. }
  2520. static int vortex_set_link_ksettings(struct net_device *dev,
  2521. const struct ethtool_link_ksettings *cmd)
  2522. {
  2523. struct vortex_private *vp = netdev_priv(dev);
  2524. return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
  2525. }
/* ethtool get_msglevel: expose the driver-wide debug level. */
static u32 vortex_get_msglevel(struct net_device *dev)
{
	return vortex_debug;
}
/* ethtool set_msglevel: set the driver-wide debug level (affects all units). */
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
	vortex_debug = dbg;
}
  2534. static int vortex_get_sset_count(struct net_device *dev, int sset)
  2535. {
  2536. switch (sset) {
  2537. case ETH_SS_STATS:
  2538. return VORTEX_NUM_STATS;
  2539. default:
  2540. return -EOPNOTSUPP;
  2541. }
  2542. }
  2543. static void vortex_get_ethtool_stats(struct net_device *dev,
  2544. struct ethtool_stats *stats, u64 *data)
  2545. {
  2546. struct vortex_private *vp = netdev_priv(dev);
  2547. void __iomem *ioaddr = vp->ioaddr;
  2548. unsigned long flags;
  2549. spin_lock_irqsave(&vp->lock, flags);
  2550. update_stats(ioaddr, dev);
  2551. spin_unlock_irqrestore(&vp->lock, flags);
  2552. data[0] = vp->xstats.tx_deferred;
  2553. data[1] = vp->xstats.tx_max_collisions;
  2554. data[2] = vp->xstats.tx_multiple_collisions;
  2555. data[3] = vp->xstats.tx_single_collisions;
  2556. data[4] = vp->xstats.rx_bad_ssd;
  2557. }
  2558. static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
  2559. {
  2560. switch (stringset) {
  2561. case ETH_SS_STATS:
  2562. memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
  2563. break;
  2564. default:
  2565. WARN_ON(1);
  2566. break;
  2567. }
  2568. }
  2569. static void vortex_get_drvinfo(struct net_device *dev,
  2570. struct ethtool_drvinfo *info)
  2571. {
  2572. struct vortex_private *vp = netdev_priv(dev);
  2573. strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
  2574. if (VORTEX_PCI(vp)) {
  2575. strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
  2576. sizeof(info->bus_info));
  2577. } else {
  2578. if (VORTEX_EISA(vp))
  2579. strlcpy(info->bus_info, dev_name(vp->gendev),
  2580. sizeof(info->bus_info));
  2581. else
  2582. snprintf(info->bus_info, sizeof(info->bus_info),
  2583. "EISA 0x%lx %d", dev->base_addr, dev->irq);
  2584. }
  2585. }
  2586. static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  2587. {
  2588. struct vortex_private *vp = netdev_priv(dev);
  2589. if (!VORTEX_PCI(vp))
  2590. return;
  2591. wol->supported = WAKE_MAGIC;
  2592. wol->wolopts = 0;
  2593. if (vp->enable_wol)
  2594. wol->wolopts |= WAKE_MAGIC;
  2595. }
  2596. static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  2597. {
  2598. struct vortex_private *vp = netdev_priv(dev);
  2599. if (!VORTEX_PCI(vp))
  2600. return -EOPNOTSUPP;
  2601. if (wol->wolopts & ~WAKE_MAGIC)
  2602. return -EINVAL;
  2603. if (wol->wolopts & WAKE_MAGIC)
  2604. vp->enable_wol = 1;
  2605. else
  2606. vp->enable_wol = 0;
  2607. acpi_set_WOL(dev);
  2608. return 0;
  2609. }
/* ethtool operations exported by this driver (see handlers above). */
static const struct ethtool_ops vortex_ethtool_ops = {
	.get_drvinfo		= vortex_get_drvinfo,
	.get_strings            = vortex_get_strings,
	.get_msglevel           = vortex_get_msglevel,
	.set_msglevel           = vortex_set_msglevel,
	.get_ethtool_stats      = vortex_get_ethtool_stats,
	.get_sset_count		= vortex_get_sset_count,
	.get_link               = ethtool_op_get_link,
	.nway_reset             = vortex_nway_reset,
	.get_wol                = vortex_get_wol,
	.set_wol                = vortex_set_wol,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings     = vortex_get_link_ksettings,
	.set_link_ksettings     = vortex_set_link_ksettings,
};
  2625. #ifdef CONFIG_PCI
  2626. /*
  2627. * Must power the device up to do MDIO operations
  2628. */
  2629. static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  2630. {
  2631. int err;
  2632. struct vortex_private *vp = netdev_priv(dev);
  2633. pci_power_t state = 0;
  2634. if(VORTEX_PCI(vp))
  2635. state = VORTEX_PCI(vp)->current_state;
  2636. /* The kernel core really should have pci_get_power_state() */
  2637. if(state != 0)
  2638. pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
  2639. err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
  2640. if(state != 0)
  2641. pci_set_power_state(VORTEX_PCI(vp), state);
  2642. return err;
  2643. }
  2644. #endif
  2645. /* Pre-Cyclone chips have no documented multicast filter, so the only
  2646. multicast setting is to receive all multicast frames. At least
  2647. the chip has a very clean way to set the mode, unlike many others. */
  2648. static void set_rx_mode(struct net_device *dev)
  2649. {
  2650. struct vortex_private *vp = netdev_priv(dev);
  2651. void __iomem *ioaddr = vp->ioaddr;
  2652. int new_mode;
  2653. if (dev->flags & IFF_PROMISC) {
  2654. if (vortex_debug > 3)
  2655. pr_notice("%s: Setting promiscuous mode.\n", dev->name);
  2656. new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
  2657. } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
  2658. new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
  2659. } else
  2660. new_mode = SetRxFilter | RxStation | RxBroadcast;
  2661. iowrite16(new_mode, ioaddr + EL3_CMD);
  2662. }
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
   Note that this must be done after each RxReset due to some backwards
   compatibility logic in the Cyclone and Tornado ASICs */

/* The Ethernet Type used for 802.1q tagged frames */
#define VLAN_ETHER_TYPE 0x8100

static void set_8021q_mode(struct net_device *dev, int enable)
{
	struct vortex_private *vp = netdev_priv(dev);
	int mac_ctrl;

	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
		/* cyclone and tornado chipsets can recognize 802.1q
		 * tagged frames and treat them correctly */

		int max_pkt_size = dev->mtu+14;	/* MTU+Ethernet header */
		if (enable)
			max_pkt_size += 4;	/* 802.1Q VLAN tag */

		window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);

		/* set VlanEtherType to let the hardware checksumming
		   treat tagged frames correctly */
		window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
	} else {
		/* on older cards we have to enable large frames */

		vp->large_frames = dev->mtu > 1500 || enable;

		/* Bit 0x40 in Wn3_MAC_Ctrl toggles large-frame acceptance. */
		mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
		if (vp->large_frames)
			mac_ctrl |= 0x40;
		else
			mac_ctrl &= ~0x40;
		window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
	}
}
#else

/* 802.1q support compiled out: nothing to configure. */
static void set_8021q_mode(struct net_device *dev, int enable)
{
}

#endif
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details. */

/* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues. */
static void mdio_delay(struct vortex_private *vp)
{
	/* A dummy register read is used purely as a bus-cycle delay. */
	window_read32(vp, 4, Wn4_PhysicalMgmt);
}
/* Bit assignments in the Window 4 PhysicalMgmt register used for
   bit-banged MDIO (see mdio_sync/mdio_read/mdio_write below). */
#define MDIO_SHIFT_CLK	0x01		/* MDC clock line */
#define MDIO_DIR_WRITE	0x04		/* drive the MDIO line (output enable) */
#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)	/* output a 0 bit */
#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)	/* output a 1 bit */
#define MDIO_DATA_READ	0x02		/* MDIO input data bit */
#define MDIO_ENB_IN		0x00	/* release the MDIO line (input mode) */
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(struct vortex_private *vp, int bits)
{
	/* Establish sync by sending at least 32 logic ones.
	 * Each bit is clocked out with MDC low then high. */
	while (-- bits >= 0) {
		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
}
/*
 * Read one 16-bit MII register via bit-banged MDIO.
 * @phy_id:   transceiver address on the MII bus.
 * @location: register number to read.
 * Returns the register value, or 0xffff if the transceiver did not
 * drive the turnaround bit (no device responded).
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	int i;
	struct vortex_private *vp = netdev_priv(dev);
	/* Frame: start + read opcode (0xf6 pattern), PHY addr, reg addr. */
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int retval = 0;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the read command bits out. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		retval = (retval << 1) |
			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
			  MDIO_DATA_READ) ? 1 : 0);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);

	/* Bit 0x20000 set means the PHY never pulled the line low
	 * during turnaround: report all-ones. */
	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
}
/*
 * Write one 16-bit MII register via bit-banged MDIO.
 * @phy_id:   transceiver address on the MII bus.
 * @location: register number to write.
 * @value:    16-bit value to store.
 */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct vortex_private *vp = netdev_priv(dev);
	/* Frame: start + write opcode + PHY addr + reg addr + turnaround + data. */
	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
	int i;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Leave the interface idle. */
	for (i = 1; i >= 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);
}
  2788. /* ACPI: Advanced Configuration and Power Interface. */
  2789. /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
  2790. static void acpi_set_WOL(struct net_device *dev)
  2791. {
  2792. struct vortex_private *vp = netdev_priv(dev);
  2793. void __iomem *ioaddr = vp->ioaddr;
  2794. device_set_wakeup_enable(vp->gendev, vp->enable_wol);
  2795. if (vp->enable_wol) {
  2796. /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
  2797. window_write16(vp, 2, 7, 0x0c);
  2798. /* The RxFilter must accept the WOL frames. */
  2799. iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
  2800. iowrite16(RxEnable, ioaddr + EL3_CMD);
  2801. if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
  2802. pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
  2803. vp->enable_wol = 0;
  2804. return;
  2805. }
  2806. if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
  2807. return;
  2808. /* Change the power state to D3; RxEnable doesn't take effect. */
  2809. pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
  2810. }
  2811. }
/*
 * PCI ->remove() callback: tear the device down in the reverse order of
 * probe.  The statement order below is deliberate — do not reorder.
 */
static void vortex_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct vortex_private *vp;

	/* A NULL drvdata means this was a Compaq/EISA-style device that
	 * should never reach the PCI remove path. */
	if (!dev) {
		pr_err("vortex_remove_one called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);

	/* Unmap the CardBus function-register window, if one was mapped. */
	if (vp->cb_fn_base)
		pci_iounmap(pdev, vp->cb_fn_base);

	unregister_netdev(dev);

	pci_set_power_state(pdev, PCI_D0);	/* Go active */
	if (vp->pm_state_valid)
		pci_restore_state(pdev);
	pci_disable_device(pdev);

	/* Should really use issue_and_wait() here */
	/* Issue a full chip reset before unmapping; the mask picks the
	 * reset options depending on whether the EEPROM must survive. */
	iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
		  vp->ioaddr + EL3_CMD);

	pci_iounmap(pdev, vp->ioaddr);

	/* rx_ring and tx_ring share one coherent allocation anchored at
	 * rx_ring/rx_ring_dma, so a single free releases both. */
	dma_free_coherent(&pdev->dev,
			  sizeof(struct boom_rx_desc) * RX_RING_SIZE +
			  sizeof(struct boom_tx_desc) * TX_RING_SIZE,
			  vp->rx_ring, vp->rx_ring_dma);

	pci_release_regions(pdev);

	free_netdev(dev);
}
/* PCI driver glue: probe/remove entry points, supported device table and
 * power-management ops for the "3c59x" driver. */
static struct pci_driver vortex_driver = {
	.name		= "3c59x",
	.probe		= vortex_init_one,
	.remove		= vortex_remove_one,
	.id_table	= vortex_pci_tbl,
	.driver.pm	= VORTEX_PM_OPS,
};
/* Set by vortex_init() when the corresponding bus registration succeeded;
 * consulted by vortex_cleanup() to unwind only what was registered. */
static int vortex_have_pci;
static int vortex_have_eisa;
  2848. static int __init vortex_init(void)
  2849. {
  2850. int pci_rc, eisa_rc;
  2851. pci_rc = pci_register_driver(&vortex_driver);
  2852. eisa_rc = vortex_eisa_init();
  2853. if (pci_rc == 0)
  2854. vortex_have_pci = 1;
  2855. if (eisa_rc > 0)
  2856. vortex_have_eisa = 1;
  2857. return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
  2858. }
  2859. static void __exit vortex_eisa_cleanup(void)
  2860. {
  2861. void __iomem *ioaddr;
  2862. #ifdef CONFIG_EISA
  2863. /* Take care of the EISA devices */
  2864. eisa_driver_unregister(&vortex_eisa_driver);
  2865. #endif
  2866. if (compaq_net_device) {
  2867. ioaddr = ioport_map(compaq_net_device->base_addr,
  2868. VORTEX_TOTAL_SIZE);
  2869. unregister_netdev(compaq_net_device);
  2870. iowrite16(TotalReset, ioaddr + EL3_CMD);
  2871. release_region(compaq_net_device->base_addr,
  2872. VORTEX_TOTAL_SIZE);
  2873. free_netdev(compaq_net_device);
  2874. }
  2875. }
  2876. static void __exit vortex_cleanup(void)
  2877. {
  2878. if (vortex_have_pci)
  2879. pci_unregister_driver(&vortex_driver);
  2880. if (vortex_have_eisa)
  2881. vortex_eisa_cleanup();
  2882. }
/* Module entry/exit hooks. */
module_init(vortex_init);
module_exit(vortex_cleanup);