/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/
*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
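
/*
 * Example module usage (illustrative values only; the parameter names are
 * the ones declared above, with one media[] entry per card):
 *
 *	modprobe sundance debug=2 flowctrl=1 media=100mbps_fd,autosense
 *
 * Cards without a media[] entry are left in autonegotiation mode.
 */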

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
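
/*
 * Concrete instance of the power-of-two note above: with TX_RING_SIZE = 32,
 * an expression such as 'np->cur_tx % TX_RING_SIZE' compiles down to
 * 'np->cur_tx & 31' for the unsigned ring indices used throughout.
 */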

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
Ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header (see the note following this comment block).

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/
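
/*
 * The alignment arithmetic behind the "+2" receive offset described above
 * (an illustrative restatement, not additional driver logic):
 *
 *	skb_reserve(skb, 2);	// 2-byte pad before the frame
 *	// 2 (pad) + 14 (Ethernet header) = 16
 *	// so the IP header lands on a longword (and 16-byte) boundary.
 */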

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	WakeEvent = 0x45,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};

#define ASIC_HI_WORD(x)	((x) + 2)

enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
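
/*
 * These reset bits live in the high 16 bits of the 32-bit ASICCtrl register
 * (hence ASIC_HI_WORD above), which is why callers shift them up, e.g.:
 *
 *	sundance_reset(dev, (NetworkReset | FIFOReset | TxReset) << 16);
 *
 * and why sundance_reset() polls 'ResetBusy << 16' while waiting.
 */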

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* Bits in WakeEvent register. */
enum wake_event_bits {
	WakePktEnable = 0x01,
	MagicPktEnable = 0x02,
	LinkEventEnable = 0x04,
	WolEnable = 0x80,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};
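
/*
 * The driver only ever uses a single fragment per descriptor: frag[0].addr
 * holds the DMA address of the whole buffer, and frag[0].length carries the
 * byte count with LastFrag OR'ed in (see init_ring() and start_tx() below).
 */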

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;	/* Media monitoring timer. */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct netdev_desc *last_tx;	/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	unsigned int wol_enabled:1;	/* Wake on LAN enabled */
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;		/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	spinlock_t statlock;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx(struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx(struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int  __set_mac_addr(struct net_device *dev);
static int  sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32(reset_cmd | ioread32(ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32(ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sundance_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	disable_irq(np->pci_dev->irq);
	intr_handler(np->pci_dev->irq, dev);
	enable_irq(np->pci_dev->irq);
}
#endif

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= sundance_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sundance_poll_controller,
#endif
};

static int sundance_probe1(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/* MTU range: 68 - 8191 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 8191;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;	/* Default setting */
	np->mii_preamble_required++;

	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first.
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
			       "0x%4.4x advertising %4.4x.\n",
			       dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
		       dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp(media[card_idx], "100mbps_fd") == 0 ||
			    strcmp(media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp(media[card_idx], "100mbps_hd") == 0 ||
				   strcmp(media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp(media[card_idx], "10mbps_fd") == 0 ||
				   strcmp(media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp(media[card_idx], "10mbps_hd") == 0 ||
				   strcmp(media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32(ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay(300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write(dev, np->phys[0], MII_BMCR, mii_ctl);
		printk(KERN_INFO "Override speed=%d, %s duplex\n",
		       np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
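/*
 * eeprom_delay() is a read-as-delay idiom (the value read is discarded):
 * a PCI read both flushes any posted write and costs at least one bus
 * cycle, which appears to be the only pacing the polling loop below needs.
 */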
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;	/* Typical 1900 ticks. */

	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (!(ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
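
/*
 * Shape of the bit-banged MDIO frames built below, decoded from the
 * command constants (an explanatory note, not new protocol).  A read,
 * mii_cmd = (0xf6 << 10) | (phy_id << 5) | location, shifts out MSB-first as
 *
 *	11 01 10 PPPPP RRRRR	(idle bits, start, read opcode, PHY, register)
 *
 * after which the bus turns around and mdio_read() clocks in the two
 * transition bits plus 16 data bits.  The write command (0x5002 << 16)
 * similarly encodes start 01, write opcode 01, PHY, register, turnaround 10,
 * then the 16 data bits.
 */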

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)	/* BMSR link-status bit */
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	unsigned long flags;
	int i;

	sundance_reset(dev, 0x00ff << 16);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	/* Disable WOL */
	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
	np->wol_enabled = 0;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
		       "MAC Control %x, %4.4x %4.4x.\n",
		       dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
		       ioread32(ioaddr + MACCtrl0),
		       ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 3*HZ;
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
			       "negotiated capability %4.4x.\n", dev->name,
			       duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->mii_if.dev;
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
		       "Tx %x Rx %x.\n",
		       dev->name, ioread16(ioaddr + IntrEnable),
		       ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
	       "TxFrameId %2.2x,"
	       " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
	       ioread8(ioaddr + TxFrameId));

	{
		int i;

		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb =
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			dma_map_single(&np->pci_dev->dev, skb->data,
				np->rx_buf_sz, DMA_FROM_DEVICE));
		if (dma_mapping_error(&np->pci_dev->dev,
					np->rx_ring[i].frag[0].addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
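	/*
	 * Note on the line above: if the fill loop broke early after only
	 * i buffers, the unsigned wraparound in (i - RX_RING_SIZE) leaves
	 * dirty_rx trailing cur_rx by the number of missing buffers, so
	 * refill_rx() will revisit exactly those slots.
	 */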

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}

static void tx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;

		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32(np->base + TxListPtr) == 0)
		iowrite32(np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}

static netdev_tx_t
start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	if (dma_mapping_error(&np->pci_dev->dev,
				txdesc->frag[0].addr))
		goto drop_frame;
	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();

	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
	    !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue(dev);
	}

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb_any(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
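
/*
 * Tx submission is two-staged: start_tx() above only fills in the
 * descriptor and bumps cur_tx; the tx_tasklet then runs tx_poll(), which
 * links the new descriptors into the hardware chain via next_desc and, if
 * the DMA engine is idle (TxListPtr reads 0), writes the list head to
 * restart it.  This apparently keeps the hot xmit path free of MMIO reads.
 */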

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16(TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedules the Rx tasklet to run. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16(ioaddr + TxStatus);
			for (tx_cnt = 32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16(0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16(ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;

				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
				    !(le32_to_cpu(np->tx_ring[entry].status)
				      & 0x00010000))
					break;
				if (sw_frame_id == (hw_frame_id + 1) %
				    TX_RING_SIZE)
					break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;

				if (!(le32_to_cpu(np->tx_ring[entry].status)
				      & 0x00010000))
					break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue(dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
			       frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
				       frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
				       " status %8.8x.\n",
				       dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       ", bogus_cnt %d.\n",
				       pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx(dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx(dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}
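
/*
 * rx_poll() is a pre-NAPI, tasklet-based polling scheme: intr_handler()
 * masks the Rx interrupts and schedules the tasklet with an RX_BUDGET
 * quota.  If the quota is exhausted, the 'not_done' path above reschedules
 * the tasklet; only once the ring is drained are Rx interrupts re-enabled
 * by the DEFAULT_INTR write to IntrEnable.
 */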
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				dma_map_single(&np->pci_dev->dev, skb->data,
					np->rx_buf_sz, DMA_FROM_DEVICE));
			if (dma_mapping_error(&np->pci_dev->dev,
				    np->rx_ring[entry].frag[0].addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
}
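
/* Handle "error" interrupts: link changes (re-read the negotiated speed
 * and duplex from the PHY and update flow control accordingly),
 * statistics-counter overflow, and PCI bus errors. */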
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				mii_advertise = mdio_read(dev, np->phys[0],
							   MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");
			} else {
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
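
/* Fold the chip's hardware statistics counters into dev->stats and the
 * driver's extended xstats. Everything is accumulated with "+=", which
 * assumes the hardware registers clear on read; statlock serializes
 * readers so no event is counted twice. The octet counters are split
 * into 16-bit low/high halves that are recombined below. */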
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames silently dropped. */
	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	dev->stats.collisions += mult_coll
		+ single_coll
		+ late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);
	return &dev->stats;
}
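
/* Program the Rx filter. Promiscuous mode and large/ALLMULTI lists get a
 * pass-all filter; otherwise each multicast address is hashed with
 * ether_crc_le() and the top six CRC bits (taken MSB-first) select one
 * of 64 filter bits, stored across the four 16-bit MulticastFilter
 * registers. */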
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
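
/* Write the station address to the chip as three little-endian 16-bit
 * words: for example, 00:11:22:33:44:55 becomes 0x1100, 0x3322 and
 * 0x5544 at StationAddr, StationAddr+2 and StationAddr+4. */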
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

/* Invoked with rtnl_lock held */
static int sundance_set_mac_addr(struct net_device *dev, void *data)
{
	const struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	__set_mac_addr(dev);

	return 0;
}
static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};
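
/* Note: the order of the names above must match the order in which
 * get_ethtool_stats() below fills its data[] array. */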
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;

	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);

	np->msg_enable = val;
}

static void get_strings(struct net_device *dev, u32 stringset,
			u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_ethtool_stats(struct net_device *dev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}
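
/* Wake-on-LAN support is only compiled in with CONFIG_PM. get_wol reports
 * what the WakeEvent register currently has armed; set_wol arms
 * magic-packet and/or link-event wakeup and records the choice in
 * np->wol_enabled so suspend knows to leave the receiver listening. */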
#ifdef CONFIG_PM

static void sundance_get_wol(struct net_device *dev,
			     struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	wol->wolopts = 0;

	wol->supported = (WAKE_PHY | WAKE_MAGIC);
	if (!np->wol_enabled)
		return;

	wol_bits = ioread8(ioaddr + WakeEvent);
	if (wol_bits & MagicPktEnable)
		wol->wolopts |= WAKE_MAGIC;
	if (wol_bits & LinkEventEnable)
		wol->wolopts |= WAKE_PHY;
}

static int sundance_set_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	if (!device_can_wakeup(&np->pci_dev->dev))
		return -EOPNOTSUPP;

	np->wol_enabled = !!(wol->wolopts);
	wol_bits = ioread8(ioaddr + WakeEvent);
	wol_bits &= ~(WakePktEnable | MagicPktEnable |
			LinkEventEnable | WolEnable);

	if (np->wol_enabled) {
		if (wol->wolopts & WAKE_MAGIC)
			wol_bits |= (MagicPktEnable | WolEnable);
		if (wol->wolopts & WAKE_PHY)
			wol_bits |= (LinkEventEnable | WolEnable);
	}

	iowrite8(wol_bits, ioaddr + WakeEvent);
	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);

	return 0;
}
#else
#define sundance_get_wol NULL
#define sundance_set_wol NULL
#endif /* CONFIG_PM */
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_wol = sundance_get_wol,
	.set_wol = sundance_set_wol,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}
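
/* Orderly shutdown: kill the Rx/Tx tasklets, mask interrupts, stop DMA
 * and the MAC, issue a global reset, then free the IRQ, timer and all
 * ring buffers. The 2000 x 1ms polls below bound how long we wait for
 * DMA and the reset engine to quiesce. */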
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for and kill the tasklets */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so the resources can be released safely */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(np->pci_dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
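
/* PCI removal: unregister the netdev first so no new I/O can start, then
 * release the coherent descriptor rings, unmap the registers and free
 * the device. */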
static void sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
				np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
				np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}
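
/* Legacy PCI power management hooks: suspend closes the interface and,
 * if Wake-on-LAN was armed via set_wol, leaves the receiver enabled for
 * broadcast/unicast wake events before cutting power; resume restores
 * PCI state and reopens the interface. */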
#ifdef CONFIG_PM

static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
	if (np->wol_enabled) {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		iowrite16(RxEnable, ioaddr + MACCtrl1);
	}
	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
			np->wol_enabled);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int sundance_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, 0);

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
			dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

#endif /* CONFIG_PM */
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= sundance_remove1,
#ifdef CONFIG_PM
	.suspend	= sundance_suspend,
	.resume		= sundance_resume,
#endif /* CONFIG_PM */
};

static int __init sundance_init(void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);