/*
 * ks8842.c timberdale KS8842 ethernet driver
 * Copyright (c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * The Micrel KS8842 behind the timberdale FPGA
 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ks8842.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define DRV_NAME "ks8842"

/* Timberdale specific Registers */
#define REG_TIMB_RST 0x1c
#define REG_TIMB_FIFO 0x20
#define REG_TIMB_ISR 0x24
#define REG_TIMB_IER 0x28
#define REG_TIMB_IAR 0x2C
#define REQ_TIMB_DMA_RESUME 0x30

/* KS8842 registers */
#define REG_SELECT_BANK 0x0e

/* bank 0 registers */
#define REG_QRFCR 0x04

/* bank 2 registers */
#define REG_MARL 0x00
#define REG_MARM 0x02
#define REG_MARH 0x04

/* bank 3 registers */
#define REG_GRR 0x06

/* bank 16 registers */
#define REG_TXCR 0x00
#define REG_TXSR 0x02
#define REG_RXCR 0x04
#define REG_TXMIR 0x08
#define REG_RXMIR 0x0A

/* bank 17 registers */
#define REG_TXQCR 0x00
#define REG_RXQCR 0x02
#define REG_TXFDPR 0x04
#define REG_RXFDPR 0x06
#define REG_QMU_DATA_LO 0x08
#define REG_QMU_DATA_HI 0x0A

/* bank 18 registers */
#define REG_IER 0x00
#define IRQ_LINK_CHANGE 0x8000
#define IRQ_TX 0x4000
#define IRQ_RX 0x2000
#define IRQ_RX_OVERRUN 0x0800
#define IRQ_TX_STOPPED 0x0200
#define IRQ_RX_STOPPED 0x0100
#define IRQ_RX_ERROR 0x0080
#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
/* When running via timberdale in DMA mode, the RX interrupt should be
 * enabled in the KS8842, but not in the FPGA IP, since the IP handles
 * RX DMA internally.
 * TX interrupts are not needed: TX is handled by the FPGA and the driver
 * is notified via DMA callbacks.
 */
#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
#define REG_ISR 0x02
#define REG_RXSR 0x04
#define RXSR_VALID 0x8000
#define RXSR_BROADCAST 0x80
#define RXSR_MULTICAST 0x40
#define RXSR_UNICAST 0x20
#define RXSR_FRAMETYPE 0x08
#define RXSR_TOO_LONG 0x04
#define RXSR_RUNT 0x02
#define RXSR_CRC_ERROR 0x01
#define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)

/* bank 32 registers */
#define REG_SW_ID_AND_ENABLE 0x00
#define REG_SGCR1 0x02
#define REG_SGCR2 0x04
#define REG_SGCR3 0x06

/* bank 39 registers */
#define REG_MACAR1 0x00
#define REG_MACAR2 0x02
#define REG_MACAR3 0x04

/* bank 45 registers */
#define REG_P1MBCR 0x00
#define REG_P1MBSR 0x02

/* bank 46 registers */
#define REG_P2MBCR 0x00
#define REG_P2MBSR 0x02

/* bank 48 registers */
#define REG_P1CR2 0x02

/* bank 49 registers */
#define REG_P1CR4 0x02
#define REG_P1SR 0x04

/* flags passed by platform_device for configuration */
#define MICREL_KS884X 0x01 /* 0=Timberdale(FPGA), 1=Micrel */
#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */

#define DMA_BUFFER_SIZE 2048

struct ks8842_tx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	void *buf;
	struct scatterlist sg;
	int channel;
};

struct ks8842_rx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	struct sk_buff *skb;
	struct scatterlist sg;
	struct tasklet_struct tasklet;
	int channel;
};
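
/* DMA is used only when both a TX and an RX channel were assigned via
 * platform data; otherwise the driver falls back to PIO.
 */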
#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
	((adapter)->dma_rx.channel != -1))

struct ks8842_adapter {
	void __iomem *hw_addr;
	int irq;
	unsigned long conf_flags; /* copy of platform_device config */
	struct tasklet_struct tasklet;
	spinlock_t lock; /* spinlock to be interrupt safe */
	struct work_struct timeout_work;
	struct net_device *netdev;
	struct device *dev;
	struct ks8842_tx_dma_ctl dma_tx;
	struct ks8842_rx_dma_ctl dma_rx;
};

static void ks8842_dma_rx_cb(void *data);
static void ks8842_dma_tx_cb(void *data);

static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
{
	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
}
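
/* The KS8842 exposes its registers through banks: a bank is first selected
 * via REG_SELECT_BANK, then the register is accessed at its offset within
 * that bank. The helpers below combine the bank select with the actual
 * read or write.
 */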
static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
{
	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
}

static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
	u8 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite8(value, adapter->hw_addr + offset);
}

static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
	u16 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite16(value, adapter->hw_addr + offset);
}

static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg |= bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg &= ~bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
	u32 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite32(value, adapter->hw_addr + offset);
}

static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread8(adapter->hw_addr + offset);
}

static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread16(adapter->hw_addr + offset);
}

static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread32(adapter->hw_addr + offset);
}
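
/* Reset the controller. The genuine Micrel parts use the global soft-reset
 * bit in REG_GRR; behind timberdale the FPGA performs a hardware reset
 * instead (see the comment in the else branch).
 */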
static void ks8842_reset(struct ks8842_adapter *adapter)
{
	if (adapter->conf_flags & MICREL_KS884X) {
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
	} else {
		/* The KS8842 goes haywire when doing a software reset,
		 * so a workaround in the timberdale IP is implemented to
		 * do a hardware reset instead
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
		*/
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
		msleep(20);
	}
}
static void ks8842_update_link_status(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	/* check the status of the link */
	if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else {
		netif_stop_queue(netdev);
		netif_carrier_off(netdev);
	}
}

static void ks8842_enable_tx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_disable_tx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_enable_rx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_disable_rx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
}
static void ks8842_reset_hw(struct ks8842_adapter *adapter)
{
	/* reset the HW */
	ks8842_reset(adapter);

	/* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
	ks8842_write16(adapter, 16, 0x000E, REG_TXCR);

	/* enable the receiver, uni + multi + broadcast + flow ctrl
	   + crc strip */
	ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
		REG_RXCR);

	/* TX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);

	/* RX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);

	/* RX 2 kb high watermark */
	ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);

	/* aggressive back off in half duplex */
	ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);

	/* enable no excessive collision drop */
	ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);

	/* Enable port 1 force flow control / back pressure / transmit / recv */
	ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);

	/* restart port auto-negotiation */
	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);

	/* Enable the transmitter */
	ks8842_enable_tx(adapter);

	/* Enable the receiver */
	ks8842_enable_rx(adapter);

	/* clear all interrupts */
	ks8842_write16(adapter, 18, 0xffff, REG_ISR);

	/* enable interrupts */
	if (KS8842_USE_DMA(adapter)) {
		/* When running in DMA mode the RX interrupt is not enabled in
		 * timberdale because RX data is received via DMA callbacks;
		 * it must still be enabled in the KS8842 because it indicates
		 * to timberdale when there is RX data for its DMA FIFOs */
		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	} else {
		if (!(adapter->conf_flags & MICREL_KS884X))
			iowrite16(ENABLED_IRQS,
				adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	}

	/* enable the switch */
	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}
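
/* Read the MAC address the QMU was given (the bytes are stored in reverse
 * order in MARL/MARM/MARH) and mirror it into the switch MACAR registers
 * so that both blocks use the same address.
 */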
static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
{
	int i;
	u16 mac;

	for (i = 0; i < ETH_ALEN; i++)
		dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);

	if (adapter->conf_flags & MICREL_KS884X) {
		/* On the genuine chip the word order used for the MAC
		 * address differs between the MAC and the switch, so the
		 * words are written to the switch in reverse order.
		 */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	} else {
		/* make sure the switch port uses the same MAC as the QMU */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
	}
}
static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
{
	unsigned long flags;
	unsigned i;

	spin_lock_irqsave(&adapter->lock, flags);
	for (i = 0; i < ETH_ALEN; i++) {
		ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
		if (!(adapter->conf_flags & MICREL_KS884X))
			ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
				REG_MACAR1 + i);
	}

	if (adapter->conf_flags & MICREL_KS884X) {
		/* On the genuine chip the word order used for the MAC
		 * address differs between the MAC and the switch, so the
		 * words are written to the switch in reverse order.
		 */
		u16 mac;

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}
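
/* Number of free bytes in the TX memory (lower 13 bits of REG_TXMIR). */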
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
}
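
/* DMA transmit path: build a 4-byte control word (IRQ on completion,
 * port 1, frame length) followed by the frame data in the bounce buffer,
 * pad the transfer length to a multiple of 4 and hand it to the dmaengine
 * channel. Only one TX descriptor is outstanding at a time.
 */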
static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
	u8 *buf = ctl->buf;

	if (ctl->adesc) {
		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
		/* transfer ongoing */
		return NETDEV_TX_BUSY;
	}

	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);

	/* copy data to the TX buffer */
	/* the control word, enable IRQ, port 1 and the length */
	*buf++ = 0x00;
	*buf++ = 0x01; /* Port 1 */
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);

	dma_sync_single_range_for_device(adapter->dev,
		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
		DMA_TO_DEVICE);

	/* make sure the length is a multiple of 4 */
	if (sg_dma_len(&ctl->sg) % 4)
		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;

	ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
		&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!ctl->adesc)
		return NETDEV_TX_BUSY;

	ctl->adesc->callback_param = netdev;
	ctl->adesc->callback = ks8842_dma_tx_cb;
	ctl->adesc->tx_submit(ctl->adesc);

	netdev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
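
/* PIO transmit path: write the control word and length through the QMU
 * data registers, copy the frame data 16 or 32 bits at a time depending
 * on the bus width, then set TXQCR to enqueue the packet.
 */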
static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;

	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
		__func__, skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));

	/* check FIFO buffer space, we need space for CRC and command bits */
	if (ks8842_tx_fifo_space(adapter) < len + 8)
		return NETDEV_TX_BUSY;

	if (adapter->conf_flags & KS884X_16BIT) {
		u16 *ptr16 = (u16 *)skb->data;
		ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
		ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
			len -= sizeof(u32);
		}
	} else {
		u32 *ptr = (u32 *)skb->data;
		u32 ctrl;
		/* the control word, enable IRQ, port 1 and the length */
		ctrl = 0x8000 | 0x100 | (len << 16);
		ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);

		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
			len -= sizeof(u32);
			ptr++;
		}
	}

	/* enqueue packet */
	ks8842_write16(adapter, 17, 1, REG_TXQCR);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
{
	netdev_dbg(netdev, "RX error, status: %x\n", status);

	netdev->stats.rx_errors++;
	if (status & RXSR_TOO_LONG)
		netdev->stats.rx_length_errors++;
	if (status & RXSR_CRC_ERROR)
		netdev->stats.rx_crc_errors++;
	if (status & RXSR_RUNT)
		netdev->stats.rx_frame_errors++;
}

static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
	int len)
{
	netdev_dbg(netdev, "RX packet, len: %d\n", len);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += len;
	if (status & RXSR_MULTICAST)
		netdev->stats.multicast++;
}
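
/* Allocate a fresh skb of DMA_BUFFER_SIZE, map it for DMA and submit a
 * new RX descriptor so the next incoming frame is written straight into
 * the skb by the DMA engine.
 */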
static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct scatterlist *sg = &ctl->sg;
	int err;

	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
	if (ctl->skb) {
		sg_init_table(sg, 1);
		sg_dma_address(sg) = dma_map_single(adapter->dev,
			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) {
			err = -ENOMEM;
			sg_dma_address(sg) = 0;
			goto out;
		}

		sg_dma_len(sg) = DMA_BUFFER_SIZE;

		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
		if (!ctl->adesc) {
			err = -ENOMEM;
			goto out;
		}

		ctl->adesc->callback_param = netdev;
		ctl->adesc->callback = ks8842_dma_rx_cb;
		ctl->adesc->tx_submit(ctl->adesc);
	} else {
		err = -ENOMEM;
		sg_dma_address(sg) = 0;
		goto out;
	}

	return 0;
out:
	if (sg_dma_address(sg))
		dma_unmap_single(adapter->dev, sg_dma_address(sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(sg) = 0;
	if (ctl->skb)
		dev_kfree_skb(ctl->skb);

	ctl->skb = NULL;

	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
	return err;
}
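
/* Tasklet run after an RX DMA completion: restart RX with a new buffer,
 * then parse the status word at the start of the finished buffer and pass
 * the frame up the stack or account the error.
 */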
static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct sk_buff *skb = ctl->skb;
	dma_addr_t addr = sg_dma_address(&ctl->sg);
	u32 status;

	ctl->adesc = NULL;

	/* kick next transfer going */
	__ks8842_start_new_rx_dma(netdev);

	/* now handle the data we got */
	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);

	status = *((u32 *)skb->data);

	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
		__func__, status & 0xffff);

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		int len = (status >> 16) & 0x7ff;

		ks8842_update_rx_counters(netdev, status, len);

		/* reserve 4 bytes which is the status word */
		skb_reserve(skb, 4);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, netdev);
		netif_rx(skb);
	} else {
		ks8842_update_rx_err_counters(netdev, status);
		dev_kfree_skb(skb);
	}
}
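
/* PIO receive path: read the status word and length from the QMU data
 * registers, copy the frame into a freshly allocated skb 16 or 32 bits at
 * a time, then release the frame from the RX queue via REG_RXQCR.
 */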
static void ks8842_rx_frame(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u32 status;
	int len;

	if (adapter->conf_flags & KS884X_16BIT) {
		status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
		len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			__func__, status);
	} else {
		status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
		len = (status >> 16) & 0x7ff;
		status &= 0xffff;
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			__func__, status);
	}

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);

		if (skb) {
			ks8842_update_rx_counters(netdev, status, len);

			if (adapter->conf_flags & KS884X_16BIT) {
				u16 *data16 = skb_put(skb, len);
				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_LO);
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_HI);
					len -= sizeof(u32);
				}
			} else {
				u32 *data = skb_put(skb, len);

				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data++ = ioread32(adapter->hw_addr +
						REG_QMU_DATA_LO);
					len -= sizeof(u32);
				}
			}

			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else
			netdev->stats.rx_dropped++;
	} else
		ks8842_update_rx_err_counters(netdev, status);

	/* set high watermark to 3K */
	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);

	/* release the frame */
	ks8842_write16(adapter, 17, 0x01, REG_RXQCR);

	/* set high watermark to 2K */
	ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}
static void ks8842_handle_rx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
	while (rx_data) {
		ks8842_rx_frame(netdev, adapter);
		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	}
}

static void ks8842_handle_tx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
	netdev->stats.tx_packets++;
	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_handle_rx_overrun(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	netdev_dbg(netdev, "%s: entry\n", __func__);
	netdev->stats.rx_errors++;
	netdev->stats.rx_fifo_errors++;
}
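
/* Interrupt bottom half: read and acknowledge the interrupt status, handle
 * link changes, RX, TX and the stopped/overrun conditions, then re-enable
 * the interrupt sources and restore the bank that was selected when the
 * interrupt fired.
 */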
static void ks8842_tasklet(unsigned long arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	unsigned long flags;
	u16 entry_bank;

	/* read current bank to be able to set it back */
	spin_lock_irqsave(&adapter->lock, flags);
	entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	spin_unlock_irqrestore(&adapter->lock, flags);

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	/* when running in DMA mode, do not ack RX interrupts, it is handled
	   internally by timberdale, otherwise its DMA FIFOs would stop
	*/
	if (KS8842_USE_DMA(adapter))
		isr &= ~IRQ_RX;

	/* Ack */
	ks8842_write16(adapter, 18, isr, REG_ISR);

	if (!(adapter->conf_flags & MICREL_KS884X))
		/* Ack in the timberdale IP as well */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);

	if (!netif_running(netdev))
		return;

	if (isr & IRQ_LINK_CHANGE)
		ks8842_update_link_status(netdev, adapter);

	/* should not get IRQ_RX when running DMA mode */
	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
		ks8842_handle_rx(netdev, adapter);

	/* should only happen when in PIO mode */
	if (isr & IRQ_TX)
		ks8842_handle_tx(netdev, adapter);

	if (isr & IRQ_RX_OVERRUN)
		ks8842_handle_rx_overrun(netdev, adapter);

	if (isr & IRQ_TX_STOPPED) {
		ks8842_disable_tx(adapter);
		ks8842_enable_tx(adapter);
	}

	if (isr & IRQ_RX_STOPPED) {
		ks8842_disable_rx(adapter);
		ks8842_enable_rx(adapter);
	}

	/* re-enable interrupts, put back the bank selection register */
	spin_lock_irqsave(&adapter->lock, flags);
	if (KS8842_USE_DMA(adapter))
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	else
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* Make sure timberdale continues DMA operations, they are stopped while
	   we are handling the ks8842 because we might change bank */
	if (KS8842_USE_DMA(adapter))
		ks8842_resume_dma(adapter);

	spin_unlock_irqrestore(&adapter->lock, flags);
}
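
/* Hard interrupt handler: mask further interrupt sources (except RX in DMA
 * mode, which the FPGA relies on), defer the real work to the tasklet and
 * tell timberdale to resume its DMA operations.
 */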
static irqreturn_t ks8842_irq(int irq, void *devid)
{
	struct net_device *netdev = devid;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	irqreturn_t ret = IRQ_NONE;

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	if (isr) {
		if (KS8842_USE_DMA(adapter))
			/* disable all but RX IRQ, since the FPGA relies on it*/
			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
		else
			/* disable IRQ */
			ks8842_write16(adapter, 18, 0x00, REG_IER);

		/* schedule tasklet */
		tasklet_schedule(&adapter->tasklet);

		ret = IRQ_HANDLED;
	}

	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* After an interrupt, tell timberdale to continue DMA operations.
	   DMA is disabled while we are handling the ks8842 because we might
	   change bank */
	ks8842_resume_dma(adapter);

	return ret;
}
static void ks8842_dma_rx_cb(void *data)
{
	struct net_device *netdev = data;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "RX DMA finished\n");
	/* schedule tasklet */
	if (adapter->dma_rx.adesc)
		tasklet_schedule(&adapter->dma_rx.tasklet);
}

static void ks8842_dma_tx_cb(void *data)
{
	struct net_device *netdev = data;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;

	netdev_dbg(netdev, "TX DMA finished\n");

	if (!ctl->adesc)
		return;

	netdev->stats.tx_packets++;
	ctl->adesc = NULL;

	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_stop_dma(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	tx_ctl->adesc = NULL;
	if (tx_ctl->chan)
		dmaengine_terminate_all(tx_ctl->chan);

	rx_ctl->adesc = NULL;
	if (rx_ctl->chan)
		dmaengine_terminate_all(rx_ctl->chan);

	if (sg_dma_address(&rx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(&rx_ctl->sg) = 0;

	dev_kfree_skb(rx_ctl->skb);
	rx_ctl->skb = NULL;
}

static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	ks8842_stop_dma(adapter);

	if (tx_ctl->chan)
		dma_release_channel(tx_ctl->chan);
	tx_ctl->chan = NULL;

	if (rx_ctl->chan)
		dma_release_channel(rx_ctl->chan);
	rx_ctl->chan = NULL;

	tasklet_kill(&rx_ctl->tasklet);

	if (sg_dma_address(&tx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	sg_dma_address(&tx_ctl->sg) = 0;

	kfree(tx_ctl->buf);
	tx_ctl->buf = NULL;
}
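
/* dmaengine filter: pick the channel whose id matches the channel number
 * passed in the platform data.
 */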
static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
	return chan->chan_id == (long)filter_param;
}

static int ks8842_alloc_dma_bufs(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
	int err;

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	sg_init_table(&tx_ctl->sg, 1);

	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
		(void *)(long)tx_ctl->channel);
	if (!tx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	/* allocate DMA buffer */
	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!tx_ctl->buf) {
		err = -ENOMEM;
		goto err;
	}

	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
		err = -ENOMEM;
		sg_dma_address(&tx_ctl->sg) = 0;
		goto err;
	}

	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
		(void *)(long)rx_ctl->channel);
	if (!rx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
		(unsigned long)netdev);

	return 0;
err:
	ks8842_dealloc_dma_bufs(adapter);
	return err;
}
/* Netdevice operations */

static int ks8842_open(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s - entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		err = ks8842_alloc_dma_bufs(netdev);

		if (!err) {
			/* start RX dma */
			err = __ks8842_start_new_rx_dma(netdev);
			if (err)
				ks8842_dealloc_dma_bufs(adapter);
		}

		if (err) {
			printk(KERN_WARNING DRV_NAME
				": Failed to initiate DMA, running PIO\n");
			ks8842_dealloc_dma_bufs(adapter);
			adapter->dma_rx.channel = -1;
			adapter->dma_tx.channel = -1;
		}
	}

	/* reset the HW */
	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
		netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
		return err;
	}

	return 0;
}

static int ks8842_close(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s - entry\n", __func__);

	cancel_work_sync(&adapter->timeout_work);

	if (KS8842_USE_DMA(adapter))
		ks8842_dealloc_dma_bufs(adapter);

	/* free the irq */
	free_irq(adapter->irq, netdev);

	/* disable the switch */
	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);

	return 0;
}
static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
	struct net_device *netdev)
{
	int ret;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		unsigned long flags;
		ret = ks8842_tx_frame_dma(skb, netdev);
		/* for now only allow one transfer at a time */
		spin_lock_irqsave(&adapter->lock, flags);
		if (adapter->dma_tx.adesc)
			netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		return ret;
	}

	ret = ks8842_tx_frame(skb, netdev);

	if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
		netif_stop_queue(netdev);

	return ret;
}

static int ks8842_set_mac(struct net_device *netdev, void *p)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	char *mac = (u8 *)addr->sa_data;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	ks8842_write_mac_addr(adapter, mac);
	return 0;
}
static void ks8842_tx_timeout_work(struct work_struct *work)
{
	struct ks8842_adapter *adapter =
		container_of(work, struct ks8842_adapter, timeout_work);
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	spin_lock_irqsave(&adapter->lock, flags);

	if (KS8842_USE_DMA(adapter))
		ks8842_stop_dma(adapter);

	/* disable interrupts */
	ks8842_write16(adapter, 18, 0, REG_IER);
	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);

	netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->lock, flags);

	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	if (KS8842_USE_DMA(adapter))
		__ks8842_start_new_rx_dma(netdev);
}

static void ks8842_tx_timeout(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	schedule_work(&adapter->timeout_work);
}

static const struct net_device_ops ks8842_netdev_ops = {
	.ndo_open = ks8842_open,
	.ndo_stop = ks8842_close,
	.ndo_start_xmit = ks8842_xmit_frame,
	.ndo_set_mac_address = ks8842_set_mac,
	.ndo_tx_timeout = ks8842_tx_timeout,
	.ndo_validate_addr = eth_validate_addr
};

static const struct ethtool_ops ks8842_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};
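
/* Probe: map the register window, pick DMA channels from the platform data
 * (DMA is only supported behind timberdale), choose a MAC address from the
 * platform data, the chip, or at random, and register the net device.
 */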
static int ks8842_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *iomem;
	struct net_device *netdev;
	struct ks8842_adapter *adapter;
	struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u16 id;
	unsigned i;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
		goto err_mem_region;

	netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
	adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
	adapter->conf_flags = iomem->flags;

	if (!adapter->hw_addr)
		goto err_ioremap;

	adapter->irq = platform_get_irq(pdev, 0);
	if (adapter->irq < 0) {
		err = adapter->irq;
		goto err_get_irq;
	}

	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;

	/* DMA is only supported when accessed via timberdale */
	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
	    (pdata->tx_dma_channel != -1) &&
	    (pdata->rx_dma_channel != -1)) {
		adapter->dma_rx.channel = pdata->rx_dma_channel;
		adapter->dma_tx.channel = pdata->tx_dma_channel;
	} else {
		adapter->dma_rx.channel = -1;
		adapter->dma_tx.channel = -1;
	}

	tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
	spin_lock_init(&adapter->lock);

	netdev->netdev_ops = &ks8842_netdev_ops;
	netdev->ethtool_ops = &ks8842_ethtool_ops;

	/* Check if a mac address was given */
	i = netdev->addr_len;
	if (pdata) {
		for (i = 0; i < netdev->addr_len; i++)
			if (pdata->macaddr[i] != 0)
				break;

		if (i < netdev->addr_len)
			/* an address was passed, use it */
			memcpy(netdev->dev_addr, pdata->macaddr,
				netdev->addr_len);
	}

	if (i == netdev->addr_len) {
		ks8842_read_mac_addr(adapter, netdev->dev_addr);

		if (!is_valid_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	}

	id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);

	return 0;

err_register:
err_get_irq:
	iounmap(adapter->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(iomem->start, resource_size(iomem));
err_mem_region:
	return err;
}
static int ks8842_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	unregister_netdev(netdev);
	tasklet_kill(&adapter->tasklet);
	iounmap(adapter->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	return 0;
}

static struct platform_driver ks8842_platform_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = ks8842_probe,
	.remove = ks8842_remove,
};

module_platform_driver(ks8842_platform_driver);

MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ks8842");