
/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800
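/*
 * Ring geometry (as allocated in mace_open() below): one MACE_BUFF_SIZE
 * (2 KB) transmit buffer (N_TX_RING == 1) and eight 2 KB receive buffers
 * (N_RX_RING == 8), one frame per buffer.
 */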
/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */
#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};
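/*
 * rx_slot and tx_slot hold the PSC command-set offset currently in use
 * (apparently 0x00 for PSC_SET0 and 0x10 for PSC_SET1, judging by the
 * values toggled below); tx_sloti tracks the set whose completion is
 * expected next, and tx_count is the number of free Tx buffers.
 */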
struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};
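/*
 * Layout of a received frame as it appears in an Rx DMA buffer: the MACE
 * receive status bytes (receive count, receive status, runt packet count,
 * receive collision count) come first, followed by the frame data itself.
 */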
#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);
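/*
 * The 16-bit values written to the PSC command and control registers below
 * (0x0100, 0x9800, 0x8800, 0x1100, ...) are not documented here; presumably
 * they were carried over from the BSD drivers this code is derived from.
 */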
/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
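/*
 * The Rx DMA engine alternates between the two PSC command sets (offsets
 * 0x00 and 0x10): mace_rxdma_reset() below primes both with the same ring,
 * and mace_dma_intr() flips rx_slot whenever a set runs out of buffers.
 */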
/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}
static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_multicast_list	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	int err;

	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return -ENODEV;

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = (void *)MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops		= &mace_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}
/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT;	/* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}
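/*
 * Chip revision BROKEN_ADDRCHG_REV (0x0941) apparently misbehaves when the
 * physical or logical address is changed via the ADDRCHG handshake, so for
 * that revision the IAC register is written without ADDRCHG and the
 * completion poll is skipped (in mace_reset() above, and in
 * __mace_set_address() and mace_set_multicast() below).
 */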
/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}
static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}
/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}
/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */
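/*
 * Only one Tx DMA buffer is in play (N_TX_RING == 1), so the queue is
 * stopped for every frame and restarted from the interrupt handlers once
 * the buffer is free again.
 */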
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
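/*
 * Program the 64-bit logical address (multicast) filter. Each multicast
 * address selects one filter bit via the top six bits of the little-endian
 * CRC-32 of the address.
 */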
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}
static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;		/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}
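/*
 * MACE chip interrupt handler. This services the chip-level events
 * (transmit status, missed packets, babble/jabber); DMA completion is
 * handled separately by mace_dma_intr() below.
 */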
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir;	/* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}
static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}
/*
 * Handle a newly arrived frame
 */
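/*
 * The received length is 12 bits: the low 8 bits are in rcvcnt and the
 * high 4 bits sit in the low nibble of the receive status byte.
 */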
static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = dev_alloc_skb(frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}
/*
 * The PSC has passed us a DMA interrupt event.
 */
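/*
 * For the read channel, ENETRD_LEN appears to count down the number of Rx
 * buffers still available to the DMA engine, so head = N_RX_RING - left is
 * the index just past the last buffer that has been filled.
 */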
static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int __devexit mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe	= mace_probe,
	.remove	= __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name	= mac_mace_string,
		.owner	= THIS_MODULE,
	},
};

static int __init mac_mace_init_module(void)
{
	if (!MACH_IS_MAC)
		return -ENODEV;

	return platform_driver_register(&mac_mace_driver);
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);