  1. /*
  2. * Amiga Linux/68k A2065 Ethernet Driver
  3. *
  4. * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
  5. *
  6. * Fixes and tips by:
  7. * - Janos Farkas (CHEXUM@sparta.banki.hu)
  8. * - Jes Degn Soerensen (jds@kom.auc.dk)
  9. * - Matt Domsch (Matt_Domsch@dell.com)
  10. *
  11. * ----------------------------------------------------------------------------
  12. *
  13. * This program is based on
  14. *
  15. * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
  16. * (C) Copyright 1995 by Geert Uytterhoeven,
  17. * Peter De Schrijver
  18. *
  19. * lance.c: An AMD LANCE ethernet driver for linux.
  20. * Written 1993-94 by Donald Becker.
  21. *
  22. * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
  23. * Advanced Micro Devices
  24. * Publication #16907, Rev. B, Amendment/0, May 1994
  25. *
  26. * ----------------------------------------------------------------------------
  27. *
  28. * This file is subject to the terms and conditions of the GNU General Public
  29. * License. See the file COPYING in the main directory of the Linux
  30. * distribution for more details.
  31. *
  32. * ----------------------------------------------------------------------------
  33. *
  34. * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
  35. *
  36. * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
  37. * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
  38. */
  39. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40. /*#define DEBUG*/
  41. /*#define TEST_HITS*/
  42. #include <linux/errno.h>
  43. #include <linux/netdevice.h>
  44. #include <linux/etherdevice.h>
  45. #include <linux/module.h>
  46. #include <linux/stddef.h>
  47. #include <linux/kernel.h>
  48. #include <linux/interrupt.h>
  49. #include <linux/ioport.h>
  50. #include <linux/skbuff.h>
  51. #include <linux/string.h>
  52. #include <linux/init.h>
  53. #include <linux/crc32.h>
  54. #include <linux/zorro.h>
  55. #include <linux/bitops.h>
  56. #include <asm/byteorder.h>
  57. #include <asm/irq.h>
  58. #include <asm/amigaints.h>
  59. #include <asm/amigahw.h>
  60. #include "a2065.h"
/* Transmit/Receive Ring Definitions */

/* log2 of the ring sizes; rings must be powers of two so the
 * *_RING_MOD_MASK values below give cheap modulo wrap-around.
 */
#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)	/* 4 Tx slots */
#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)	/* 16 Rx slots */
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

/* Per-slot buffer size; large enough for a max-size Ethernet frame */
#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE
/* Layout of the Lance's RAM Buffer
 *
 * This structure is mapped over the board RAM shared with the LANCE
 * chip (see a2065_init_one: init_block points at dev->mem_start), so
 * its layout must match what the Am7990 expects for its init block.
 */
struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	/* Packet buffers referenced by the descriptors above */
	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};
/* Private Device Data
 *
 * Per-interface state kept in netdev_priv(dev).
 */
struct lance_private {
	char *name;
	volatile struct lance_regs *ll;			/* chip register window */
	volatile struct lance_init_block *init_block;	/* Hosts view */
	volatile struct lance_init_block *lance_init_block; /* Lance view */

	/* ring cursors: *_new = next slot to use, *_old = next to reclaim */
	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;	/* log2 ring sizes */
	int rx_ring_mod_mask, tx_ring_mod_mask;		/* ring-index wrap masks */

	int tpe;		/* cable-selection is TPE */
	int auto_select;	/* cable-selection by carrier */
	unsigned short busmaster_regval;		/* value loaded into CSR3 */

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;	/* retry timer for set_multicast */
	struct net_device *dev;			/* back-pointer for the timer */
};
/* Convert a host virtual address into the 24-bit address space the
 * LANCE sees (the chip only drives 24 address bits).
 */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
  108. /* Load the CSR registers */
  109. static void load_csrs(struct lance_private *lp)
  110. {
  111. volatile struct lance_regs *ll = lp->ll;
  112. volatile struct lance_init_block *aib = lp->lance_init_block;
  113. int leptr = LANCE_ADDR(aib);
  114. ll->rap = LE_CSR1;
  115. ll->rdp = (leptr & 0xFFFF);
  116. ll->rap = LE_CSR2;
  117. ll->rdp = leptr >> 16;
  118. ll->rap = LE_CSR3;
  119. ll->rdp = lp->busmaster_regval;
  120. /* Point back to csr0 */
  121. ll->rap = LE_CSR0;
  122. }
  123. /* Setup the Lance Rx and Tx rings */
  124. static void lance_init_ring(struct net_device *dev)
  125. {
  126. struct lance_private *lp = netdev_priv(dev);
  127. volatile struct lance_init_block *ib = lp->init_block;
  128. volatile struct lance_init_block *aib = lp->lance_init_block;
  129. /* for LANCE_ADDR computations */
  130. int leptr;
  131. int i;
  132. /* Lock out other processes while setting up hardware */
  133. netif_stop_queue(dev);
  134. lp->rx_new = lp->tx_new = 0;
  135. lp->rx_old = lp->tx_old = 0;
  136. ib->mode = 0;
  137. /* Copy the ethernet address to the lance init block
  138. * Note that on the sparc you need to swap the ethernet address.
  139. */
  140. ib->phys_addr[0] = dev->dev_addr[1];
  141. ib->phys_addr[1] = dev->dev_addr[0];
  142. ib->phys_addr[2] = dev->dev_addr[3];
  143. ib->phys_addr[3] = dev->dev_addr[2];
  144. ib->phys_addr[4] = dev->dev_addr[5];
  145. ib->phys_addr[5] = dev->dev_addr[4];
  146. /* Setup the Tx ring entries */
  147. netdev_dbg(dev, "TX rings:\n");
  148. for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) {
  149. leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
  150. ib->btx_ring[i].tmd0 = leptr;
  151. ib->btx_ring[i].tmd1_hadr = leptr >> 16;
  152. ib->btx_ring[i].tmd1_bits = 0;
  153. ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
  154. ib->btx_ring[i].misc = 0;
  155. if (i < 3)
  156. netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
  157. }
  158. /* Setup the Rx ring entries */
  159. netdev_dbg(dev, "RX rings:\n");
  160. for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
  161. leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
  162. ib->brx_ring[i].rmd0 = leptr;
  163. ib->brx_ring[i].rmd1_hadr = leptr >> 16;
  164. ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
  165. ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
  166. ib->brx_ring[i].mblength = 0;
  167. if (i < 3)
  168. netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
  169. }
  170. /* Setup the initialization block */
  171. /* Setup rx descriptor pointer */
  172. leptr = LANCE_ADDR(&aib->brx_ring);
  173. ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
  174. ib->rx_ptr = leptr;
  175. netdev_dbg(dev, "RX ptr: %08x\n", leptr);
  176. /* Setup tx descriptor pointer */
  177. leptr = LANCE_ADDR(&aib->btx_ring);
  178. ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
  179. ib->tx_ptr = leptr;
  180. netdev_dbg(dev, "TX ptr: %08x\n", leptr);
  181. /* Clear the multicast filter */
  182. ib->filter[0] = 0;
  183. ib->filter[1] = 0;
  184. }
  185. static int init_restart_lance(struct lance_private *lp)
  186. {
  187. volatile struct lance_regs *ll = lp->ll;
  188. int i;
  189. ll->rap = LE_CSR0;
  190. ll->rdp = LE_C0_INIT;
  191. /* Wait for the lance to complete initialization */
  192. for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
  193. barrier();
  194. if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
  195. pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
  196. return -EIO;
  197. }
  198. /* Clear IDON by writing a "1", enable interrupts and start lance */
  199. ll->rdp = LE_C0_IDON;
  200. ll->rdp = LE_C0_INEA | LE_C0_STRT;
  201. return 0;
  202. }
/* Receive path: drain all completed descriptors from the Rx ring.
 *
 * Called from the interrupt handler on RINT.  Copies each good frame
 * into a fresh skb, hands it to the stack via netif_rx(), and returns
 * the descriptor to the LANCE by setting LE_R1_OWN.  Always returns 0.
 */
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	char buf[RX_RING_SIZE + 1];

	/* Debug aid: dump ring ownership ('_'/'X' marks the rx_new slot) */
	for (i = 0; i < RX_RING_SIZE; i++) {
		char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
		if (i == lp->rx_new)
			buf[i] = r1_own ? '_' : 'X';
		else
			buf[i] = r1_own ? '.' : '1';
	}
	buf[RX_RING_SIZE] = 0;
	pr_debug("RxRing TestHits: [%s]\n", buf);
#endif

	/* Acknowledge the receive interrupt, keep interrupts enabled */
	ll->rdp = LE_C0_RINT | LE_C0_INEA;

	/* Walk the ring until we hit a descriptor still owned by the LANCE */
	for (rd = &ib->brx_ring[lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			/* NOTE(review): this 'continue' skips the
			 * return-to-pool step at the loop bottom, so the
			 * descriptor keeps its state — mirrors other LANCE
			 * drivers; confirm this is intended.
			 */
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			/* good frame: length minus the 4-byte FCS */
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				/* no memory: drop frame, recycle descriptor */
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);		/* 16 byte align */
			skb_put(skb, len);		/* make room */
			skb_copy_to_linear_data(skb,
				 (unsigned char *)&ib->rx_buf[lp->rx_new][0],
				 len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
/* Transmit completion: reclaim finished descriptors from the Tx ring.
 *
 * Called from the interrupt handler on TINT.  Updates error/collision
 * statistics and, on fatal Tx errors (carrier lost with auto-select, or
 * buffer/underflow errors), stops and fully restarts the chip.
 * Always returns 0.
 */
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	/* walk from the oldest unreclaimed slot up to tx_new */
	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					/* flip between TPE and AUI and retry */
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off
			 * the transmitter, so restart the adapter
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* So we don't count the packet more than once. */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}
  343. static int lance_tx_buffs_avail(struct lance_private *lp)
  344. {
  345. if (lp->tx_old <= lp->tx_new)
  346. return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
  347. return lp->tx_old - lp->tx_new - 1;
  348. }
/* Shared interrupt handler for the Amiga PORTS interrupt.
 *
 * Reads CSR0, bails out with IRQ_NONE if this chip did not interrupt,
 * otherwise acknowledges all pending sources, services Rx/Tx work,
 * accounts misc errors, and restarts the chip on a bus-master error.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;	/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	/* mask out the write-one bits that would re-trigger chip actions */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	/* resume the queue if lance_tx() freed up ring space */
	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	/* final ack of the sticky status bits, re-enable interrupts */
	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}
/* ndo_open: bring the interface up.
 *
 * Stops the chip, installs the shared IRQ handler, programs the CSRs
 * and rings, starts the Tx queue, and finally (re)starts the LANCE.
 * Returns 0 or a negative errno from request_irq()/init_restart_lance().
 */
static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}
/* ndo_stop: bring the interface down.
 *
 * Stops the queue, cancels the pending multicast retry timer, halts
 * the chip, and releases the shared IRQ.  Always returns 0.
 */
static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}
/* Full chip reset: stop, reprogram CSRs and rings, restart.
 *
 * Used by the Tx watchdog.  Refreshes the trans_start timestamp so the
 * watchdog does not immediately fire again.  Returns the status from
 * init_restart_lance() (0 on success, -EIO on failure).
 */
static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);
	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}
  434. static void lance_tx_timeout(struct net_device *dev)
  435. {
  436. struct lance_private *lp = netdev_priv(dev);
  437. volatile struct lance_regs *ll = lp->ll;
  438. netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
  439. lance_reset(dev);
  440. netif_wake_queue(dev);
  441. }
/* ndo_start_xmit: queue one skb on the Tx ring.
 *
 * Pads short frames to ETH_ZLEN, copies the data into the next ring
 * buffer inside an irq-off critical section, hands the descriptor to
 * the LANCE, and kicks transmit demand.  The skb is always consumed.
 * Returns NETDEV_TX_OK.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	/* protect ring state against the interrupt handler */
	local_irq_save(flags);

	/* no free slot: drop the frame (skb freed at out_free) */
	if (!lance_tx_buffs_avail(lp))
		goto out_free;

#ifdef DEBUG
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	/* negative byte count in the low 12 bits, as the chip expects */
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	/* ring is now full: stop the queue until lance_tx reclaims slots */
	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
 out_free:
	dev_kfree_skb(skb);

	local_irq_restore(flags);
	return status;
}
  479. /* taken from the depca driver */
  480. static void lance_load_multicast(struct net_device *dev)
  481. {
  482. struct lance_private *lp = netdev_priv(dev);
  483. volatile struct lance_init_block *ib = lp->init_block;
  484. volatile u16 *mcast_table = (u16 *)&ib->filter;
  485. struct netdev_hw_addr *ha;
  486. u32 crc;
  487. /* set all multicast bits */
  488. if (dev->flags & IFF_ALLMULTI) {
  489. ib->filter[0] = 0xffffffff;
  490. ib->filter[1] = 0xffffffff;
  491. return;
  492. }
  493. /* clear the multicast filter */
  494. ib->filter[0] = 0;
  495. ib->filter[1] = 0;
  496. /* Add addresses */
  497. netdev_for_each_mc_addr(ha, dev) {
  498. crc = ether_crc_le(6, ha->addr);
  499. crc = crc >> 26;
  500. mcast_table[crc >> 4] |= 1 << (crc & 0xf);
  501. }
  502. }
/* ndo_set_rx_mode: apply promiscuous/multicast settings.
 *
 * The mode word lives in the shared init block, so the chip must be
 * stopped and re-initialized to change it.  If transmits are still in
 * flight, defer via the multicast_timer instead of dropping them.
 */
static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	/* Tx ring not empty yet: retry in ~4 jiffies */
	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}
  529. static void lance_set_multicast_retry(struct timer_list *t)
  530. {
  531. struct lance_private *lp = from_timer(lp, t, multicast_timer);
  532. lance_set_multicast(lp->dev);
  533. }
static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent);
static void a2065_remove_one(struct zorro_dev *z);

/* Zorro boards this driver binds to: two Commodore product IDs plus
 * the Ameristar clone.  Exported for module autoloading.
 */
static const struct zorro_device_id a2065_zorro_tbl[] = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);

static struct zorro_driver a2065_driver = {
	.name = "a2065",
	.id_table = a2065_zorro_tbl,
	.probe = a2065_init_one,
	.remove = a2065_remove_one,
};

/* net_device callbacks implemented above */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open = lance_open,
	.ndo_stop = lance_close,
	.ndo_start_xmit = lance_start_xmit,
	.ndo_tx_timeout = lance_tx_timeout,
	.ndo_set_rx_mode = lance_set_multicast,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
  559. static int a2065_init_one(struct zorro_dev *z,
  560. const struct zorro_device_id *ent)
  561. {
  562. struct net_device *dev;
  563. struct lance_private *priv;
  564. unsigned long board = z->resource.start;
  565. unsigned long base_addr = board + A2065_LANCE;
  566. unsigned long mem_start = board + A2065_RAM;
  567. struct resource *r1, *r2;
  568. u32 serial;
  569. int err;
  570. r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
  571. "Am7990");
  572. if (!r1)
  573. return -EBUSY;
  574. r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
  575. if (!r2) {
  576. release_mem_region(base_addr, sizeof(struct lance_regs));
  577. return -EBUSY;
  578. }
  579. dev = alloc_etherdev(sizeof(struct lance_private));
  580. if (dev == NULL) {
  581. release_mem_region(base_addr, sizeof(struct lance_regs));
  582. release_mem_region(mem_start, A2065_RAM_SIZE);
  583. return -ENOMEM;
  584. }
  585. priv = netdev_priv(dev);
  586. r1->name = dev->name;
  587. r2->name = dev->name;
  588. serial = be32_to_cpu(z->rom.er_SerialNumber);
  589. dev->dev_addr[0] = 0x00;
  590. if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
  591. dev->dev_addr[1] = 0x80;
  592. dev->dev_addr[2] = 0x10;
  593. } else { /* Ameristar */
  594. dev->dev_addr[1] = 0x00;
  595. dev->dev_addr[2] = 0x9f;
  596. }
  597. dev->dev_addr[3] = (serial >> 16) & 0xff;
  598. dev->dev_addr[4] = (serial >> 8) & 0xff;
  599. dev->dev_addr[5] = serial & 0xff;
  600. dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
  601. dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
  602. dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
  603. priv->ll = (volatile struct lance_regs *)dev->base_addr;
  604. priv->init_block = (struct lance_init_block *)dev->mem_start;
  605. priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
  606. priv->auto_select = 0;
  607. priv->busmaster_regval = LE_C3_BSWP;
  608. priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
  609. priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
  610. priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
  611. priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
  612. priv->dev = dev;
  613. dev->netdev_ops = &lance_netdev_ops;
  614. dev->watchdog_timeo = 5*HZ;
  615. dev->dma = 0;
  616. timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0);
  617. err = register_netdev(dev);
  618. if (err) {
  619. release_mem_region(base_addr, sizeof(struct lance_regs));
  620. release_mem_region(mem_start, A2065_RAM_SIZE);
  621. free_netdev(dev);
  622. return err;
  623. }
  624. zorro_set_drvdata(z, dev);
  625. netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
  626. board, dev->dev_addr);
  627. return 0;
  628. }
/* Remove one board: tear down in reverse order of probe.
 * The interface must be unregistered before its regions are released.
 */
static void a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	/* base_addr/mem_start hold virtual addresses; convert back to
	 * the physical Zorro addresses the regions were requested with
	 */
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}
/* Module entry: register with the Zorro bus; the probe callback runs
 * for every matching board found.
 */
static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

/* Module exit: unregister; remove callback runs for each bound board */
static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");