// SPDX-License-Identifier: GPL-2.0-only
/*
 * 7990.c -- LANCE ethernet IC generic routines.
 * This is an attempt to separate out the bits of various ethernet
 * drivers that are common because they all use the AMD 7990 LANCE
 * (Local Area Network Controller for Ethernet) chip.
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 *
 * Most of this stuff was obtained by looking at other LANCE drivers,
 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
 * most of a2065 and sunlance with the aim of merging them, so the
 * common code was pretty obvious.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <asm/irq.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif

#include "7990.h"
#define WRITERAP(lp, x)	out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp, x)	out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp)	in_be16(lp->base + LANCE_RDP)

#if IS_ENABLED(CONFIG_HPLANCE)
#include "hplance.h"

#undef WRITERAP
#undef WRITERDP
#undef READRDP

#if IS_ENABLED(CONFIG_MVME147_NET)

/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x)	(lp->writerap(lp, x))
#define WRITERDP(lp, x)	(lp->writerdp(lp, x))
#define READRDP(lp)	(lp->readrdp(lp))

#else

/* These inlines can be used if only CONFIG_HPLANCE is defined */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}

#endif
#endif /* IS_ENABLED(CONFIG_HPLANCE) */
/* debugging output macros, various flavours */
/* #define TEST_HITS */
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
	int t; \
	for (t = 0; t < RX_RING_SIZE; t++) { \
		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
		       ib->brx_ring[t].length, \
		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
	} \
	for (t = 0; t < TX_RING_SIZE; t++) { \
		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
		       ib->btx_ring[t].length, \
		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
	} \
} while (0)
#else
#define PRINT_RINGS()
#endif
/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR(aib);

	WRITERAP(lp, LE_CSR1);			/* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval);	/* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 */
	WRITERAP(lp, LE_CSR0);
}
/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0

/* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = LE_MO_PROM;                             /* normal, enable Tx & Rx */

	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];
#else
	for (i = 0; i < 6; i++)
		ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries */
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0      = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc      = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (DEBUG_IRING)
		printk("RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0      = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength  = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
	PRINT_RINGS();
}
/* LANCE must be STOPped before we do this, too... */
static int init_restart_lance(struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}
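
/* Full reinitialisation: STOP the chip, reload the CSRs and the rings,
 * then restart it. Used from lance_open() and the Tx timeout handler.
 */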
static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs(lp);
	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
	printk("Lance restart=%d\n", status);
#endif
	return status;
}
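
/* Receive path, called from the interrupt handler on RINT: walk the Rx
 * ring from rx_new, copy each completed buffer into a freshly allocated
 * skb, hand it to the stack with netif_rx(), then give the descriptor
 * back to the LANCE by setting LE_R1_OWN again.
 */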
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
#ifdef TEST_HITS
	int i;
#endif

#ifdef TEST_HITS
	printk("[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk("]");
#endif
#ifdef CONFIG_HP300
	blinken_leds(0x40, 0);
#endif
	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);	/* ack Rx int, reenable ints */
	for (rd = &ib->brx_ring[lp->rx_new];	/* For each Rx ring we own... */
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, len);	/* make room */
			skb_copy_to_linear_data(skb,
					(unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
					len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
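
/* Transmit-completion path, called from the interrupt handler on TINT:
 * walk the Tx ring from tx_old up to tx_new, update the error and
 * collision counters, and restart the chip on carrier loss or on FIFO
 * buffer/underflow errors, which switch the transmitter off.
 */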
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name,
					       lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}
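
/* Interrupt handler: read csr0, acknowledge the pending interrupt sources,
 * then dispatch to lance_rx()/lance_tx() and account for babble, missed
 * frames and bus-master (MERR) errors. Runs under lp->devlock.
 */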
static irqreturn_t
lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock(&lp->devlock);

	WRITERAP(lp, LE_CSR0);		/* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {	/* Check if any interrupt has */
		spin_unlock(&lp->devlock);
		return IRQ_NONE;	/* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);
	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock(&lp->devlock);
	return IRQ_HANDLED;
}
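
/* Open the device: install the (shared) interrupt handler, reset and
 * reinitialise the chip, then start the transmit queue.
 */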
int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int res;

	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
		return -EAGAIN;

	res = lance_reset(dev);
	spin_lock_init(&lp->devlock);
	netif_start_queue(dev);

	return res;
}
EXPORT_SYMBOL_GPL(lance_open);
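
/* Close the device: stop the queue, STOP the LANCE and release the IRQ. */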
int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
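
/* Tx watchdog: the transmitter appears to be wedged, so reset the chip
 * and wake the queue.
 */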
void lance_tx_timeout(struct net_device *dev)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
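
/* Queue a packet for transmission: copy it into the next Tx buffer, pad
 * short frames to ETH_ZLEN, hand the descriptor to the LANCE (LE_T1_OWN)
 * and kick the chip with TDMD. The queue stays stopped while the ring
 * is full.
 */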
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;
	unsigned long flags;

	netif_stop_queue(dev);

	if (!TX_BUFFS_AVAIL) {
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n");
			printk("%2.2x ", skb->data[i]);
		}
	}
#endif
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-len) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev_consume_skb_any(skb);

	spin_lock_irqsave(&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue(dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore(&lp->devlock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
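
/* Build the 64-bit logical address filter: each multicast address is
 * hashed with ether_crc_le() and the top six bits of the CRC select one
 * bit in the filter words of the init block.
 */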
/* taken from the depca driver via a2065.c */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}
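
/* Update the Rx mode: wait for pending transmits to drain, stop the chip,
 * rebuild the rings with the new promiscuous/multicast settings and
 * restart it.
 */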
void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue(dev);

	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);

	if (!stopped)
		netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);

#ifdef CONFIG_NET_POLL_CONTROLLER
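/* Netpoll hook: service the chip with interrupts disabled by invoking
 * the interrupt handler directly.
 */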
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock(&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock(&lp->devlock);
	lance_interrupt(dev->irq, dev);
}
EXPORT_SYMBOL_GPL(lance_poll);
#endif

MODULE_LICENSE("GPL");