
/* sbni.c: Granch SBNI12 leased line adapters driver for linux
 *
 *	Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
 *
 *	Previous versions were written by Yaroslav Polyakov,
 *	Alexey Zverev and Max Khon.
 *
 *	Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
 *	double-channel, PCI and ISA modifications.
 *	More info and useful utilities for working with SBNI12 cards can be
 *	found at http://www.granch.com (English) or http://www.granch.ru (Russian)
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License.
 *
 *
 *  5.0.1	Jun 22 2001
 *	  - Fixed bug in probe
 *  5.0.0	Jun 06 2001
 *	  - Driver was completely redesigned by Denis I.Timofeev,
 *	  - now PCI/Dual and ISA/Dual (with a single interrupt line) models
 *	  - are supported
 *  3.3.0	Thu Feb 24 21:30:28 NOVT 2000
 *	  - PCI cards support
 *  3.2.0	Mon Dec 13 22:26:53 NOVT 1999
 *	  - Completely rebuilt all the packet storage system
 *	  -    to work in Ethernet-like style.
 *  3.1.1	just fixed some bugs (5 aug 1999)
 *  3.1.0	added balancing feature (26 apr 1999)
 *  3.0.1	just fixed some bugs (14 apr 1999).
 *  3.0.0	Initial Revision, Yaroslav Polyakov (24 Feb 1999)
 *	  - added pre-calculation for CRC, fixed bug with "len-2" frames,
 *	  - removed outbound fragmentation (MTU=1000), wrote CRC calculation
 *	  - in asm, added work with hard_headers and now we have our own cache
 *	  - for them, optionally supported word-interchange on some chipsets,
 *
 *	Known problem: this driver wasn't tested on multiprocessor machines.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/Space.h>

#include <asm/io.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#include "sbni.h"
/* device private data */

struct net_local {
	struct timer_list	watchdog;
	struct net_device	*watchdog_dev;

	spinlock_t	lock;
	struct sk_buff	*rx_buf_p;		/* receive buffer ptr */
	struct sk_buff	*tx_buf_p;		/* transmit buffer ptr */

	unsigned int	framelen;		/* current frame length */
	unsigned int	maxframe;		/* maximum valid frame length */
	unsigned int	state;
	unsigned int	inppos, outpos;		/* positions in rx/tx buffers */

	/* transmitting frame number - counts down from the number of frames to 1 */
	unsigned int	tx_frameno;

	/* expected number of next receiving frame */
	unsigned int	wait_frameno;

	/* count of failed attempts to send a frame - up to 32 attempts are made
	   before an error, while the receiver on the opposite side of the wire
	   tunes in */
	unsigned int	trans_errors;

	/* idle time; send pong when limit exceeded */
	unsigned int	timer_ticks;

	/* fields used for receive level autoselection */
	int	delta_rxl;
	unsigned int	cur_rxl_index, timeout_rxl;
	unsigned long	cur_rxl_rcvd, prev_rxl_rcvd;

	struct sbni_csr1	csr1;		/* current value of CSR1 */
	struct sbni_in_stats	in_stats;	/* internal statistics */

	struct net_device	*second;	/* for ISA/dual cards */

#ifdef CONFIG_SBNI_MULTILINE
	struct net_device	*master;
	struct net_device	*link;
#endif
};
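/*
 * The FL_* bits kept in net_local.state (FL_WAIT_ACK, FL_NEED_RESEND,
 * FL_PREV_OK, FL_LINE_DOWN, FL_SLOW_MODE, FL_SECONDARY, FL_SLAVE) come
 * from sbni.h.
 */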
static int  sbni_card_probe( unsigned long );
static int  sbni_pci_probe( struct net_device * );
static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
static int  sbni_open( struct net_device * );
static int  sbni_close( struct net_device * );
static netdev_tx_t sbni_start_xmit(struct sk_buff *, struct net_device * );
static int  sbni_ioctl( struct net_device *, struct ifreq *, int );
static void set_multicast_list( struct net_device * );

static irqreturn_t sbni_interrupt( int, void * );
static void  handle_channel( struct net_device * );
static int   recv_frame( struct net_device * );
static void  send_frame( struct net_device * );
static int   upload_data( struct net_device *,
			  unsigned, unsigned, unsigned, u32 );
static void  download_data( struct net_device *, u32 * );
static void  sbni_watchdog(struct timer_list *);
static void  interpret_ack( struct net_device *, unsigned );
static int   append_frame_to_pkt( struct net_device *, unsigned, u32 );
static void  indicate_pkt( struct net_device * );
static void  card_start( struct net_device * );
static void  prepare_to_send( struct sk_buff *, struct net_device * );
static void  drop_xmit_queue( struct net_device * );
static void  send_frame_header( struct net_device *, u32 * );
static int   skip_tail( unsigned int, unsigned int, u32 );
static int   check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
static void  change_level( struct net_device * );
static void  timeout_change_level( struct net_device * );
static u32   calc_crc32( u32, u8 *, u32 );
static struct sk_buff *  get_rx_buf( struct net_device * );
static int  sbni_init( struct net_device * );

#ifdef CONFIG_SBNI_MULTILINE
static int  enslave( struct net_device *, struct net_device * );
static int  emancipate( struct net_device * );
#endif
static const char  version[] =
	"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";

static bool skip_pci_probe	__initdata = false;
static int  scandone	__initdata = 0;
static int  num		__initdata = 0;

static unsigned char  rxl_tab[];
static u32  crc32tab[];

/* A list of all installed devices, for removing the driver module. */
static struct net_device  *sbni_cards[ SBNI_MAX_NUM_CARDS ];

/* Lists of device parameters */
static u32	io[ SBNI_MAX_NUM_CARDS ] __initdata =
	{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
static u32	irq[ SBNI_MAX_NUM_CARDS ] __initdata;
static u32	baud[ SBNI_MAX_NUM_CARDS ] __initdata;
static u32	rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
	{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
static u32	mac[ SBNI_MAX_NUM_CARDS ] __initdata;

#ifndef MODULE
typedef u32  iarr[];
static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
#endif

/* A zero-terminated list of I/O addresses to be probed on the ISA bus */
static unsigned int  netcard_portlist[ ] __initdata = {
	0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
	0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
	0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
	0 };
#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)

/*
 * Look for an SBNI card at the address stored in dev->base_addr, if nonzero.
 * Otherwise, look through the PCI bus. If no PCI card was found, scan ISA.
 */

static inline int __init
sbni_isa_probe( struct net_device  *dev )
{
	if( dev->base_addr > 0x1ff &&
	    request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
	    sbni_probe1( dev, dev->base_addr, dev->irq ) )
		return  0;
	else {
		pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n",
		       dev->base_addr);
		return  -ENODEV;
	}
}
static const struct net_device_ops  sbni_netdev_ops = {
	.ndo_open		= sbni_open,
	.ndo_stop		= sbni_close,
	.ndo_start_xmit		= sbni_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_do_ioctl		= sbni_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static void __init sbni_devsetup(struct net_device *dev)
{
	ether_setup( dev );
	dev->netdev_ops = &sbni_netdev_ops;
}
int __init sbni_probe(int unit)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct net_local), "sbni",
			   NET_NAME_UNKNOWN, sbni_devsetup);
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &sbni_netdev_ops;

	sprintf(dev->name, "sbni%d", unit);
	netdev_boot_setup_check(dev);

	err = sbni_init(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	err = register_netdev(dev);
	if (err) {
		release_region( dev->base_addr, SBNI_IO_EXTENT );
		free_netdev(dev);
		return err;
	}
	pr_info_once("%s", version);
	return 0;
}
static int __init sbni_init(struct net_device *dev)
{
	int i;

	if( dev->base_addr )
		return  sbni_isa_probe( dev );

	/* otherwise we have to search for our adapter... */
	if( io[ num ] != -1 )
		dev->base_addr	= io[ num ],
		dev->irq	= irq[ num ];
	else if( scandone  ||  io[ 0 ] != -1 )
		return  -ENODEV;

	/* if io[ num ] contains a non-zero address, then it is on the ISA bus */
	if( dev->base_addr )
		return  sbni_isa_probe( dev );

	/* ...otherwise - scan PCI first */
	if( !skip_pci_probe  &&  !sbni_pci_probe( dev ) )
		return  0;

	if( io[ num ] == -1 ) {
		/* Auto-scan will be stopped when the first ISA card is found */
		scandone = 1;
		if( num > 0 )
			return  -ENODEV;
	}

	for( i = 0;  netcard_portlist[ i ];  ++i ) {
		int  ioaddr = netcard_portlist[ i ];
		if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
		    sbni_probe1( dev, ioaddr, 0 ))
			return 0;
	}

	return  -ENODEV;
}
static int __init
sbni_pci_probe( struct net_device  *dev )
{
	struct pci_dev  *pdev = NULL;

	while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
	       != NULL ) {
		int  pci_irq_line;
		unsigned long  pci_ioaddr;

		if( pdev->vendor != SBNI_PCI_VENDOR &&
		    pdev->device != SBNI_PCI_DEVICE )
			continue;

		pci_ioaddr = pci_resource_start( pdev, 0 );
		pci_irq_line = pdev->irq;

		/* Skip cards already claimed by previous calls */
		if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
			if (pdev->subsystem_device != 2)
				continue;

			/* Dual adapter is present, try the second channel */
			if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
					    dev->name ) )
				continue;
		}

		if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
			pr_warn("WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!\n"
				"You should use the PCI BIOS setup to assign a valid IRQ line.\n",
				pci_irq_line );

		/* avoid re-enabling dual adapters */
		if( (pci_ioaddr & 7) == 0  &&  pci_enable_device( pdev ) ) {
			release_region( pci_ioaddr, SBNI_IO_EXTENT );
			pci_dev_put( pdev );
			return  -EIO;
		}
		if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
			SET_NETDEV_DEV(dev, &pdev->dev);
			/* not the best thing to do, but this is all messed up
			   for hotplug systems anyway... */
			pci_dev_put( pdev );
			return  0;
		}
	}
	return  -ENODEV;
}
static struct net_device * __init
sbni_probe1( struct net_device  *dev,  unsigned long  ioaddr,  int  irq )
{
	struct net_local  *nl;

	if( sbni_card_probe( ioaddr ) ) {
		release_region( ioaddr, SBNI_IO_EXTENT );
		return NULL;
	}

	outb( 0, ioaddr + CSR0 );

	if( irq < 2 ) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outb( EN_INT | TR_REQ, ioaddr + CSR0 );
		outb( PR_RES, ioaddr + CSR1 );
		mdelay(50);
		irq = probe_irq_off(irq_mask);
		outb( 0, ioaddr + CSR0 );

		if( !irq ) {
			pr_err("%s: can't detect device irq!\n", dev->name);
			release_region( ioaddr, SBNI_IO_EXTENT );
			return NULL;
		}
	} else if( irq == 2 )
		irq = 9;

	dev->irq = irq;
	dev->base_addr = ioaddr;

	/* Fill in sbni-specific dev fields. */
	nl = netdev_priv(dev);
	if( !nl ) {
		pr_err("%s: unable to get memory!\n", dev->name);
		release_region( ioaddr, SBNI_IO_EXTENT );
		return NULL;
	}

	memset( nl, 0, sizeof(struct net_local) );
	spin_lock_init( &nl->lock );

	/* store MAC address (generate one if it isn't known) */
	*(__be16 *)dev->dev_addr = htons( 0x00ff );
	*(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
		((mac[num] ?
		  mac[num] :
		  (u32)((long)netdev_priv(dev))) & 0x00ffffff));

	/* store link settings (speed, receive level) */
	nl->maxframe  = DEFAULT_FRAME_LEN;
	nl->csr1.rate = baud[ num ];

	if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
		/* autotune rxl */
		nl->cur_rxl_index = DEF_RXL,
		nl->delta_rxl = DEF_RXL_DELTA;
	else
		nl->delta_rxl = 0;
	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
	if( inb( ioaddr + CSR0 ) & 0x01 )
		nl->state |= FL_SLOW_MODE;

	pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
		  dev->name, dev->base_addr, dev->irq,
		  ((u8 *)dev->dev_addr)[3],
		  ((u8 *)dev->dev_addr)[4],
		  ((u8 *)dev->dev_addr)[5]);

	pr_notice("%s: speed %d",
		  dev->name,
		  ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
		  / (1 << nl->csr1.rate));

	if( nl->delta_rxl == 0 )
		pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
	else
		pr_cont(", receive level (auto)\n");

#ifdef CONFIG_SBNI_MULTILINE
	nl->master = dev;
	nl->link   = NULL;
#endif

	sbni_cards[ num++ ] = dev;
	return  dev;
}
/* -------------------------------------------------------------------------- */

#ifdef CONFIG_SBNI_MULTILINE

static netdev_tx_t
sbni_start_xmit( struct sk_buff  *skb,  struct net_device  *dev )
{
	struct net_device  *p;

	netif_stop_queue( dev );

	/* Look for an idle device in the list */
	for( p = dev;  p; ) {
		struct net_local  *nl = netdev_priv(p);

		spin_lock( &nl->lock );
		if( nl->tx_buf_p  ||  (nl->state & FL_LINE_DOWN) ) {
			p = nl->link;
			spin_unlock( &nl->lock );
		} else {
			/* Idle dev is found */
			prepare_to_send( skb, p );
			spin_unlock( &nl->lock );
			netif_start_queue( dev );
			return NETDEV_TX_OK;
		}
	}

	return NETDEV_TX_BUSY;
}

#else	/* CONFIG_SBNI_MULTILINE */

static netdev_tx_t
sbni_start_xmit( struct sk_buff  *skb,  struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	netif_stop_queue( dev );
	spin_lock( &nl->lock );
	prepare_to_send( skb, dev );
	spin_unlock( &nl->lock );
	return NETDEV_TX_OK;
}

#endif	/* CONFIG_SBNI_MULTILINE */
/* -------------------------------------------------------------------------- */

/* interrupt handler */

/*
 *	SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be
 * treated as two independent single-channel devices. Each channel looks
 * like an Ethernet interface, but the interrupt handler must be shared.
 * In fact, only the first ("master") channel's driver registers the
 * handler; its struct net_local keeps a pointer to the "slave" channel's
 * struct net_local and handles that channel's interrupts too.
 *	The dev of every successfully attached ISA SBNI board is linked
 * into a list. When the next board's driver is initialized, it scans this
 * list; if it finds a dev with the same irq and an ioaddr differing by 4,
 * it assumes that board to be the "master".
 */

static irqreturn_t
sbni_interrupt( int  irq,  void  *dev_id )
{
	struct net_device  *dev = dev_id;
	struct net_local   *nl  = netdev_priv(dev);
	int	repeat;

	spin_lock( &nl->lock );
	if( nl->second )
		spin_lock(&NET_LOCAL_LOCK(nl->second));

	do {
		repeat = 0;
		if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
			handle_channel( dev ),
			repeat = 1;
		if( nl->second  &&	/* second channel present */
		    (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
			handle_channel( nl->second ),
			repeat = 1;
	} while( repeat );

	if( nl->second )
		spin_unlock(&NET_LOCAL_LOCK(nl->second));
	spin_unlock( &nl->lock );
	return IRQ_HANDLED;
}
static void
handle_channel( struct net_device  *dev )
{
	struct net_local	*nl    = netdev_priv(dev);
	unsigned long		ioaddr = dev->base_addr;

	int  req_ans;
	unsigned char  csr0;

#ifdef CONFIG_SBNI_MULTILINE
	/* Lock the master device because we are going to change its local data */
	if( nl->state & FL_SLAVE )
		spin_lock(&NET_LOCAL_LOCK(nl->master));
#endif

	outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );

	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
	for(;;) {
		csr0 = inb( ioaddr + CSR0 );
		if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
			break;

		req_ans = !(nl->state & FL_PREV_OK);

		if( csr0 & RC_RDY )
			req_ans = recv_frame( dev );

		/*
		 * TR_RDY is always 1 here because we own the marker,
		 * and we set TR_REQ while interrupts were disabled
		 */
		csr0 = inb( ioaddr + CSR0 );
		if( !(csr0 & TR_RDY)  ||  (csr0 & RC_RDY) )
			netdev_err(dev, "internal error!\n");

		/* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
		if( req_ans  ||  nl->tx_frameno != 0 )
			send_frame( dev );
		else
			/* send the marker without any data */
			outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
	}

	outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );

#ifdef CONFIG_SBNI_MULTILINE
	if( nl->state & FL_SLAVE )
		spin_unlock(&NET_LOCAL_LOCK(nl->master));
#endif
}
/*
 * Routine returns 1 if it needs to acknowledge the received frame.
 * An empty frame received without errors won't be acknowledged.
 */

static int
recv_frame( struct net_device  *dev )
{
	struct net_local  *nl     = netdev_priv(dev);
	unsigned long	   ioaddr = dev->base_addr;

	u32  crc = CRC32_INITIAL;

	unsigned  framelen = 0, frameno, ack;
	unsigned  is_first, frame_ok = 0;

	if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
		frame_ok = framelen > 4
			?  upload_data( dev, framelen, frameno, is_first, crc )
			:  skip_tail( ioaddr, framelen, crc );
		if( frame_ok )
			interpret_ack( dev, ack );
	}

	outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
	if( frame_ok ) {
		nl->state |= FL_PREV_OK;
		if( framelen > 4 )
			nl->in_stats.all_rx_number++;
	} else
		nl->state &= ~FL_PREV_OK,
		change_level( dev ),
		nl->in_stats.all_rx_number++,
		nl->in_stats.bad_rx_number++;

	return  !frame_ok  ||  framelen > 4;
}
static void
send_frame( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	u32  crc = CRC32_INITIAL;

	if( nl->state & FL_NEED_RESEND ) {

		/* if the frame was sent but not ACKed - resend it */
		if( nl->trans_errors ) {
			--nl->trans_errors;
			if( nl->framelen != 0 )
				nl->in_stats.resend_tx_number++;
		} else {
			/* cannot xmit even after many attempts */
#ifdef CONFIG_SBNI_MULTILINE
			if( (nl->state & FL_SLAVE)  ||  nl->link )
#endif
				nl->state |= FL_LINE_DOWN;
			drop_xmit_queue( dev );
			goto  do_send;
		}
	} else
		nl->trans_errors = TR_ERROR_COUNT;

	send_frame_header( dev, &crc );
	nl->state |= FL_NEED_RESEND;
	/*
	 * FL_NEED_RESEND will be cleared after the ACK, but if an empty
	 * frame was sent, it is cleared in prepare_to_send for the next frame
	 */

	if( nl->framelen ) {
		download_data( dev, &crc );
		nl->in_stats.all_tx_number++;
		nl->state |= FL_WAIT_ACK;
	}

	outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );

do_send:
	outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );

	if( nl->tx_frameno )
		/* the next frame exists - request the card to send it */
		outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
		      dev->base_addr + CSR0 );
}
/*
 * Write the frame data into the adapter's buffer memory, and calculate CRC.
 * Do padding if necessary.
 */

static void
download_data( struct net_device  *dev,  u32  *crc_p )
{
	struct net_local  *nl  = netdev_priv(dev);
	struct sk_buff    *skb = nl->tx_buf_p;

	unsigned  len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);

	outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
	*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );

	/* if the packet is too short, write some more bytes to pad it out */
	for( len = nl->framelen - len;  len--; )
		outb( 0, dev->base_addr + DAT ),
		*crc_p = CRC32( 0, *crc_p );
}
static int
upload_data( struct net_device  *dev,  unsigned  framelen,  unsigned  frameno,
	     unsigned  is_first,  u32  crc )
{
	struct net_local  *nl = netdev_priv(dev);

	int  frame_ok;

	if( is_first )
		nl->wait_frameno = frameno,
		nl->inppos = 0;

	if( nl->wait_frameno == frameno ) {

		if( nl->inppos + framelen  <=  ETHER_MAX_LEN )
			frame_ok = append_frame_to_pkt( dev, framelen, crc );

		/*
		 * if the CRC is right but framelen is incorrect then the
		 * transmitter hit an error... drop the entire packet
		 */
		else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
			 != 0 )
			nl->wait_frameno = 0,
			nl->inppos = 0,
#ifdef CONFIG_SBNI_MULTILINE
			nl->master->stats.rx_errors++,
			nl->master->stats.rx_missed_errors++;
#else
			dev->stats.rx_errors++,
			dev->stats.rx_missed_errors++;
#endif
			/* now skip all frames until is_first != 0 */
	} else
		frame_ok = skip_tail( dev->base_addr, framelen, crc );

	if( is_first  &&  !frame_ok )
		/*
		 * The frame has been broken, but we had already stored
		 * is_first... Drop the entire packet.
		 */
		nl->wait_frameno = 0,
#ifdef CONFIG_SBNI_MULTILINE
		nl->master->stats.rx_errors++,
		nl->master->stats.rx_crc_errors++;
#else
		dev->stats.rx_errors++,
		dev->stats.rx_crc_errors++;
#endif

	return  frame_ok;
}
static inline void
send_complete( struct net_device *dev )
{
	struct net_local  *nl = netdev_priv(dev);

#ifdef CONFIG_SBNI_MULTILINE
	nl->master->stats.tx_packets++;
	nl->master->stats.tx_bytes += nl->tx_buf_p->len;
#else
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += nl->tx_buf_p->len;
#endif
	dev_consume_skb_irq(nl->tx_buf_p);

	nl->tx_buf_p = NULL;

	nl->outpos = 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
	nl->framelen = 0;
}

static void
interpret_ack( struct net_device  *dev,  unsigned  ack )
{
	struct net_local  *nl = netdev_priv(dev);

	if( ack == FRAME_SENT_OK ) {
		nl->state &= ~FL_NEED_RESEND;

		if( nl->state & FL_WAIT_ACK ) {
			nl->outpos += nl->framelen;

			if( --nl->tx_frameno )
				nl->framelen = min_t(unsigned int,
						     nl->maxframe,
						     nl->tx_buf_p->len - nl->outpos);
			else
				send_complete( dev ),
#ifdef CONFIG_SBNI_MULTILINE
				netif_wake_queue( nl->master );
#else
				netif_wake_queue( dev );
#endif
		}
	}

	nl->state &= ~FL_WAIT_ACK;
}
/*
 * Glue the received frame onto the previous fragments of the packet.
 * Indicate the packet once the last frame has been accepted.
 */

static int
append_frame_to_pkt( struct net_device  *dev,  unsigned  framelen,  u32  crc )
{
	struct net_local  *nl = netdev_priv(dev);

	u8  *p;

	if( nl->inppos + framelen  >  ETHER_MAX_LEN )
		return  0;

	if( !nl->rx_buf_p  &&  !(nl->rx_buf_p = get_rx_buf( dev )) )
		return  0;

	p = nl->rx_buf_p->data + nl->inppos;
	insb( dev->base_addr + DAT, p, framelen );
	if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
		return  0;

	nl->inppos += framelen - 4;
	if( --nl->wait_frameno == 0 )		/* last frame received */
		indicate_pkt( dev );

	return  1;
}
/*
 * Prepare to start output on the adapter.
 * The transmitter will actually be activated when the marker has been accepted.
 */

static void
prepare_to_send( struct sk_buff  *skb,  struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	unsigned int  len;

	/* nl->tx_buf_p == NULL here! */
	if( nl->tx_buf_p )
		netdev_err(dev, "memory leak!\n");

	nl->outpos = 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);

	len = skb->len;
	if( len < SBNI_MIN_LEN )
		len = SBNI_MIN_LEN;

	nl->tx_buf_p	= skb;
	nl->tx_frameno	= DIV_ROUND_UP(len, nl->maxframe);
	nl->framelen	= len < nl->maxframe  ?  len  :  nl->maxframe;

	outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
	netif_trans_update(nl->master);
#else
	netif_trans_update(dev);
#endif
}
static void
drop_xmit_queue( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	if( nl->tx_buf_p )
		dev_kfree_skb_any( nl->tx_buf_p ),
		nl->tx_buf_p = NULL,
#ifdef CONFIG_SBNI_MULTILINE
		nl->master->stats.tx_errors++,
		nl->master->stats.tx_carrier_errors++;
#else
		dev->stats.tx_errors++,
		dev->stats.tx_carrier_errors++;
#endif

	nl->tx_frameno	= 0;
	nl->framelen	= 0;
	nl->outpos	= 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
#ifdef CONFIG_SBNI_MULTILINE
	netif_start_queue( nl->master );
	netif_trans_update(nl->master);
#else
	netif_start_queue( dev );
	netif_trans_update(dev);
#endif
}
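/*
 * On-wire frame layout, as produced by send_frame_header()/download_data()
 * below and parsed by check_fhdr(): an SBNI_SIG signature byte, two bytes
 * carrying the length field together with the FRAME_* flag bits, a frame
 * number byte, a reserved byte, the payload, and a trailing 32-bit CRC.
 */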
static void
send_frame_header( struct net_device  *dev,  u32  *crc_p )
{
	struct net_local  *nl = netdev_priv(dev);

	u32  crc = *crc_p;
	u32  len_field = nl->framelen + 6;	/* CRC + frameno + reserved */
	u8   value;

	if( nl->state & FL_NEED_RESEND )
		len_field |= FRAME_RETRY;	/* non-first attempt... */

	if( nl->outpos == 0 )
		len_field |= FRAME_FIRST;

	len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
	outb( SBNI_SIG, dev->base_addr + DAT );

	value = (u8) len_field;
	outb( value, dev->base_addr + DAT );
	crc = CRC32( value, crc );
	value = (u8) (len_field >> 8);
	outb( value, dev->base_addr + DAT );
	crc = CRC32( value, crc );

	outb( nl->tx_frameno, dev->base_addr + DAT );
	crc = CRC32( nl->tx_frameno, crc );
	outb( 0, dev->base_addr + DAT );
	crc = CRC32( 0, crc );
	*crc_p = crc;
}
/*
 * If the frame tail isn't needed (incorrect frame number or received twice),
 * it won't be stored, but its CRC is still calculated.
 */

static int
skip_tail( unsigned int  ioaddr,  unsigned int  tail_len,  u32  crc )
{
	while( tail_len-- )
		crc = CRC32( inb( ioaddr + DAT ), crc );

	return  crc == CRC32_REMAINDER;
}
/*
 * Preliminarily checks whether the frame header is correct, calculates its
 * CRC and splits it into simple fields.
 */

static int
check_fhdr( u32  ioaddr,  u32  *framelen,  u32  *frameno,  u32  *ack,
	    u32  *is_first,  u32  *crc_p )
{
	u32  crc = *crc_p;
	u8   value;

	if( inb( ioaddr + DAT ) != SBNI_SIG )
		return  0;

	value = inb( ioaddr + DAT );
	*framelen = (u32)value;
	crc = CRC32( value, crc );
	value = inb( ioaddr + DAT );
	*framelen |= ((u32)value) << 8;
	crc = CRC32( value, crc );

	*ack = *framelen & FRAME_ACK_MASK;
	*is_first = (*framelen & FRAME_FIRST) != 0;

	if( (*framelen &= FRAME_LEN_MASK) < 6 ||
	    *framelen > SBNI_MAX_FRAME - 3 )
		return  0;

	value = inb( ioaddr + DAT );
	*frameno = (u32)value;
	crc = CRC32( value, crc );

	crc = CRC32( inb( ioaddr + DAT ), crc );	/* reserved byte */
	*framelen -= 2;

	*crc_p = crc;
	return  1;
}
static struct sk_buff *
get_rx_buf( struct net_device  *dev )
{
	/* +2 is to compensate for the alignment fixup below */
	struct sk_buff  *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
	if( !skb )
		return  NULL;

	skb_reserve( skb, 2 );		/* Align IP on longword boundaries */
	return  skb;
}

static void
indicate_pkt( struct net_device  *dev )
{
	struct net_local  *nl  = netdev_priv(dev);
	struct sk_buff    *skb = nl->rx_buf_p;

	skb_put( skb, nl->inppos );

#ifdef CONFIG_SBNI_MULTILINE
	skb->protocol = eth_type_trans( skb, nl->master );
	netif_rx( skb );
	++nl->master->stats.rx_packets;
	nl->master->stats.rx_bytes += nl->inppos;
#else
	skb->protocol = eth_type_trans( skb, dev );
	netif_rx( skb );
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += nl->inppos;
#endif
	nl->rx_buf_p = NULL;	/* protocol driver will clear this sk_buff */
}
/* -------------------------------------------------------------------------- */

/*
 * Routine periodically checks wire activity and regenerates the marker if
 * the connection has been inactive for too long.
 */

static void
sbni_watchdog(struct timer_list *t)
{
	struct net_local	*nl  = from_timer(nl, t, watchdog);
	struct net_device	*dev = nl->watchdog_dev;
	unsigned long		flags;
	unsigned char		csr0;

	spin_lock_irqsave( &nl->lock, flags );

	csr0 = inb( dev->base_addr + CSR0 );
	if( csr0 & RC_CHK ) {

		if( nl->timer_ticks ) {
			if( csr0 & (RC_RDY | BU_EMP) )
				/* receiving is not active */
				nl->timer_ticks--;
		} else {
			nl->in_stats.timeout_number++;
			if( nl->delta_rxl )
				timeout_change_level( dev );

			outb( *(u_char *)&nl->csr1 | PR_RES,
			      dev->base_addr + CSR1 );
			csr0 = inb( dev->base_addr + CSR0 );
		}
	} else
		nl->state &= ~FL_LINE_DOWN;

	outb( csr0 | RC_CHK, dev->base_addr + CSR0 );

	mod_timer(t, jiffies + SBNI_TIMEOUT);

	spin_unlock_irqrestore( &nl->lock, flags );
}
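/*
 * rxl_tab maps the logical receive-level index (0..15, stepped up and down
 * by change_level() below) to the raw RxL code programmed into CSR1.
 */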
static unsigned char  rxl_tab[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
	0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
};

#define SIZE_OF_TIMEOUT_RXL_TAB 4
static unsigned char  timeout_rxl_tab[] = {
	0x03, 0x05, 0x08, 0x0b
};
/* -------------------------------------------------------------------------- */

static void
card_start( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
	nl->state |= FL_PREV_OK;

	nl->inppos = nl->outpos = 0;
	nl->wait_frameno = 0;
	nl->tx_frameno	 = 0;
	nl->framelen	 = 0;

	outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
	outb( EN_INT, dev->base_addr + CSR0 );
}
/* -------------------------------------------------------------------------- */

/* Receive level auto-selection */

static void
change_level( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	if( nl->delta_rxl == 0 )	/* do not auto-negotiate RxL */
		return;

	if( nl->cur_rxl_index == 0 )
		nl->delta_rxl = 1;
	else if( nl->cur_rxl_index == 15 )
		nl->delta_rxl = -1;
	else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
		nl->delta_rxl = -nl->delta_rxl;

	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
	inb( dev->base_addr + CSR0 );	/* needed for PCI cards */
	outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );

	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
	nl->cur_rxl_rcvd  = 0;
}

static void
timeout_change_level( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
	if( ++nl->timeout_rxl >= 4 )
		nl->timeout_rxl = 0;

	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
	inb( dev->base_addr + CSR0 );
	outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );

	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
	nl->cur_rxl_rcvd  = 0;
}
/* -------------------------------------------------------------------------- */

/*
 *	Open/initialize the board.
 */

static int
sbni_open( struct net_device  *dev )
{
	struct net_local	*nl = netdev_priv(dev);
	struct timer_list	*w  = &nl->watchdog;

	/*
	 * For dual ISA adapters in "common irq" mode, we have to determine
	 * whether the primary or the secondary channel is being initialized,
	 * and install the irq handler only in the first case.
	 */
	if( dev->base_addr < 0x400 ) {		/* ISA only */
		struct net_device  **p = sbni_cards;
		for( ;  *p  &&  p < sbni_cards + SBNI_MAX_NUM_CARDS;  ++p )
			if( (*p)->irq == dev->irq &&
			    ((*p)->base_addr == dev->base_addr + 4 ||
			     (*p)->base_addr == dev->base_addr - 4) &&
			    (*p)->flags & IFF_UP ) {

				((struct net_local *) (netdev_priv(*p)))
					->second = dev;
				netdev_notice(dev, "using shared irq with %s\n",
					      (*p)->name);
				nl->state |= FL_SECONDARY;
				goto  handler_attached;
			}
	}

	if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
		return  -EAGAIN;
	}

handler_attached:

	spin_lock( &nl->lock );
	memset( &dev->stats, 0, sizeof(struct net_device_stats) );
	memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );

	card_start( dev );

	netif_start_queue( dev );

	/* set up the watchdog timer */
	nl->watchdog_dev = dev;
	timer_setup(w, sbni_watchdog, 0);
	w->expires = jiffies + SBNI_TIMEOUT;
	add_timer( w );

	spin_unlock( &nl->lock );
	return 0;
}
static int
sbni_close( struct net_device  *dev )
{
	struct net_local  *nl = netdev_priv(dev);

	if( nl->second  &&  nl->second->flags & IFF_UP ) {
		netdev_notice(dev, "Secondary channel (%s) is active!\n",
			      nl->second->name);
		return  -EBUSY;
	}

#ifdef CONFIG_SBNI_MULTILINE
	if( nl->state & FL_SLAVE )
		emancipate( dev );
	else
		while( nl->link )	/* it's master device! */
			emancipate( nl->link );
#endif

	spin_lock( &nl->lock );

	nl->second = NULL;
	drop_xmit_queue( dev );
	netif_stop_queue( dev );

	del_timer( &nl->watchdog );

	outb( 0, dev->base_addr + CSR0 );

	if( !(nl->state & FL_SECONDARY) )
		free_irq( dev->irq, dev );
	nl->state &= FL_SECONDARY;

	spin_unlock( &nl->lock );
	return 0;
}
/*
	Valid combinations in CSR0 (for probing):

	VALID_DECODER	0000,0011,1011,1010

					; 0  ; -
				TR_REQ	; 1  ; +
			TR_RDY		; 2  ; -
			TR_RDY	TR_REQ	; 3  ; +
		BU_EMP			; 4  ; +
		BU_EMP		TR_REQ	; 5  ; +
		BU_EMP	TR_RDY		; 6  ; -
		BU_EMP	TR_RDY	TR_REQ	; 7  ; +
	RC_RDY				; 8  ; +
	RC_RDY			TR_REQ	; 9  ; +
	RC_RDY		TR_RDY		; 10 ; -
	RC_RDY		TR_RDY	TR_REQ	; 11 ; -
	RC_RDY	BU_EMP			; 12 ; -
	RC_RDY	BU_EMP		TR_REQ	; 13 ; -
	RC_RDY	BU_EMP	TR_RDY		; 14 ; -
	RC_RDY	BU_EMP	TR_RDY	TR_REQ	; 15 ; -
*/

#define VALID_DECODER	(2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
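/*
 * Each set bit above selects a "+" row of the table: bit n means that the
 * value n in the upper nibble of the adjusted CSR0 (see sbni_card_probe()
 * below) is a combination a live card can legitimately show.
 */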
static int
sbni_card_probe( unsigned long  ioaddr )
{
	unsigned char  csr0;

	csr0 = inb( ioaddr + CSR0 );
	if( csr0 != 0xff  &&  csr0 != 0x00 ) {
		csr0 &= ~EN_INT;
		if( csr0 & BU_EMP )
			csr0 |= EN_INT;

		if( VALID_DECODER & (1 << (csr0 >> 4)) )
			return  0;
	}

	return  -ENODEV;
}
/* -------------------------------------------------------------------------- */

static int
sbni_ioctl( struct net_device  *dev,  struct ifreq  *ifr,  int  cmd )
{
	struct net_local  *nl = netdev_priv(dev);

	struct sbni_flags  flags;
	int  error = 0;

#ifdef CONFIG_SBNI_MULTILINE
	struct net_device  *slave_dev;
	char  slave_name[ 8 ];
#endif

	switch( cmd ) {
	case  SIOCDEVGETINSTATS :
		if (copy_to_user( ifr->ifr_data, &nl->in_stats,
				  sizeof(struct sbni_in_stats) ))
			error = -EFAULT;
		break;

	case  SIOCDEVRESINSTATS :
		if (!capable(CAP_NET_ADMIN))
			return  -EPERM;
		memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
		break;

	case  SIOCDEVGHWSTATE :
		flags.mac_addr	= *(u32 *)(dev->dev_addr + 3);
		flags.rate	= nl->csr1.rate;
		flags.slow_mode	= (nl->state & FL_SLOW_MODE) != 0;
		flags.rxl	= nl->cur_rxl_index;
		flags.fixed_rxl	= nl->delta_rxl == 0;

		if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
			error = -EFAULT;
		break;

	case  SIOCDEVSHWSTATE :
		if (!capable(CAP_NET_ADMIN))
			return  -EPERM;

		spin_lock( &nl->lock );
		flags = *(struct sbni_flags*) &ifr->ifr_ifru;
		if( flags.fixed_rxl )
			nl->delta_rxl = 0,
			nl->cur_rxl_index = flags.rxl;
		else
			nl->delta_rxl = DEF_RXL_DELTA,
			nl->cur_rxl_index = DEF_RXL;

		nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
		nl->csr1.rate = flags.rate;
		outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
		spin_unlock( &nl->lock );
		break;

#ifdef CONFIG_SBNI_MULTILINE

	case  SIOCDEVENSLAVE :
		if (!capable(CAP_NET_ADMIN))
			return  -EPERM;

		if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
			return -EFAULT;
		slave_dev = dev_get_by_name(&init_net, slave_name );
		if( !slave_dev  ||  !(slave_dev->flags & IFF_UP) ) {
			netdev_err(dev, "trying to enslave non-active device %s\n",
				   slave_name);
			if (slave_dev)
				dev_put(slave_dev);
			return  -EPERM;
		}

		return  enslave( dev, slave_dev );

	case  SIOCDEVEMANSIPATE :
		if (!capable(CAP_NET_ADMIN))
			return  -EPERM;

		return  emancipate( dev );

#endif	/* CONFIG_SBNI_MULTILINE */

	default :
		return  -EOPNOTSUPP;
	}

	return  error;
}
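/*
 * MultiLine (balancing) support: several SBNI channels can be gathered under
 * one "master" device. enslave() links a slave device into the master's list
 * and accounts its traffic in the master's statistics; emancipate() removes
 * it again. sbni_start_xmit() above picks the first idle link in that list.
 */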
#ifdef CONFIG_SBNI_MULTILINE

static int
enslave( struct net_device  *dev,  struct net_device  *slave_dev )
{
	struct net_local  *nl  = netdev_priv(dev);
	struct net_local  *snl = netdev_priv(slave_dev);

	if( nl->state & FL_SLAVE )	/* This isn't a master or free device */
		return  -EBUSY;

	if( snl->state & FL_SLAVE )	/* That device is already enslaved */
		return  -EBUSY;

	spin_lock( &nl->lock );
	spin_lock( &snl->lock );

	/* append to list */
	snl->link = nl->link;
	nl->link  = slave_dev;
	snl->master = dev;
	snl->state |= FL_SLAVE;

	/* Summary statistics of MultiLine operation are stored
	   in the master's counters */
	memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
	netif_stop_queue( slave_dev );
	netif_wake_queue( dev );	/* Now we are able to transmit */

	spin_unlock( &snl->lock );
	spin_unlock( &nl->lock );
	netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
	return  0;
}

static int
emancipate( struct net_device  *dev )
{
	struct net_local   *snl = netdev_priv(dev);
	struct net_device  *p   = snl->master;
	struct net_local   *nl  = netdev_priv(p);

	if( !(snl->state & FL_SLAVE) )
		return  -EINVAL;

	spin_lock( &nl->lock );
	spin_lock( &snl->lock );
	drop_xmit_queue( dev );

	/* exclude from list */
	for(;;) {	/* must be in the list */
		struct net_local  *t = netdev_priv(p);
		if( t->link == dev ) {
			t->link = snl->link;
			break;
		}
		p = t->link;
	}

	snl->link = NULL;
	snl->master = dev;
	snl->state &= ~FL_SLAVE;

	netif_start_queue( dev );

	spin_unlock( &snl->lock );
	spin_unlock( &nl->lock );

	dev_put( dev );
	return  0;
}

#endif
static void
set_multicast_list( struct net_device  *dev )
{
	return;		/* sbni always operates in promiscuous mode */
}
#ifdef MODULE
module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param_array(baud, int, NULL, 0);
module_param_array(rxl, int, NULL, 0);
module_param_array(mac, int, NULL, 0);
module_param(skip_pci_probe, bool, 0);

MODULE_LICENSE("GPL");
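/*
 * For example (hypothetical values), an ISA card at 0x210 on IRQ 5 could be
 * loaded with something like:
 *	modprobe sbni io=0x210 irq=5
 */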
int __init init_module( void )
{
	struct net_device  *dev;
	int err;

	while( num < SBNI_MAX_NUM_CARDS ) {
		dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
				   NET_NAME_UNKNOWN, sbni_devsetup);
		if( !dev)
			break;

		sprintf( dev->name, "sbni%d", num );

		err = sbni_init(dev);
		if (err) {
			free_netdev(dev);
			break;
		}

		if( register_netdev( dev ) ) {
			release_region( dev->base_addr, SBNI_IO_EXTENT );
			free_netdev( dev );
			break;
		}
	}

	return  *sbni_cards  ?  0  :  -ENODEV;
}

void
cleanup_module(void)
{
	int i;

	for (i = 0;  i < SBNI_MAX_NUM_CARDS;  ++i) {
		struct net_device *dev = sbni_cards[i];
		if (dev != NULL) {
			unregister_netdev(dev);
			release_region(dev->base_addr, SBNI_IO_EXTENT);
			free_netdev(dev);
		}
	}
}

#else	/* MODULE */
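/*
 * Boot-time configuration, as parsed by sbni_setup() below:
 *	sbni=(io[,irq[,baud[,rxl[,mac]]]][;...])
 * Parameters for one device are comma-separated (trailing ones may be
 * omitted), devices are separated by ';'. The parameter names above are
 * placeholders for numeric values, not recommended settings.
 */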
static int __init
sbni_setup( char  *p )
{
	int  n, parm;

	if( *p++ != '(' )
		goto  bad_param;

	for( n = 0, parm = 0;  *p  &&  n < 8; ) {
		(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
		if( !*p  ||  *p == ')' )
			return 1;
		if( *p == ';' )
			++p, ++n, parm = 0;
		else if( *p++ != ',' )
			break;
		else if( ++parm >= 5 )
			break;
	}
bad_param:
	pr_err("Error in sbni kernel parameter!\n");
	return 0;
}

__setup( "sbni=", sbni_setup );

#endif	/* MODULE */
/* -------------------------------------------------------------------------- */

static u32
calc_crc32( u32  crc,  u8  *p,  u32  len )
{
	while( len-- )
		crc = CRC32( *p++, crc );

	return  crc;
}
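/*
 * Precalculated CRC32 table, presumably consumed by the CRC32() macro in
 * sbni.h; calc_crc32() above folds a buffer through that macro one byte
 * at a time.
 */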
static u32  crc32tab[] __attribute__ ((aligned(8))) = {
	0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
	0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
	0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
	0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
	0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
	0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
	0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
	0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
	0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
	0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
	0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
	0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
	0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
	0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
	0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
	0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
	0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
	0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
	0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
	0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
	0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
	0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
	0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
	0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
	0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
	0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
	0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
	0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
	0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
	0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
	0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
	0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
	0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
	0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
	0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
	0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
	0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
	0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
	0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
	0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
	0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
	0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
	0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
	0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
	0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
	0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
	0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
	0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
	0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
	0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
	0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
	0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
	0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
	0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
	0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
	0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
	0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
	0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
	0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
	0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
	0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
	0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
	0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
	0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
};