  1. /*
  2. * lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter
  3. *
  4. * Written By: Mike Sullivan, IBM Corporation
  5. *
  6. * Copyright (C) 1999 IBM Corporation
  7. *
  8. * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
  9. * chipset.
  10. *
  11. * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
  12. * chipsets) written by:
  13. * 1999 Peter De Schrijver All Rights Reserved
  14. * 1999 Mike Phillips (phillim@amtrak.com)
  15. *
  16. * Base Driver Skeleton:
  17. * Written 1993-94 by Donald Becker.
  18. *
  19. * Copyright 1993 United States Government as represented by the
  20. * Director, National Security Agency.
  21. *
  22. * This program is free software; you can redistribute it and/or modify
  23. * it under the terms of the GNU General Public License as published by
  24. * the Free Software Foundation; either version 2 of the License, or
  25. * (at your option) any later version.
  26. *
  27. * This program is distributed in the hope that it will be useful,
  28. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  29. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  30. * GNU General Public License for more details.
  31. *
  32. * NO WARRANTY
  33. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  34. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  35. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  36. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  37. * solely responsible for determining the appropriateness of using and
  38. * distributing the Program and assumes all risks associated with its
  39. * exercise of rights under this Agreement, including but not limited to
  40. * the risks and costs of program errors, damage to or loss of data,
  41. * programs or equipment, and unavailability or interruption of operations.
  42. *
  43. * DISCLAIMER OF LIABILITY
  44. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  45. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  46. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  47. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  48. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  49. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  50. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  51. *
  52. * You should have received a copy of the GNU General Public License
  53. * along with this program; if not, write to the Free Software
  54. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  55. *
  56. *
  57. * 12/10/99 - Alpha Release 0.1.0
  58. * First release to the public
  59. * 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing
  60. * malloc free checks, reviewed code. <alan@redhat.com>
  61. * 03/13/00 - Added spinlocks for smp
  62. * 03/08/01 - Added support for module_init() and module_exit()
  63. * 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue
  64. * calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com>
  65. * 11/05/01 - Restructured the interrupt function, added delays, reduced the
  66. * the number of TX descriptors to 1, which together can prevent
  67. * the card from locking up the box - <yoder1@us.ibm.com>
  68. * 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
  69. * 11/13/02 - Removed free_irq calls which could cause a hang, added
  70. * netif_carrier_{on|off} - <yoder1@us.ibm.com>
  71. *
  72. * To Do:
  73. *
  74. *
  75. * If Problems do Occur
  76. * Most problems can be rectified by either closing and opening the interface
  77. * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
  78. * if compiled into the kernel).
  79. */
  80. /* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */
  81. #define STREAMER_DEBUG 0
  82. #define STREAMER_DEBUG_PACKETS 0
  83. /* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel.
  84. * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the
  85. * kernel.
  86. * Intended to be used to create a ring-error reporting network module
  87. * i.e. it will give you the source address of beaconers on the ring
  88. */
  89. #define STREAMER_NETWORK_MONITOR 0
  90. /* #define CONFIG_PROC_FS */
  91. /*
  92. * Allow or disallow ioctl's for debugging
  93. */
  94. #define STREAMER_IOCTL 0
  95. #include <linux/module.h>
  96. #include <linux/kernel.h>
  97. #include <linux/errno.h>
  98. #include <linux/timer.h>
  99. #include <linux/in.h>
  100. #include <linux/ioport.h>
  101. #include <linux/string.h>
  102. #include <linux/proc_fs.h>
  103. #include <linux/ptrace.h>
  104. #include <linux/skbuff.h>
  105. #include <linux/interrupt.h>
  106. #include <linux/delay.h>
  107. #include <linux/netdevice.h>
  108. #include <linux/trdevice.h>
  109. #include <linux/stddef.h>
  110. #include <linux/init.h>
  111. #include <linux/pci.h>
  112. #include <linux/dma-mapping.h>
  113. #include <linux/spinlock.h>
  114. #include <linux/bitops.h>
  115. #include <linux/jiffies.h>
  116. #include <linux/slab.h>
  117. #include <net/net_namespace.h>
  118. #include <net/checksum.h>
  119. #include <asm/io.h>
  120. #include <asm/system.h>
  121. #include "lanstreamer.h"
  122. #if (BITS_PER_LONG == 64)
  123. #error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int
  124. #endif
  125. /* I've got to put some intelligence into the version number so that Peter and I know
  126. * which version of the code somebody has got.
  127. * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
  128. * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
  129. *
  130. * Official releases will only have an a.b.c version number format.
  131. */
/* Version banner printed once per adapter at probe time (streamer_init_one). */
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
                        " v0.5.3 11/13/02 - Kent Yoder";

/* PCI IDs this driver binds to: any revision of the IBM token-ring function. */
static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
	{}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci,streamer_pci_tbl);
  139. static char *open_maj_error[] = {
  140. "No error", "Lobe Media Test", "Physical Insertion",
  141. "Address Verification", "Neighbor Notification (Ring Poll)",
  142. "Request Parameters", "FDX Registration Request",
  143. "FDX Lobe Media Test", "FDX Duplicate Address Check",
  144. "Unknown stage"
  145. };
  146. static char *open_min_error[] = {
  147. "No error", "Function Failure", "Signal Lost", "Wire Fault",
  148. "Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
  149. "Duplicate Node Address", "Request Parameters", "Remove Received",
  150. "Reserved", "Reserved", "No Monitor Detected for RPL",
  151. "Monitor Contention failer for RPL", "FDX Protocol Error"
  152. };
/* Module parameters — one entry per adapter, indexed by probe order (card_no) */

/* Ring Speed 0,4,16
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */
static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, };
module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size; values outside 100..18000 fall back to PKT_BUF_SZ
 * (range check is done in streamer_init_one) */
static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, };
module_param_array(pkt_buf_sz, int, NULL, 0);

/* Message Level: non-zero enables the informational printk()s */
static int message_level[STREAMER_MAX_ADAPTERS] = { 1, };
module_param_array(message_level, int, NULL, 0);
/* Forward declarations of the driver entry points */
#if STREAMER_IOCTL
static int streamer_ioctl(struct net_device *, struct ifreq *, int);
#endif

static int streamer_reset(struct net_device *dev);
static int streamer_open(struct net_device *dev);
static netdev_tx_t streamer_xmit(struct sk_buff *skb,
				 struct net_device *dev);
static int streamer_close(struct net_device *dev);
static void streamer_set_rx_mode(struct net_device *dev);
static irqreturn_t streamer_interrupt(int irq, void *dev_id);
static int streamer_set_mac_address(struct net_device *dev, void *addr);
static void streamer_arb_cmd(struct net_device *dev);
static int streamer_change_mtu(struct net_device *dev, int mtu);
static void streamer_srb_bh(struct net_device *dev);
static void streamer_asb_bh(struct net_device *dev);

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
static int streamer_proc_info(char *buffer, char **start, off_t offset,
			      int length, int *eof, void *data);
static int sprintf_info(char *buffer, struct net_device *dev);
/* Head of the singly linked adapter list backing /proc/net/streamer_tr;
 * adapters are pushed in streamer_init_one and unlinked in
 * streamer_remove_one. */
struct streamer_private *dev_streamer=NULL;
#endif
#endif
/* net_device callbacks wiring the stack into this driver */
static const struct net_device_ops streamer_netdev_ops = {
	.ndo_open = streamer_open,
	.ndo_stop = streamer_close,
	.ndo_start_xmit = streamer_xmit,
	.ndo_change_mtu = streamer_change_mtu,
#if STREAMER_IOCTL
	.ndo_do_ioctl = streamer_ioctl,	/* debugging hook, normally compiled out */
#endif
	.ndo_set_multicast_list = streamer_set_rx_mode,
	.ndo_set_mac_address = streamer_set_mac_address,
};
  205. static int __devinit streamer_init_one(struct pci_dev *pdev,
  206. const struct pci_device_id *ent)
  207. {
  208. struct net_device *dev;
  209. struct streamer_private *streamer_priv;
  210. unsigned long pio_start, pio_end, pio_flags, pio_len;
  211. unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
  212. int rc = 0;
  213. static int card_no=-1;
  214. u16 pcr;
  215. #if STREAMER_DEBUG
  216. printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev);
  217. #endif
  218. card_no++;
  219. dev = alloc_trdev(sizeof(*streamer_priv));
  220. if (dev==NULL) {
  221. printk(KERN_ERR "lanstreamer: out of memory.\n");
  222. return -ENOMEM;
  223. }
  224. streamer_priv = netdev_priv(dev);
  225. #if STREAMER_NETWORK_MONITOR
  226. #ifdef CONFIG_PROC_FS
  227. if (!dev_streamer)
  228. create_proc_read_entry("streamer_tr", 0, init_net.proc_net,
  229. streamer_proc_info, NULL);
  230. streamer_priv->next = dev_streamer;
  231. dev_streamer = streamer_priv;
  232. #endif
  233. #endif
  234. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  235. if (rc) {
  236. printk(KERN_ERR "%s: No suitable PCI mapping available.\n",
  237. dev->name);
  238. rc = -ENODEV;
  239. goto err_out;
  240. }
  241. rc = pci_enable_device(pdev);
  242. if (rc) {
  243. printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
  244. rc=-EIO;
  245. goto err_out;
  246. }
  247. pci_set_master(pdev);
  248. rc = pci_set_mwi(pdev);
  249. if (rc) {
  250. printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n");
  251. goto err_out_disable;
  252. }
  253. pio_start = pci_resource_start(pdev, 0);
  254. pio_end = pci_resource_end(pdev, 0);
  255. pio_flags = pci_resource_flags(pdev, 0);
  256. pio_len = pci_resource_len(pdev, 0);
  257. mmio_start = pci_resource_start(pdev, 1);
  258. mmio_end = pci_resource_end(pdev, 1);
  259. mmio_flags = pci_resource_flags(pdev, 1);
  260. mmio_len = pci_resource_len(pdev, 1);
  261. #if STREAMER_DEBUG
  262. printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n",
  263. pio_start, pio_end, pio_len, pio_flags);
  264. printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n",
  265. mmio_start, mmio_end, mmio_flags, mmio_len);
  266. #endif
  267. if (!request_region(pio_start, pio_len, "lanstreamer")) {
  268. printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n",
  269. pio_start);
  270. rc= -EBUSY;
  271. goto err_out_mwi;
  272. }
  273. if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) {
  274. printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n",
  275. mmio_start);
  276. rc= -EBUSY;
  277. goto err_out_free_pio;
  278. }
  279. streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len);
  280. if (streamer_priv->streamer_mmio == NULL) {
  281. printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n",
  282. mmio_start);
  283. rc= -EIO;
  284. goto err_out_free_mmio;
  285. }
  286. init_waitqueue_head(&streamer_priv->srb_wait);
  287. init_waitqueue_head(&streamer_priv->trb_wait);
  288. dev->netdev_ops = &streamer_netdev_ops;
  289. dev->irq = pdev->irq;
  290. dev->base_addr=pio_start;
  291. SET_NETDEV_DEV(dev, &pdev->dev);
  292. streamer_priv->streamer_card_name = (char *)pdev->resource[0].name;
  293. streamer_priv->pci_dev = pdev;
  294. if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
  295. streamer_priv->pkt_buf_sz = PKT_BUF_SZ;
  296. else
  297. streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no];
  298. streamer_priv->streamer_ring_speed = ringspeed[card_no];
  299. streamer_priv->streamer_message_level = message_level[card_no];
  300. pci_set_drvdata(pdev, dev);
  301. spin_lock_init(&streamer_priv->streamer_lock);
  302. pci_read_config_word (pdev, PCI_COMMAND, &pcr);
  303. pcr |= PCI_COMMAND_SERR;
  304. pci_write_config_word (pdev, PCI_COMMAND, pcr);
  305. printk("%s\n", version);
  306. printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
  307. streamer_priv->streamer_card_name,
  308. (unsigned int) dev->base_addr,
  309. streamer_priv->streamer_mmio,
  310. dev->irq);
  311. if (streamer_reset(dev))
  312. goto err_out_unmap;
  313. rc = register_netdev(dev);
  314. if (rc)
  315. goto err_out_unmap;
  316. return 0;
  317. err_out_unmap:
  318. iounmap(streamer_priv->streamer_mmio);
  319. err_out_free_mmio:
  320. release_mem_region(mmio_start, mmio_len);
  321. err_out_free_pio:
  322. release_region(pio_start, pio_len);
  323. err_out_mwi:
  324. pci_clear_mwi(pdev);
  325. err_out_disable:
  326. pci_disable_device(pdev);
  327. err_out:
  328. free_netdev(dev);
  329. #if STREAMER_DEBUG
  330. printk("lanstreamer: Exit error %x\n",rc);
  331. #endif
  332. return rc;
  333. }
/*
 * streamer_remove_one - PCI remove routine.
 *
 * Tears down everything streamer_init_one set up, in reverse order:
 * monitor-list unlink / proc entry removal, netdev unregistration,
 * MMIO unmap, BAR resource release, MWI/device disable, net_device free.
 */
static void __devexit streamer_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev=pci_get_drvdata(pdev);
	struct streamer_private *streamer_priv;

#if STREAMER_DEBUG
	printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev);
#endif

	/* Defensive checks: drvdata was set at the end of a successful probe,
	 * so neither pointer should be NULL here. */
	if (dev == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n");
		return;
	}

	streamer_priv=netdev_priv(dev);
	if (streamer_priv == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n");
		return;
	}

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
	{
		/* Unlink this adapter from the dev_streamer list; the last
		 * adapter out removes the shared /proc/net/streamer_tr entry. */
		struct streamer_private **p, **next;

		for (p = &dev_streamer; *p; p = next) {
			next = &(*p)->next;
			if (*p == streamer_priv) {
				*p = *next;
				break;
			}
		}
		if (!dev_streamer)
			remove_proc_entry("streamer_tr", init_net.proc_net);
	}
#endif
#endif

	unregister_netdev(dev);
	iounmap(streamer_priv->streamer_mmio);
	/* BAR 1 = MMIO region, BAR 0 = PIO region (mirrors the probe order) */
	release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1));
	release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0));
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
/*
 * streamer_reset - soft-reset the adapter and run its self-initialization.
 *
 * Pulses the BCTL soft-reset bit, programs the FIFO sizes and requested
 * ring speed into GPR, hands the chip a single diagnostic receive
 * descriptor, then waits (up to 40 s) for the SRB_REPLY interrupt that
 * signals self-init completion.  On success the UAA (burned-in MAC
 * address) and the address-table / parameter block offsets are read out
 * of the shared RAM window via the LAPA/LAPD(INC) access registers.
 *
 * Returns 0 on success, -1 on timeout or a non-zero init return code.
 *
 * NOTE(review): the two pci_map_single() mappings taken for the
 * diagnostic skb are never pci_unmap_single()'d on any path — the
 * mappings leak even though the skb itself is freed.  Also, the failure
 * paths release the PIO region with release_region(), which the probe
 * error path releases as well — possible double release; confirm.
 */
static int streamer_reset(struct net_device *dev)
{
	struct streamer_private *streamer_priv;
	__u8 __iomem *streamer_mmio;
	unsigned long t;
	unsigned int uaa_addr;
	struct sk_buff *skb = NULL;
	__u16 misr;

	streamer_priv = netdev_priv(dev);
	streamer_mmio = streamer_priv->streamer_mmio;

	writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL);
	t = jiffies;	/* start of the 40 s self-init timeout below */
	/* Hold soft reset bit for a while */
	ssleep(1);
	writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET,
	       streamer_mmio + BCTL);

#if STREAMER_DEBUG
	printk("BCTL: %x\n", readw(streamer_mmio + BCTL));
	printk("GPR: %x\n", readw(streamer_mmio + GPR));
	printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK));
#endif
	/* Select 8 KB RX and TX FIFOs */
	writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL );

	/* Program the requested ring speed (0 = autosense) into GPR */
	if (streamer_priv->streamer_ring_speed == 0) {	/* Autosense */
		writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE,
		       streamer_mmio + GPR);
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",
			       dev->name);
	} else if (streamer_priv->streamer_ring_speed == 16) {
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n",
			       dev->name);
		writew(GPR_16MBPS, streamer_mmio + GPR);
	} else if (streamer_priv->streamer_ring_speed == 4) {
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n",
			       dev->name);
		writew(0, streamer_mmio + GPR);
	}

	/* Give the chip one receive descriptor + buffer (carved out of a
	 * single skb) so its self-test has somewhere to DMA; a failed
	 * allocation is tolerated and only skips the diagnostic buffer. */
	skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
	if (!skb) {
		printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n",
		       dev->name);
	} else {
		struct streamer_rx_desc *rx_ring;
		u8 *data;

		rx_ring=(struct streamer_rx_desc *)skb->data;
		data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
		rx_ring->forward=0;
		rx_ring->status=0;
		/* NOTE(review): neither of these DMA mappings is ever
		 * unmapped — see the function header. */
		rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data,
							512, PCI_DMA_FROMDEVICE));
		/* NOTE(review): unlike ->buffer, this field is written
		 * without cpu_to_le32 — possibly a missing endian
		 * conversion; confirm against the descriptor layout. */
		rx_ring->framelen_buflen=512;
		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
			streamer_mmio+RXBDA);
	}

#if STREAMER_DEBUG
	printk("GPR = %x\n", readw(streamer_mmio + GPR));
#endif

	/* start solo init */
	writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);

	/* Poll for the SRB_REPLY completion bit, sleeping 100 ms between
	 * reads, for at most 40 s from the soft-reset timestamp. */
	while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
		msleep_interruptible(100);
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR
			       "IBM PCI tokenring card not responding\n");
			release_region(dev->base_addr, STREAMER_IO_SPACE);
			if (skb)
				dev_kfree_skb(skb);
			return -1;
		}
	}
	/* Acknowledge the reply and clear any pending MISR bits */
	writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);

	misr = readw(streamer_mmio + MISR_RUM);
	writew(~misr, streamer_mmio + MISR_RUM);

	if (skb)
		dev_kfree_skb(skb);	/* release skb used for diagnostics */

#if STREAMER_DEBUG
	printk("LAPWWO: %x, LAPA: %x LAPE: %x\n",
	       readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA),
	       readw(streamer_mmio + LAPE));
#endif

#if STREAMER_DEBUG
	{
		int i;
		writew(readw(streamer_mmio + LAPWWO),
		       streamer_mmio + LAPA);
		printk("initialization response srb dump: ");
		for (i = 0; i < 10; i++)
			printk("%x:",
			       ntohs(readw(streamer_mmio + LAPDINC)));
		printk("\n");
	}
#endif

	/* Offset 6 of the init SRB holds the return code; non-zero = failed */
	writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA);
	if (readw(streamer_mmio + LAPD)) {
		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",
		       ntohs(readw(streamer_mmio + LAPD)));
		release_region(dev->base_addr, STREAMER_IO_SPACE);
		return -1;
	}

	/* Offset 8 onward: UAA offset, Level.Addr, address-table offset,
	 * parameter-block offset — read with the auto-incrementing LAPDINC */
	writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
	uaa_addr = ntohs(readw(streamer_mmio + LAPDINC));
	readw(streamer_mmio + LAPDINC);	/* skip over Level.Addr field */
	streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC));
	streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC));

#if STREAMER_DEBUG
	printk("UAA resides at %x\n", uaa_addr);
#endif

	/* setup uaa area for access with LAPD */
	{
		/* Copy the 6-byte burned-in address out of shared RAM,
		 * one big-endian 16-bit word at a time. */
		int i;
		__u16 addr;
		writew(uaa_addr, streamer_mmio + LAPA);
		for (i = 0; i < 6; i += 2) {
			addr=ntohs(readw(streamer_mmio+LAPDINC));
			dev->dev_addr[i]= (addr >> 8) & 0xff;
			dev->dev_addr[i+1]= addr & 0xff;
		}
#if STREAMER_DEBUG
		printk("Adapter address: %pM\n", dev->dev_addr);
#endif
	}
	return 0;
}
  500. static int streamer_open(struct net_device *dev)
  501. {
  502. struct streamer_private *streamer_priv = netdev_priv(dev);
  503. __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
  504. unsigned long flags;
  505. char open_error[255];
  506. int i, open_finished = 1;
  507. __u16 srb_word;
  508. __u16 srb_open;
  509. int rc;
  510. if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) {
  511. rc=streamer_reset(dev);
  512. }
  513. if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
  514. return -EAGAIN;
  515. }
  516. #if STREAMER_DEBUG
  517. printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
  518. printk("pending ints: %x\n", readw(streamer_mmio + SISR));
  519. #endif
  520. writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
  521. writew(LISR_LIE, streamer_mmio + LISR); /* more ints later */
  522. /* adapter is closed, so SRB is pointed to by LAPWWO */
  523. writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
  524. #if STREAMER_DEBUG
  525. printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO),
  526. readw(streamer_mmio + LAPA));
  527. printk("LAPE: %x\n", readw(streamer_mmio + LAPE));
  528. printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK));
  529. #endif
  530. do {
  531. for (i = 0; i < SRB_COMMAND_SIZE; i += 2) {
  532. writew(0, streamer_mmio + LAPDINC);
  533. }
  534. writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA);
  535. writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; /* open */
  536. writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC);
  537. writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC);
  538. writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
  539. #if STREAMER_NETWORK_MONITOR
  540. /* If Network Monitor, instruct card to copy MAC frames through the ARB */
  541. writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC); /* offset 8 word contains open options */
  542. #else
  543. writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC); /* Offset 8 word contains Open.Options */
  544. #endif
  545. if (streamer_priv->streamer_laa[0]) {
  546. writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA);
  547. writew(htons((streamer_priv->streamer_laa[0] << 8) |
  548. streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC);
  549. writew(htons((streamer_priv->streamer_laa[2] << 8) |
  550. streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC);
  551. writew(htons((streamer_priv->streamer_laa[4] << 8) |
  552. streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC);
  553. memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len);
  554. }
  555. /* save off srb open offset */
  556. srb_open = readw(streamer_mmio + LAPWWO);
  557. #if STREAMER_DEBUG
  558. writew(readw(streamer_mmio + LAPWWO),
  559. streamer_mmio + LAPA);
  560. printk("srb open request:\n");
  561. for (i = 0; i < 16; i++) {
  562. printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
  563. }
  564. printk("\n");
  565. #endif
  566. spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
  567. streamer_priv->srb_queued = 1;
  568. /* signal solo that SRB command has been issued */
  569. writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
  570. spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
  571. while (streamer_priv->srb_queued) {
  572. interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ);
  573. if (signal_pending(current)) {
  574. printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
  575. printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n",
  576. readw(streamer_mmio + SISR),
  577. readw(streamer_mmio + MISR_RUM),
  578. readw(streamer_mmio + LISR));
  579. streamer_priv->srb_queued = 0;
  580. break;
  581. }
  582. }
  583. #if STREAMER_DEBUG
  584. printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK));
  585. printk("srb open response:\n");
  586. writew(srb_open, streamer_mmio + LAPA);
  587. for (i = 0; i < 10; i++) {
  588. printk("%x:",
  589. ntohs(readw(streamer_mmio + LAPDINC)));
  590. }
  591. #endif
  592. /* If we get the same return response as we set, the interrupt wasn't raised and the open
  593. * timed out.
  594. */
  595. writew(srb_open + 2, streamer_mmio + LAPA);
  596. srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
  597. if (srb_word == STREAMER_CLEAR_RET_CODE) {
  598. printk(KERN_WARNING "%s: Adapter Open time out or error.\n",
  599. dev->name);
  600. return -EIO;
  601. }
  602. if (srb_word != 0) {
  603. if (srb_word == 0x07) {
  604. if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
  605. printk(KERN_WARNING "%s: Retrying at different ring speed\n",
  606. dev->name);
  607. open_finished = 0;
  608. } else {
  609. __u16 error_code;
  610. writew(srb_open + 6, streamer_mmio + LAPA);
  611. error_code = ntohs(readw(streamer_mmio + LAPD));
  612. strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]);
  613. strcat(open_error, " - ");
  614. strcat(open_error, open_min_error[(error_code & 0x0f)]);
  615. if (!streamer_priv->streamer_ring_speed &&
  616. ((error_code & 0x0f) == 0x0d))
  617. {
  618. printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
  619. printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
  620. free_irq(dev->irq, dev);
  621. return -EIO;
  622. }
  623. printk(KERN_WARNING "%s: %s\n",
  624. dev->name, open_error);
  625. free_irq(dev->irq, dev);
  626. return -EIO;
  627. } /* if autosense && open_finished */
  628. } else {
  629. printk(KERN_WARNING "%s: Bad OPEN response: %x\n",
  630. dev->name, srb_word);
  631. free_irq(dev->irq, dev);
  632. return -EIO;
  633. }
  634. } else
  635. open_finished = 1;
  636. } while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
  637. writew(srb_open + 18, streamer_mmio + LAPA);
  638. srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
  639. if (srb_word & (1 << 3))
  640. if (streamer_priv->streamer_message_level)
  641. printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);
  642. if (srb_word & 1)
  643. streamer_priv->streamer_ring_speed = 16;
  644. else
  645. streamer_priv->streamer_ring_speed = 4;
  646. if (streamer_priv->streamer_message_level)
  647. printk(KERN_INFO "%s: Opened in %d Mbps mode\n",
  648. dev->name,
  649. streamer_priv->streamer_ring_speed);
  650. writew(srb_open + 8, streamer_mmio + LAPA);
  651. streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC));
  652. streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC));
  653. streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC));
  654. readw(streamer_mmio + LAPDINC); /* offset 14 word is rsvd */
  655. streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC));
  656. streamer_priv->streamer_receive_options = 0x00;
  657. streamer_priv->streamer_copy_all_options = 0;
  658. /* setup rx ring */
  659. /* enable rx channel */
  660. writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM);
  661. /* setup rx descriptors */
  662. streamer_priv->streamer_rx_ring=
  663. kmalloc( sizeof(struct streamer_rx_desc)*
  664. STREAMER_RX_RING_SIZE,GFP_KERNEL);
  665. if (!streamer_priv->streamer_rx_ring) {
  666. printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name);
  667. return -EIO;
  668. }
  669. for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
  670. struct sk_buff *skb;
  671. skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
  672. if (skb == NULL)
  673. break;
  674. skb->dev = dev;
  675. streamer_priv->streamer_rx_ring[i].forward =
  676. cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
  677. sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
  678. streamer_priv->streamer_rx_ring[i].status = 0;
  679. streamer_priv->streamer_rx_ring[i].buffer =
  680. cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
  681. streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
  682. streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
  683. streamer_priv->rx_ring_skb[i] = skb;
  684. }
  685. streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
  686. cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
  687. sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
  688. if (i == 0) {
  689. printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
  690. free_irq(dev->irq, dev);
  691. return -EIO;
  692. }
  693. streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */
  694. writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
  695. sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
  696. streamer_mmio + RXBDA);
  697. writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
  698. sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
  699. streamer_mmio + RXLBDA);
  700. /* set bus master interrupt event mask */
  701. writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
  702. /* setup tx ring */
  703. streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)*
  704. STREAMER_TX_RING_SIZE,GFP_KERNEL);
  705. if (!streamer_priv->streamer_tx_ring) {
  706. printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name);
  707. return -EIO;
  708. }
  709. writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */
  710. for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
  711. streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
  712. &streamer_priv->streamer_tx_ring[i + 1],
  713. sizeof(struct streamer_tx_desc),
  714. PCI_DMA_TODEVICE));
  715. streamer_priv->streamer_tx_ring[i].status = 0;
  716. streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
  717. streamer_priv->streamer_tx_ring[i].buffer = 0;
  718. streamer_priv->streamer_tx_ring[i].buflen = 0;
  719. streamer_priv->streamer_tx_ring[i].rsvd1 = 0;
  720. streamer_priv->streamer_tx_ring[i].rsvd2 = 0;
  721. streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
  722. }
  723. streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
  724. cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
  725. sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));
  726. streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
  727. streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */
  728. streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1;
  729. /* set Busmaster interrupt event mask (handle receives on interrupt only */
  730. writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
  731. /* set system event interrupt mask */
  732. writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);
  733. #if STREAMER_DEBUG
  734. printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
  735. printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));
  736. #endif
  737. #if STREAMER_NETWORK_MONITOR
  738. writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
  739. printk("%s: Node Address: %04x:%04x:%04x\n", dev->name,
  740. ntohs(readw(streamer_mmio + LAPDINC)),
  741. ntohs(readw(streamer_mmio + LAPDINC)),
  742. ntohs(readw(streamer_mmio + LAPDINC)));
  743. readw(streamer_mmio + LAPDINC);
  744. readw(streamer_mmio + LAPDINC);
  745. printk("%s: Functional Address: %04x:%04x\n", dev->name,
  746. ntohs(readw(streamer_mmio + LAPDINC)),
  747. ntohs(readw(streamer_mmio + LAPDINC)));
  748. writew(streamer_priv->streamer_parms_addr + 4,
  749. streamer_mmio + LAPA);
  750. printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name,
  751. ntohs(readw(streamer_mmio + LAPDINC)),
  752. ntohs(readw(streamer_mmio + LAPDINC)),
  753. ntohs(readw(streamer_mmio + LAPDINC)));
  754. #endif
  755. netif_start_queue(dev);
  756. netif_carrier_on(dev);
  757. return 0;
  758. }
  759. /*
  760. * When we enter the rx routine we do not know how many frames have been
  761. * queued on the rx channel. Therefore we start at the next rx status
  762. * position and travel around the receive ring until we have completed
  763. * all the frames.
  764. *
  765. * This means that we may process the frame before we receive the end
  766. * of frame interrupt. This is why we always test the status instead
  767. * of blindly processing the next frame.
  768. *
  769. */
/*
 * Receive bottom-half: drain every completed descriptor from the rx ring.
 *
 * Called from streamer_interrupt() (IRQ context, under streamer_lock), so
 * only atomic-safe calls are used (dev_alloc_skb, netif_rx,
 * dev_kfree_skb-free paths are avoided here).
 */
static void streamer_rx(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	struct streamer_rx_desc *rx_desc;
	int rx_ring_last_received, length, frame_length, buffer_cnt = 0;
	struct sk_buff *skb, *skb2;

	/* setup the next rx descriptor to be received */
	rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
	rx_ring_last_received = streamer_priv->rx_ring_last_received;

	while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */
		if (rx_ring_last_received != streamer_priv->rx_ring_last_received)
		{
			/* Sanity check: local copy must track the shared ring index. */
			printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n",
				rx_ring_last_received, streamer_priv->rx_ring_last_received);
		}
		/* Advance both the shared index and the local shadow copy. */
		streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
		rx_ring_last_received = streamer_priv->rx_ring_last_received;

		length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
		frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff;

		if (rx_desc->status & 0x7E830000) {	/* errors */
			if (streamer_priv->streamer_message_level) {
				printk(KERN_WARNING "%s: Rx Error %x\n",
				       dev->name, rx_desc->status);
			}
		} else {	/* received without errors */
			if (rx_desc->status & 0x80000000) {	/* frame complete */
				/* Single-buffer frame: take the recycle path below. */
				buffer_cnt = 1;
				skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
			} else {
				/* Frame spans multiple buffers: fragments are copied together. */
				skb = dev_alloc_skb(frame_length);
			}

			if (skb == NULL)
			{
				printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
				dev->stats.rx_dropped++;
			} else {	/* we allocated an skb OK */
				if (buffer_cnt == 1) {
					/* release the DMA mapping */
					pci_unmap_single(streamer_priv->pci_dev,
							 le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
							 streamer_priv->pkt_buf_sz,
							 PCI_DMA_FROMDEVICE);
					/* Hand the filled ring skb upstream; the freshly
					 * allocated skb replaces it in the ring below. */
					skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
#if STREAMER_DEBUG_PACKETS
					{
						int i;
						printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head);
						for (i = 0; i < frame_length; i++)
						{
							printk("%x:", skb2->data[i]);
							if (((i + 1) % 16) == 0)
								printk("\n");
						}
						printk("\n");
					}
#endif
					skb_put(skb2, length);
					skb2->protocol = tr_type_trans(skb2, dev);
					/* recycle this descriptor */
					streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
					streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
					streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer =
						cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
								PCI_DMA_FROMDEVICE));
					streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
					/* place recycled descriptor back on the adapter */
					writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
									&streamer_priv->streamer_rx_ring[rx_ring_last_received],
									sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
						streamer_mmio + RXLBDA);
					/* pass the received skb up to the protocol */
					netif_rx(skb2);
				} else {
					do {	/* Walk the buffers */
						/* NOTE(review): the memcpy below reads through the
						 * DMA bus address stored in rx_desc->buffer cast to
						 * a CPU pointer; this only works where bus address
						 * == virtual address — confirm on target platform.
						 * Also note the comma operator joining the two
						 * calls: both execute, but a ';' was likely meant. */
						pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE),
						memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length);	/* copy this fragment */
						streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
						streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;

						/* give descriptor back to the adapter */
						writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
										&streamer_priv->streamer_rx_ring[rx_ring_last_received],
										length, PCI_DMA_FROMDEVICE)),
							streamer_mmio + RXLBDA);

						if (rx_desc->status & 0x80000000)
							break;	/* this descriptor completes the frame */

						/* else get the next pending descriptor */
						if (rx_ring_last_received!= streamer_priv->rx_ring_last_received)
						{
							printk("RX Error rx_ring_last_received not the same %x %x\n",
								rx_ring_last_received,
								streamer_priv->rx_ring_last_received);
						}
						rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)];

						length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
						streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1);
						rx_ring_last_received = streamer_priv->rx_ring_last_received;
					} while (1);

					skb->protocol = tr_type_trans(skb, dev);
					/* send up to the protocol */
					netif_rx(skb);
				}
				dev->stats.rx_packets++;
				/* rx_bytes counts the last buffer's length only —
				 * NOTE(review): for multi-buffer frames this undercounts;
				 * frame_length would be the full size. */
				dev->stats.rx_bytes += length;
			}	/* if skb == null */
		}	/* end received without errors */

		/* try the next one */
		rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
	}	/* end for all completed rx descriptors */
}
/*
 * Top-level interrupt handler.
 *
 * Reads SISR and services each asserted condition one at a time,
 * re-reading SISR after every pass; gives up after MAX_INTR iterations so
 * a stuck interrupt source cannot wedge the CPU.  Each handled bit is
 * acknowledged by writing its complement to the SISR_RUM/MISR_RUM
 * "reset" registers, followed by a read-back (presumably to flush the
 * posted PCI write — TODO confirm against the adapter spec).
 */
static irqreturn_t streamer_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u16 sisr;
	__u16 misr;
	u8 max_intr = MAX_INTR;

	spin_lock(&streamer_priv->streamer_lock);
	sisr = readw(streamer_mmio + SISR);

	while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE |
		       SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) &&
	      (max_intr > 0)) {

		if(sisr & SISR_PAR_ERR) {
			/* PCI parity error: acknowledge only. */
			writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		else if(sisr & SISR_SERR_ERR) {
			/* PCI system error: acknowledge only. */
			writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		else if(sisr & SISR_MI) {
			/* Bus-master interrupt: consult MISR for the cause. */
			misr = readw(streamer_mmio + MISR_RUM);

			if (misr & MISR_TX2_EOF) {
				/* Reap all completed tx descriptors: the index is
				 * advanced first, then the skb at the new index is
				 * accounted, freed, and the descriptor scrubbed. */
				while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
					streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
					streamer_priv->free_tx_ring_entries++;
					dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
					dev->stats.tx_packets++;
					dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
					/* Poison the stale buffer pointer to catch reuse. */
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0;
				}
				/* Ring entries were freed: let the stack transmit again. */
				netif_wake_queue(dev);
			}

			if (misr & MISR_RX_EOF) {
				streamer_rx(dev);
			}
			/* MISR_RX_EOF */

			if (misr & MISR_RX_NOBUF) {
				/* According to the documentation, we don't have to do anything,
				 * but trapping it keeps it out of /var/log/messages.
				 */
			}	/* SISR_RX_NOBUF */

			/* Acknowledge every MISR bit we saw in this pass. */
			writew(~misr, streamer_mmio + MISR_RUM);
			(void)readw(streamer_mmio + MISR_RUM);
		}
		else if (sisr & SISR_SRB_REPLY) {
			/* srb_queued == 1: a task is sleeping on srb_wait (open/close);
			 * srb_queued == 2: command issued from atomic context, complete
			 * it here via the bottom half. */
			if (streamer_priv->srb_queued == 1) {
				wake_up_interruptible(&streamer_priv->srb_wait);
			} else if (streamer_priv->srb_queued == 2) {
				streamer_srb_bh(dev);
			}
			streamer_priv->srb_queued = 0;

			writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		else if (sisr & SISR_ADAPTER_CHECK) {
			/* Fatal adapter fault: dump the 8 check bytes and take the
			 * interface down until a manual reset. */
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
			printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n",
			       dev->name, readw(streamer_mmio + LAPDINC),
			       ntohs(readw(streamer_mmio + LAPDINC)),
			       ntohs(readw(streamer_mmio + LAPDINC)),
			       ntohs(readw(streamer_mmio + LAPDINC)));
			netif_stop_queue(dev);
			netif_carrier_off(dev);
			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
		}
		/* SISR_ADAPTER_CHECK */
		else if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (streamer_priv->asb_queued) {
				streamer_asb_bh(dev);
			}
			writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_ASB_FREE */
		else if (sisr & SISR_ARB_CMD) {
			/* Adapter-initiated request (e.g. received MAC frame). */
			streamer_arb_cmd(dev);
			writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_ARB_CMD */
		else if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (streamer_priv->trb_queued) {
				wake_up_interruptible(&streamer_priv->
						      trb_wait);
			}
			streamer_priv->trb_queued = 0;
			writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_TRB_REPLY */

		sisr = readw(streamer_mmio + SISR);
		max_intr--;
	} /* while() */

	spin_unlock(&streamer_priv->streamer_lock) ;
	return IRQ_HANDLED;
}
/*
 * Hard-start transmit: queue one skb on TX channel 2.
 *
 * Returns NETDEV_TX_OK when the skb was placed on the ring, or
 * NETDEV_TX_BUSY (after stopping the queue) when no descriptor is free;
 * the queue is re-woken from the MISR_TX2_EOF path in
 * streamer_interrupt() once entries are reaped.
 */
static netdev_tx_t streamer_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	unsigned long flags ;

	spin_lock_irqsave(&streamer_priv->streamer_lock, flags);

	if (streamer_priv->free_tx_ring_entries) {
		/* Fill the next free descriptor.  The upper half of
		 * bufcnt_framelen (0x0002) is presumably the descriptor's
		 * buffer-count field — TODO confirm against adapter spec;
		 * the lower half is the frame length. */
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer =
			cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
		/* Keep the skb so the tx-complete interrupt can free it. */
		streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb;
		streamer_priv->free_tx_ring_entries--;
#if STREAMER_DEBUG_PACKETS
		{
			int i;
			printk("streamer_xmit packet print:\n");
			for (i = 0; i < skb->len; i++) {
				printk("%x:", skb->data[i]);
				if (((i + 1) % 16) == 0)
					printk("\n");
			}
			printk("\n");
		}
#endif
		/* Tell the adapter about the new last frame descriptor.
		 * NOTE(review): this maps the descriptor on every transmit and
		 * the mapping is never unmapped — verify against the DMA API. */
		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
						&streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
						sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
			streamer_mmio + TX2LFDA);
		/* Read back, presumably to flush the posted PCI write. */
		(void)readl(streamer_mmio + TX2LFDA);

		streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
		return NETDEV_TX_OK;
	} else {
		/* Ring full: stop the queue and ask the core to requeue. */
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
		return NETDEV_TX_BUSY;
	}
}
  1034. static int streamer_close(struct net_device *dev)
  1035. {
  1036. struct streamer_private *streamer_priv =
  1037. netdev_priv(dev);
  1038. __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
  1039. unsigned long flags;
  1040. int i;
  1041. netif_stop_queue(dev);
  1042. netif_carrier_off(dev);
  1043. writew(streamer_priv->srb, streamer_mmio + LAPA);
  1044. writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC);
  1045. writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
  1046. spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
  1047. streamer_priv->srb_queued = 1;
  1048. writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
  1049. spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
  1050. while (streamer_priv->srb_queued)
  1051. {
  1052. interruptible_sleep_on_timeout(&streamer_priv->srb_wait,
  1053. jiffies + 60 * HZ);
  1054. if (signal_pending(current))
  1055. {
  1056. printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
  1057. printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n",
  1058. readw(streamer_mmio + SISR),
  1059. readw(streamer_mmio + MISR_RUM),
  1060. readw(streamer_mmio + LISR));
  1061. streamer_priv->srb_queued = 0;
  1062. break;
  1063. }
  1064. }
  1065. streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
  1066. for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
  1067. if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) {
  1068. dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]);
  1069. }
  1070. streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
  1071. }
  1072. /* reset tx/rx fifo's and busmaster logic */
  1073. /* TBD. Add graceful way to reset the LLC channel without doing a soft reset.
  1074. writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
  1075. udelay(1);
  1076. writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL);
  1077. */
  1078. #if STREAMER_DEBUG
  1079. writew(streamer_priv->srb, streamer_mmio + LAPA);
  1080. printk("srb): ");
  1081. for (i = 0; i < 2; i++) {
  1082. printk("%x ", ntohs(readw(streamer_mmio + LAPDINC)));
  1083. }
  1084. printk("\n");
  1085. #endif
  1086. free_irq(dev->irq, dev);
  1087. return 0;
  1088. }
/*
 * Update the adapter's receive options (promiscuous mode) and the
 * functional address mask used for multicast.
 *
 * Called from atomic context, so the SRB is issued with srb_queued = 2
 * and completion is handled asynchronously by streamer_srb_bh() from the
 * interrupt handler.  Only one SRB is issued per call: when the
 * copy-all options change we return after SRB_MODIFY_RECEIVE_OPTIONS
 * and the functional address is not refreshed in the same pass.
 */
static void streamer_set_rx_mode(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u8 options = 0;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[5];

	/* NOTE(review): LAPA is written with writel here but writew
	 * elsewhere in this file — confirm which width the register needs. */
	writel(streamer_priv->srb, streamer_mmio + LAPA);
	options = streamer_priv->streamer_copy_all_options;

	if (dev->flags & IFF_PROMISC)
		options |= (3 << 5);	/* All LLC and MAC frames, all through the main rx channel */
	else
		options &= ~(3 << 5);

	/* Only issue the srb if there is a change in options */
	if ((options ^ streamer_priv->streamer_copy_all_options))
	{
		/* Now to issue the srb command to alter the copy.all.options */
		writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
		writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC);
		/* The next four words are ASCII "JAMES   " filler/signature. */
		writew(htons(0x4a41),streamer_mmio+LAPDINC);
		writew(htons(0x4d45),streamer_mmio+LAPDINC);
		writew(htons(0x5320),streamer_mmio+LAPDINC);
		writew(0x2020, streamer_mmio + LAPDINC);

		streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
		streamer_priv->streamer_copy_all_options = options;
		return;
	}

	/* Set the functional addresses we need for multicast */
	writel(streamer_priv->srb,streamer_mmio+LAPA);

	/* OR bytes 2-5 of every multicast address into one 4-byte mask. */
	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}

	writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
	writew(0,streamer_mmio+LAPDINC);
	writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC);
	writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC);
	streamer_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM);
}
/*
 * SRB completion bottom half: runs from streamer_interrupt() when an
 * SRB issued from atomic context (srb_queued == 2) completes.
 *
 * The first LAPDINC read yields the command byte, the second the return
 * code — the reads are sequential through the auto-incrementing data
 * port, so their order is significant.  Mostly just logs the outcome.
 */
static void streamer_srb_bh(struct net_device *dev)
{
	struct streamer_private *streamer_priv = netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u16 srb_word;

	writew(streamer_priv->srb, streamer_mmio + LAPA);
	srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* command byte */

	switch (srb_word) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

	case SRB_MODIFY_RECEIVE_OPTIONS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */

		switch (srb_word) {
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			if (streamer_priv->streamer_message_level)
				printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",
				       dev->name,
				       streamer_priv->streamer_copy_all_options,
				       streamer_priv->streamer_receive_options);
			break;
		}		/* switch srb[2] */
		break;


		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */
	case SRB_SET_GROUP_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */
		switch (srb_word) {
		case 0x00:
			/* success — nothing to report */
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x3c:
			printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
			break;
		case 0x3e:	/* If we ever implement individual multicast addresses, will need to deal with this */
			printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
			break;
		case 0x55:
			printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;


		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */
	case SRB_RESET_GROUP_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */
		switch (srb_word) {
		case 0x00:
			/* success — nothing to report */
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x39:	/* Must deal with this if individual multicast addresses used */
			printk(KERN_INFO "%s: Group address not found\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */
	case SRB_SET_FUNC_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */
		switch (srb_word) {
		case 0x00:
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */
	case SRB_READ_LOG:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */
		switch (srb_word) {
		case 0x00:
			{
				int i;
				if (streamer_priv->streamer_message_level)
					printk(KERN_INFO "%s: Read Log command complete\n", dev->name);
				printk("Read Log statistics: ");
				/* Dump the five counter words starting at srb+6. */
				writew(streamer_priv->srb + 6,
				       streamer_mmio + LAPA);
				for (i = 0; i < 5; i++) {
					printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
				}
				printk("\n");
			}
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		}		/* switch srb[2] */
		break;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
	case SRB_READ_SR_COUNTERS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */
		switch (srb_word) {
		case 0x00:
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;

	default:
		printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
		break;
	}			/* switch srb[0] */
}
  1281. static int streamer_set_mac_address(struct net_device *dev, void *addr)
  1282. {
  1283. struct sockaddr *saddr = addr;
  1284. struct streamer_private *streamer_priv = netdev_priv(dev);
  1285. if (netif_running(dev))
  1286. {
  1287. printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
  1288. return -EIO;
  1289. }
  1290. memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len);
  1291. if (streamer_priv->streamer_message_level) {
  1292. printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",
  1293. dev->name, streamer_priv->streamer_laa[0],
  1294. streamer_priv->streamer_laa[1],
  1295. streamer_priv->streamer_laa[2],
  1296. streamer_priv->streamer_laa[3],
  1297. streamer_priv->streamer_laa[4],
  1298. streamer_priv->streamer_laa[5]);
  1299. }
  1300. return 0;
  1301. }
/*
 * Handle an Adapter Request Block (ARB) event from the card.
 *
 * The adapter raises ARB interrupts for unsolicited events.  Two are
 * handled here:
 *   ARB_RECEIVE_DATA       - a MAC frame has arrived; copy it out of
 *                            adapter memory, hand it to the stack, and
 *                            reply via the ASB.
 *   ARB_LAN_CHANGE_STATUS  - ring status bits changed; log the newly
 *                            set conditions and issue follow-up SRB
 *                            commands for counter overflows.
 *
 * All adapter memory accesses go through the LAPA/LAPD(INC) window
 * registers; LAPDINC auto-increments the window pointer, so the read
 * order below is significant and must not be rearranged.
 */
static void streamer_arb_cmd(struct net_device *dev)
{
	struct streamer_private *streamer_priv = netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u8 header_len;
	__u16 frame_len, buffer_len;
	struct sk_buff *mac_frame;
	__u8 frame_data[256];		/* staging area for one adapter buffer */
	__u16 buff_off;
	__u16 lan_status = 0, lan_status_diff;	/* Initialize to stop compiler warning */
	__u8 fdx_prot_error;
	__u16 next_ptr;
	__u16 arb_word;

#if STREAMER_NETWORK_MONITOR
	struct trh_hdr *mac_hdr;
#endif

	/* ARB opcode is in the high byte of the first word of the ARB. */
	writew(streamer_priv->arb, streamer_mmio + LAPA);
	arb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;

	if (arb_word == ARB_RECEIVE_DATA) {	/* Receive.data, MAC frames */
		/* ARB+6: buffer offset, header length, total frame length. */
		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
		streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC));
		header_len = ntohs(readw(streamer_mmio + LAPDINC)) >> 8;	/* 802.5 Token-Ring Header Length */
		frame_len = ntohs(readw(streamer_mmio + LAPDINC));

#if STREAMER_DEBUG
		{
			int i;
			__u16 next;
			__u8 status;
			__u16 len;

			writew(ntohs(buff_off), streamer_mmio + LAPA);	/*setup window to frame data */
			next = htons(readw(streamer_mmio + LAPDINC));
			status = ntohs(readw(streamer_mmio + LAPDINC)) & 0xff;
			len = ntohs(readw(streamer_mmio + LAPDINC));

			/* print out 1st 14 bytes of frame data */
			for (i = 0; i < 7; i++) {
				printk("Loc %d = %04x\n", i,
				       ntohs(readw(streamer_mmio + LAPDINC)));
			}

			printk("next %04x, fs %02x, len %04x\n", next,
			       status, len);
		}
#endif
		if (!(mac_frame = dev_alloc_skb(frame_len))) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n",
			       dev->name);
			/* Still fall through to free the ARB and reply,
			 * otherwise the adapter would stall. */
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */
		do {
			int i;
			__u16 rx_word;

			/* Each buffer: next-pointer word, status word,
			 * length word, then the data itself. */
			writew(htons(buff_off), streamer_mmio + LAPA);	/* setup window to frame data */
			next_ptr = ntohs(readw(streamer_mmio + LAPDINC));
			readw(streamer_mmio + LAPDINC);	/* read thru status word */
			buffer_len = ntohs(readw(streamer_mmio + LAPDINC));
			/* Guard the 256-byte staging array; NOTE(review):
			 * a larger buffer silently truncates the frame
			 * passed to the stack. */
			if (buffer_len > 256)
				break;

			/* Copy word-at-a-time, splitting each 16-bit read
			 * into two bytes.  NOTE(review): an odd buffer_len
			 * would write one byte past 'buffer_len' into
			 * frame_data - presumably lengths are always even;
			 * confirm against adapter spec. */
			i = 0;
			while (i < buffer_len) {
				rx_word = ntohs(readw(streamer_mmio + LAPDINC));
				frame_data[i] = rx_word >> 8;
				frame_data[i + 1] = rx_word & 0xff;
				i += 2;
			}

			memcpy(skb_put(mac_frame, buffer_len),
			       frame_data, buffer_len);
		} while (next_ptr && (buff_off = next_ptr));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
		printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
		       dev->name);
		mac_hdr = tr_hdr(mac_frame);
		printk(KERN_WARNING
		       "%s: MAC Frame Dest. Addr: %pM\n",
		       dev->name, mac_hdr->daddr);
		printk(KERN_WARNING
		       "%s: MAC Frame Srce. Addr: %pM\n",
		       dev->name, mac_hdr->saddr);
#endif
		netif_rx(mac_frame);

		/* Now tell the card we have dealt with the received frame */
	      drop_frame:
		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);

		/* Is the ASB free ? */
		if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE)) {
			/* ASB busy: remember the pending reply and let
			 * streamer_asb_bh() send it when the ASB frees up. */
			streamer_priv->asb_queued = 1;
			writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
			return;
			/* Drop out and wait for the bottom half to be run */
		}

		/* Build the RECEIVE_DATA reply in the ASB and return the
		 * rx buffer to the adapter. */
		writew(streamer_priv->asb, streamer_mmio + LAPA);
		writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio + LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio + LAPDINC);
		writew(0, streamer_mmio + LAPDINC);
		writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);

		streamer_priv->asb_queued = 2;
		return;

	} else if (arb_word == ARB_LAN_CHANGE_STATUS) {	/* Lan.change.status */
		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
		lan_status = ntohs(readw(streamer_mmio + LAPDINC));
		fdx_prot_error = ntohs(readw(streamer_mmio + LAPD)) >> 8;

		/* Issue ARB Free */
		writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);

		/* Bits that are newly SET since the last status report. */
		lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) &
		    lan_status;

		/* Fatal ring conditions: the hardware has closed the
		 * adapter; stop the queue and require a manual reset. */
		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR)) {
			if (lan_status_diff & LSC_LWF)
				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
			if (lan_status_diff & LSC_ARW)
				printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
			if (lan_status_diff & LSC_FPE)
				printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
			if (lan_status_diff & LSC_RR)
				printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */
			/* @TBD. no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
			   udelay(1);
			   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */
			netif_stop_queue(dev);
			netif_carrier_off(dev);
			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
		}
		/* If serious error */
		if (streamer_priv->streamer_message_level) {
			/* Informational status changes, logged only when
			 * the user raised the message level. */
			if (lan_status_diff & LSC_SIG_LOSS)
				printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
				printk(KERN_INFO "%s: Beaconing\n", dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
			if (lan_status_diff & LSC_SS)
				printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
				printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
				printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
		}

		if (lan_status_diff & LSC_CO) {
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

			/* Issue READ.LOG command */
			writew(streamer_priv->srb, streamer_mmio + LAPA);
			writew(htons(SRB_READ_LOG << 8), streamer_mmio + LAPDINC);
			writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio + LAPDINC);
			writew(0, streamer_mmio + LAPDINC);
			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */

			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
		}

		if (lan_status_diff & LSC_SR_CO) {
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

			/* Issue a READ.SR.COUNTERS */
			writew(streamer_priv->srb, streamer_mmio + LAPA);
			writew(htons(SRB_READ_SR_COUNTERS << 8),
			       streamer_mmio + LAPDINC);
			writew(htons(STREAMER_CLEAR_RET_CODE << 8),
			       streamer_mmio + LAPDINC);
			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
		}
		streamer_priv->streamer_lan_status = lan_status;
	}			/* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
  1477. static void streamer_asb_bh(struct net_device *dev)
  1478. {
  1479. struct streamer_private *streamer_priv =
  1480. netdev_priv(dev);
  1481. __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
  1482. if (streamer_priv->asb_queued == 1)
  1483. {
  1484. /* Dropped through the first time */
  1485. writew(streamer_priv->asb, streamer_mmio + LAPA);
  1486. writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC);
  1487. writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
  1488. writew(0, streamer_mmio + LAPDINC);
  1489. writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
  1490. writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
  1491. streamer_priv->asb_queued = 2;
  1492. return;
  1493. }
  1494. if (streamer_priv->asb_queued == 2) {
  1495. __u8 rc;
  1496. writew(streamer_priv->asb + 2, streamer_mmio + LAPA);
  1497. rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
  1498. switch (rc) {
  1499. case 0x01:
  1500. printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
  1501. break;
  1502. case 0x26:
  1503. printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
  1504. break;
  1505. case 0xFF:
  1506. /* Valid response, everything should be ok again */
  1507. break;
  1508. default:
  1509. printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
  1510. break;
  1511. }
  1512. }
  1513. streamer_priv->asb_queued = 0;
  1514. }
  1515. static int streamer_change_mtu(struct net_device *dev, int mtu)
  1516. {
  1517. struct streamer_private *streamer_priv =
  1518. netdev_priv(dev);
  1519. __u16 max_mtu;
  1520. if (streamer_priv->streamer_ring_speed == 4)
  1521. max_mtu = 4500;
  1522. else
  1523. max_mtu = 18000;
  1524. if (mtu > max_mtu)
  1525. return -EINVAL;
  1526. if (mtu < 100)
  1527. return -EINVAL;
  1528. dev->mtu = mtu;
  1529. streamer_priv->pkt_buf_sz = mtu + TR_HLEN;
  1530. return 0;
  1531. }
#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
/*
 * Legacy /proc read handler: emits a header line followed by one
 * sprintf_info() section per registered LanStreamer adapter.
 *
 * Uses the classic get_info offset/slop protocol: 'begin' tracks where
 * the data currently in 'buffer' starts relative to the full virtual
 * output, and the tail adjusts *start/len so the caller receives only
 * the [offset, offset+length) window.  The bookkeeping is
 * order-sensitive; do not rearrange.
 */
static int streamer_proc_info(char *buffer, char **start, off_t offset,
			      int length, int *eof, void *data)
{
	struct streamer_private *sdev = NULL;
	struct pci_dev *pci_device = NULL;
	int len = 0;		/* bytes currently held in buffer */
	off_t begin = 0;	/* virtual offset of buffer[0] */
	off_t pos = 0;		/* virtual offset of the write cursor */
	int size;
	struct net_device *dev;

	size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n");
	pos += size;
	len += size;

	/* Walk the driver's global adapter list. */
	for (sdev = dev_streamer; sdev; sdev = sdev->next) {
		pci_device = sdev->pci_dev;
		dev = pci_get_drvdata(pci_device);
		size = sprintf_info(buffer + len, dev);
		len += size;
		pos = begin + len;

		/* Data entirely before the requested window: discard it. */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}
		/* Enough data produced to satisfy the request: stop. */
		if (pos > offset + length)
			break;
	}		/* for */

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);	/* Start slop */
	if (len > length)
		len = length;	/* Ending slop */
	return len;
}
  1566. static int sprintf_info(char *buffer, struct net_device *dev)
  1567. {
  1568. struct streamer_private *streamer_priv =
  1569. netdev_priv(dev);
  1570. __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
  1571. struct streamer_adapter_addr_table sat;
  1572. struct streamer_parameters_table spt;
  1573. int size = 0;
  1574. int i;
  1575. writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
  1576. for (i = 0; i < 14; i += 2) {
  1577. __u16 io_word;
  1578. __u8 *datap = (__u8 *) & sat;
  1579. io_word=ntohs(readw(streamer_mmio+LAPDINC));
  1580. datap[size]=io_word >> 8;
  1581. datap[size+1]=io_word & 0xff;
  1582. }
  1583. writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA);
  1584. for (i = 0; i < 68; i += 2) {
  1585. __u16 io_word;
  1586. __u8 *datap = (__u8 *) & spt;
  1587. io_word=ntohs(readw(streamer_mmio+LAPDINC));
  1588. datap[size]=io_word >> 8;
  1589. datap[size+1]=io_word & 0xff;
  1590. }
  1591. size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name);
  1592. size += sprintf(buffer + size,
  1593. "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
  1594. dev->name, dev->dev_addr, sat.node_addr,
  1595. sat.func_addr[0], sat.func_addr[1],
  1596. sat.func_addr[2], sat.func_addr[3]);
  1597. size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
  1598. size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name);
  1599. size += sprintf(buffer + size,
  1600. "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
  1601. dev->name, spt.phys_addr[0], spt.phys_addr[1],
  1602. spt.phys_addr[2], spt.phys_addr[3],
  1603. spt.up_node_addr, spt.poll_addr,
  1604. ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
  1605. ntohs(spt.att_code));
  1606. size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
  1607. size += sprintf(buffer + size,
  1608. "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
  1609. dev->name, spt.source_addr,
  1610. ntohs(spt.beacon_type), ntohs(spt.major_vector),
  1611. ntohs(spt.lan_status), ntohs(spt.local_ring),
  1612. ntohs(spt.mon_error), ntohs(spt.frame_correl));
  1613. size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
  1614. dev->name);
  1615. size += sprintf(buffer + size,
  1616. "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
  1617. dev->name, ntohs(spt.beacon_transmit),
  1618. ntohs(spt.beacon_receive),
  1619. spt.beacon_naun,
  1620. spt.beacon_phys[0], spt.beacon_phys[1],
  1621. spt.beacon_phys[2], spt.beacon_phys[3]);
  1622. return size;
  1623. }
  1624. #endif
  1625. #endif
/* PCI glue: device-ID match table plus probe/remove callbacks; the PCI
 * core calls streamer_init_one() for each matching adapter. */
static struct pci_driver streamer_pci_driver = {
	.name = "lanstreamer",
	.id_table = streamer_pci_tbl,
	.probe = streamer_init_one,
	.remove = __devexit_p(streamer_remove_one),
};
  1632. static int __init streamer_init_module(void) {
  1633. return pci_register_driver(&streamer_pci_driver);
  1634. }
  1635. static void __exit streamer_cleanup_module(void) {
  1636. pci_unregister_driver(&streamer_pci_driver);
  1637. }
/* Module init/exit hookup and license declaration. */
module_init(streamer_init_module);
module_exit(streamer_cleanup_module);
MODULE_LICENSE("GPL");