  1. /* drivers/atm/zatm.c - ZeitNet ZN122x device driver */
  2. /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
  3. #include <linux/module.h>
  4. #include <linux/kernel.h>
  5. #include <linux/mm.h>
  6. #include <linux/pci.h>
  7. #include <linux/errno.h>
  8. #include <linux/atm.h>
  9. #include <linux/atmdev.h>
  10. #include <linux/sonet.h>
  11. #include <linux/skbuff.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/delay.h>
  14. #include <linux/uio.h>
  15. #include <linux/init.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/atm_zatm.h>
  19. #include <linux/capability.h>
  20. #include <linux/bitops.h>
  21. #include <linux/wait.h>
  22. #include <linux/slab.h>
  23. #include <linux/nospec.h>
  24. #include <asm/byteorder.h>
  25. #include <asm/string.h>
  26. #include <asm/io.h>
  27. #include <linux/atomic.h>
  28. #include <asm/uaccess.h>
  29. #include "uPD98401.h"
  30. #include "uPD98402.h"
  31. #include "zeprom.h"
  32. #include "zatm.h"
  33. /*
  34. * TODO:
  35. *
  36. * Minor features
  37. * - support 64 kB SDUs (will have to use multibuffer batches then :-( )
  38. * - proper use of CDV, credit = max(1,CDVT*PCR)
  39. * - AAL0
  40. * - better receive timestamps
  41. * - OAM
  42. */
  43. #define ZATM_COPPER 1
  44. #if 0
  45. #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
  46. #else
  47. #define DPRINTK(format,args...)
  48. #endif
  49. #ifndef CONFIG_ATM_ZATM_DEBUG
  50. #define NULLCHECK(x)
  51. #define EVENT(s,a,b)
  52. static void event_dump(void)
  53. {
  54. }
  55. #else
  56. /*
  57. * NULL pointer checking
  58. */
  59. #define NULLCHECK(x) \
  60. if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x))
  61. /*
  62. * Very extensive activity logging. Greatly improves bug detection speed but
  63. * costs a few Mbps if enabled.
  64. */
  65. #define EV 64
  66. static const char *ev[EV];
  67. static unsigned long ev_a[EV],ev_b[EV];
  68. static int ec = 0;
  69. static void EVENT(const char *s,unsigned long a,unsigned long b)
  70. {
  71. ev[ec] = s;
  72. ev_a[ec] = a;
  73. ev_b[ec] = b;
  74. ec = (ec+1) % EV;
  75. }
  76. static void event_dump(void)
  77. {
  78. int n,i;
  79. printk(KERN_NOTICE "----- event dump follows -----\n");
  80. for (n = 0; n < EV; n++) {
  81. i = (ec+n) % EV;
  82. printk(KERN_NOTICE);
  83. printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]);
  84. }
  85. printk(KERN_NOTICE "----- event dump ends here -----\n");
  86. }
  87. #endif /* CONFIG_ATM_ZATM_DEBUG */
  88. #define RING_BUSY 1 /* indication from do_tx that PDU has to be
  89. backlogged */
  90. static struct atm_dev *zatm_boards = NULL;
  91. static unsigned long dummy[2] = {0,0};
  92. #define zin_n(r) inl(zatm_dev->base+r*4)
  93. #define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
  94. #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
  95. #define zwait while (zin(CMR) & uPD98401_BUSY)
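/*
 * Register access convention: zin()/zout() perform 32-bit PIO reads/writes of
 * uPD98401 registers at base + reg*4, and zwait spins until the BUSY bit in
 * CMR clears, i.e. until the SAR has accepted the previous command.
 */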
  96. /* RX0, RX1, TX0, TX1 */
  97. static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
  98. static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
  99. #define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i])
  100. /*-------------------------------- utilities --------------------------------*/
  101. static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
  102. {
  103. zwait;
  104. zout(value,CER);
  105. zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
  106. (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
  107. }
  108. static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
  109. {
  110. zwait;
  111. zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
  112. (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
  113. zwait;
  114. return zin(CER);
  115. }
  116. /*------------------------------- free lists --------------------------------*/
  117. /*
  118. * Free buffer head structure:
  119. * [0] pointer to buffer (for SAR)
  120. * [1] buffer descr link pointer (for SAR)
  121. * [2] back pointer to skb (for poll_rx)
  122. * [3] data
  123. * ...
  124. */
  125. struct rx_buffer_head {
  126. u32 buffer; /* pointer to buffer (for SAR) */
  127. u32 link; /* buffer descriptor link pointer (for SAR) */
  128. struct sk_buff *skb; /* back pointer to skb (for poll_rx) */
  129. };
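/*
 * refill_pool() tops a free-buffer pool back up to its high-water mark once
 * it has dropped below the low-water mark: freshly allocated skbs are chained
 * through head->link, queued on the software pool list, and then handed to
 * the SAR in a single ADD_BAT command carrying the buffer count.
 */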
  130. static void refill_pool(struct atm_dev *dev,int pool)
  131. {
  132. struct zatm_dev *zatm_dev;
  133. struct sk_buff *skb;
  134. struct rx_buffer_head *first;
  135. unsigned long flags;
  136. int align,offset,free,count,size;
  137. EVENT("refill_pool\n",0,0);
  138. zatm_dev = ZATM_DEV(dev);
  139. size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
  140. pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
  141. if (size < PAGE_SIZE) {
  142. align = 32; /* for 32 byte alignment */
  143. offset = sizeof(struct rx_buffer_head);
  144. }
  145. else {
  146. align = 4096;
  147. offset = zatm_dev->pool_info[pool].offset+
  148. sizeof(struct rx_buffer_head);
  149. }
  150. size += align;
  151. spin_lock_irqsave(&zatm_dev->lock, flags);
  152. free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
  153. uPD98401_RXFP_REMAIN;
  154. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  155. if (free >= zatm_dev->pool_info[pool].low_water) return;
  156. EVENT("starting ... POOL: 0x%x, 0x%x\n",
  157. zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
  158. zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
  159. EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
  160. count = 0;
  161. first = NULL;
  162. while (free < zatm_dev->pool_info[pool].high_water) {
  163. struct rx_buffer_head *head;
  164. skb = alloc_skb(size,GFP_ATOMIC);
  165. if (!skb) {
  166. printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new "
  167. "skb (%d) with %d free\n",dev->number,size,free);
  168. break;
  169. }
  170. skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
  171. align+offset-1) & ~(unsigned long) (align-1))-offset)-
  172. skb->data);
  173. head = (struct rx_buffer_head *) skb->data;
  174. skb_reserve(skb,sizeof(struct rx_buffer_head));
  175. if (!first) first = head;
  176. count++;
  177. head->buffer = virt_to_bus(skb->data);
  178. head->link = 0;
  179. head->skb = skb;
  180. EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
  181. (unsigned long) head);
  182. spin_lock_irqsave(&zatm_dev->lock, flags);
  183. if (zatm_dev->last_free[pool])
  184. ((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
  185. data))[-1].link = virt_to_bus(head);
  186. zatm_dev->last_free[pool] = skb;
  187. skb_queue_tail(&zatm_dev->pool[pool],skb);
  188. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  189. free++;
  190. }
  191. if (first) {
  192. spin_lock_irqsave(&zatm_dev->lock, flags);
  193. zwait;
  194. zout(virt_to_bus(first),CER);
  195. zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
  196. CMR);
  197. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  198. EVENT ("POOL: 0x%x, 0x%x\n",
  199. zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
  200. zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
  201. EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
  202. }
  203. }
  204. static void drain_free(struct atm_dev *dev,int pool)
  205. {
  206. skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
  207. }
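/*
 * pool_index() maps a maximum PDU size to a buffer pool: pool
 * ZATM_AAL5_POOL_BASE+i holds buffers of 64 << i bytes, so the smallest pool
 * whose buffer size covers max_pdu is chosen (-1 for anything above 64 kB).
 */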
  208. static int pool_index(int max_pdu)
  209. {
  210. int i;
  211. if (max_pdu % ATM_CELL_PAYLOAD)
  212. printk(KERN_ERR DEV_LABEL ": driver error in pool_index: "
  213. "max_pdu is %d\n",max_pdu);
  214. if (max_pdu > 65536) return -1;
  215. for (i = 0; (64 << i) < max_pdu; i++);
  216. return i+ZATM_AAL5_POOL_BASE;
  217. }
  218. /* use_pool isn't reentrant */
  219. static void use_pool(struct atm_dev *dev,int pool)
  220. {
  221. struct zatm_dev *zatm_dev;
  222. unsigned long flags;
  223. int size;
  224. zatm_dev = ZATM_DEV(dev);
  225. if (!(zatm_dev->pool_info[pool].ref_count++)) {
  226. skb_queue_head_init(&zatm_dev->pool[pool]);
  227. size = pool-ZATM_AAL5_POOL_BASE;
  228. if (size < 0) size = 0; /* 64B... */
  229. else if (size > 10) size = 10; /* ... 64kB */
  230. spin_lock_irqsave(&zatm_dev->lock, flags);
  231. zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
  232. uPD98401_RXFP_ALERT_SHIFT) |
  233. (1 << uPD98401_RXFP_BTSZ_SHIFT) |
  234. (size << uPD98401_RXFP_BFSZ_SHIFT),
  235. zatm_dev->pool_base+pool*2);
  236. zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
  237. pool*2+1);
  238. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  239. zatm_dev->last_free[pool] = NULL;
  240. refill_pool(dev,pool);
  241. }
  242. DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
  243. }
  244. static void unuse_pool(struct atm_dev *dev,int pool)
  245. {
  246. if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
  247. drain_free(dev,pool);
  248. }
  249. /*----------------------------------- RX ------------------------------------*/
  250. #if 0
  251. static void exception(struct atm_vcc *vcc)
  252. {
  253. static int count = 0;
  254. struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev);
  255. struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc);
  256. unsigned long *qrp;
  257. int i;
  258. if (count++ > 2) return;
  259. for (i = 0; i < 8; i++)
  260. printk("TX%d: 0x%08lx\n",i,
  261. zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i));
  262. for (i = 0; i < 5; i++)
  263. printk("SH%d: 0x%08lx\n",i,
  264. zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i));
  265. qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
  266. uPD98401_TXVC_QRP);
  267. printk("qrp=0x%08lx\n",(unsigned long) qrp);
  268. for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]);
  269. }
  270. #endif
  271. static const char *err_txt[] = {
  272. "No error",
  273. "RX buf underflow",
  274. "RX FIFO overrun",
  275. "Maximum len violation",
  276. "CRC error",
  277. "User abort",
  278. "Length violation",
  279. "T1 error",
  280. "Deactivated",
  281. "???",
  282. "???",
  283. "???",
  284. "???",
  285. "???",
  286. "???",
  287. "???"
  288. };
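/*
 * poll_rx() walks the 16-byte entries of an RX mailbox from the host read
 * pointer (MTA) up to the adapter's write pointer (MWA).  Each entry carries
 * the cell count, the buffer address (and through it the skb), the error
 * status and the RX channel; good PDUs are pushed up to the VCC, bad or
 * oversized ones are dropped (counting rx_err when a VCC is known).
 */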
  289. static void poll_rx(struct atm_dev *dev,int mbx)
  290. {
  291. struct zatm_dev *zatm_dev;
  292. unsigned long pos;
  293. u32 x;
  294. int error;
  295. EVENT("poll_rx\n",0,0);
  296. zatm_dev = ZATM_DEV(dev);
  297. pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
  298. while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
  299. u32 *here;
  300. struct sk_buff *skb;
  301. struct atm_vcc *vcc;
  302. int cells,size,chan;
  303. EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
  304. here = (u32 *) pos;
  305. if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
  306. pos = zatm_dev->mbx_start[mbx];
  307. cells = here[0] & uPD98401_AAL5_SIZE;
  308. #if 0
  309. printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
  310. {
  311. unsigned long *x;
  312. printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
  313. zatm_dev->pool_base),
  314. zpeekl(zatm_dev,zatm_dev->pool_base+1));
  315. x = (unsigned long *) here[2];
  316. printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
  317. x[0],x[1],x[2],x[3]);
  318. }
  319. #endif
  320. error = 0;
  321. if (here[3] & uPD98401_AAL5_ERR) {
  322. error = (here[3] & uPD98401_AAL5_ES) >>
  323. uPD98401_AAL5_ES_SHIFT;
  324. if (error == uPD98401_AAL5_ES_DEACT ||
  325. error == uPD98401_AAL5_ES_FREE) continue;
  326. }
  327. EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
  328. uPD98401_AAL5_ES_SHIFT,error);
  329. skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
  330. __net_timestamp(skb);
  331. #if 0
  332. printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
  333. ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
  334. ((unsigned *) skb->data)[0]);
  335. #endif
  336. EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
  337. (unsigned long) here);
  338. #if 0
  339. printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
  340. #endif
  341. size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
  342. ATM_CELL_PAYLOAD/sizeof(u16)-3]);
  343. EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
  344. chan = (here[3] & uPD98401_AAL5_CHAN) >>
  345. uPD98401_AAL5_CHAN_SHIFT;
  346. if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
  347. int pos;
  348. vcc = zatm_dev->rx_map[chan];
  349. pos = ZATM_VCC(vcc)->pool;
  350. if (skb == zatm_dev->last_free[pos])
  351. zatm_dev->last_free[pos] = NULL;
  352. skb_unlink(skb, zatm_dev->pool + pos);
  353. }
  354. else {
  355. printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
  356. "for non-existing channel\n",dev->number);
  357. size = 0;
  358. vcc = NULL;
  359. event_dump();
  360. }
  361. if (error) {
  362. static unsigned long silence = 0;
  363. static int last_error = 0;
  364. if (error != last_error ||
  365. time_after(jiffies, silence) || silence == 0){
  366. printk(KERN_WARNING DEV_LABEL "(itf %d): "
  367. "chan %d error %s\n",dev->number,chan,
  368. err_txt[error]);
  369. last_error = error;
  370. silence = (jiffies+2*HZ)|1;
  371. }
  372. size = 0;
  373. }
  374. if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
  375. size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
  376. printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
  377. "cells\n",dev->number,size,cells);
  378. size = 0;
  379. event_dump();
  380. }
  381. if (size > ATM_MAX_AAL5_PDU) {
  382. printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
  383. "(%d)\n",dev->number,size);
  384. size = 0;
  385. event_dump();
  386. }
  387. if (!size) {
  388. dev_kfree_skb_irq(skb);
  389. if (vcc) atomic_inc(&vcc->stats->rx_err);
  390. continue;
  391. }
  392. if (!atm_charge(vcc,skb->truesize)) {
  393. dev_kfree_skb_irq(skb);
  394. continue;
  395. }
  396. skb->len = size;
  397. ATM_SKB(skb)->vcc = vcc;
  398. vcc->push(vcc,skb);
  399. atomic_inc(&vcc->stats->rx);
  400. }
  401. zout(pos & 0xffff,MTA(mbx));
  402. #if 0 /* probably a stupid idea */
  403. refill_pool(dev,zatm_vcc->pool);
  404. /* maybe this saves us a few interrupts */
  405. #endif
  406. }
  407. static int open_rx_first(struct atm_vcc *vcc)
  408. {
  409. struct zatm_dev *zatm_dev;
  410. struct zatm_vcc *zatm_vcc;
  411. unsigned long flags;
  412. unsigned short chan;
  413. int cells;
  414. DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053));
  415. zatm_dev = ZATM_DEV(vcc->dev);
  416. zatm_vcc = ZATM_VCC(vcc);
  417. zatm_vcc->rx_chan = 0;
  418. if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
  419. if (vcc->qos.aal == ATM_AAL5) {
  420. if (vcc->qos.rxtp.max_sdu > 65464)
  421. vcc->qos.rxtp.max_sdu = 65464;
  422. /* fix this - we may want to receive 64kB SDUs
  423. later */
  424. cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
  425. ATM_CELL_PAYLOAD);
  426. zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
  427. }
  428. else {
  429. cells = 1;
  430. zatm_vcc->pool = ZATM_AAL0_POOL;
  431. }
  432. if (zatm_vcc->pool < 0) return -EMSGSIZE;
  433. spin_lock_irqsave(&zatm_dev->lock, flags);
  434. zwait;
  435. zout(uPD98401_OPEN_CHAN,CMR);
  436. zwait;
  437. DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
  438. chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
  439. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  440. DPRINTK("chan is %d\n",chan);
  441. if (!chan) return -EAGAIN;
  442. use_pool(vcc->dev,zatm_vcc->pool);
  443. DPRINTK("pool %d\n",zatm_vcc->pool);
  444. /* set up VC descriptor */
  445. spin_lock_irqsave(&zatm_dev->lock, flags);
  446. zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
  447. chan*VC_SIZE/4);
  448. zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ?
  449. uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1);
  450. zpokel(zatm_dev,0,chan*VC_SIZE/4+2);
  451. zatm_vcc->rx_chan = chan;
  452. zatm_dev->rx_map[chan] = vcc;
  453. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  454. return 0;
  455. }
  456. static int open_rx_second(struct atm_vcc *vcc)
  457. {
  458. struct zatm_dev *zatm_dev;
  459. struct zatm_vcc *zatm_vcc;
  460. unsigned long flags;
  461. int pos,shift;
  462. DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053));
  463. zatm_dev = ZATM_DEV(vcc->dev);
  464. zatm_vcc = ZATM_VCC(vcc);
  465. if (!zatm_vcc->rx_chan) return 0;
  466. spin_lock_irqsave(&zatm_dev->lock, flags);
  467. /* should also handle VPI @@@ */
  468. pos = vcc->vci >> 1;
  469. shift = (1-(vcc->vci & 1)) << 4;
  470. zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) |
  471. ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos);
  472. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  473. return 0;
  474. }
  475. static void close_rx(struct atm_vcc *vcc)
  476. {
  477. struct zatm_dev *zatm_dev;
  478. struct zatm_vcc *zatm_vcc;
  479. unsigned long flags;
  480. int pos,shift;
  481. zatm_vcc = ZATM_VCC(vcc);
  482. zatm_dev = ZATM_DEV(vcc->dev);
  483. if (!zatm_vcc->rx_chan) return;
  484. DPRINTK("close_rx\n");
  485. /* disable receiver */
  486. if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) {
  487. spin_lock_irqsave(&zatm_dev->lock, flags);
  488. pos = vcc->vci >> 1;
  489. shift = (1-(vcc->vci & 1)) << 4;
  490. zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
  491. zwait;
  492. zout(uPD98401_NOP,CMR);
  493. zwait;
  494. zout(uPD98401_NOP,CMR);
  495. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  496. }
  497. spin_lock_irqsave(&zatm_dev->lock, flags);
  498. zwait;
  499. zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
  500. uPD98401_CHAN_ADDR_SHIFT),CMR);
  501. zwait;
  502. udelay(10); /* why oh why ... ? */
  503. zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
  504. uPD98401_CHAN_ADDR_SHIFT),CMR);
  505. zwait;
  506. if (!(zin(CMR) & uPD98401_CHAN_ADDR))
  507. printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
  508. "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
  509. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  510. zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL;
  511. zatm_vcc->rx_chan = 0;
  512. unuse_pool(vcc->dev,zatm_vcc->pool);
  513. }
  514. static int start_rx(struct atm_dev *dev)
  515. {
  516. struct zatm_dev *zatm_dev;
  517. int i;
  518. DPRINTK("start_rx\n");
  519. zatm_dev = ZATM_DEV(dev);
  520. zatm_dev->rx_map = kcalloc(zatm_dev->chans,
  521. sizeof(*zatm_dev->rx_map),
  522. GFP_KERNEL);
  523. if (!zatm_dev->rx_map) return -ENOMEM;
  524. /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
  525. zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
  526. /* prepare free buffer pools */
  527. for (i = 0; i <= ZATM_LAST_POOL; i++) {
  528. zatm_dev->pool_info[i].ref_count = 0;
  529. zatm_dev->pool_info[i].rqa_count = 0;
  530. zatm_dev->pool_info[i].rqu_count = 0;
  531. zatm_dev->pool_info[i].low_water = LOW_MARK;
  532. zatm_dev->pool_info[i].high_water = HIGH_MARK;
  533. zatm_dev->pool_info[i].offset = 0;
  534. zatm_dev->pool_info[i].next_off = 0;
  535. zatm_dev->pool_info[i].next_cnt = 0;
  536. zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES;
  537. }
  538. return 0;
  539. }
  540. /*----------------------------------- TX ------------------------------------*/
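/*
 * do_tx() fills a four-word transmit descriptor in the per-VCC ring, writing
 * the first (valid) word only after a memory barrier so the SAR never sees a
 * half-built descriptor, then kicks the channel with TX_READY.  When the ring
 * is full it returns RING_BUSY and the caller backlogs the skb instead.
 */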
  541. static int do_tx(struct sk_buff *skb)
  542. {
  543. struct atm_vcc *vcc;
  544. struct zatm_dev *zatm_dev;
  545. struct zatm_vcc *zatm_vcc;
  546. u32 *dsc;
  547. unsigned long flags;
  548. EVENT("do_tx\n",0,0);
  549. DPRINTK("sending skb %p\n",skb);
  550. vcc = ATM_SKB(skb)->vcc;
  551. zatm_dev = ZATM_DEV(vcc->dev);
  552. zatm_vcc = ZATM_VCC(vcc);
  553. EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
  554. spin_lock_irqsave(&zatm_dev->lock, flags);
  555. if (!skb_shinfo(skb)->nr_frags) {
  556. if (zatm_vcc->txing == RING_ENTRIES-1) {
  557. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  558. return RING_BUSY;
  559. }
  560. zatm_vcc->txing++;
  561. dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
  562. zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) &
  563. (RING_ENTRIES*RING_WORDS-1);
  564. dsc[1] = 0;
  565. dsc[2] = skb->len;
  566. dsc[3] = virt_to_bus(skb->data);
  567. mb();
  568. dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM
  569. | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
  570. (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
  571. uPD98401_CLPM_1 : uPD98401_CLPM_0));
  572. EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0);
  573. }
  574. else {
  575. printk("NONONONOO!!!!\n");
  576. dsc = NULL;
  577. #if 0
  578. u32 *put;
  579. int i;
  580. dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
  581. uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
  582. if (!dsc) {
  583. if (vcc->pop)
  584. vcc->pop(vcc, skb);
  585. else
  586. dev_kfree_skb_irq(skb);
  587. return -EAGAIN;
  588. }
  589. /* @@@ should check alignment */
  590. put = dsc+8;
  591. dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP |
  592. (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
  593. (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
  594. uPD98401_CLPM_1 : uPD98401_CLPM_0));
  595. dsc[1] = 0;
  596. dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
  597. dsc[3] = virt_to_bus(put);
  598. for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
  599. *put++ = ((struct iovec *) skb->data)[i].iov_len;
  600. *put++ = virt_to_bus(((struct iovec *)
  601. skb->data)[i].iov_base);
  602. }
  603. put[-2] |= uPD98401_TXBD_LAST;
  604. #endif
  605. }
  606. ZATM_PRV_DSC(skb) = dsc;
  607. skb_queue_tail(&zatm_vcc->tx_queue,skb);
  608. DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
  609. uPD98401_TXVC_QRP));
  610. zwait;
  611. zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
  612. uPD98401_CHAN_ADDR_SHIFT),CMR);
  613. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  614. EVENT("done\n",0,0);
  615. return 0;
  616. }
  617. static inline void dequeue_tx(struct atm_vcc *vcc)
  618. {
  619. struct zatm_vcc *zatm_vcc;
  620. struct sk_buff *skb;
  621. EVENT("dequeue_tx\n",0,0);
  622. zatm_vcc = ZATM_VCC(vcc);
  623. skb = skb_dequeue(&zatm_vcc->tx_queue);
  624. if (!skb) {
  625. printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not "
  626. "txing\n",vcc->dev->number);
  627. return;
  628. }
  629. #if 0 /* @@@ would fail on CLP */
  630. if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
  631. uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n",
  632. *ZATM_PRV_DSC(skb));
  633. #endif
  634. *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
  635. zatm_vcc->txing--;
  636. if (vcc->pop) vcc->pop(vcc,skb);
  637. else dev_kfree_skb_irq(skb);
  638. while ((skb = skb_dequeue(&zatm_vcc->backlog)))
  639. if (do_tx(skb) == RING_BUSY) {
  640. skb_queue_head(&zatm_vcc->backlog,skb);
  641. break;
  642. }
  643. atomic_inc(&vcc->stats->tx);
  644. wake_up(&zatm_vcc->tx_wait);
  645. }
  646. static void poll_tx(struct atm_dev *dev,int mbx)
  647. {
  648. struct zatm_dev *zatm_dev;
  649. unsigned long pos;
  650. u32 x;
  651. EVENT("poll_tx\n",0,0);
  652. zatm_dev = ZATM_DEV(dev);
  653. pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
  654. while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
  655. int chan;
  656. #if 1
  657. u32 data,*addr;
  658. EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
  659. addr = (u32 *) pos;
  660. data = *addr;
  661. chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT;
  662. EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr,
  663. data);
  664. EVENT("chan = %d\n",chan,0);
  665. #else
  666. NO !
  667. chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN)
  668. >> uPD98401_TXI_CONN_SHIFT;
  669. #endif
  670. if (chan < zatm_dev->chans && zatm_dev->tx_map[chan])
  671. dequeue_tx(zatm_dev->tx_map[chan]);
  672. else {
  673. printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication "
  674. "for non-existing channel %d\n",dev->number,chan);
  675. event_dump();
  676. }
  677. if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx])
  678. pos = zatm_dev->mbx_start[mbx];
  679. }
  680. zout(pos & 0xffff,MTA(mbx));
  681. }
  682. /*
  683. * BUG BUG BUG: Doesn't handle "new-style" rate specification yet.
  684. */
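/*
 * Shaper parameters: the (i,m) pair is chosen so that the advertised rate is
 * PCR = i * ATM_OC3_PCR / m, with i capped at 255 (higher rates scale m down
 * instead).  All unlimited-rate UBR connections share a single shaper set up
 * with i = m = 1.
 */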
  685. static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
  686. {
  687. struct zatm_dev *zatm_dev;
  688. unsigned long flags;
  689. unsigned long i,m,c;
  690. int shaper;
  691. DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max);
  692. zatm_dev = ZATM_DEV(dev);
  693. if (!zatm_dev->free_shapers) return -EAGAIN;
  694. for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++);
  695. zatm_dev->free_shapers &= ~1 << shaper;
  696. if (ubr) {
  697. c = 5;
  698. i = m = 1;
  699. zatm_dev->ubr_ref_cnt++;
  700. zatm_dev->ubr = shaper;
  701. *pcr = 0;
  702. }
  703. else {
  704. if (min) {
  705. if (min <= 255) {
  706. i = min;
  707. m = ATM_OC3_PCR;
  708. }
  709. else {
  710. i = 255;
  711. m = ATM_OC3_PCR*255/min;
  712. }
  713. }
  714. else {
  715. if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw;
  716. if (max <= 255) {
  717. i = max;
  718. m = ATM_OC3_PCR;
  719. }
  720. else {
  721. i = 255;
  722. m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
  723. }
  724. }
  725. if (i > m) {
  726. printk(KERN_CRIT DEV_LABEL "shaper algorithm botched "
  727. "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m);
  728. m = i;
  729. }
  730. *pcr = i*ATM_OC3_PCR/m;
  731. c = 20; /* @@@ should use max_cdv ! */
  732. if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL;
  733. if (zatm_dev->tx_bw < *pcr) return -EAGAIN;
  734. zatm_dev->tx_bw -= *pcr;
  735. }
  736. spin_lock_irqsave(&zatm_dev->lock, flags);
  737. DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr);
  738. zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper));
  739. zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper));
  740. zpokel(zatm_dev,0,uPD98401_X(shaper));
  741. zpokel(zatm_dev,0,uPD98401_Y(shaper));
  742. zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper));
  743. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  744. return shaper;
  745. }
  746. static void dealloc_shaper(struct atm_dev *dev,int shaper)
  747. {
  748. struct zatm_dev *zatm_dev;
  749. unsigned long flags;
  750. zatm_dev = ZATM_DEV(dev);
  751. if (shaper == zatm_dev->ubr) {
  752. if (--zatm_dev->ubr_ref_cnt) return;
  753. zatm_dev->ubr = -1;
  754. }
  755. spin_lock_irqsave(&zatm_dev->lock, flags);
  756. zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E,
  757. uPD98401_PS(shaper));
  758. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  759. zatm_dev->free_shapers |= 1 << shaper;
  760. }
  761. static void close_tx(struct atm_vcc *vcc)
  762. {
  763. struct zatm_dev *zatm_dev;
  764. struct zatm_vcc *zatm_vcc;
  765. unsigned long flags;
  766. int chan;
  767. zatm_vcc = ZATM_VCC(vcc);
  768. zatm_dev = ZATM_DEV(vcc->dev);
  769. chan = zatm_vcc->tx_chan;
  770. if (!chan) return;
  771. DPRINTK("close_tx\n");
  772. if (skb_peek(&zatm_vcc->backlog)) {
  773. printk("waiting for backlog to drain ...\n");
  774. event_dump();
  775. wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
  776. }
  777. if (skb_peek(&zatm_vcc->tx_queue)) {
  778. printk("waiting for TX queue to drain ...\n");
  779. event_dump();
  780. wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue));
  781. }
  782. spin_lock_irqsave(&zatm_dev->lock, flags);
  783. #if 0
  784. zwait;
  785. zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
  786. #endif
  787. zwait;
  788. zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
  789. zwait;
  790. if (!(zin(CMR) & uPD98401_CHAN_ADDR))
  791. printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
  792. "%d\n",vcc->dev->number,chan);
  793. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  794. zatm_vcc->tx_chan = 0;
  795. zatm_dev->tx_map[chan] = NULL;
  796. if (zatm_vcc->shaper != zatm_dev->ubr) {
  797. zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
  798. dealloc_shaper(vcc->dev,zatm_vcc->shaper);
  799. }
  800. kfree(zatm_vcc->ring);
  801. }
  802. static int open_tx_first(struct atm_vcc *vcc)
  803. {
  804. struct zatm_dev *zatm_dev;
  805. struct zatm_vcc *zatm_vcc;
  806. unsigned long flags;
  807. u32 *loop;
  808. unsigned short chan;
  809. int unlimited;
  810. DPRINTK("open_tx_first\n");
  811. zatm_dev = ZATM_DEV(vcc->dev);
  812. zatm_vcc = ZATM_VCC(vcc);
  813. zatm_vcc->tx_chan = 0;
  814. if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
  815. spin_lock_irqsave(&zatm_dev->lock, flags);
  816. zwait;
  817. zout(uPD98401_OPEN_CHAN,CMR);
  818. zwait;
  819. DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
  820. chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
  821. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  822. DPRINTK("chan is %d\n",chan);
  823. if (!chan) return -EAGAIN;
  824. unlimited = vcc->qos.txtp.traffic_class == ATM_UBR &&
  825. (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR ||
  826. vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
  827. if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
  828. else {
  829. int uninitialized_var(pcr);
  830. if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
  831. if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
  832. vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
  833. < 0) {
  834. close_tx(vcc);
  835. return zatm_vcc->shaper;
  836. }
  837. if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR;
  838. vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr;
  839. }
  840. zatm_vcc->tx_chan = chan;
  841. skb_queue_head_init(&zatm_vcc->tx_queue);
  842. init_waitqueue_head(&zatm_vcc->tx_wait);
  843. /* initialize ring */
  844. zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
  845. if (!zatm_vcc->ring) return -ENOMEM;
  846. loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
  847. loop[0] = uPD98401_TXPD_V;
  848. loop[1] = loop[2] = 0;
  849. loop[3] = virt_to_bus(zatm_vcc->ring);
  850. zatm_vcc->ring_curr = 0;
  851. zatm_vcc->txing = 0;
  852. skb_queue_head_init(&zatm_vcc->backlog);
  853. zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
  854. chan*VC_SIZE/4+uPD98401_TXVC_QRP);
  855. return 0;
  856. }
  857. static int open_tx_second(struct atm_vcc *vcc)
  858. {
  859. struct zatm_dev *zatm_dev;
  860. struct zatm_vcc *zatm_vcc;
  861. unsigned long flags;
  862. DPRINTK("open_tx_second\n");
  863. zatm_dev = ZATM_DEV(vcc->dev);
  864. zatm_vcc = ZATM_VCC(vcc);
  865. if (!zatm_vcc->tx_chan) return 0;
  866. /* set up VC descriptor */
  867. spin_lock_irqsave(&zatm_dev->lock, flags);
  868. zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4);
  869. zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper <<
  870. uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) |
  871. vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1);
  872. zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2);
  873. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  874. zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc;
  875. return 0;
  876. }
  877. static int start_tx(struct atm_dev *dev)
  878. {
  879. struct zatm_dev *zatm_dev;
  880. int i;
  881. DPRINTK("start_tx\n");
  882. zatm_dev = ZATM_DEV(dev);
  883. zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
  884. sizeof(*zatm_dev->tx_map),
  885. GFP_KERNEL);
  886. if (!zatm_dev->tx_map) return -ENOMEM;
  887. zatm_dev->tx_bw = ATM_OC3_PCR;
  888. zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
  889. zatm_dev->ubr = -1;
  890. zatm_dev->ubr_ref_cnt = 0;
  891. /* initialize shapers */
  892. for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i));
  893. return 0;
  894. }
  895. /*------------------------------- interrupts --------------------------------*/
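/*
 * Interrupt handler: loop on GSR until no cause bits remain.  PHY interrupts
 * are handed to the PHY driver, buffer pool alarms (RQA/RQU) trigger an
 * immediate refill, parity and bus errors are only logged, and the
 * mailbox-modified bits dispatch to poll_rx() for mailboxes 0/1 and
 * poll_tx() for mailboxes 2/3.
 */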
  896. static irqreturn_t zatm_int(int irq,void *dev_id)
  897. {
  898. struct atm_dev *dev;
  899. struct zatm_dev *zatm_dev;
  900. u32 reason;
  901. int handled = 0;
  902. dev = dev_id;
  903. zatm_dev = ZATM_DEV(dev);
  904. while ((reason = zin(GSR))) {
  905. handled = 1;
  906. EVENT("reason 0x%x\n",reason,0);
  907. if (reason & uPD98401_INT_PI) {
  908. EVENT("PHY int\n",0,0);
  909. dev->phy->interrupt(dev);
  910. }
  911. if (reason & uPD98401_INT_RQA) {
  912. unsigned long pools;
  913. int i;
  914. pools = zin(RQA);
  915. EVENT("RQA (0x%08x)\n",pools,0);
  916. for (i = 0; pools; i++) {
  917. if (pools & 1) {
  918. refill_pool(dev,i);
  919. zatm_dev->pool_info[i].rqa_count++;
  920. }
  921. pools >>= 1;
  922. }
  923. }
  924. if (reason & uPD98401_INT_RQU) {
  925. unsigned long pools;
  926. int i;
  927. pools = zin(RQU);
  928. printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n",
  929. dev->number,pools);
  930. event_dump();
  931. for (i = 0; pools; i++) {
  932. if (pools & 1) {
  933. refill_pool(dev,i);
  934. zatm_dev->pool_info[i].rqu_count++;
  935. }
  936. pools >>= 1;
  937. }
  938. }
  939. /* don't handle RD */
  940. if (reason & uPD98401_INT_SPE)
  941. printk(KERN_ALERT DEV_LABEL "(itf %d): system parity "
  942. "error at 0x%08x\n",dev->number,zin(ADDR));
  943. if (reason & uPD98401_INT_CPE)
  944. printk(KERN_ALERT DEV_LABEL "(itf %d): control memory "
  945. "parity error at 0x%08x\n",dev->number,zin(ADDR));
  946. if (reason & uPD98401_INT_SBE) {
  947. printk(KERN_ALERT DEV_LABEL "(itf %d): system bus "
  948. "error at 0x%08x\n",dev->number,zin(ADDR));
  949. event_dump();
  950. }
  951. /* don't handle IND */
  952. if (reason & uPD98401_INT_MF) {
  953. printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full "
  954. "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF)
  955. >> uPD98401_INT_MF_SHIFT);
  956. event_dump();
  957. /* @@@ should try to recover */
  958. }
  959. if (reason & uPD98401_INT_MM) {
  960. if (reason & 1) poll_rx(dev,0);
  961. if (reason & 2) poll_rx(dev,1);
  962. if (reason & 4) poll_tx(dev,2);
  963. if (reason & 8) poll_tx(dev,3);
  964. }
  965. /* @@@ handle RCRn */
  966. }
  967. return IRQ_RETVAL(handled);
  968. }
  969. /*----------------------------- (E)EPROM access -----------------------------*/
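/*
 * The ESI is read from a serial EEPROM that is bit-banged through a PCI
 * configuration register (CS/SK/DI/DO bits); eprom_get_esi() tries the
 * ZEPROM_V1 register/offset layout first and falls back to the V2 one.
 */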
  970. static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
  971. unsigned short cmd)
  972. {
  973. int error;
  974. if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value)))
  975. printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n",
  976. error);
  977. }
  978. static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
  979. {
  980. unsigned int value;
  981. int error;
  982. if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value)))
  983. printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n",
  984. error);
  985. return value;
  986. }
  987. static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
  988. int bits, unsigned short cmd)
  989. {
  990. unsigned long value;
  991. int i;
  992. for (i = bits-1; i >= 0; i--) {
  993. value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0);
  994. eprom_set(zatm_dev,value,cmd);
  995. eprom_set(zatm_dev,value | ZEPROM_SK,cmd);
  996. eprom_set(zatm_dev,value,cmd);
  997. }
  998. }
  999. static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
  1000. unsigned short cmd)
  1001. {
  1002. int i;
  1003. *byte = 0;
  1004. for (i = 8; i; i--) {
  1005. eprom_set(zatm_dev,ZEPROM_CS,cmd);
  1006. eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd);
  1007. *byte <<= 1;
  1008. if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1;
  1009. eprom_set(zatm_dev,ZEPROM_CS,cmd);
  1010. }
  1011. }
  1012. static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
  1013. int swap)
  1014. {
  1015. unsigned char buf[ZEPROM_SIZE];
  1016. struct zatm_dev *zatm_dev;
  1017. int i;
  1018. zatm_dev = ZATM_DEV(dev);
  1019. for (i = 0; i < ZEPROM_SIZE; i += 2) {
  1020. eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */
  1021. eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd);
  1022. eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd);
  1023. eprom_get_byte(zatm_dev,buf+i+swap,cmd);
  1024. eprom_get_byte(zatm_dev,buf+i+1-swap,cmd);
  1025. eprom_set(zatm_dev,0,cmd); /* deselect EPROM */
  1026. }
  1027. memcpy(dev->esi,buf+offset,ESI_LEN);
  1028. return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */
  1029. }
  1030. static void eprom_get_esi(struct atm_dev *dev)
  1031. {
  1032. if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
  1033. (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
  1034. }
  1035. /*--------------------------------- entries ---------------------------------*/
  1036. static int zatm_init(struct atm_dev *dev)
  1037. {
  1038. struct zatm_dev *zatm_dev;
  1039. struct pci_dev *pci_dev;
  1040. unsigned short command;
  1041. int error,i,last;
  1042. unsigned long t0,t1,t2;
  1043. DPRINTK(">zatm_init\n");
  1044. zatm_dev = ZATM_DEV(dev);
  1045. spin_lock_init(&zatm_dev->lock);
  1046. pci_dev = zatm_dev->pci_dev;
  1047. zatm_dev->base = pci_resource_start(pci_dev, 0);
  1048. zatm_dev->irq = pci_dev->irq;
  1049. if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
  1050. printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
  1051. dev->number,error);
  1052. return -EINVAL;
  1053. }
  1054. if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
  1055. command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
  1056. printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
  1057. "\n",dev->number,error);
  1058. return -EIO;
  1059. }
  1060. eprom_get_esi(dev);
  1061. printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
  1062. dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
  1063. /* reset uPD98401 */
  1064. zout(0,SWR);
  1065. while (!(zin(GSR) & uPD98401_INT_IND));
  1066. zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
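/*
 * Probe the control RAM size: walk down from MAX_CRAM_SIZE writing
 * 0x55555555/0xAAAAAAAA test patterns and remember the lowest failing
 * address, then store each address in itself and re-read upwards to find the
 * first mismatch; everything below that is usable memory and gets cleared.
 */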
  1067. last = MAX_CRAM_SIZE;
  1068. for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
  1069. zpokel(zatm_dev,0x55555555,i);
  1070. if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
  1071. else {
  1072. zpokel(zatm_dev,0xAAAAAAAA,i);
  1073. if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
  1074. else zpokel(zatm_dev,i,i);
  1075. }
  1076. }
  1077. for (i = 0; i < last; i += RAM_INCREMENT)
  1078. if (zpeekl(zatm_dev,i) != i) break;
  1079. zatm_dev->mem = i << 2;
  1080. while (i) zpokel(zatm_dev,0,--i);
  1081. /* reset again to rebuild memory pointers */
  1082. zout(0,SWR);
  1083. while (!(zin(GSR) & uPD98401_INT_IND));
  1084. zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
  1085. uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
  1086. /* TODO: should shrink allocation now */
  1087. printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
  1088. "MMF");
  1089. for (i = 0; i < ESI_LEN; i++)
  1090. printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
  1091. do {
  1092. unsigned long flags;
  1093. spin_lock_irqsave(&zatm_dev->lock, flags);
  1094. t0 = zpeekl(zatm_dev,uPD98401_TSR);
  1095. udelay(10);
  1096. t1 = zpeekl(zatm_dev,uPD98401_TSR);
  1097. udelay(1010);
  1098. t2 = zpeekl(zatm_dev,uPD98401_TSR);
  1099. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  1100. }
  1101. while (t0 > t1 || t1 > t2); /* loop if wrapping ... */
  1102. zatm_dev->khz = t2-2*t1+t0;
  1103. printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
  1104. "MHz\n",dev->number,
  1105. (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
  1106. zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
  1107. return uPD98402_init(dev);
  1108. }
  1109. static int zatm_start(struct atm_dev *dev)
  1110. {
  1111. struct zatm_dev *zatm_dev = ZATM_DEV(dev);
  1112. struct pci_dev *pdev = zatm_dev->pci_dev;
  1113. unsigned long curr;
  1114. int pools,vccs,rx;
  1115. int error, i, ld;
  1116. DPRINTK("zatm_start\n");
  1117. zatm_dev->rx_map = zatm_dev->tx_map = NULL;
  1118. for (i = 0; i < NR_MBX; i++)
  1119. zatm_dev->mbx_start[i] = 0;
  1120. error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev);
  1121. if (error < 0) {
  1122. printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
  1123. dev->number,zatm_dev->irq);
  1124. goto done;
  1125. }
  1126. /* define memory regions */
  1127. pools = NR_POOLS;
  1128. if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
  1129. pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
  1130. vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
  1131. (2*VC_SIZE+RX_SIZE);
  1132. ld = -1;
  1133. for (rx = 1; rx < vccs; rx <<= 1) ld++;
  1134. dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
  1135. dev->ci_range.vci_bits = ld;
  1136. dev->link_rate = ATM_OC3_PCR;
  1137. zatm_dev->chans = vccs; /* ??? */
  1138. curr = rx*RX_SIZE/4;
  1139. DPRINTK("RX pool 0x%08lx\n",curr);
  1140. zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
  1141. zatm_dev->pool_base = curr;
  1142. curr += pools*POOL_SIZE/4;
  1143. DPRINTK("Shapers 0x%08lx\n",curr);
  1144. zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
  1145. curr += NR_SHAPERS*SHAPER_SIZE/4;
  1146. DPRINTK("Free 0x%08lx\n",curr);
  1147. zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
  1148. printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
  1149. "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
  1150. (zatm_dev->mem-curr*4)/VC_SIZE);
  1151. /* create mailboxes */
  1152. for (i = 0; i < NR_MBX; i++) {
  1153. void *mbx;
  1154. dma_addr_t mbx_dma;
  1155. if (!mbx_entries[i])
  1156. continue;
  1157. mbx = dma_alloc_coherent(&pdev->dev,
  1158. 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
  1159. if (!mbx) {
  1160. error = -ENOMEM;
  1161. goto out;
  1162. }
  1163. /*
  1164. * Alignment provided by dma_alloc_coherent() isn't enough
  1165. * for this device.
  1166. */
  1167. if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
  1168. printk(KERN_ERR DEV_LABEL "(itf %d): system "
  1169. "bus incompatible with driver\n", dev->number);
  1170. dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
  1171. error = -ENODEV;
  1172. goto out;
  1173. }
  1174. DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
  1175. zatm_dev->mbx_start[i] = (unsigned long)mbx;
  1176. zatm_dev->mbx_dma[i] = mbx_dma;
  1177. zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
  1178. 0xffff;
  1179. zout(mbx_dma >> 16, MSH(i));
  1180. zout(mbx_dma, MSL(i));
  1181. zout(zatm_dev->mbx_end[i], MBA(i));
  1182. zout((unsigned long)mbx & 0xffff, MTA(i));
  1183. zout((unsigned long)mbx & 0xffff, MWA(i));
  1184. }
  1185. error = start_tx(dev);
  1186. if (error)
  1187. goto out;
  1188. error = start_rx(dev);
  1189. if (error)
  1190. goto out_tx;
  1191. error = dev->phy->start(dev);
  1192. if (error)
  1193. goto out_rx;
  1194. zout(0xffffffff,IMR); /* enable interrupts */
  1195. /* enable TX & RX */
  1196. zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
  1197. done:
  1198. return error;
  1199. out_rx:
  1200. kfree(zatm_dev->rx_map);
  1201. out_tx:
  1202. kfree(zatm_dev->tx_map);
  1203. out:
  1204. while (i-- > 0) {
  1205. dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
  1206. (void *)zatm_dev->mbx_start[i],
  1207. zatm_dev->mbx_dma[i]);
  1208. }
  1209. free_irq(zatm_dev->irq, dev);
  1210. goto done;
  1211. }
  1212. static void zatm_close(struct atm_vcc *vcc)
  1213. {
  1214. DPRINTK(">zatm_close\n");
  1215. if (!ZATM_VCC(vcc)) return;
  1216. clear_bit(ATM_VF_READY,&vcc->flags);
  1217. close_rx(vcc);
  1218. EVENT("close_tx\n",0,0);
  1219. close_tx(vcc);
  1220. DPRINTK("zatm_close: done waiting\n");
  1221. /* deallocate memory */
  1222. kfree(ZATM_VCC(vcc));
  1223. vcc->dev_data = NULL;
  1224. clear_bit(ATM_VF_ADDR,&vcc->flags);
  1225. }
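/*
 * VCC setup is split in two stages: open_rx_first()/open_tx_first() allocate
 * the channel, buffer pool and shaper as soon as the VCC is created, while
 * open_rx_second()/open_tx_second() bind the actual VPI/VCI once it is known.
 * zatm_close() copes with being called from any intermediate stage.
 */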
  1226. static int zatm_open(struct atm_vcc *vcc)
  1227. {
  1228. struct zatm_dev *zatm_dev;
  1229. struct zatm_vcc *zatm_vcc;
  1230. short vpi = vcc->vpi;
  1231. int vci = vcc->vci;
  1232. int error;
  1233. DPRINTK(">zatm_open\n");
  1234. zatm_dev = ZATM_DEV(vcc->dev);
  1235. if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
  1236. vcc->dev_data = NULL;
  1237. if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
  1238. set_bit(ATM_VF_ADDR,&vcc->flags);
  1239. if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
  1240. DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
  1241. vcc->vci);
  1242. if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
  1243. zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
  1244. if (!zatm_vcc) {
  1245. clear_bit(ATM_VF_ADDR,&vcc->flags);
  1246. return -ENOMEM;
  1247. }
  1248. vcc->dev_data = zatm_vcc;
  1249. ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
  1250. if ((error = open_rx_first(vcc))) {
  1251. zatm_close(vcc);
  1252. return error;
  1253. }
  1254. if ((error = open_tx_first(vcc))) {
  1255. zatm_close(vcc);
  1256. return error;
  1257. }
  1258. }
  1259. if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0;
  1260. if ((error = open_rx_second(vcc))) {
  1261. zatm_close(vcc);
  1262. return error;
  1263. }
  1264. if ((error = open_tx_second(vcc))) {
  1265. zatm_close(vcc);
  1266. return error;
  1267. }
  1268. set_bit(ATM_VF_READY,&vcc->flags);
  1269. return 0;
  1270. }
  1271. static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
  1272. {
  1273. printk("Not yet implemented\n");
  1274. return -ENOSYS;
  1275. /* @@@ */
  1276. }
  1277. static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
  1278. {
  1279. struct zatm_dev *zatm_dev;
  1280. unsigned long flags;
  1281. zatm_dev = ZATM_DEV(dev);
  1282. switch (cmd) {
  1283. case ZATM_GETPOOLZ:
  1284. if (!capable(CAP_NET_ADMIN)) return -EPERM;
  1285. /* fall through */
  1286. case ZATM_GETPOOL:
  1287. {
  1288. struct zatm_pool_info info;
  1289. int pool;
  1290. if (get_user(pool,
  1291. &((struct zatm_pool_req __user *) arg)->pool_num))
  1292. return -EFAULT;
  1293. if (pool < 0 || pool > ZATM_LAST_POOL)
  1294. return -EINVAL;
  1295. pool = array_index_nospec(pool,
  1296. ZATM_LAST_POOL + 1);
  1297. spin_lock_irqsave(&zatm_dev->lock, flags);
  1298. info = zatm_dev->pool_info[pool];
  1299. if (cmd == ZATM_GETPOOLZ) {
  1300. zatm_dev->pool_info[pool].rqa_count = 0;
  1301. zatm_dev->pool_info[pool].rqu_count = 0;
  1302. }
  1303. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  1304. return copy_to_user(
  1305. &((struct zatm_pool_req __user *) arg)->info,
  1306. &info,sizeof(info)) ? -EFAULT : 0;
  1307. }
  1308. case ZATM_SETPOOL:
  1309. {
  1310. struct zatm_pool_info info;
  1311. int pool;
  1312. if (!capable(CAP_NET_ADMIN)) return -EPERM;
  1313. if (get_user(pool,
  1314. &((struct zatm_pool_req __user *) arg)->pool_num))
  1315. return -EFAULT;
  1316. if (pool < 0 || pool > ZATM_LAST_POOL)
  1317. return -EINVAL;
  1318. pool = array_index_nospec(pool,
  1319. ZATM_LAST_POOL + 1);
  1320. if (copy_from_user(&info,
  1321. &((struct zatm_pool_req __user *) arg)->info,
  1322. sizeof(info))) return -EFAULT;
  1323. if (!info.low_water)
  1324. info.low_water = zatm_dev->
  1325. pool_info[pool].low_water;
  1326. if (!info.high_water)
  1327. info.high_water = zatm_dev->
  1328. pool_info[pool].high_water;
  1329. if (!info.next_thres)
  1330. info.next_thres = zatm_dev->
  1331. pool_info[pool].next_thres;
  1332. if (info.low_water >= info.high_water ||
  1333. info.low_water < 0)
  1334. return -EINVAL;
  1335. spin_lock_irqsave(&zatm_dev->lock, flags);
  1336. zatm_dev->pool_info[pool].low_water =
  1337. info.low_water;
  1338. zatm_dev->pool_info[pool].high_water =
  1339. info.high_water;
  1340. zatm_dev->pool_info[pool].next_thres =
  1341. info.next_thres;
  1342. spin_unlock_irqrestore(&zatm_dev->lock, flags);
  1343. return 0;
  1344. }
  1345. default:
  1346. if (!dev->phy->ioctl) return -ENOIOCTLCMD;
  1347. return dev->phy->ioctl(dev,cmd,arg);
  1348. }
  1349. }
  1350. static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
  1351. void __user *optval,int optlen)
  1352. {
  1353. return -EINVAL;
  1354. }
  1355. static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
  1356. void __user *optval,unsigned int optlen)
  1357. {
  1358. return -EINVAL;
  1359. }
  1360. static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
  1361. {
  1362. int error;
  1363. EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
  1364. if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
  1365. if (vcc->pop) vcc->pop(vcc,skb);
  1366. else dev_kfree_skb(skb);
  1367. return -EINVAL;
  1368. }
  1369. if (!skb) {
  1370. printk(KERN_CRIT "!skb in zatm_send ?\n");
  1371. if (vcc->pop) vcc->pop(vcc,skb);
  1372. return -EINVAL;
  1373. }
  1374. ATM_SKB(skb)->vcc = vcc;
  1375. error = do_tx(skb);
  1376. if (error != RING_BUSY) return error;
  1377. skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
  1378. return 0;
  1379. }
  1380. static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
  1381. unsigned long addr)
  1382. {
  1383. struct zatm_dev *zatm_dev;
  1384. zatm_dev = ZATM_DEV(dev);
  1385. zwait;
  1386. zout(value,CER);
  1387. zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
  1388. (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
  1389. }
  1390. static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
  1391. {
  1392. struct zatm_dev *zatm_dev;
  1393. zatm_dev = ZATM_DEV(dev);
  1394. zwait;
  1395. zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
  1396. (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
  1397. zwait;
  1398. return zin(CER) & 0xff;
  1399. }
  1400. static const struct atmdev_ops ops = {
  1401. .open = zatm_open,
  1402. .close = zatm_close,
  1403. .ioctl = zatm_ioctl,
  1404. .getsockopt = zatm_getsockopt,
  1405. .setsockopt = zatm_setsockopt,
  1406. .send = zatm_send,
  1407. .phy_put = zatm_phy_put,
  1408. .phy_get = zatm_phy_get,
  1409. .change_qos = zatm_change_qos,
  1410. };
  1411. static int zatm_init_one(struct pci_dev *pci_dev,
  1412. const struct pci_device_id *ent)
  1413. {
  1414. struct atm_dev *dev;
  1415. struct zatm_dev *zatm_dev;
  1416. int ret = -ENOMEM;
  1417. zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
  1418. if (!zatm_dev) {
  1419. printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
  1420. goto out;
  1421. }
  1422. dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
  1423. if (!dev)
  1424. goto out_free;
  1425. ret = pci_enable_device(pci_dev);
  1426. if (ret < 0)
  1427. goto out_deregister;
  1428. ret = pci_request_regions(pci_dev, DEV_LABEL);
  1429. if (ret < 0)
  1430. goto out_disable;
  1431. ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
  1432. if (ret < 0)
  1433. goto out_disable;
  1434. zatm_dev->pci_dev = pci_dev;
  1435. dev->dev_data = zatm_dev;
  1436. zatm_dev->copper = (int)ent->driver_data;
  1437. if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
  1438. goto out_release;
  1439. pci_set_drvdata(pci_dev, dev);
  1440. zatm_dev->more = zatm_boards;
  1441. zatm_boards = dev;
  1442. ret = 0;
  1443. out:
  1444. return ret;
  1445. out_release:
  1446. pci_release_regions(pci_dev);
  1447. out_disable:
  1448. pci_disable_device(pci_dev);
  1449. out_deregister:
  1450. atm_dev_deregister(dev);
  1451. out_free:
  1452. kfree(zatm_dev);
  1453. goto out;
  1454. }
  1455. MODULE_LICENSE("GPL");
  1456. static struct pci_device_id zatm_pci_tbl[] = {
  1457. { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
  1458. { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
  1459. { 0, }
  1460. };
  1461. MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);
  1462. static struct pci_driver zatm_driver = {
  1463. .name = DEV_LABEL,
  1464. .id_table = zatm_pci_tbl,
  1465. .probe = zatm_init_one,
  1466. };
  1467. static int __init zatm_init_module(void)
  1468. {
  1469. return pci_register_driver(&zatm_driver);
  1470. }
  1471. module_init(zatm_init_module);
  1472. /* module_exit not defined so not unloadable */