/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/atm_zatm.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "uPD98401.h"
#include "uPD98402.h"
#include "zeprom.h"
#include "zatm.h"

/*
 * TODO:
 *
 * Minor features
 *  - support 64 kB SDUs (will have to use multibuffer batches then :-( )
 *  - proper use of CDV, credit = max(1,CDVT*PCR)
 *  - AAL0
 *  - better receive timestamps
 *  - OAM
 */

#define ZATM_COPPER	1

#if 0
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#ifndef CONFIG_ATM_ZATM_DEBUG

#define NULLCHECK(x)

#define EVENT(s,a,b)

static void event_dump(void)
{
}

#else

/*
 * NULL pointer checking
 */

#define NULLCHECK(x) \
	if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x))

/*
 * Very extensive activity logging. Greatly improves bug detection speed but
 * costs a few Mbps if enabled.
 */

#define EV 64

static const char *ev[EV];
static unsigned long ev_a[EV],ev_b[EV];
static int ec = 0;

static void EVENT(const char *s,unsigned long a,unsigned long b)
{
	ev[ec] = s;
	ev_a[ec] = a;
	ev_b[ec] = b;
	ec = (ec+1) % EV;
}

static void event_dump(void)
{
	int n,i;

	printk(KERN_NOTICE "----- event dump follows -----\n");
	for (n = 0; n < EV; n++) {
		i = (ec+n) % EV;
		printk(KERN_NOTICE);
		printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]);
	}
	printk(KERN_NOTICE "----- event dump ends here -----\n");
}

#endif /* CONFIG_ATM_ZATM_DEBUG */

#define RING_BUSY	1	/* indication from do_tx that PDU has to be
				   backlogged */

static struct atm_dev *zatm_boards = NULL;
static unsigned long dummy[2] = {0,0};

#define zin_n(r) inl(zatm_dev->base+r*4)
#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
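
/*
 * Register access helpers: zin()/zout() read and write a uPD98401 SAR
 * register by its symbolic name (the uPD98401_ prefix is pasted on), and
 * zwait() spins until the command register is no longer busy, i.e. until
 * the previous indirect-access or channel command has completed.
 */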

/* RX0, RX1, TX0, TX1 */
static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */

#define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i])
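
/*
 * With the tables above, each RX mailbox is 1024 * 16 = 16 kB and each TX
 * mailbox 1024 * 4 = 4 kB; zatm_start() allocates 2 * MBX_SIZE(i) per
 * mailbox.
 */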

/*-------------------------------- utilities --------------------------------*/

static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
{
	zwait();
	zout(value,CER);
	zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
	    (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
}

static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
{
	zwait();
	zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
	    (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
	zwait();
	return zin(CER);
}

/*------------------------------- free lists --------------------------------*/

/*
 * Free buffer head structure:
 *   [0] pointer to buffer (for SAR)
 *   [1] buffer descr link pointer (for SAR)
 *   [2] back pointer to skb (for poll_rx)
 *   [3] data
 *   ...
 */

struct rx_buffer_head {
	u32 buffer;		/* pointer to buffer (for SAR) */
	u32 link;		/* buffer descriptor link pointer (for SAR) */
	struct sk_buff *skb;	/* back pointer to skb (for poll_rx) */
};
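
/*
 * refill_pool() places this header at the start of each skb's data area and
 * then reserves past it, so the header always sits immediately before
 * skb->data: that is why refill_pool() can link a new buffer by writing
 * ((struct rx_buffer_head *) skb->data)[-1].link of the previous one, and
 * why poll_rx() can turn the SAR's buffer pointer back into the skb via the
 * skb field.
 */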

static void refill_pool(struct atm_dev *dev,int pool)
{
	struct zatm_dev *zatm_dev;
	struct sk_buff *skb;
	struct rx_buffer_head *first;
	unsigned long flags;
	int align,offset,free,count,size;

	EVENT("refill_pool\n",0,0);
	zatm_dev = ZATM_DEV(dev);
	size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
	    pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
	if (size < PAGE_SIZE) {
		align = 32; /* for 32 byte alignment */
		offset = sizeof(struct rx_buffer_head);
	}
	else {
		align = 4096;
		offset = zatm_dev->pool_info[pool].offset+
		    sizeof(struct rx_buffer_head);
	}
	size += align;
	spin_lock_irqsave(&zatm_dev->lock, flags);
	free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
	    uPD98401_RXFP_REMAIN;
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	if (free >= zatm_dev->pool_info[pool].low_water) return;
	EVENT("starting ... POOL: 0x%x, 0x%x\n",
	    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
	    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
	EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
	count = 0;
	first = NULL;
	while (free < zatm_dev->pool_info[pool].high_water) {
		struct rx_buffer_head *head;

		skb = alloc_skb(size,GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new "
			    "skb (%d) with %d free\n",dev->number,size,free);
			break;
		}
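		/*
		 * Position skb->data so that the buffer area that follows the
		 * rx_buffer_head ends up 'align'-aligned (32 bytes, or a page
		 * for the large pools): round skb->data+offset up to the next
		 * multiple of 'align', then step back by 'offset'.
		 */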
		skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
		    align+offset-1) & ~(unsigned long) (align-1))-offset)-
		    skb->data);
		head = (struct rx_buffer_head *) skb->data;
		skb_reserve(skb,sizeof(struct rx_buffer_head));
		if (!first) first = head;
		count++;
		head->buffer = virt_to_bus(skb->data);
		head->link = 0;
		head->skb = skb;
		EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
		    (unsigned long) head);
		spin_lock_irqsave(&zatm_dev->lock, flags);
		if (zatm_dev->last_free[pool])
			((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
			    data))[-1].link = virt_to_bus(head);
		zatm_dev->last_free[pool] = skb;
		skb_queue_tail(&zatm_dev->pool[pool],skb);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
		free++;
	}
	if (first) {
		spin_lock_irqsave(&zatm_dev->lock, flags);
		zwait();
		zout(virt_to_bus(first),CER);
		zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
		    CMR);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
		EVENT ("POOL: 0x%x, 0x%x\n",
		    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
		    zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
		EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
	}
}

static void drain_free(struct atm_dev *dev,int pool)
{
	skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
}
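
/*
 * Map a maximum PDU size to the smallest free-buffer pool whose buffers
 * (64 << n bytes) can hold it.  For example, a 1536-byte PDU (32 cells)
 * needs 64 << 5 = 2048-byte buffers, i.e. pool ZATM_AAL5_POOL_BASE+5.
 */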
static int pool_index(int max_pdu)
{
	int i;

	if (max_pdu % ATM_CELL_PAYLOAD)
		printk(KERN_ERR DEV_LABEL ": driver error in pool_index: "
		    "max_pdu is %d\n",max_pdu);
	if (max_pdu > 65536) return -1;
	for (i = 0; (64 << i) < max_pdu; i++);
	return i+ZATM_AAL5_POOL_BASE;
}

/* use_pool isn't reentrant */

static void use_pool(struct atm_dev *dev,int pool)
{
	struct zatm_dev *zatm_dev;
	unsigned long flags;
	int size;

	zatm_dev = ZATM_DEV(dev);
	if (!(zatm_dev->pool_info[pool].ref_count++)) {
		skb_queue_head_init(&zatm_dev->pool[pool]);
		size = pool-ZATM_AAL5_POOL_BASE;
		if (size < 0) size = 0; /* 64B... */
		else if (size > 10) size = 10; /* ... 64kB */
		spin_lock_irqsave(&zatm_dev->lock, flags);
		zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
		    uPD98401_RXFP_ALERT_SHIFT) |
		    (1 << uPD98401_RXFP_BTSZ_SHIFT) |
		    (size << uPD98401_RXFP_BFSZ_SHIFT),
		    zatm_dev->pool_base+pool*2);
		zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
		    pool*2+1);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
		zatm_dev->last_free[pool] = NULL;
		refill_pool(dev,pool);
	}
	DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
}

static void unuse_pool(struct atm_dev *dev,int pool)
{
	if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
		drain_free(dev,pool);
}

/*----------------------------------- RX ------------------------------------*/

#if 0
static void exception(struct atm_vcc *vcc)
{
	static int count = 0;
	struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev);
	struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc);
	unsigned long *qrp;
	int i;

	if (count++ > 2) return;
	for (i = 0; i < 8; i++)
		printk("TX%d: 0x%08lx\n",i,
		    zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i));
	for (i = 0; i < 5; i++)
		printk("SH%d: 0x%08lx\n",i,
		    zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i));
	qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
	    uPD98401_TXVC_QRP);
	printk("qrp=0x%08lx\n",(unsigned long) qrp);
	for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]);
}
#endif

static const char *err_txt[] = {
	"No error",
	"RX buf underflow",
	"RX FIFO overrun",
	"Maximum len violation",
	"CRC error",
	"User abort",
	"Length violation",
	"T1 error",
	"Deactivated",
	"???",
	"???",
	"???",
	"???",
	"???",
	"???",
	"???"
};
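
/*
 * poll_rx() walks an RX mailbox: each 16-byte indication is four u32s,
 * where word 0 carries the cell count, word 2 the bus address of the
 * receive buffer (whose rx_buffer_head points back at the skb) and word 3
 * the error status and channel number.
 */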
static void poll_rx(struct atm_dev *dev,int mbx)
{
	struct zatm_dev *zatm_dev;
	unsigned long pos;
	u32 x;
	int error;

	EVENT("poll_rx\n",0,0);
	zatm_dev = ZATM_DEV(dev);
	pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
	while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
		u32 *here;
		struct sk_buff *skb;
		struct atm_vcc *vcc;
		int cells,size,chan;

		EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
		here = (u32 *) pos;
		if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
			pos = zatm_dev->mbx_start[mbx];
		cells = here[0] & uPD98401_AAL5_SIZE;
#if 0
		printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
		{
			unsigned long *x;

			printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
			    zatm_dev->pool_base),
			    zpeekl(zatm_dev,zatm_dev->pool_base+1));
			x = (unsigned long *) here[2];
			printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
			    x[0],x[1],x[2],x[3]);
		}
#endif
		error = 0;
		if (here[3] & uPD98401_AAL5_ERR) {
			error = (here[3] & uPD98401_AAL5_ES) >>
			    uPD98401_AAL5_ES_SHIFT;
			if (error == uPD98401_AAL5_ES_DEACT ||
			    error == uPD98401_AAL5_ES_FREE) continue;
		}
		EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
		    uPD98401_AAL5_ES_SHIFT,error);
		skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
		__net_timestamp(skb);
#if 0
		printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
		    ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
		    ((unsigned *) skb->data)[0]);
#endif
		EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
		    (unsigned long) here);
#if 0
		printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
#endif
		size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
		    ATM_CELL_PAYLOAD/sizeof(u16)-3]);
		EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
		chan = (here[3] & uPD98401_AAL5_CHAN) >>
		    uPD98401_AAL5_CHAN_SHIFT;
		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
			int pos;

			vcc = zatm_dev->rx_map[chan];
			pos = ZATM_VCC(vcc)->pool;
			if (skb == zatm_dev->last_free[pos])
				zatm_dev->last_free[pos] = NULL;
			skb_unlink(skb, zatm_dev->pool + pos);
		}
		else {
			printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
			    "for non-existing channel\n",dev->number);
			size = 0;
			vcc = NULL;
			event_dump();
		}
		if (error) {
			static unsigned long silence = 0;
			static int last_error = 0;

			if (error != last_error ||
			    time_after(jiffies, silence) || silence == 0) {
				printk(KERN_WARNING DEV_LABEL "(itf %d): "
				    "chan %d error %s\n",dev->number,chan,
				    err_txt[error]);
				last_error = error;
				silence = (jiffies+2*HZ)|1;
			}
			size = 0;
		}
		if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
		    size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
			printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
			    "cells\n",dev->number,size,cells);
			size = 0;
			event_dump();
		}
		if (size > ATM_MAX_AAL5_PDU) {
			printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
			    "(%d)\n",dev->number,size);
			size = 0;
			event_dump();
		}
		if (!size) {
			dev_kfree_skb_irq(skb);
			if (vcc) atomic_inc(&vcc->stats->rx_err);
			continue;
		}
		if (!atm_charge(vcc,skb->truesize)) {
			dev_kfree_skb_irq(skb);
			continue;
		}
		skb->len = size;
		ATM_SKB(skb)->vcc = vcc;
		vcc->push(vcc,skb);
		atomic_inc(&vcc->stats->rx);
	}
	zout(pos & 0xffff,MTA(mbx));
#if 0 /* probably a stupid idea */
	refill_pool(dev,zatm_vcc->pool);
		/* maybe this saves us a few interrupts */
#endif
}

static int open_rx_first(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	unsigned long flags;
	unsigned short chan;
	int cells;

	DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053));
	zatm_dev = ZATM_DEV(vcc->dev);
	zatm_vcc = ZATM_VCC(vcc);
	zatm_vcc->rx_chan = 0;
	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	if (vcc->qos.aal == ATM_AAL5) {
		if (vcc->qos.rxtp.max_sdu > 65464)
			vcc->qos.rxtp.max_sdu = 65464;
			/* fix this - we may want to receive 64kB SDUs
			   later */
		cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
		    ATM_CELL_PAYLOAD);
		zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
	}
	else {
		cells = 1;
		zatm_vcc->pool = ZATM_AAL0_POOL;
	}
	if (zatm_vcc->pool < 0) return -EMSGSIZE;
	spin_lock_irqsave(&zatm_dev->lock, flags);
	zwait();
	zout(uPD98401_OPEN_CHAN,CMR);
	zwait();
	DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
	chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	DPRINTK("chan is %d\n",chan);
	if (!chan) return -EAGAIN;
	use_pool(vcc->dev,zatm_vcc->pool);
	DPRINTK("pool %d\n",zatm_vcc->pool);
	/* set up VC descriptor */
	spin_lock_irqsave(&zatm_dev->lock, flags);
	zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
	    chan*VC_SIZE/4);
	zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ?
	    uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1);
	zpokel(zatm_dev,0,chan*VC_SIZE/4+2);
	zatm_vcc->rx_chan = chan;
	zatm_dev->rx_map[chan] = vcc;
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	return 0;
}

static int open_rx_second(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	unsigned long flags;
	int pos,shift;

	DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053));
	zatm_dev = ZATM_DEV(vcc->dev);
	zatm_vcc = ZATM_VCC(vcc);
	if (!zatm_vcc->rx_chan) return 0;
	spin_lock_irqsave(&zatm_dev->lock, flags);
	/* should also handle VPI @@@ */
	pos = vcc->vci >> 1;
	shift = (1-(vcc->vci & 1)) << 4;
	zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) |
	    ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos);
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	return 0;
}

static void close_rx(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	unsigned long flags;
	int pos,shift;

	zatm_vcc = ZATM_VCC(vcc);
	zatm_dev = ZATM_DEV(vcc->dev);
	if (!zatm_vcc->rx_chan) return;
	DPRINTK("close_rx\n");
	/* disable receiver */
	if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) {
		spin_lock_irqsave(&zatm_dev->lock, flags);
		pos = vcc->vci >> 1;
		shift = (1-(vcc->vci & 1)) << 4;
		zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
		zwait();
		zout(uPD98401_NOP,CMR);
		zwait();
		zout(uPD98401_NOP,CMR);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
	}
	spin_lock_irqsave(&zatm_dev->lock, flags);
	zwait();
	zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
	    uPD98401_CHAN_ADDR_SHIFT),CMR);
	zwait();
	udelay(10); /* why oh why ... ? */
	zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
	    uPD98401_CHAN_ADDR_SHIFT),CMR);
	zwait();
	if (!(zin(CMR) & uPD98401_CHAN_ADDR))
		printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
		    "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL;
	zatm_vcc->rx_chan = 0;
	unuse_pool(vcc->dev,zatm_vcc->pool);
}

static int start_rx(struct atm_dev *dev)
{
	struct zatm_dev *zatm_dev;
	int i;

	DPRINTK("start_rx\n");
	zatm_dev = ZATM_DEV(dev);
	zatm_dev->rx_map = kcalloc(zatm_dev->chans,
				   sizeof(*zatm_dev->rx_map),
				   GFP_KERNEL);
	if (!zatm_dev->rx_map) return -ENOMEM;
	/* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
	zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
	/* prepare free buffer pools */
	for (i = 0; i <= ZATM_LAST_POOL; i++) {
		zatm_dev->pool_info[i].ref_count = 0;
		zatm_dev->pool_info[i].rqa_count = 0;
		zatm_dev->pool_info[i].rqu_count = 0;
		zatm_dev->pool_info[i].low_water = LOW_MARK;
		zatm_dev->pool_info[i].high_water = HIGH_MARK;
		zatm_dev->pool_info[i].offset = 0;
		zatm_dev->pool_info[i].next_off = 0;
		zatm_dev->pool_info[i].next_cnt = 0;
		zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES;
	}
	return 0;
}

/*----------------------------------- TX ------------------------------------*/
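
/*
 * Each TX channel uses a small ring of RING_ENTRIES descriptors of
 * RING_WORDS words each.  do_tx() clears word 1, puts the PDU length in
 * word 2 and the bus address of the data in word 3, then issues a memory
 * barrier before writing word 0 so the "valid" bit only becomes visible
 * once the rest of the descriptor is in place.
 */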
static int do_tx(struct sk_buff *skb)
{
	struct atm_vcc *vcc;
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	u32 *dsc;
	unsigned long flags;

	EVENT("do_tx\n",0,0);
	DPRINTK("sending skb %p\n",skb);
	vcc = ATM_SKB(skb)->vcc;
	zatm_dev = ZATM_DEV(vcc->dev);
	zatm_vcc = ZATM_VCC(vcc);
	EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
	spin_lock_irqsave(&zatm_dev->lock, flags);
	if (!skb_shinfo(skb)->nr_frags) {
		if (zatm_vcc->txing == RING_ENTRIES-1) {
			spin_unlock_irqrestore(&zatm_dev->lock, flags);
			return RING_BUSY;
		}
		zatm_vcc->txing++;
		dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
		zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) &
		    (RING_ENTRIES*RING_WORDS-1);
		dsc[1] = 0;
		dsc[2] = skb->len;
		dsc[3] = virt_to_bus(skb->data);
		mb();
		dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM
		    | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
		    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
		    uPD98401_CLPM_1 : uPD98401_CLPM_0));
		EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0);
	}
	else {
		printk("NONONONOO!!!!\n");
		dsc = NULL;
#if 0
		u32 *put;
		int i;

		dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
		    uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
		if (!dsc) {
			if (vcc->pop)
				vcc->pop(vcc, skb);
			else
				dev_kfree_skb_irq(skb);
			return -EAGAIN;
		}
		/* @@@ should check alignment */
		put = dsc+8;
		dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP |
		    (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
		    (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
		    uPD98401_CLPM_1 : uPD98401_CLPM_0));
		dsc[1] = 0;
		dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
		dsc[3] = virt_to_bus(put);
		for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
			*put++ = ((struct iovec *) skb->data)[i].iov_len;
			*put++ = virt_to_bus(((struct iovec *)
			    skb->data)[i].iov_base);
		}
		put[-2] |= uPD98401_TXBD_LAST;
#endif
	}
	ZATM_PRV_DSC(skb) = dsc;
	skb_queue_tail(&zatm_vcc->tx_queue,skb);
	DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
	    uPD98401_TXVC_QRP));
	zwait();
	zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
	    uPD98401_CHAN_ADDR_SHIFT),CMR);
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	EVENT("done\n",0,0);
	return 0;
}

static inline void dequeue_tx(struct atm_vcc *vcc)
{
	struct zatm_vcc *zatm_vcc;
	struct sk_buff *skb;

	EVENT("dequeue_tx\n",0,0);
	zatm_vcc = ZATM_VCC(vcc);
	skb = skb_dequeue(&zatm_vcc->tx_queue);
	if (!skb) {
		printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not "
		    "txing\n",vcc->dev->number);
		return;
	}
#if 0 /* @@@ would fail on CLP */
	if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
	    uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n",
	    *ZATM_PRV_DSC(skb));
#endif
	*ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
	zatm_vcc->txing--;
	if (vcc->pop) vcc->pop(vcc,skb);
	else dev_kfree_skb_irq(skb);
	while ((skb = skb_dequeue(&zatm_vcc->backlog)))
		if (do_tx(skb) == RING_BUSY) {
			skb_queue_head(&zatm_vcc->backlog,skb);
			break;
		}
	atomic_inc(&vcc->stats->tx);
	wake_up(&zatm_vcc->tx_wait);
}

static void poll_tx(struct atm_dev *dev,int mbx)
{
	struct zatm_dev *zatm_dev;
	unsigned long pos;
	u32 x;

	EVENT("poll_tx\n",0,0);
	zatm_dev = ZATM_DEV(dev);
	pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
	while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
		int chan;

#if 1
		u32 data,*addr;

		EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
		addr = (u32 *) pos;
		data = *addr;
		chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT;
		EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr,
		    data);
		EVENT("chan = %d\n",chan,0);
#else
NO !
		chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN)
		    >> uPD98401_TXI_CONN_SHIFT;
#endif
		if (chan < zatm_dev->chans && zatm_dev->tx_map[chan])
			dequeue_tx(zatm_dev->tx_map[chan]);
		else {
			printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication "
			    "for non-existing channel %d\n",dev->number,chan);
			event_dump();
		}
		if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx])
			pos = zatm_dev->mbx_start[mbx];
	}
	zout(pos & 0xffff,MTA(mbx));
}

/*
 * BUG BUG BUG: Doesn't handle "new-style" rate specification yet.
 */
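
/*
 * The shaper rate is programmed as the pair (i,m), i <= 255, giving a peak
 * cell rate of roughly i/m * ATM_OC3_PCR.  For example, a request for
 * max = 100000 cells/s yields i = 255 and
 * m = DIV_ROUND_UP(ATM_OC3_PCR*255,100000) = 901, i.e.
 * *pcr = 255*353207/901 = 99964 (assuming ATM_OC3_PCR = 353207).
 */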
static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
{
	struct zatm_dev *zatm_dev;
	unsigned long flags;
	unsigned long i,m,c;
	int shaper;

	DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max);
	zatm_dev = ZATM_DEV(dev);
	if (!zatm_dev->free_shapers) return -EAGAIN;
	for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++);
	zatm_dev->free_shapers &= ~1 << shaper;
	if (ubr) {
		c = 5;
		i = m = 1;
		zatm_dev->ubr_ref_cnt++;
		zatm_dev->ubr = shaper;
		*pcr = 0;
	}
	else {
		if (min) {
			if (min <= 255) {
				i = min;
				m = ATM_OC3_PCR;
			}
			else {
				i = 255;
				m = ATM_OC3_PCR*255/min;
			}
		}
		else {
			if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw;
			if (max <= 255) {
				i = max;
				m = ATM_OC3_PCR;
			}
			else {
				i = 255;
				m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
			}
		}
		if (i > m) {
			printk(KERN_CRIT DEV_LABEL "shaper algorithm botched "
			    "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m);
			m = i;
		}
		*pcr = i*ATM_OC3_PCR/m;
		c = 20; /* @@@ should use max_cdv ! */
		if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL;
		if (zatm_dev->tx_bw < *pcr) return -EAGAIN;
		zatm_dev->tx_bw -= *pcr;
	}
	spin_lock_irqsave(&zatm_dev->lock, flags);
	DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr);
	zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper));
	zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper));
	zpokel(zatm_dev,0,uPD98401_X(shaper));
	zpokel(zatm_dev,0,uPD98401_Y(shaper));
	zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper));
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	return shaper;
}

static void dealloc_shaper(struct atm_dev *dev,int shaper)
{
	struct zatm_dev *zatm_dev;
	unsigned long flags;

	zatm_dev = ZATM_DEV(dev);
	if (shaper == zatm_dev->ubr) {
		if (--zatm_dev->ubr_ref_cnt) return;
		zatm_dev->ubr = -1;
	}
	spin_lock_irqsave(&zatm_dev->lock, flags);
	zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E,
	    uPD98401_PS(shaper));
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	zatm_dev->free_shapers |= 1 << shaper;
}

static void close_tx(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	unsigned long flags;
	int chan;

	zatm_vcc = ZATM_VCC(vcc);
	zatm_dev = ZATM_DEV(vcc->dev);
	chan = zatm_vcc->tx_chan;
	if (!chan) return;
	DPRINTK("close_tx\n");
	if (skb_peek(&zatm_vcc->backlog)) {
		printk("waiting for backlog to drain ...\n");
		event_dump();
		wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
	}
	if (skb_peek(&zatm_vcc->tx_queue)) {
		printk("waiting for TX queue to drain ...\n");
		event_dump();
		wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue));
	}
	spin_lock_irqsave(&zatm_dev->lock, flags);
#if 0
	zwait();
	zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
#endif
	zwait();
	zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
	zwait();
	if (!(zin(CMR) & uPD98401_CHAN_ADDR))
		printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
		    "%d\n",vcc->dev->number,chan);
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	zatm_vcc->tx_chan = 0;
	zatm_dev->tx_map[chan] = NULL;
	if (zatm_vcc->shaper != zatm_dev->ubr) {
		zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
		dealloc_shaper(vcc->dev,zatm_vcc->shaper);
	}
	kfree(zatm_vcc->ring);
}

static int open_tx_first(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	unsigned long flags;
	u32 *loop;
	unsigned short chan;
	int unlimited;

	DPRINTK("open_tx_first\n");
	zatm_dev = ZATM_DEV(vcc->dev);
	zatm_vcc = ZATM_VCC(vcc);
	zatm_vcc->tx_chan = 0;
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	spin_lock_irqsave(&zatm_dev->lock, flags);
	zwait();
	zout(uPD98401_OPEN_CHAN,CMR);
	zwait();
	DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
	chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	DPRINTK("chan is %d\n",chan);
	if (!chan) return -EAGAIN;
	unlimited = vcc->qos.txtp.traffic_class == ATM_UBR &&
	    (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR ||
	    vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
	if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
	else {
		int uninitialized_var(pcr);

		if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
		if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
		    vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
		    < 0) {
			close_tx(vcc);
			return zatm_vcc->shaper;
		}
		if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR;
		vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr;
	}
	zatm_vcc->tx_chan = chan;
	skb_queue_head_init(&zatm_vcc->tx_queue);
	init_waitqueue_head(&zatm_vcc->tx_wait);
	/* initialize ring */
	zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
	if (!zatm_vcc->ring) return -ENOMEM;
	loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
	loop[0] = uPD98401_TXPD_V;
	loop[1] = loop[2] = 0;
	loop[3] = virt_to_bus(zatm_vcc->ring);
	zatm_vcc->ring_curr = 0;
	zatm_vcc->txing = 0;
	skb_queue_head_init(&zatm_vcc->backlog);
	zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
	    chan*VC_SIZE/4+uPD98401_TXVC_QRP);
	return 0;
}

static int open_tx_second(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	unsigned long flags;

	DPRINTK("open_tx_second\n");
	zatm_dev = ZATM_DEV(vcc->dev);
	zatm_vcc = ZATM_VCC(vcc);
	if (!zatm_vcc->tx_chan) return 0;
	/* set up VC descriptor */
	spin_lock_irqsave(&zatm_dev->lock, flags);
	zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4);
	zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper <<
	    uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) |
	    vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1);
	zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2);
	spin_unlock_irqrestore(&zatm_dev->lock, flags);
	zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc;
	return 0;
}

static int start_tx(struct atm_dev *dev)
{
	struct zatm_dev *zatm_dev;
	int i;

	DPRINTK("start_tx\n");
	zatm_dev = ZATM_DEV(dev);
	zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
					 sizeof(*zatm_dev->tx_map),
					 GFP_KERNEL);
	if (!zatm_dev->tx_map) return -ENOMEM;
	zatm_dev->tx_bw = ATM_OC3_PCR;
	zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
	zatm_dev->ubr = -1;
	zatm_dev->ubr_ref_cnt = 0;
	/* initialize shapers */
	for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i));
	return 0;
}

/*------------------------------- interrupts --------------------------------*/

static irqreturn_t zatm_int(int irq,void *dev_id)
{
	struct atm_dev *dev;
	struct zatm_dev *zatm_dev;
	u32 reason;
	int handled = 0;

	dev = dev_id;
	zatm_dev = ZATM_DEV(dev);
	while ((reason = zin(GSR))) {
		handled = 1;
		EVENT("reason 0x%x\n",reason,0);
		if (reason & uPD98401_INT_PI) {
			EVENT("PHY int\n",0,0);
			dev->phy->interrupt(dev);
		}
		if (reason & uPD98401_INT_RQA) {
			unsigned long pools;
			int i;

			pools = zin(RQA);
			EVENT("RQA (0x%08x)\n",pools,0);
			for (i = 0; pools; i++) {
				if (pools & 1) {
					refill_pool(dev,i);
					zatm_dev->pool_info[i].rqa_count++;
				}
				pools >>= 1;
			}
		}
		if (reason & uPD98401_INT_RQU) {
			unsigned long pools;
			int i;

			pools = zin(RQU);
			printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n",
			    dev->number,pools);
			event_dump();
			for (i = 0; pools; i++) {
				if (pools & 1) {
					refill_pool(dev,i);
					zatm_dev->pool_info[i].rqu_count++;
				}
				pools >>= 1;
			}
		}
		/* don't handle RD */
		if (reason & uPD98401_INT_SPE)
			printk(KERN_ALERT DEV_LABEL "(itf %d): system parity "
			    "error at 0x%08x\n",dev->number,zin(ADDR));
		if (reason & uPD98401_INT_CPE)
			printk(KERN_ALERT DEV_LABEL "(itf %d): control memory "
			    "parity error at 0x%08x\n",dev->number,zin(ADDR));
		if (reason & uPD98401_INT_SBE) {
			printk(KERN_ALERT DEV_LABEL "(itf %d): system bus "
			    "error at 0x%08x\n",dev->number,zin(ADDR));
			event_dump();
		}
		/* don't handle IND */
		if (reason & uPD98401_INT_MF) {
			printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full "
			    "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF)
			    >> uPD98401_INT_MF_SHIFT);
			event_dump();
			/* @@@ should try to recover */
		}
		if (reason & uPD98401_INT_MM) {
			if (reason & 1) poll_rx(dev,0);
			if (reason & 2) poll_rx(dev,1);
			if (reason & 4) poll_tx(dev,2);
			if (reason & 8) poll_tx(dev,3);
		}
		/* @@@ handle RCRn */
	}
	return IRQ_RETVAL(handled);
}

/*----------------------------- (E)EPROM access -----------------------------*/
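
/*
 * The board's serial EEPROM (which holds the ESI) is bit-banged through a
 * PCI configuration register: eprom_set()/eprom_get() write and read that
 * register, eprom_put_bits() shifts a command/address out MSB first while
 * toggling the clock (ZEPROM_SK) with chip select (ZEPROM_CS) asserted, and
 * eprom_get_byte() clocks eight result bits back in via ZEPROM_DO.
 */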
static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
		      unsigned short cmd)
{
	int error;

	if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value)))
		printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n",
		    error);
}

static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
{
	unsigned int value;
	int error;

	if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value)))
		printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n",
		    error);
	return value;
}

static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
			   int bits, unsigned short cmd)
{
	unsigned long value;
	int i;

	for (i = bits-1; i >= 0; i--) {
		value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0);
		eprom_set(zatm_dev,value,cmd);
		eprom_set(zatm_dev,value | ZEPROM_SK,cmd);
		eprom_set(zatm_dev,value,cmd);
	}
}

static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
			   unsigned short cmd)
{
	int i;

	*byte = 0;
	for (i = 8; i; i--) {
		eprom_set(zatm_dev,ZEPROM_CS,cmd);
		eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd);
		*byte <<= 1;
		if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1;
		eprom_set(zatm_dev,ZEPROM_CS,cmd);
	}
}

static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
			 int swap)
{
	unsigned char buf[ZEPROM_SIZE];
	struct zatm_dev *zatm_dev;
	int i;

	zatm_dev = ZATM_DEV(dev);
	for (i = 0; i < ZEPROM_SIZE; i += 2) {
		eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */
		eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd);
		eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd);
		eprom_get_byte(zatm_dev,buf+i+swap,cmd);
		eprom_get_byte(zatm_dev,buf+i+1-swap,cmd);
		eprom_set(zatm_dev,0,cmd); /* deselect EPROM */
	}
	memcpy(dev->esi,buf+offset,ESI_LEN);
	return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */
}

static void eprom_get_esi(struct atm_dev *dev)
{
	if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
	(void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
}

/*--------------------------------- entries ---------------------------------*/

static int zatm_init(struct atm_dev *dev)
{
	struct zatm_dev *zatm_dev;
	struct pci_dev *pci_dev;
	unsigned short command;
	int error,i,last;
	unsigned long t0,t1,t2;

	DPRINTK(">zatm_init\n");
	zatm_dev = ZATM_DEV(dev);
	spin_lock_init(&zatm_dev->lock);
	pci_dev = zatm_dev->pci_dev;
	zatm_dev->base = pci_resource_start(pci_dev, 0);
	zatm_dev->irq = pci_dev->irq;
	if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
		    dev->number,error);
		return -EINVAL;
	}
	if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
	    command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
		    "\n",dev->number,error);
		return -EIO;
	}
	eprom_get_esi(dev);
	printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
	    dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
	/* reset uPD98401 */
	zout(0,SWR);
	while (!(zin(GSR) & uPD98401_INT_IND));
	zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
	last = MAX_CRAM_SIZE;
	for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
		zpokel(zatm_dev,0x55555555,i);
		if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
		else {
			zpokel(zatm_dev,0xAAAAAAAA,i);
			if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
			else zpokel(zatm_dev,i,i);
		}
	}
	for (i = 0; i < last; i += RAM_INCREMENT)
		if (zpeekl(zatm_dev,i) != i) break;
	zatm_dev->mem = i << 2;
	while (i) zpokel(zatm_dev,0,--i);
	/* reset again to rebuild memory pointers */
	zout(0,SWR);
	while (!(zin(GSR) & uPD98401_INT_IND));
	zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
	    uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
	/* TODO: should shrink allocation now */
	printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
	    "MMF");
	for (i = 0; i < ESI_LEN; i++)
		printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
	do {
		unsigned long flags;

		spin_lock_irqsave(&zatm_dev->lock, flags);
		t0 = zpeekl(zatm_dev,uPD98401_TSR);
		udelay(10);
		t1 = zpeekl(zatm_dev,uPD98401_TSR);
		udelay(1010);
		t2 = zpeekl(zatm_dev,uPD98401_TSR);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
	}
	while (t0 > t1 || t1 > t2); /* loop if wrapping ... */
	zatm_dev->khz = t2-2*t1+t0;
	printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
	    "MHz\n",dev->number,
	    (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
	    zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
	return uPD98402_init(dev);
}
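
/*
 * zatm_start() carves the control memory found by zatm_init() into the
 * free-buffer pool area (uPD98401_PMA), the shaper area (uPD98401_SMA) and
 * the remaining free space (uPD98401_TOS); the usable VC count follows from
 * what is left, and dev->ci_range.vci_bits is sized to the largest power of
 * two that still fits.  The low 16 bits of each mailbox's kernel virtual
 * address must match those of its DMA address, because the hardware deals
 * in 16-bit tail/window offsets that the driver mixes with the virtual base
 * kept in mbx_start[].
 */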
static int zatm_start(struct atm_dev *dev)
{
	struct zatm_dev *zatm_dev = ZATM_DEV(dev);
	struct pci_dev *pdev = zatm_dev->pci_dev;
	unsigned long curr;
	int pools,vccs,rx;
	int error, i, ld;

	DPRINTK("zatm_start\n");
	zatm_dev->rx_map = zatm_dev->tx_map = NULL;
	for (i = 0; i < NR_MBX; i++)
		zatm_dev->mbx_start[i] = 0;
	error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev);
	if (error < 0) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		    dev->number,zatm_dev->irq);
		goto done;
	}
	/* define memory regions */
	pools = NR_POOLS;
	if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
		pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
	vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
	    (2*VC_SIZE+RX_SIZE);
	ld = -1;
	for (rx = 1; rx < vccs; rx <<= 1) ld++;
	dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
	dev->ci_range.vci_bits = ld;
	dev->link_rate = ATM_OC3_PCR;
	zatm_dev->chans = vccs; /* ??? */
	curr = rx*RX_SIZE/4;
	DPRINTK("RX pool 0x%08lx\n",curr);
	zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
	zatm_dev->pool_base = curr;
	curr += pools*POOL_SIZE/4;
	DPRINTK("Shapers 0x%08lx\n",curr);
	zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
	curr += NR_SHAPERS*SHAPER_SIZE/4;
	DPRINTK("Free 0x%08lx\n",curr);
	zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
	printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
	    "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
	    (zatm_dev->mem-curr*4)/VC_SIZE);
	/* create mailboxes */
	for (i = 0; i < NR_MBX; i++) {
		void *mbx;
		dma_addr_t mbx_dma;

		if (!mbx_entries[i])
			continue;
		mbx = dma_alloc_coherent(&pdev->dev,
					 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
		if (!mbx) {
			error = -ENOMEM;
			goto out;
		}
		/*
		 * Alignment provided by dma_alloc_coherent() isn't enough
		 * for this device.
		 */
		if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
			printk(KERN_ERR DEV_LABEL "(itf %d): system "
			    "bus incompatible with driver\n", dev->number);
			dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
			error = -ENODEV;
			goto out;
		}
		DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
		zatm_dev->mbx_start[i] = (unsigned long)mbx;
		zatm_dev->mbx_dma[i] = mbx_dma;
		zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
		    0xffff;
		zout(mbx_dma >> 16, MSH(i));
		zout(mbx_dma, MSL(i));
		zout(zatm_dev->mbx_end[i], MBA(i));
		zout((unsigned long)mbx & 0xffff, MTA(i));
		zout((unsigned long)mbx & 0xffff, MWA(i));
	}
	error = start_tx(dev);
	if (error)
		goto out;
	error = start_rx(dev);
	if (error)
		goto out_tx;
	error = dev->phy->start(dev);
	if (error)
		goto out_rx;
	zout(0xffffffff,IMR); /* enable interrupts */
	/* enable TX & RX */
	zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
done:
	return error;

out_rx:
	kfree(zatm_dev->rx_map);
out_tx:
	kfree(zatm_dev->tx_map);
out:
	while (i-- > 0) {
		dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
				  (void *)zatm_dev->mbx_start[i],
				  zatm_dev->mbx_dma[i]);
	}
	free_irq(zatm_dev->irq, dev);
	goto done;
}

static void zatm_close(struct atm_vcc *vcc)
{
	DPRINTK(">zatm_close\n");
	if (!ZATM_VCC(vcc)) return;
	clear_bit(ATM_VF_READY,&vcc->flags);
	close_rx(vcc);
	EVENT("close_tx\n",0,0);
	close_tx(vcc);
	DPRINTK("zatm_close: done waiting\n");
	/* deallocate memory */
	kfree(ZATM_VCC(vcc));
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
}
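
/*
 * zatm_open() runs in two stages, matching the ATM core's two-step setup:
 * open_rx_first()/open_tx_first() allocate the channel, pool and shaper
 * while the VPI/VCI may still be unspecified, and open_rx_second()/
 * open_tx_second() bind the channel to the final VPI/VCI and enable it.
 */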
static int zatm_open(struct atm_vcc *vcc)
{
	struct zatm_vcc *zatm_vcc;
	short vpi = vcc->vpi;
	int vci = vcc->vci;
	int error;

	DPRINTK(">zatm_open\n");
	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
		vcc->dev_data = NULL;
	if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
		set_bit(ATM_VF_ADDR,&vcc->flags);
	if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
	DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
	    vcc->vci);
	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
		zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
		if (!zatm_vcc) {
			clear_bit(ATM_VF_ADDR,&vcc->flags);
			return -ENOMEM;
		}
		vcc->dev_data = zatm_vcc;
		ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
		if ((error = open_rx_first(vcc))) {
			zatm_close(vcc);
			return error;
		}
		if ((error = open_tx_first(vcc))) {
			zatm_close(vcc);
			return error;
		}
	}
	if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) return 0;
	if ((error = open_rx_second(vcc))) {
		zatm_close(vcc);
		return error;
	}
	if ((error = open_tx_second(vcc))) {
		zatm_close(vcc);
		return error;
	}
	set_bit(ATM_VF_READY,&vcc->flags);
	return 0;
}

static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
{
	printk("Not yet implemented\n");
	return -ENOSYS;
	/* @@@ */
}

static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
	struct zatm_dev *zatm_dev;
	unsigned long flags;

	zatm_dev = ZATM_DEV(dev);
	switch (cmd) {
		case ZATM_GETPOOLZ:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			/* fall through */
		case ZATM_GETPOOL:
			{
				struct zatm_pool_info info;
				int pool;

				if (get_user(pool,
				    &((struct zatm_pool_req __user *) arg)->pool_num))
					return -EFAULT;
				if (pool < 0 || pool > ZATM_LAST_POOL)
					return -EINVAL;
				pool = array_index_nospec(pool,
							  ZATM_LAST_POOL + 1);
				spin_lock_irqsave(&zatm_dev->lock, flags);
				info = zatm_dev->pool_info[pool];
				if (cmd == ZATM_GETPOOLZ) {
					zatm_dev->pool_info[pool].rqa_count = 0;
					zatm_dev->pool_info[pool].rqu_count = 0;
				}
				spin_unlock_irqrestore(&zatm_dev->lock, flags);
				return copy_to_user(
				    &((struct zatm_pool_req __user *) arg)->info,
				    &info,sizeof(info)) ? -EFAULT : 0;
			}
		case ZATM_SETPOOL:
			{
				struct zatm_pool_info info;
				int pool;

				if (!capable(CAP_NET_ADMIN)) return -EPERM;
				if (get_user(pool,
				    &((struct zatm_pool_req __user *) arg)->pool_num))
					return -EFAULT;
				if (pool < 0 || pool > ZATM_LAST_POOL)
					return -EINVAL;
				pool = array_index_nospec(pool,
							  ZATM_LAST_POOL + 1);
				if (copy_from_user(&info,
				    &((struct zatm_pool_req __user *) arg)->info,
				    sizeof(info))) return -EFAULT;
				if (!info.low_water)
					info.low_water = zatm_dev->
					    pool_info[pool].low_water;
				if (!info.high_water)
					info.high_water = zatm_dev->
					    pool_info[pool].high_water;
				if (!info.next_thres)
					info.next_thres = zatm_dev->
					    pool_info[pool].next_thres;
				if (info.low_water >= info.high_water ||
				    info.low_water < 0)
					return -EINVAL;
				spin_lock_irqsave(&zatm_dev->lock, flags);
				zatm_dev->pool_info[pool].low_water =
				    info.low_water;
				zatm_dev->pool_info[pool].high_water =
				    info.high_water;
				zatm_dev->pool_info[pool].next_thres =
				    info.next_thres;
				spin_unlock_irqrestore(&zatm_dev->lock, flags);
				return 0;
			}
		default:
			if (!dev->phy->ioctl) return -ENOIOCTLCMD;
			return dev->phy->ioctl(dev,cmd,arg);
	}
}

static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
			   void __user *optval,int optlen)
{
	return -EINVAL;
}

static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
			   void __user *optval,unsigned int optlen)
{
	return -EINVAL;
}

static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
	int error;

	EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
	if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb(skb);
		return -EINVAL;
	}
	if (!skb) {
		printk(KERN_CRIT "!skb in zatm_send ?\n");
		if (vcc->pop) vcc->pop(vcc,skb);
		return -EINVAL;
	}
	ATM_SKB(skb)->vcc = vcc;
	error = do_tx(skb);
	if (error != RING_BUSY) return error;
	skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
	return 0;
}

static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
			 unsigned long addr)
{
	struct zatm_dev *zatm_dev;

	zatm_dev = ZATM_DEV(dev);
	zwait();
	zout(value,CER);
	zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
	    (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
}

static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
{
	struct zatm_dev *zatm_dev;

	zatm_dev = ZATM_DEV(dev);
	zwait();
	zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
	    (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
	zwait();
	return zin(CER) & 0xff;
}

static const struct atmdev_ops ops = {
	.open		= zatm_open,
	.close		= zatm_close,
	.ioctl		= zatm_ioctl,
	.getsockopt	= zatm_getsockopt,
	.setsockopt	= zatm_setsockopt,
	.send		= zatm_send,
	.phy_put	= zatm_phy_put,
	.phy_get	= zatm_phy_get,
	.change_qos	= zatm_change_qos,
};

static int zatm_init_one(struct pci_dev *pci_dev,
			 const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	struct zatm_dev *zatm_dev;
	int ret = -ENOMEM;

	zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
	if (!zatm_dev) {
		printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
		goto out;
	}

	dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
	if (!dev)
		goto out_free;

	ret = pci_enable_device(pci_dev);
	if (ret < 0)
		goto out_deregister;

	ret = pci_request_regions(pci_dev, DEV_LABEL);
	if (ret < 0)
		goto out_disable;

	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto out_release;

	zatm_dev->pci_dev = pci_dev;
	dev->dev_data = zatm_dev;
	zatm_dev->copper = (int)ent->driver_data;
	if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
		goto out_release;

	pci_set_drvdata(pci_dev, dev);
	zatm_dev->more = zatm_boards;
	zatm_boards = dev;
	ret = 0;
out:
	return ret;

out_release:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_deregister:
	atm_dev_deregister(dev);
out_free:
	kfree(zatm_dev);
	goto out;
}

MODULE_LICENSE("GPL");

static const struct pci_device_id zatm_pci_tbl[] = {
	{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
	{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);

static struct pci_driver zatm_driver = {
	.name		= DEV_LABEL,
	.id_table	= zatm_pci_tbl,
	.probe		= zatm_init_one,
};

static int __init zatm_init_module(void)
{
	return pci_register_driver(&zatm_driver);
}

module_init(zatm_init_module);
/* module_exit not defined so not unloadable */