/******************************************************************************
         iphase.c: Device driver for Interphase ATM PCI adapter cards
                    Author: Peter Wang <pwang@iphase.com>
          Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                  Interphase Corporation <www.iphase.com>
                               Version: 1.0
*******************************************************************************

      This software may be used and distributed according to the terms
      of the GNU General Public License (GPL), incorporated herein by reference.
      Drivers based on this skeleton fall under the GPL and must retain
      the authorship (implicit copyright) notice.

      This program is distributed in the hope that it will be useful, but
      WITHOUT ANY WARRANTY; without even the implied warranty of
      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      General Public License for more details.

      Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
      was originally written by Monalisa Agrawal at UNH.  Now this driver
      supports a variety of variants of the Interphase ATM PCI (i)Chip adapter
      card family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
      in terms of PHY type, the size of control memory and the size of
      packet memory.  The following are the change log and history:

          Bugfixed Mona's UBR driver.
          Modified the basic memory allocation and DMA logic.
          Ported the driver to the latest kernel from 2.0.46.
          Completed the ABR logic of the driver, and added the ABR
              workaround for the hardware anomalies.
          Added the CBR support.
          Added the flow control logic to the driver to allow rate-limited VCs.
          Added 4K VC support to the board with 512K control memory.
          Added the support of all the variants of the Interphase ATM PCI
              (i)Chip adapter cards including x575 (155M OC3 and UTP155),
              x525 (25M UTP25) and x531 (DS3 and E3).
          Added SMP support.

      Support and updates available at: ftp://ftp.iphase.com/pub/atm

*******************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
#include <linux/nospec.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
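/*
 * Note: swap_byte_order() swaps the two bytes of a 16-bit value
 * (equivalent to swab16() for u16 inputs); it is used below to convert
 * the AAL5 CPCS trailer length field, which apparently arrives
 * byte-swapped from the reassembly hardware.
 */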
#define PRIV(dev) ((struct suni_priv *) dev->phy_data)

static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR | IF_IADBG_INIT_ADAPTER
                             | IF_IADBG_ABR | IF_IADBG_EVENT */ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);

MODULE_LICENSE("GPL");

/**************************** IA_LIB **********************************/

static void ia_init_rtn_q(IARTN_Q *que)
{
    que->next = NULL;
    que->tail = NULL;
}

static void ia_enque_head_rtn_q(IARTN_Q *que, IARTN_Q *data)
{
    data->next = NULL;
    if (que->next == NULL)
        que->next = que->tail = data;
    else {
        data->next = que->next;
        que->next = data;
    }
}

static int ia_enque_rtn_q(IARTN_Q *que, struct desc_tbl_t data)
{
    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
    if (!entry)
        return -ENOMEM;
    entry->data = data;
    entry->next = NULL;
    if (que->next == NULL)
        que->next = que->tail = entry;
    else {
        que->tail->next = entry;
        que->tail = que->tail->next;
    }
    return 1;
}

static IARTN_Q *ia_deque_rtn_q(IARTN_Q *que)
{
    IARTN_Q *tmpdata;
    if (que->next == NULL)
        return NULL;
    tmpdata = que->next;
    if (que->next == que->tail)
        que->next = que->tail = NULL;
    else
        que->next = que->next->next;
    return tmpdata;
}
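/*
 * Walk the transmit complete queue (TCQ) from the host's shadow write
 * pointer up to the hardware write pointer, reclaiming finished
 * descriptors: the per-VC descriptor count is decremented, and for
 * rate-limited VCs (pcr < dev->rate_limit) the completed entry is also
 * queued on tx_return_q so ia_tx_poll() can release the skb.
 */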
static void ia_hack_tcq(IADEV *dev)
{
    u_short desc1;
    u_short tcq_wr;
    struct ia_vcc *iavcc_r = NULL;

    tcq_wr = readl(dev->seg_reg + TCQ_WR_PTR) & 0xffff;
    while (dev->host_tcq_wr != tcq_wr) {
        desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
        if (!desc1)
            ;
        else if (!dev->desc_tbl[desc1 - 1].timestamp) {
            IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 - 1, jiffies);)
            *(u_short *)(dev->seg_ram + dev->host_tcq_wr) = 0;
        } else if (dev->desc_tbl[desc1 - 1].timestamp) {
            if (!(iavcc_r = dev->desc_tbl[desc1 - 1].iavcc)) {
                printk("IA: Fatal err in get_desc\n");
                continue;
            }
            iavcc_r->vc_desc_cnt--;
            dev->desc_tbl[desc1 - 1].timestamp = 0;
            IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
                            dev->desc_tbl[desc1 - 1].txskb, desc1);)
            if (iavcc_r->pcr < dev->rate_limit) {
                IA_SKB_STATE(dev->desc_tbl[desc1 - 1].txskb) |= IA_TX_DONE;
                if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 - 1]) < 0)
                    printk("ia_hack_tcq: No memory available\n");
            }
            dev->desc_tbl[desc1 - 1].iavcc = NULL;
            dev->desc_tbl[desc1 - 1].txskb = NULL;
        }
        dev->host_tcq_wr += 2;
        if (dev->host_tcq_wr > dev->ffL.tcq_ed)
            dev->host_tcq_wr = dev->ffL.tcq_st;
    }
} /* ia_hack_tcq */
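/*
 * Allocate the next free transmit descriptor from the TCQ.  Roughly
 * every 50 jiffies (or whenever the queue looks empty) stale descriptors
 * whose per-VC timeout (ltimeout) has expired are recovered by pushing
 * them back into the TCQ.  Returns 0xFFFF when no descriptor is
 * available.
 */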
static u16 get_desc(IADEV *dev, struct ia_vcc *iavcc)
{
    u_short desc_num, i;
    struct sk_buff *skb;
    struct ia_vcc *iavcc_r = NULL;
    unsigned long delta;
    static unsigned long timer = 0;
    int ltimeout;

    ia_hack_tcq(dev);
    if (time_after(jiffies, timer + 50) || dev->ffL.tcq_rd == dev->host_tcq_wr) {
        timer = jiffies;
        i = 0;
        while (i < dev->num_tx_desc) {
            if (!dev->desc_tbl[i].timestamp) {
                i++;
                continue;
            }
            ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
            delta = jiffies - dev->desc_tbl[i].timestamp;
            if (delta >= ltimeout) {
                IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n",
                              i, dev->desc_tbl[i].timestamp, delta, jiffies);)
                if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
                    dev->ffL.tcq_rd = dev->ffL.tcq_ed;
                else
                    dev->ffL.tcq_rd -= 2;
                *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i + 1;
                if (!(skb = dev->desc_tbl[i].txskb) ||
                    !(iavcc_r = dev->desc_tbl[i].iavcc))
                    printk("Fatal err, desc table vcc or skb is NULL\n");
                else
                    iavcc_r->vc_desc_cnt--;
                dev->desc_tbl[i].timestamp = 0;
                dev->desc_tbl[i].iavcc = NULL;
                dev->desc_tbl[i].txskb = NULL;
            }
            i++;
        } /* while */
    }
    if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;

    /* Get the next available descriptor number from TCQ */
    desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

    while (!desc_num || (dev->desc_tbl[desc_num - 1]).timestamp) {
        dev->ffL.tcq_rd += 2;
        if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
            dev->ffL.tcq_rd = dev->ffL.tcq_st;
        if (dev->ffL.tcq_rd == dev->host_tcq_wr)
            return 0xFFFF;
        desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
    }

    /* get system time */
    dev->desc_tbl[desc_num - 1].timestamp = jiffies;
    return desc_num;
}
static void clear_lockup(struct atm_vcc *vcc, IADEV *dev)
{
    u_char foundLockUp;
    vcstatus_t *vcstatus;
    u_short *shd_tbl;
    u_short tempCellSlot, tempFract;
    struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
    struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
    u_int i;

    if (vcc->qos.txtp.traffic_class == ATM_ABR) {
        vcstatus = (vcstatus_t *)&(dev->testTable[vcc->vci]->vc_status);
        vcstatus->cnt++;
        foundLockUp = 0;
        if (vcstatus->cnt == 0x05) {
            abr_vc += vcc->vci;
            eabr_vc += vcc->vci;
            if (eabr_vc->last_desc) {
                if ((abr_vc->status & 0x07) == ABR_STATE /* 0x2 */) {
                    /* wait for 10 microseconds */
                    udelay(10);
                    if (eabr_vc->last_desc && (abr_vc->status & 0x07) == ABR_STATE)
                        foundLockUp = 1;
                } else {
                    tempCellSlot = abr_vc->last_cell_slot;
                    tempFract = abr_vc->fraction;
                    if ((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                        && (tempFract == dev->testTable[vcc->vci]->fract))
                        foundLockUp = 1;
                    dev->testTable[vcc->vci]->lastTime = tempCellSlot;
                    dev->testTable[vcc->vci]->fract = tempFract;
                }
            } /* last descriptor */
            vcstatus->cnt = 0;
        } /* vcstatus->cnt */

        if (foundLockUp) {
            IF_ABR(printk("LOCK UP found\n");)
            writew(0xFFFD, dev->seg_reg + MODE_REG_0);
            /* wait for 10 microseconds */
            udelay(10);
            abr_vc->status &= 0xFFF8;
            abr_vc->status |= 0x0001;  /* state is idle */
            shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
            for (i = 0; (i < dev->num_vc) && shd_tbl[i]; i++)
                ;
            if (i < dev->num_vc)
                shd_tbl[i] = vcc->vci;
            else
                IF_ERR(printk("ABR Seg. may not continue on VC %x\n", vcc->vci);)
            writew(T_ONLINE, dev->seg_reg + MODE_REG_0);
            writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), dev->seg_reg + SEG_MASK_REG);
            writew(TRANSMIT_DONE, dev->seg_reg + SEG_INTR_STATUS_REG);
            vcstatus->cnt = 0;
        } /* foundLockUp */
    } /* if an ABR VC */
}
/*
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
**
**  +----+----+------------------+-------------------------------+
**  |  R | NZ |  5-bit exponent  |         9-bit mantissa        |
**  +----+----+------------------+-------------------------------+
**
**    R = reserved (written as 0)
**    NZ = 0 if 0 cells/sec; 1 otherwise
**
**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
*/
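/*
** Example: for cr = 353207 (ATM_OC3_PCR) the most significant set bit
** is bit 18, so eeeee = 18 and the mantissa is the next nine bits,
** (353207 >> 9) & 0x1ff = 0xb1.  The encoded value is therefore
** NZ | (18 << 9) | 0xb1 = 0x64b1, which decodes back to
** 1.345703125 x 2^18 = 352768 cells/sec; the truncation error is
** bounded by 2^(18-9) = 512 cells/sec.
*/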
static u16
cellrate_to_float(u32 cr)
{
#define NZ     0x4000
#define M_BITS 9       /* Number of bits in mantissa */
#define E_BITS 5       /* Number of bits in exponent */
#define M_MASK 0x1ff
#define E_MASK 0x1f
    u16 flot;
    u32 tmp = cr & 0x00ffffff;
    int i = 0;

    if (cr == 0)
        return 0;
    while (tmp != 1) {
        tmp >>= 1;
        i++;
    }
    if (i == M_BITS)
        flot = NZ | (i << M_BITS) | (cr & M_MASK);
    else if (i < M_BITS)
        flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
    else
        flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
    return flot;
}
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
static u32
float_to_cellrate(u16 rate)
{
    u32 exp, mantissa, cps;

    if ((rate & NZ) == 0)
        return 0;
    exp = (rate >> M_BITS) & E_MASK;
    mantissa = rate & M_MASK;
    if (exp == 0)
        return 1;
    cps = (1 << M_BITS) | mantissa;
    if (exp == M_BITS)
        ;               /* cps is already correct */
    else if (exp > M_BITS)
        cps <<= (exp - M_BITS);
    else
        cps >>= (M_BITS - exp);
    return cps;
}
#endif
static void init_abr_vc(IADEV *dev, srv_cls_param_t *srv_p)
{
    srv_p->class_type = ATM_ABR;
    srv_p->pcr  = dev->LineRate;
    srv_p->mcr  = 0;
    srv_p->icr  = 0x055cb7;
    srv_p->tbe  = 0xffffff;
    srv_p->frtt = 0x3a;
    srv_p->rif  = 0xf;
    srv_p->rdf  = 0xb;
    srv_p->nrm  = 0x4;
    srv_p->trm  = 0x7;
    srv_p->cdf  = 0x3;
    srv_p->adtf = 50;
}
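/*
 * Program one ABR VC into the adapter.  flag == 1 initializes the
 * segmentation (FFRED) side from the service-class parameters,
 * converting the rates with cellrate_to_float(); flag == 0 initializes
 * the reassembly (RFRED) side and accounts the VC's MCR in
 * dev->sum_mcr.
 */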
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
               struct atm_vcc *vcc, u8 flag)
{
    f_vc_abr_entry *f_abr_vc;
    r_vc_abr_entry *r_abr_vc;
    u32 icr;
    u8 trm, nrm, crm;
    u16 adtf, air, *ptr16;

    f_abr_vc = (f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
    f_abr_vc += vcc->vci;
    switch (flag) {
    case 1: /* FFRED initialization */
#if 0  /* sanity check */
        if (srv_p->pcr == 0)
            return INVALID_PCR;
        if (srv_p->pcr > dev->LineRate)
            srv_p->pcr = dev->LineRate;
        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
            return MCR_UNAVAILABLE;
        if (srv_p->mcr > srv_p->pcr)
            return INVALID_MCR;
        if (!(srv_p->icr))
            srv_p->icr = srv_p->pcr;
        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
            return INVALID_ICR;
        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
            return INVALID_TBE;
        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
            return INVALID_FRTT;
        if (srv_p->nrm > MAX_NRM)
            return INVALID_NRM;
        if (srv_p->trm > MAX_TRM)
            return INVALID_TRM;
        if (srv_p->adtf > MAX_ADTF)
            return INVALID_ADTF;
        else if (srv_p->adtf == 0)
            srv_p->adtf = 1;
        if (srv_p->cdf > MAX_CDF)
            return INVALID_CDF;
        if (srv_p->rif > MAX_RIF)
            return INVALID_RIF;
        if (srv_p->rdf > MAX_RDF)
            return INVALID_RDF;
#endif
        memset((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
        f_abr_vc->f_vc_type = ABR;
        nrm = 2 << srv_p->nrm;  /* (2 ** (srv_p->nrm + 1)) */
                                /* i.e. 2**n = 2 << (n-1) */
        f_abr_vc->f_nrm = nrm << 8 | nrm;
        trm = 100000 / (2 << (16 - srv_p->trm));
        if (trm == 0)
            trm = 1;
        f_abr_vc->f_nrmexp = (((srv_p->nrm + 1) & 0x0f) << 12) | (MRM << 8) | trm;
        crm = srv_p->tbe / nrm;
        if (crm == 0)
            crm = 1;
        f_abr_vc->f_crm = crm & 0xff;
        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
        icr = min(srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                  ((srv_p->tbe / srv_p->frtt) * 1000000) :
                  (1000000 / (srv_p->frtt / srv_p->tbe)));
        f_abr_vc->f_icr = cellrate_to_float(icr);
        adtf = (10000 * srv_p->adtf) / 8192;
        if (adtf == 0)
            adtf = 1;
        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
        f_abr_vc->f_acr = f_abr_vc->f_icr;
        f_abr_vc->f_status = 0x0042;
        break;
    case 0: /* RFRED initialization */
        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE * dev->memSize);
        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
        r_abr_vc = (r_vc_abr_entry *)(dev->reass_ram + ABR_VC_TABLE * dev->memSize);
        r_abr_vc += vcc->vci;
        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
        air = srv_p->pcr << (15 - srv_p->rif);
        if (air == 0)
            air = 1;
        r_abr_vc->r_air = cellrate_to_float(air);
        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
        dev->sum_mcr += srv_p->mcr;
        dev->n_abr++;
        break;
    default:
        break;
    }
    return 0;
}
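/*
 * Reserve CBR schedule-table slots for a VC.  The PCR is rounded to a
 * whole number of dev->Granularity entries, which are then spread as
 * evenly as possible across the table using an integer stride (spacing)
 * plus a fractional remainder (sp_mod/sp_mod2); occupied slots are
 * resolved by probing alternately below and above the ideal slot.
 */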
static int ia_cbr_setup(IADEV *dev, struct atm_vcc *vcc)
{
    u32 rateLow = 0, rateHigh, rate;
    int entries;
    struct ia_vcc *ia_vcc;

    int idealSlot = 0, testSlot, toBeAssigned, inc;
    u32 spacing;
    u16 *SchedTbl, *TstSchedTbl;
    u16 cbrVC, vcIndex;
    u32 fracSlot = 0;
    u32 sp_mod = 0;
    u32 sp_mod2 = 0;

    /* IpAdjustTrafficParams */
    if (vcc->qos.txtp.max_pcr <= 0) {
        IF_ERR(printk("PCR for CBR not defined\n");)
        return -1;
    }
    rate = vcc->qos.txtp.max_pcr;
    entries = rate / dev->Granularity;
    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                  entries, rate, dev->Granularity);)
    if (entries < 1)
        IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
    rateLow  = entries * dev->Granularity;
    rateHigh = (entries + 1) * dev->Granularity;
    if (3 * (rate - rateLow) > (rateHigh - rate))
        entries++;
    if (entries > dev->CbrRemEntries) {
        IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
        IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                      entries, dev->CbrRemEntries);)
        return -EBUSY;
    }

    ia_vcc = INPH_IA_VCC(vcc);
    ia_vcc->NumCbrEntry = entries;
    dev->sum_mcr += entries * dev->Granularity;
    /* IaFFrednInsertCbrSched */
    /* Starting at an arbitrary location, place the entries into the table
       as smoothly as possible. */
    cbrVC = 0;
    spacing = dev->CbrTotEntries / entries;
    sp_mod  = dev->CbrTotEntries % entries;  /* get modulo */
    toBeAssigned = entries;
    fracSlot = 0;
    vcIndex  = vcc->vci;
    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n", vcIndex, spacing, sp_mod);)
    while (toBeAssigned) {
        /* If this is the first time, start the table loading for this
           connection as close to entryPoint as possible. */
        if (toBeAssigned == entries) {
            idealSlot = dev->CbrEntryPt;
            dev->CbrEntryPt += 2;   /* adding 2 helps to prevent clumping */
            if (dev->CbrEntryPt >= dev->CbrTotEntries)
                dev->CbrEntryPt -= dev->CbrTotEntries;  /* wrap if necessary */
        } else {
            idealSlot += (u32)(spacing + fracSlot);     /* point to the next location */
                                                        /* in the table that would be smoothest */
            fracSlot = ((sp_mod + sp_mod2) / entries);  /* get new integer part */
            sp_mod2  = ((sp_mod + sp_mod2) % entries);  /* calc new fractional part */
        }
        if (idealSlot >= (int)dev->CbrTotEntries)
            idealSlot -= dev->CbrTotEntries;
        /* Continuously check around this ideal value until a null
           location is encountered. */
        SchedTbl = (u16 *)(dev->seg_ram + CBR_SCHED_TABLE * dev->memSize);
        inc = 0;
        testSlot = idealSlot;
        TstSchedTbl = (u16 *)(SchedTbl + testSlot);  /* set index and read in value */
        IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                      testSlot, TstSchedTbl, toBeAssigned);)
        memcpy((caddr_t)&cbrVC, (caddr_t)TstSchedTbl, sizeof(cbrVC));
        while (cbrVC) {  /* if another VC is at this location, keep looking */
            inc++;
            testSlot = idealSlot - inc;
            if (testSlot < 0) {  /* wrap if necessary */
                testSlot += dev->CbrTotEntries;
                IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                              SchedTbl, testSlot);)
            }
            TstSchedTbl = (u16 *)(SchedTbl + testSlot);  /* set table index */
            memcpy((caddr_t)&cbrVC, (caddr_t)TstSchedTbl, sizeof(cbrVC));
            if (!cbrVC)
                break;
            testSlot = idealSlot + inc;
            if (testSlot >= (int)dev->CbrTotEntries) {  /* wrap if necessary */
                testSlot -= dev->CbrTotEntries;
                IF_CBR(printk("TotCbrEntries=%d", dev->CbrTotEntries);)
                IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                              testSlot, toBeAssigned);)
            }
            /* set table index and read in value */
            TstSchedTbl = (u16 *)(SchedTbl + testSlot);
            IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl, cbrVC, inc);)
            memcpy((caddr_t)&cbrVC, (caddr_t)TstSchedTbl, sizeof(cbrVC));
        } /* while */
        /* Move this VCI number into this location of the CBR Sched table. */
        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
        dev->CbrRemEntries--;
        toBeAssigned--;
    } /* while */

    /* IaFFrednCbrEnable */
    dev->NumEnabledCBR++;
    if (dev->NumEnabledCBR == 1) {
        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg + STPARMS);
        IF_CBR(printk("CBR is enabled\n");)
    }
    return 0;
}

static void ia_cbrVc_close(struct atm_vcc *vcc)
{
    IADEV *iadev;
    u16 *SchedTbl, NullVci = 0;
    u32 i, NumFound;

    iadev = INPH_IA_DEV(vcc->dev);
    iadev->NumEnabledCBR--;
    SchedTbl = (u16 *)(iadev->seg_ram + CBR_SCHED_TABLE * iadev->memSize);
    if (iadev->NumEnabledCBR == 0) {
        writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg + STPARMS);
        IF_CBR(printk("CBR support disabled\n");)
    }
    NumFound = 0;
    for (i = 0; i < iadev->CbrTotEntries; i++) {
        if (*SchedTbl == vcc->vci) {
            iadev->CbrRemEntries++;
            *SchedTbl = NullVci;
            IF_CBR(NumFound++;)
        }
        SchedTbl++;
    }
    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n", NumFound);)
}
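/*
 * Number of free transmit descriptors, computed as half the byte
 * distance from the TCQ read pointer to the host write pointer (each
 * TCQ entry is two bytes), allowing for wrap-around.
 */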
static int ia_avail_descs(IADEV *iadev)
{
    int tmp = 0;

    ia_hack_tcq(iadev);
    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
        tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
    else
        tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
               iadev->ffL.tcq_st) / 2;
    return tmp;
}

static int ia_pkt_tx(struct atm_vcc *vcc, struct sk_buff *skb);

static int ia_que_tx(IADEV *iadev)
{
    struct sk_buff *skb;
    int num_desc;
    struct atm_vcc *vcc;

    num_desc = ia_avail_descs(iadev);

    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
        if (!(vcc = ATM_SKB(skb)->vcc)) {
            dev_kfree_skb_any(skb);
            printk("ia_que_tx: Null vcc\n");
            break;
        }
        if (!test_bit(ATM_VF_READY, &vcc->flags)) {
            dev_kfree_skb_any(skb);
            printk("Free the SKB on closed vci %d\n", vcc->vci);
            break;
        }
        if (ia_pkt_tx(vcc, skb)) {
            skb_queue_head(&iadev->tx_backlog, skb);
        }
        num_desc--;
    }
    return 0;
}
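/*
 * Drain the tx_return_q filled by ia_hack_tcq(): for each returned
 * descriptor, pop skbs off the VC's txing_skb list up to and including
 * the completed one, handing them to vcc->pop() (or freeing them), then
 * kick the backlog via ia_que_tx().
 */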
static void ia_tx_poll(IADEV *iadev)
{
    struct atm_vcc *vcc = NULL;
    struct sk_buff *skb = NULL, *skb1 = NULL;
    struct ia_vcc *iavcc;
    IARTN_Q *rtne;

    ia_hack_tcq(iadev);
    while ((rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
        skb = rtne->data.txskb;
        if (!skb) {
            printk("ia_tx_poll: skb is null\n");
            goto out;
        }
        vcc = ATM_SKB(skb)->vcc;
        if (!vcc) {
            printk("ia_tx_poll: vcc is null\n");
            dev_kfree_skb_any(skb);
            goto out;
        }
        iavcc = INPH_IA_VCC(vcc);
        if (!iavcc) {
            printk("ia_tx_poll: iavcc is null\n");
            dev_kfree_skb_any(skb);
            goto out;
        }

        skb1 = skb_dequeue(&iavcc->txing_skb);
        while (skb1 && (skb1 != skb)) {
            if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
                printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
            }
            IF_ERR(printk("Releasing the non-matching SKB\n");)
            if ((vcc->pop) && (skb1->len != 0)) {
                vcc->pop(vcc, skb1);
                IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                (long)skb1);)
            } else
                dev_kfree_skb_any(skb1);
            skb1 = skb_dequeue(&iavcc->txing_skb);
        }
        if (!skb1) {
            IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n", vcc->vci);)
            ia_enque_head_rtn_q(&iadev->tx_return_q, rtne);
            break;
        }
        if ((vcc->pop) && (skb->len != 0)) {
            vcc->pop(vcc, skb);
            IF_EVENT(printk("Tx Done - skb 0x%lx return\n", (long)skb);)
        } else
            dev_kfree_skb_any(skb);
        kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
static void ia_eeprom_put(IADEV *iadev, u32 addr, u_short val)
{
    u32 t;
    int i;
    /*
     * Issue a command to enable writes to the NOVRAM
     */
    NVRAM_CMD(EXTEND + EWEN);
    NVRAM_CLR_CE;
    /*
     * issue the write command
     */
    NVRAM_CMD(IAWRITE + addr);
    /*
     * Send the data, starting with D15, then D14, and so on for 16 bits
     */
    for (i = 15; i >= 0; i--) {
        NVRAM_CLKOUT(val & 0x8000);
        val <<= 1;
    }
    NVRAM_CLR_CE;
    CFG_OR(NVCE);
    t = readl(iadev->reg + IPHASE5575_EEPROM_ACCESS);
    while (!(t & NVDO))
        t = readl(iadev->reg + IPHASE5575_EEPROM_ACCESS);
    NVRAM_CLR_CE;
    /*
     * disable writes again
     */
    NVRAM_CMD(EXTEND + EWDS);
    NVRAM_CLR_CE;
    CFG_AND(~NVDI);
}
#endif

static u16 ia_eeprom_get(IADEV *iadev, u32 addr)
{
    u_short val;
    u32 t;
    int i;
    /*
     * Read the first bit that was clocked with the falling edge of
     * the last command data clock
     */
    NVRAM_CMD(IAREAD + addr);
    /*
     * Now read the rest of the bits, the next bit read is D14, then D13,
     * and so on.
     */
    val = 0;
    for (i = 15; i >= 0; i--) {
        NVRAM_CLKIN(t);
        val |= (t << i);
    }
    NVRAM_CLR_CE;
    CFG_AND(~NVDI);
    return val;
}
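/*
 * Derive the board configuration from EEPROM word 25: boards with less
 * packet memory get proportionally fewer TX/RX descriptors (unless the
 * buffer counts were overridden as module parameters), and the
 * front-end (PHY) type field selects the line rate.
 */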
static void ia_hw_type(IADEV *iadev)
{
    u_short memType = ia_eeprom_get(iadev, 25);

    iadev->memType = memType;
    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
        iadev->num_tx_desc = IA_TX_BUF;
        iadev->tx_buf_sz   = IA_TX_BUF_SZ;
        iadev->num_rx_desc = IA_RX_BUF;
        iadev->rx_buf_sz   = IA_RX_BUF_SZ;
    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
        if (IA_TX_BUF == DFL_TX_BUFFERS)
            iadev->num_tx_desc = IA_TX_BUF / 2;
        else
            iadev->num_tx_desc = IA_TX_BUF;
        iadev->tx_buf_sz = IA_TX_BUF_SZ;
        if (IA_RX_BUF == DFL_RX_BUFFERS)
            iadev->num_rx_desc = IA_RX_BUF / 2;
        else
            iadev->num_rx_desc = IA_RX_BUF;
        iadev->rx_buf_sz = IA_RX_BUF_SZ;
    } else {
        if (IA_TX_BUF == DFL_TX_BUFFERS)
            iadev->num_tx_desc = IA_TX_BUF / 8;
        else
            iadev->num_tx_desc = IA_TX_BUF;
        iadev->tx_buf_sz = IA_TX_BUF_SZ;
        if (IA_RX_BUF == DFL_RX_BUFFERS)
            iadev->num_rx_desc = IA_RX_BUF / 8;
        else
            iadev->num_rx_desc = IA_RX_BUF;
        iadev->rx_buf_sz = IA_RX_BUF_SZ;
    }
    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz=%d rx_pkt_ram=%d\n",
                   iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
                   iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
    if ((memType & FE_MASK) == FE_SINGLE_MODE)
        iadev->phy_type = PHY_OC3C_S;
    else if ((memType & FE_MASK) == FE_UTP_OPTION)
        iadev->phy_type = PHY_UTP155;
    else
        iadev->phy_type = PHY_OC3C_M;
#endif

    iadev->phy_type = memType & FE_MASK;
    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                   memType, iadev->phy_type);)
    if (iadev->phy_type == FE_25MBIT_PHY)
        iadev->LineRate = (u32)(((25600000 / 8) * 26) / (27 * 53));
    else if (iadev->phy_type == FE_DS3_PHY)
        iadev->LineRate = (u32)(((44736000 / 8) * 26) / (27 * 53));
    else if (iadev->phy_type == FE_E3_PHY)
        iadev->LineRate = (u32)(((34368000 / 8) * 26) / (27 * 53));
    else
        iadev->LineRate = (u32)(ATM_OC3_PCR);
    IF_INIT(printk("iadev->LineRate = %d\n", iadev->LineRate);)
}
static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
{
    return readl(ia->phy + (reg >> 2));
}

static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
{
    writel(val, ia->phy + (reg >> 2));
}

static void ia_frontend_intr(struct iadev_priv *iadev)
{
    u32 status;

    if (iadev->phy_type & FE_25MBIT_PHY) {
        status = ia_phy_read32(iadev, MB25_INTR_STATUS);
        iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
    } else if (iadev->phy_type & FE_DS3_PHY) {
        ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
        status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
        iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
    } else if (iadev->phy_type & FE_E3_PHY) {
        ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
        status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
        iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
    } else {
        status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
        iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
    }

    printk(KERN_INFO "IA: SUNI carrier %s\n",
           iadev->carrier_detect ? "detected" : "lost signal");
}
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
    ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
    ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

    iadev->carrier_detect =
        (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}

struct ia_reg {
    u16 reg;
    u16 val;
};

static void ia_phy_write(struct iadev_priv *iadev,
                         const struct ia_reg *regs, int len)
{
    while (len--) {
        ia_phy_write32(iadev, regs->reg, regs->val);
        regs++;
    }
}

static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_ds3_init[] = {
        { SUNI_DS3_FRM_INTR_ENBL, 0x17 },
        { SUNI_DS3_FRM_CFG,       0x01 },
        { SUNI_DS3_TRAN_CFG,      0x01 },
        { SUNI_CONFIG,            0 },
        { SUNI_SPLR_CFG,          0 },
        { SUNI_SPLT_CFG,          0 }
    };
    u32 status;

    status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
    iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

    ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}

static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_e3_init[] = {
        { SUNI_E3_FRM_FRAM_OPTIONS,       0x04 },
        { SUNI_E3_FRM_MAINT_OPTIONS,      0x20 },
        { SUNI_E3_FRM_FRAM_INTR_ENBL,     0x1d },
        { SUNI_E3_FRM_MAINT_INTR_ENBL,    0x30 },
        { SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
        { SUNI_E3_TRAN_FRAM_OPTIONS,      0x01 },
        { SUNI_CONFIG,                    SUNI_PM7345_E3ENBL },
        { SUNI_SPLR_CFG,                  0x41 },
        { SUNI_SPLT_CFG,                  0x41 }
    };
    u32 status;

    status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
    iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
    ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}

static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_init[] = {
        /* Enable RSOP loss of signal interrupt. */
        { SUNI_INTR_ENBL,          0x28 },
        /* Clear error counters. */
        { SUNI_ID_RESET,           0 },
        /* Clear "PMCTST" in master test register. */
        { SUNI_MASTER_TEST,        0 },

        { SUNI_RXCP_CTRL,          0x2c },
        { SUNI_RXCP_FCTRL,         0x81 },

        { SUNI_RXCP_IDLE_PAT_H1,   0 },
        { SUNI_RXCP_IDLE_PAT_H2,   0 },
        { SUNI_RXCP_IDLE_PAT_H3,   0 },
        { SUNI_RXCP_IDLE_PAT_H4,   0x01 },

        { SUNI_RXCP_IDLE_MASK_H1,  0xff },
        { SUNI_RXCP_IDLE_MASK_H2,  0xff },
        { SUNI_RXCP_IDLE_MASK_H3,  0xff },
        { SUNI_RXCP_IDLE_MASK_H4,  0xfe },

        { SUNI_RXCP_CELL_PAT_H1,   0 },
        { SUNI_RXCP_CELL_PAT_H2,   0 },
        { SUNI_RXCP_CELL_PAT_H3,   0 },
        { SUNI_RXCP_CELL_PAT_H4,   0x01 },

        { SUNI_RXCP_CELL_MASK_H1,  0xff },
        { SUNI_RXCP_CELL_MASK_H2,  0xff },
        { SUNI_RXCP_CELL_MASK_H3,  0xff },
        { SUNI_RXCP_CELL_MASK_H4,  0xff },

        { SUNI_TXCP_CTRL,          0xa4 },
        { SUNI_TXCP_INTR_EN_STS,   0x10 },
        { SUNI_TXCP_IDLE_PAT_H5,   0x55 }
    };

    if (iadev->phy_type & FE_DS3_PHY)
        ia_suni_pm7345_init_ds3(iadev);
    else
        ia_suni_pm7345_init_e3(iadev);

    ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

    ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
                   ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
                     SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
}
/***************************** IA_LIB END *****************************/

#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
static void xdump(u_char *cp, int length, char *prefix)
{
    int col, count;
    u_char prntBuf[120];
    u_char *pBuf = prntBuf;

    count = 0;
    while (count < length) {
        pBuf += sprintf(pBuf, "%s", prefix);
        for (col = 0; count + col < length && col < 16; col++) {
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf(pBuf, " ");
            pBuf += sprintf(pBuf, "%02X ", cp[count + col]);
        }
        while (col++ < 16) {  /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                sprintf(pBuf, " ");
            pBuf += sprintf(pBuf, "   ");
        }
        pBuf += sprintf(pBuf, "  ");
        for (col = 0; count + col < length && col < 16; col++) {
            if (isprint((int)cp[count + col]))
                pBuf += sprintf(pBuf, "%c", cp[count + col]);
            else
                pBuf += sprintf(pBuf, ".");
        }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }
} /* xdump */
#endif /* CONFIG_ATM_IA_DEBUG */

static struct atm_dev *ia_boards = NULL;

#define ACTUAL_RAM_BASE \
    RAM_BASE * ((iadev->mem) / (128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
    IPHASE5575_FRAG_CONTROL_RAM_BASE * ((iadev->mem) / (128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
    IPHASE5575_REASS_CONTROL_RAM_BASE * ((iadev->mem) / (128 * 1024))
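/*
 * The ACTUAL_* macros scale the nominal base addresses (apparently
 * defined relative to a 128 KB part) by the amount of control memory
 * actually present, so the same layout works on 128 KB through 1 MB
 * boards.
 */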
/*-- some utilities and memory allocation stuff will come here -------------*/

static void desc_dbg(IADEV *iadev)
{
    u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
    u32 i;
    void __iomem *tmp;

    // regval = readl((u32)ia_cmds->maddr);
    tcq_wr_ptr = readw(iadev->seg_reg + TCQ_WR_PTR);
    printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
           tcq_wr_ptr, readw(iadev->seg_ram + tcq_wr_ptr),
           readw(iadev->seg_ram + tcq_wr_ptr - 2));
    printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x\n", iadev->host_tcq_wr,
           iadev->ffL.tcq_rd);
    tcq_st_ptr = readw(iadev->seg_reg + TCQ_ST_ADR);
    tcq_ed_ptr = readw(iadev->seg_reg + TCQ_ED_ADR);
    printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x\n", tcq_st_ptr, tcq_ed_ptr);
    i = 0;
    while (tcq_st_ptr != tcq_ed_ptr) {
        tmp = iadev->seg_ram + tcq_st_ptr;
        printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
        tcq_st_ptr += 2;
    }
    for (i = 0; i < iadev->num_tx_desc; i++)
        printk("Desc_tbl[%d] = %d\n", i, iadev->desc_tbl[i].timestamp);
}

/*----------------------------- Receiving side stuff --------------------------*/

static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving side will cause too many excp int */
    IADEV *iadev;
    u_short state;
    u_short excpq_rd_ptr;
    //u_short *ptr;
    int vci, error = 1;

    iadev = INPH_IA_DEV(dev);
    state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
    while ((state & EXCPQ_EMPTY) != EXCPQ_EMPTY) {
        printk("state = %x\n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
        printk("state = %x excpq_rd_ptr = %x\n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16 *)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram + excpq_rd_ptr);
        error = readw(iadev->reass_ram + excpq_rd_ptr + 2) & 0x0007;
        // pwang_test
        excpq_rd_ptr += 4;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR) & 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR) & 0xffff;
        writew(excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
    }
#endif
}

static void free_desc(struct atm_dev *dev, int desc)
{
    IADEV *iadev;

    iadev = INPH_IA_DEV(dev);
    writew(desc, iadev->reass_ram + iadev->rfL.fdq_wr);
    iadev->rfL.fdq_wr += 2;
    if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
        iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
    writew(iadev->rfL.fdq_wr, iadev->reass_reg + FREEQ_WR_PTR);
}
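/*
 * Take one completed PDU off the packet complete queue (PCQ): validate
 * the descriptor and its VC, charge an skb to the socket with
 * atm_alloc_charge(), and queue a DLE so the DMA engine copies the PDU
 * from card buffer memory into the skb.
 */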
static int rx_pkt(struct atm_dev *dev)
{
    IADEV *iadev;
    struct atm_vcc *vcc;
    unsigned short status;
    struct rx_buf_desc __iomem *buf_desc_ptr;
    int desc;
    struct dle *wr_ptr;
    int len;
    struct sk_buff *skb;
    u_int buf_addr, dma_addr;

    iadev = INPH_IA_DEV(dev);
    if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg + PCQ_WR_PTR) & 0xffff)) {
        printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
        return -EINVAL;
    }
    /* mask 1st 3 bits to get the actual descno. */
    desc = readw(iadev->reass_ram + iadev->rfL.pcq_rd) & 0x1fff;
    IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
          printk(" pcq_wr_ptr = 0x%x\n",
                 readw(iadev->reass_reg + PCQ_WR_PTR) & 0xffff);)
    /* update the read pointer - maybe we should do this at the end */
    if (iadev->rfL.pcq_rd == iadev->rfL.pcq_ed)
        iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
    else
        iadev->rfL.pcq_rd += 2;
    writew(iadev->rfL.pcq_rd, iadev->reass_reg + PCQ_RD_PTR);

    /* get the buffer desc entry.
       update stuff. - doesn't seem to be any update necessary
    */
    buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
    /* make the ptr point to the corresponding buffer desc entry */
    buf_desc_ptr += desc;
    if (!desc || (desc > iadev->num_rx_desc) ||
        ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
        free_desc(dev, desc);
        IF_ERR(printk("IA: bad descriptor desc = %d\n", desc);)
        return -1;
    }
    vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
    if (!vcc) {
        free_desc(dev, desc);
        printk("IA: null vcc, drop PDU\n");
        return -1;
    }

    /* might want to check the status bits for errors */
    status = (u_short)(buf_desc_ptr->desc_mode);
    if (status & (RX_CER | RX_PTE | RX_OFL)) {
        atomic_inc(&vcc->stats->rx_err);
        IF_ERR(printk("IA: bad packet, dropping it");)
        if (status & RX_CER) {
            IF_ERR(printk(" cause: packet CRC error\n");)
        } else if (status & RX_PTE) {
            IF_ERR(printk(" cause: packet time out\n");)
        } else {
            IF_ERR(printk(" cause: buffer overflow\n");)
        }
        goto out_free_desc;
    }

    /*
       build DLE.
    */
    buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
    dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
    len = dma_addr - buf_addr;
    if (len > iadev->rx_buf_sz) {
        printk("SDU over %d bytes received, dropped!!!\n", iadev->rx_buf_sz);
        atomic_inc(&vcc->stats->rx_err);
        goto out_free_desc;
    }

    if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
        if (vcc->vci < 32)
            printk("Drop control packets\n");
        goto out_free_desc;
    }
    skb_put(skb, len);
    // pwang_test
    ATM_SKB(skb)->vcc = vcc;
    ATM_DESC(skb) = desc;
    skb_queue_tail(&iadev->rx_dma_q, skb);

    /* Build the DLE structure */
    wr_ptr = iadev->rx_dle_q.write;
    wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
                                          len, DMA_FROM_DEVICE);
    wr_ptr->local_pkt_addr = buf_addr;
    wr_ptr->bytes = len;  /* we don't know this, do we? */
    wr_ptr->mode = DMA_INT_ENABLE;

    /* should take care of wrap around here too. */
    if (++wr_ptr == iadev->rx_dle_q.end)
        wr_ptr = iadev->rx_dle_q.start;
    iadev->rx_dle_q.write = wr_ptr;
    udelay(1);
    /* Increment transaction counter */
    writel(1, iadev->dma + IPHASE5575_RX_COUNTER);
out:
    return 0;
out_free_desc:
    free_desc(dev, desc);
    goto out;
}
static void rx_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    u_short status;
    u_short state, i;

    iadev = INPH_IA_DEV(dev);
    status = readl(iadev->reass_reg + REASS_INTR_STATUS_REG) & 0xffff;
    IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
    if (status & RX_PKT_RCVD) {
        /* do something */
        /* Basically we received an interrupt for receiving a packet.
           A descriptor would have been written to the packet complete
           queue.  Get all the descriptors and set up DMA to move the
           packets until the packet complete queue is empty.
        */
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
        while (!(state & PCQ_EMPTY)) {
            rx_pkt(dev);
            state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        }
        iadev->rxing = 1;
    }
    if (status & RX_FREEQ_EMPT) {
        if (iadev->rxing) {
            iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
            iadev->rx_tmp_jif = jiffies;
            iadev->rxing = 0;
        } else if (time_after(jiffies, iadev->rx_tmp_jif + 50) &&
                   ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
            for (i = 1; i <= iadev->num_rx_desc; i++)
                free_desc(dev, i);
            printk("Test logic RUN!!!!\n");
            writew(~(RX_FREEQ_EMPT | RX_EXCP_RCVD), iadev->reass_reg + REASS_MASK_REG);
            iadev->rxing = 1;
        }
        IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
    }
    if (status & RX_EXCP_RCVD) {
        /* probably need to handle the exception queue also. */
        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
        rx_excp_rcvd(dev);
    }
    if (status & RX_RAW_RCVD) {
        /* need to handle the raw incoming cells.  This depends on
           whether we have programmed to receive the raw cells or not.
           Else ignore. */
        IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
    }
}
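/*
 * Receive-DMA completion: for every DLE the engine has consumed, unmap
 * the skb, strip the AAL5 CPCS trailer (whose length field is
 * byte-swapped and sanity-checked against the buffer size), push the
 * packet up with vcc->push(), and re-enable the receive interrupts
 * that were masked when the free-descriptor queue ran empty.
 */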
static void rx_dle_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    struct atm_vcc *vcc;
    struct sk_buff *skb;
    int desc;
    u_short state;
    struct dle *dle, *cur_dle;
    u_int dle_lp;
    int len;

    iadev = INPH_IA_DEV(dev);

    /* free all the dles done, that is just update our own dle read pointer
       - do we really need to do this. Think not. */
    /* DMA is done, just get all the receive buffers from the rx dma queue
       and push them up to the higher layer protocol.  Also free the desc
       associated with the buffer. */
    dle = iadev->rx_dle_q.read;
    dle_lp = readl(iadev->dma + IPHASE5575_RX_LIST_ADDR) &
             (sizeof(struct dle) * DLE_ENTRIES - 1);
    cur_dle = (struct dle *)(iadev->rx_dle_q.start + (dle_lp >> 4));
    while (dle != cur_dle) {
        /* free the DMAed skb */
        skb = skb_dequeue(&iadev->rx_dma_q);
        if (!skb)
            goto INCR_DLE;
        desc = ATM_DESC(skb);
        free_desc(dev, desc);

        if (!(len = skb->len)) {
            printk("rx_dle_intr: skb len 0\n");
            dev_kfree_skb_any(skb);
        } else {
            struct cpcs_trailer *trailer;
            u_short length;
            struct ia_vcc *ia_vcc;

            dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
                             len, DMA_FROM_DEVICE);
            /* no VCC related housekeeping done as yet. lets see */
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                printk("IA: null vcc\n");
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            ia_vcc = INPH_IA_VCC(vcc);
            if (ia_vcc == NULL) {
                atomic_inc(&vcc->stats->rx_err);
                atm_return(vcc, skb->truesize);
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            // get real pkt length  pwang_test
            trailer = (struct cpcs_trailer *)((u_char *)skb->data +
                                              skb->len - sizeof(*trailer));
            length = swap_byte_order(trailer->length);
            if ((length > iadev->rx_buf_sz) ||
                (length > (skb->len - sizeof(struct cpcs_trailer)))) {
                atomic_inc(&vcc->stats->rx_err);
                IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
                              length, skb->len);)
                atm_return(vcc, skb->truesize);
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            skb_trim(skb, length);

            /* Display the packet */
            IF_RXPKT(printk("\nDmad Recvd data: len = %d\n", skb->len);
                     xdump(skb->data, skb->len, "RX: ");
                     printk("\n");)

            IF_RX(printk("rx_dle_intr: skb push");)
            vcc->push(vcc, skb);
            atomic_inc(&vcc->stats->rx);
            iadev->rx_pkt_cnt++;
        }
INCR_DLE:
        if (++dle == iadev->rx_dle_q.end)
            dle = iadev->rx_dle_q.start;
    }
    iadev->rx_dle_q.read = dle;

    /* if the interrupts are masked because there were no free descriptors
       available, unmask them now. */
    if (!iadev->rxing) {
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        if (!(state & FREEQ_EMPTY)) {
            state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
            writel(state & ~(RX_FREEQ_EMPT | /* RX_EXCP_RCVD | */ RX_PKT_RCVD),
                   iadev->reass_reg + REASS_MASK_REG);
            iadev->rxing++;
        }
    }
}
static int open_rx(struct atm_vcc *vcc)
{
    IADEV *iadev;
    u_short __iomem *vc_table;
    u_short __iomem *reass_ptr;
    IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
    if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
    iadev = INPH_IA_DEV(vcc->dev);
    if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
        if (iadev->phy_type & FE_25MBIT_PHY) {
            printk("IA: ABR not supported\n");
            return -EINVAL;
        }
    }
    /* Make only this VCI in the vc table valid and let all
       others be invalid entries */
    vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
    vc_table += vcc->vci;
    /* mask the last 6 bits and OR it with 3 for 1K VCs */
    *vc_table = vcc->vci << 6;
    /* Also keep a list of open rx vcs so that we can attach them with
       incoming PDUs later. */
    if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
        (vcc->qos.txtp.traffic_class == ATM_ABR))
    {
        srv_cls_param_t srv_p;
        init_abr_vc(iadev, &srv_p);
        ia_open_abr_vc(iadev, &srv_p, vcc, 0);
    }
    else { /* for UBR later may need to add CBR logic */
        reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
        reass_ptr += vcc->vci;
        *reass_ptr = NO_AAL5_PKT;
    }
    if (iadev->rx_open[vcc->vci])
        printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
               vcc->dev->number, vcc->vci);
    iadev->rx_open[vcc->vci] = vcc;
    return 0;
}
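/*
 * Note on the RX VC table entry layout used above: the VCI lives in the
 * upper bits (vci << 6) and the low bits qualify the entry; rx_init()
 * below writes (i << 6) | 15 to mark an entry invalid, so storing plain
 * vci << 6 in open_rx() leaves those bits clear and validates just this
 * VCI.
 */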
static int rx_init(struct atm_dev *dev)
{
    IADEV *iadev;
    struct rx_buf_desc __iomem *buf_desc_ptr;
    unsigned long rx_pkt_start = 0;
    void *dle_addr;
    struct abr_vc_table *abr_vc_table;
    u16 *vc_table;
    u16 *reass_table;
    int i, j, vcsize_sel;
    u_short freeq_st_adr;
    u_short *freeq_start;
    iadev = INPH_IA_DEV(dev);
    // spin_lock_init(&iadev->rx_lock);
    /* Allocate 4k bytes - more aligned than needed (4k boundary) */
    dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
                                  &iadev->rx_dle_dma, GFP_KERNEL);
    if (!dle_addr) {
        printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
        goto err_out;
    }
    iadev->rx_dle_q.start = (struct dle *)dle_addr;
    iadev->rx_dle_q.read = iadev->rx_dle_q.start;
    iadev->rx_dle_q.write = iadev->rx_dle_q.start;
    iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
    /* the end of the dle q points to the entry after the last
       DLE that can be used. */
    /* write the upper 20 bits of the start address to rx list address register */
    /* We know this is 32bit bus addressed so the following is safe */
    writel(iadev->rx_dle_dma & 0xfffff000,
           iadev->dma + IPHASE5575_RX_LIST_ADDR);
    IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                   iadev->dma+IPHASE5575_TX_LIST_ADDR,
                   readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
            printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                   iadev->dma+IPHASE5575_RX_LIST_ADDR,
                   readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
    writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
    writew(0, iadev->reass_reg+MODE_REG);
    writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
    /* Receive side control memory map
       -------------------------------
       Buffer descr   0x0000 (736 - 23K)
       VP Table       0x5c00 (256 - 512)
       Except q       0x5e00 (128 - 512)
       Free buffer q  0x6000 (1K - 2K)
       Packet comp q  0x6800 (1K - 2K)
       Reass Table    0x7000 (1K - 2K)
       VC Table       0x7800 (1K - 2K)
       ABR VC Table   0x8000 (1K - 32K)
    */
    /* Base address for Buffer Descriptor Table */
    writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
    /* Set the buffer size register */
    writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
    /* Initialize each entry in the Buffer Descriptor Table */
    iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
    buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
    memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
    buf_desc_ptr++;
    rx_pkt_start = iadev->rx_pkt_ram;
    for (i=1; i<=iadev->num_rx_desc; i++)
    {
        memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
        buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
        buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
        buf_desc_ptr++;
        rx_pkt_start += iadev->rx_buf_sz;
    }
    IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
    i = FREE_BUF_DESC_Q*iadev->memSize;
    writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
    writew(i, iadev->reass_reg+FREEQ_ST_ADR);
    writew(i+iadev->num_rx_desc*sizeof(u_short),
           iadev->reass_reg+FREEQ_ED_ADR);
    writew(i, iadev->reass_reg+FREEQ_RD_PTR);
    writew(i+iadev->num_rx_desc*sizeof(u_short),
           iadev->reass_reg+FREEQ_WR_PTR);
    /* Fill the FREEQ with all the free descriptors. */
    freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
    freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
    for (i=1; i<=iadev->num_rx_desc; i++)
    {
        *freeq_start = (u_short)i;
        freeq_start++;
    }
    IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
    /* Packet Complete Queue */
    i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
    writew(i, iadev->reass_reg+PCQ_ST_ADR);
    writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
    writew(i, iadev->reass_reg+PCQ_RD_PTR);
    writew(i, iadev->reass_reg+PCQ_WR_PTR);
    /* Exception Queue */
    i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
    writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
    writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
           iadev->reass_reg+EXCP_Q_ED_ADR);
    writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
    writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
    /* Load local copy of FREEQ and PCQ ptrs */
    iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
    iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff;
    iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
    iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
    iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
    iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
    iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
    iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
    IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
                   iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
                   iadev->rfL.pcq_wr);)
    /* just for check - no VP TBL */
    /* VP Table */
    /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
    /* initialize VP Table for invalid VPIs
       - I guess we can write all 1s or 0x000f in the entire memory
         space or something similar.
    */
    /* This seems to work and looks right to me too !!! */
    i = REASS_TABLE * iadev->memSize;
    writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
    /* initialize the Reassembly table to "no AAL5 packet in progress" */
    reass_table = (u16 *)(iadev->reass_ram+i);
    j = REASS_TABLE_SZ * iadev->memSize;
    for (i=0; i < j; i++)
        *reass_table++ = NO_AAL5_PKT;
    i = 8*1024;
    vcsize_sel = 0;
    while (i != iadev->num_vc) {
        i /= 2;
        vcsize_sel++;
    }
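    /*
     * The loop above computes vcsize_sel = log2(8192 / num_vc): a 1K VC
     * board halves 8192 three times to reach 1024, so vcsize_sel ends up
     * 3 (matching the "=3 for 1K VCs" note below).  A minimal closed-form
     * sketch, assuming num_vc is a power of two no larger than 8K:
     */
#if 0
    vcsize_sel = ffs(8 * 1024 / iadev->num_vc) - 1;
#endif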
    i = RX_VC_TABLE * iadev->memSize;
    writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
    vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
    j = RX_VC_TABLE_SZ * iadev->memSize;
    for (i = 0; i < j; i++)
    {
        /* shift the reassembly pointer by 3 + lower 3 bits of
           vc_lkup_base register (=3 for 1K VCs) and the last byte
           is those low 3 bits.
           Shall program this later.
        */
        *vc_table = (i << 6) | 15; /* for invalid VCI */
        vc_table++;
    }
    /* ABR VC table */
    i = ABR_VC_TABLE * iadev->memSize;
    writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
    i = ABR_VC_TABLE * iadev->memSize;
    abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
    j = REASS_TABLE_SZ * iadev->memSize;
    memset((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
    for (i = 0; i < j; i++) {
        abr_vc_table->rdf = 0x0003;
        abr_vc_table->air = 0x5eb1;
        abr_vc_table++;
    }
    /* Initialize other registers */
    /* VP Filter Register set for VC Reassembly only */
    writew(0xff00, iadev->reass_reg+VP_FILTER);
    writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
    writew(0x1, iadev->reass_reg+PROTOCOL_ID);
    /* Packet Timeout Count related Registers :
       Set packet timeout to occur in about 3 seconds
       Set Packet Aging Interval count register to overflow in about 4 us
    */
    writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT);
    i = (j >> 6) & 0xFF;
    j += 2 * (j - 1);
    i |= ((j << 2) & 0xFF00);
    writew(i, iadev->reass_reg+TMOUT_RANGE);
    /* initialize the desc_tbl */
    for (i=0; i<iadev->num_tx_desc; i++)
        iadev->desc_tbl[i].timestamp = 0;
    /* to clear the interrupt status register - read it */
    readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
    /* Mask Register - clear it */
    writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
    skb_queue_head_init(&iadev->rx_dma_q);
    iadev->rx_free_desc_qhead = NULL;
    iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
    if (!iadev->rx_open) {
        printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
               dev->number);
        goto err_free_dle;
    }
    iadev->rxing = 1;
    iadev->rx_pkt_cnt = 0;
    /* Mode Register */
    writew(R_ONLINE, iadev->reass_reg+MODE_REG);
    return 0;
err_free_dle:
    dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                      iadev->rx_dle_dma);
err_out:
    return -ENOMEM;
}
/*
   The memory map suggested in appendix A and the coding for it.
   Keeping it around just in case we change our mind later.
   Buffer descr  0x0000 (128 - 4K)
   UBR sched     0x1000 (1K - 4K)
   UBR Wait q    0x2000 (1K - 4K)
   Commn queues  0x3000 Packet Ready, Transmit comp(0x3100)
                 (128 - 256) each
   extended VC   0x4000 (1K - 8K)
   ABR sched     0x6000 and ABR wait queue (1K - 2K) each
   CBR sched     0x7000 (as needed)
   VC table      0x8000 (1K - 32K)
*/
static void tx_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    unsigned short status;
    unsigned long flags;
    iadev = INPH_IA_DEV(dev);
    status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
    if (status & TRANSMIT_DONE){
        IF_EVENT(printk("Transmit Done Intr logic run\n");)
        spin_lock_irqsave(&iadev->tx_lock, flags);
        ia_tx_poll(iadev);
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        if (iadev->close_pending)
            wake_up(&iadev->close_wait);
    }
    if (status & TCQ_NOT_EMPTY)
    {
        IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
    }
}
static void tx_dle_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    struct dle *dle, *cur_dle;
    struct sk_buff *skb;
    struct atm_vcc *vcc;
    struct ia_vcc *iavcc;
    u_int dle_lp;
    unsigned long flags;
    iadev = INPH_IA_DEV(dev);
    spin_lock_irqsave(&iadev->tx_lock, flags);
    dle = iadev->tx_dle_q.read;
    dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
             (sizeof(struct dle)*DLE_ENTRIES - 1);
    cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
    while (dle != cur_dle)
    {
        /* free the DMAed skb */
        skb = skb_dequeue(&iadev->tx_dma_q);
        if (!skb) break;
        /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
        if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
            dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
                             DMA_TO_DEVICE);
        }
        vcc = ATM_SKB(skb)->vcc;
        if (!vcc) {
            printk("tx_dle_intr: vcc is null\n");
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            dev_kfree_skb_any(skb);
            return;
        }
        iavcc = INPH_IA_VCC(vcc);
        if (!iavcc) {
            printk("tx_dle_intr: iavcc is null\n");
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            dev_kfree_skb_any(skb);
            return;
        }
        if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
            if ((vcc->pop) && (skb->len != 0))
            {
                vcc->pop(vcc, skb);
            }
            else {
                dev_kfree_skb_any(skb);
            }
        }
        else { /* Hold the rate-limited skb for flow control */
            IA_SKB_STATE(skb) |= IA_DLED;
            skb_queue_tail(&iavcc->txing_skb, skb);
        }
        IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%p \n", skb);)
        if (++dle == iadev->tx_dle_q.end)
            dle = iadev->tx_dle_q.start;
    }
    iadev->tx_dle_q.read = dle;
    spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
static int open_tx(struct atm_vcc *vcc)
{
    struct ia_vcc *ia_vcc;
    IADEV *iadev;
    struct main_vc *vc;
    struct ext_vc *evc;
    int ret;
    IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
    if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
    iadev = INPH_IA_DEV(vcc->dev);
    if (iadev->phy_type & FE_25MBIT_PHY) {
        if (vcc->qos.txtp.traffic_class == ATM_ABR) {
            printk("IA: ABR not supported\n");
            return -EINVAL;
        }
        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
            printk("IA: CBR not supported\n");
            return -EINVAL;
        }
    }
    ia_vcc = INPH_IA_VCC(vcc);
    memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
    if (vcc->qos.txtp.max_sdu >
        (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
        printk("IA: SDU size (%d) over the configured SDU size %d\n",
               vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
        vcc->dev_data = NULL;
        kfree(ia_vcc);
        return -EINVAL;
    }
    ia_vcc->vc_desc_cnt = 0;
    ia_vcc->txing = 1;
    /* find pcr */
    if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
        vcc->qos.txtp.pcr = iadev->LineRate;
    else if ((vcc->qos.txtp.max_pcr == 0) && (vcc->qos.txtp.pcr <= 0))
        vcc->qos.txtp.pcr = iadev->LineRate;
    else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr > 0))
        vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
    if (vcc->qos.txtp.pcr > iadev->LineRate)
        vcc->qos.txtp.pcr = iadev->LineRate;
    ia_vcc->pcr = vcc->qos.txtp.pcr;
    if (ia_vcc->pcr > (iadev->LineRate / 6)) ia_vcc->ltimeout = HZ / 10;
    else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
    else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
    else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
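    /*
     * Worked example for the ltimeout ladder above, assuming an OC-3
     * line rate of roughly 353207 cells/sec: a VC with pcr above ~58868
     * (LineRate/6) gets HZ/10; one above ~2717 (LineRate/130) gets HZ;
     * a very slow VC (pcr <= 170) gets 16*HZ; anything in between scales
     * as 2700*HZ/pcr.
     */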
    if (ia_vcc->pcr < iadev->rate_limit)
        skb_queue_head_init(&ia_vcc->txing_skb);
    if (ia_vcc->pcr < iadev->rate_limit) {
        struct sock *sk = sk_atm(vcc);
        if (vcc->qos.txtp.max_sdu != 0) {
            if (ia_vcc->pcr > 60000)
                sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
            else if (ia_vcc->pcr > 2000)
                sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
            else
                sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
        }
        else
            sk->sk_sndbuf = 24576;
    }
    vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
    evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
    vc += vcc->vci;
    evc += vcc->vci;
    memset((caddr_t)vc, 0, sizeof(*vc));
    memset((caddr_t)evc, 0, sizeof(*evc));
    /* store the most significant 4 bits of vci as the last 4 bits
       of first part of atm header.
       store the last 12 bits of vci as first 12 bits of the second
       part of the atm header.
    */
    evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
    evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
    /* check the following for different traffic classes */
    if (vcc->qos.txtp.traffic_class == ATM_UBR)
    {
        vc->type = UBR;
        vc->status = CRC_APPEND;
        vc->acr = cellrate_to_float(iadev->LineRate);
        if (vcc->qos.txtp.pcr > 0)
            vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
        IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                      vcc->qos.txtp.max_pcr, vc->acr);)
    }
    else if (vcc->qos.txtp.traffic_class == ATM_ABR)
    {
        srv_cls_param_t srv_p;
        IF_ABR(printk("Tx ABR VCC\n");)
        init_abr_vc(iadev, &srv_p);
        if (vcc->qos.txtp.pcr > 0)
            srv_p.pcr = vcc->qos.txtp.pcr;
        if (vcc->qos.txtp.min_pcr > 0) {
            int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
            if (tmpsum > iadev->LineRate)
                return -EBUSY;
            srv_p.mcr = vcc->qos.txtp.min_pcr;
            iadev->sum_mcr += vcc->qos.txtp.min_pcr;
        }
        else srv_p.mcr = 0;
        if (vcc->qos.txtp.icr)
            srv_p.icr = vcc->qos.txtp.icr;
        if (vcc->qos.txtp.tbe)
            srv_p.tbe = vcc->qos.txtp.tbe;
        if (vcc->qos.txtp.frtt)
            srv_p.frtt = vcc->qos.txtp.frtt;
        if (vcc->qos.txtp.rif)
            srv_p.rif = vcc->qos.txtp.rif;
        if (vcc->qos.txtp.rdf)
            srv_p.rdf = vcc->qos.txtp.rdf;
        if (vcc->qos.txtp.nrm_pres)
            srv_p.nrm = vcc->qos.txtp.nrm;
        if (vcc->qos.txtp.trm_pres)
            srv_p.trm = vcc->qos.txtp.trm;
        if (vcc->qos.txtp.adtf_pres)
            srv_p.adtf = vcc->qos.txtp.adtf;
        if (vcc->qos.txtp.cdf_pres)
            srv_p.cdf = vcc->qos.txtp.cdf;
        if (srv_p.icr > srv_p.pcr)
            srv_p.icr = srv_p.pcr;
        IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
                      srv_p.pcr, srv_p.mcr);)
        ia_open_abr_vc(iadev, &srv_p, vcc, 1);
    } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
        if (iadev->phy_type & FE_25MBIT_PHY) {
            printk("IA: CBR not supported\n");
            return -EINVAL;
        }
        if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
            IF_CBR(printk("PCR is not available\n");)
            return -1;
        }
        vc->type = CBR;
        vc->status = CRC_APPEND;
        if ((ret = ia_cbr_setup(iadev, vcc)) < 0) {
            return ret;
        }
    } else {
        printk("iadev: traffic class other than UBR, ABR or CBR not supported\n");
    }
    iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
    IF_EVENT(printk("ia open_tx returning \n");)
    return 0;
}
static int tx_init(struct atm_dev *dev)
{
    IADEV *iadev;
    struct tx_buf_desc *buf_desc_ptr;
    unsigned int tx_pkt_start;
    void *dle_addr;
    int i;
    u_short tcq_st_adr;
    u_short *tcq_start;
    u_short prq_st_adr;
    u_short *prq_start;
    struct main_vc *vc;
    struct ext_vc *evc;
    u_short tmp16;
    u32 vcsize_sel;
    iadev = INPH_IA_DEV(dev);
    spin_lock_init(&iadev->tx_lock);
    IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                   readw(iadev->seg_reg+SEG_MASK_REG));)
    /* Allocate 4k (boundary aligned) bytes */
    dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
                                  &iadev->tx_dle_dma, GFP_KERNEL);
    if (!dle_addr) {
        printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
        goto err_out;
    }
    iadev->tx_dle_q.start = (struct dle*)dle_addr;
    iadev->tx_dle_q.read = iadev->tx_dle_q.start;
    iadev->tx_dle_q.write = iadev->tx_dle_q.start;
    iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
    /* write the upper 20 bits of the start address to tx list address register */
    writel(iadev->tx_dle_dma & 0xfffff000,
           iadev->dma + IPHASE5575_TX_LIST_ADDR);
    writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
    writew(0, iadev->seg_reg+MODE_REG_0);
    writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
    iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
    iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
    iadev->ABR_SCHED_TABLE_ADDR = iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
    /*
       Transmit side control memory map
       --------------------------------
       Buffer descr   0x0000 (128 - 4K)
       Commn queues   0x1000 Transmit comp, Packet ready(0x1400)
                      (512 - 1K) each
                      TCQ - 4K, PRQ - 5K
       CBR Table      0x1800 (as needed) - 6K
       UBR Table      0x3000 (1K - 4K) - 12K
       UBR Wait queue 0x4000 (1K - 4K) - 16K
       ABR sched      0x5000 and ABR wait queue (1K - 2K) each
                      ABR Tbl - 20K, ABR Wq - 22K
       extended VC    0x6000 (1K - 8K) - 24K
       VC Table       0x8000 (1K - 32K) - 32K
       Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
       and Wait q, which can be allotted later.
    */
    /* Buffer Descriptor Table Base address */
    writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
    /* initialize each entry in the buffer descriptor table */
    buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
    memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
    buf_desc_ptr++;
    tx_pkt_start = TX_PACKET_RAM;
    for (i=1; i<=iadev->num_tx_desc; i++)
    {
        memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
        buf_desc_ptr->desc_mode = AAL5;
        buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
        buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
        buf_desc_ptr++;
        tx_pkt_start += iadev->tx_buf_sz;
    }
    iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
                                  sizeof(*iadev->tx_buf),
                                  GFP_KERNEL);
    if (!iadev->tx_buf) {
        printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
        goto err_free_dle;
    }
    for (i = 0; i < iadev->num_tx_desc; i++)
    {
        struct cpcs_trailer *cpcs;
        cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
        if (!cpcs) {
            printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
            goto err_free_tx_bufs;
        }
        iadev->tx_buf[i].cpcs = cpcs;
        iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
                                                   cpcs,
                                                   sizeof(*cpcs),
                                                   DMA_TO_DEVICE);
    }
    iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
                                    sizeof(*iadev->desc_tbl),
                                    GFP_KERNEL);
    if (!iadev->desc_tbl) {
        printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
        goto err_free_all_tx_bufs;
    }
    /* Communication Queues base address */
    i = TX_COMP_Q * iadev->memSize;
    writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
    /* Transmit Complete Queue */
    writew(i, iadev->seg_reg+TCQ_ST_ADR);
    writew(i, iadev->seg_reg+TCQ_RD_PTR);
    writew(i+iadev->num_tx_desc*sizeof(u_short), iadev->seg_reg+TCQ_WR_PTR);
    iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
    writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
           iadev->seg_reg+TCQ_ED_ADR);
    /* Fill the TCQ with all the free descriptors. */
    tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
    tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
    for (i=1; i<=iadev->num_tx_desc; i++)
    {
        *tcq_start = (u_short)i;
        tcq_start++;
    }
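    /*
     * TCQ geometry, as set up above: the queue region spans
     * 2 * num_tx_desc 16-bit slots (see the TCQ_ED_ADR write), but only
     * num_tx_desc free descriptor numbers are pre-filled, so the hardware
     * write pointer and the driver's read pointer chase each other around
     * a half-full ring.  The TCQ doubles as the free-descriptor pool; see
     * the comment in ia_pkt_tx().
     */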
    /* Packet Ready Queue */
    i = PKT_RDY_Q * iadev->memSize;
    writew(i, iadev->seg_reg+PRQ_ST_ADR);
    writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
           iadev->seg_reg+PRQ_ED_ADR);
    writew(i, iadev->seg_reg+PRQ_RD_PTR);
    writew(i, iadev->seg_reg+PRQ_WR_PTR);
    /* Load local copy of PRQ and TCQ ptrs */
    iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
    iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
    iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
    iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
    iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
    iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
    /* Just for safety initializing the queue to have desc 1 always */
    /* Fill the PRQ with all the free descriptors. */
    prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
    prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
    for (i=1; i<=iadev->num_tx_desc; i++)
    {
        *prq_start = (u_short)0; /* desc 1 in all entries */
        prq_start++;
    }
    /* CBR Table */
    IF_INIT(printk("Start CBR Init\n");)
#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
    writew(0, iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
    tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
    IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
    writew(tmp16, iadev->seg_reg+CBR_PTR_BASE);
#endif
    IF_INIT(printk("value in register = 0x%x\n",
                   readw(iadev->seg_reg+CBR_PTR_BASE));)
    tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
    writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
    IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                   readw(iadev->seg_reg+CBR_TAB_BEG));)
    writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
    tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
    writew(tmp16, iadev->seg_reg+CBR_TAB_END);
    IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
                   iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
    IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
                   readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
                   readw(iadev->seg_reg+CBR_TAB_END+1));)
    /* Initialize the CBR Scheduling Table */
    memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
              0, iadev->num_vc*6);
    iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
    iadev->CbrEntryPt = 0;
    iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
    iadev->NumEnabledCBR = 0;
    /* UBR scheduling Table and wait queue */
    /* initialize all bytes of UBR scheduler table and wait queue to 0
       - SCHEDSZ is 1K (# of entries).
       - UBR Table size is 4K
       - UBR wait queue is 4K
       since the table and wait queues are contiguous, all the bytes
       can be initialized by one memset.
    */
    vcsize_sel = 0;
    i = 8*1024;
    while (i != iadev->num_vc) {
        i /= 2;
        vcsize_sel++;
    }
    i = MAIN_VC_TABLE * iadev->memSize;
    writew(vcsize_sel | ((i >> 8) & 0xfff8), iadev->seg_reg+VCT_BASE);
    i = EXT_VC_TABLE * iadev->memSize;
    writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
    i = UBR_SCHED_TABLE * iadev->memSize;
    writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
    i = UBR_WAIT_Q * iadev->memSize;
    writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
    memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
           0, iadev->num_vc*8);
    /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
    /* initialize all bytes of ABR scheduler table and wait queue to 0
       - SCHEDSZ is 1K (# of entries).
       - ABR Table size is 2K
       - ABR wait queue is 2K
       since the table and wait queues are contiguous, all the bytes
       can be initialized by one memset.
    */
    i = ABR_SCHED_TABLE * iadev->memSize;
    writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
    i = ABR_WAIT_Q * iadev->memSize;
    writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
    i = ABR_SCHED_TABLE*iadev->memSize;
    memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
    vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
    evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
    iadev->testTable = kmalloc_array(iadev->num_vc,
                                     sizeof(*iadev->testTable),
                                     GFP_KERNEL);
    if (!iadev->testTable) {
        printk("Get freepage failed\n");
        goto err_free_desc_tbl;
    }
    for (i=0; i<iadev->num_vc; i++)
    {
        memset((caddr_t)vc, 0, sizeof(*vc));
        memset((caddr_t)evc, 0, sizeof(*evc));
        iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
                                      GFP_KERNEL);
        if (!iadev->testTable[i])
            goto err_free_test_tables;
        iadev->testTable[i]->lastTime = 0;
        iadev->testTable[i]->fract = 0;
        iadev->testTable[i]->vc_status = VC_UBR;
        vc++;
        evc++;
    }
    /* Other Initialization */
    /* Max Rate Register */
    if (iadev->phy_type & FE_25MBIT_PHY) {
        writew(RATE25, iadev->seg_reg+MAXRATE);
        writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
    }
    else {
        writew(cellrate_to_float(iadev->LineRate), iadev->seg_reg+MAXRATE);
        writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
    }
    /* Set Idle Header Registers to be sure */
    writew(0, iadev->seg_reg+IDLEHEADHI);
    writew(0, iadev->seg_reg+IDLEHEADLO);
    /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
    writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
    iadev->close_pending = 0;
    init_waitqueue_head(&iadev->close_wait);
    init_waitqueue_head(&iadev->timeout_wait);
    skb_queue_head_init(&iadev->tx_dma_q);
    ia_init_rtn_q(&iadev->tx_return_q);
    /* RM Cell Protocol ID and Message Type */
    writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
    skb_queue_head_init(&iadev->tx_backlog);
    /* Mode Register 1 */
    writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
    /* Mode Register 0 */
    writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
    /* Interrupt Status Register - read to clear */
    readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
    /* Interrupt Mask Reg - don't mask TCQ_NOT_EMPTY interrupt generation */
    writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
    writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
    iadev->tx_pkt_cnt = 0;
    iadev->rate_limit = iadev->LineRate / 3;
    return 0;
err_free_test_tables:
    while (--i >= 0)
        kfree(iadev->testTable[i]);
    kfree(iadev->testTable);
err_free_desc_tbl:
    kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
    i = iadev->num_tx_desc;
err_free_tx_bufs:
    while (--i >= 0) {
        struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
        dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
                         sizeof(*desc->cpcs), DMA_TO_DEVICE);
        kfree(desc->cpcs);
    }
    kfree(iadev->tx_buf);
err_free_dle:
    dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
                      iadev->tx_dle_dma);
err_out:
    return -ENOMEM;
}
static irqreturn_t ia_int(int irq, void *dev_id)
{
    struct atm_dev *dev;
    IADEV *iadev;
    unsigned int status;
    int handled = 0;
    dev = dev_id;
    iadev = INPH_IA_DEV(dev);
    while ((status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
    {
        handled = 1;
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
        if (status & STAT_REASSINT)
        {
            /* reassembly (receive) interrupt */
            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
            rx_intr(dev);
        }
        if (status & STAT_DLERINT)
        {
            /* Clear this bit by writing a 1 to it. */
            writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
            rx_dle_intr(dev);
        }
        if (status & STAT_SEGINT)
        {
            /* segmentation (transmit) interrupt */
            IF_EVENT(printk("IA: tx_intr \n");)
            tx_intr(dev);
        }
        if (status & STAT_DLETINT)
        {
            writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
            tx_dle_intr(dev);
        }
        if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
        {
            if (status & STAT_FEINT)
                ia_frontend_intr(iadev);
        }
    }
    return IRQ_RETVAL(handled);
}
/*----------------------------- entries --------------------------------*/
static int get_esi(struct atm_dev *dev)
{
    IADEV *iadev;
    int i;
    u32 mac1;
    u16 mac2;
    iadev = INPH_IA_DEV(dev);
    mac1 = cpu_to_be32(le32_to_cpu(readl(iadev->reg+IPHASE5575_MAC1)));
    mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
    IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
    for (i=0; i<MAC1_LEN; i++)
        dev->esi[i] = mac1 >> (8*(MAC1_LEN-1-i));
    for (i=0; i<MAC2_LEN; i++)
        dev->esi[i+MAC1_LEN] = mac2 >> (8*(MAC2_LEN-1-i));
    return 0;
}
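/*
 * The 6-byte ESI is assembled MSB-first from the MAC1 (4-byte) and MAC2
 * (2-byte) registers.  For example, with hypothetical register values
 * mac1 = 0x00204806 and mac2 = 0x1234, the loops above would produce the
 * ESI 00-20-48-06-12-34.
 */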
static int reset_sar(struct atm_dev *dev)
{
    IADEV *iadev;
    int i, error = 1;
    unsigned int pci[64];
    iadev = INPH_IA_DEV(dev);
    /* the SAR reset clobbers PCI config space, so save it first ... */
    for (i=0; i<64; i++)
        if ((error = pci_read_config_dword(iadev->pci,
                         i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
            return error;
    writel(0, iadev->reg+IPHASE5575_EXT_RESET);
    /* ... and restore it afterwards */
    for (i=0; i<64; i++)
        if ((error = pci_write_config_dword(iadev->pci,
                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
            return error;
    udelay(5);
    return 0;
}
static int ia_init(struct atm_dev *dev)
{
    IADEV *iadev;
    unsigned long real_base;
    void __iomem *base;
    unsigned short command;
    int error, i;
    /* The device has been identified and registered. Now we read
       necessary configuration info like memory base address,
       interrupt number etc. */
    IF_INIT(printk(">ia_init\n");)
    dev->ci_range.vpi_bits = 0;
    dev->ci_range.vci_bits = NR_VCI_LD;
    iadev = INPH_IA_DEV(dev);
    real_base = pci_resource_start(iadev->pci, 0);
    iadev->irq = iadev->pci->irq;
    error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
    if (error) {
        printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
               dev->number, error);
        return -EINVAL;
    }
    IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
                   dev->number, iadev->pci->revision, real_base, iadev->irq);)
    /* find mapping size of board */
    iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
    if (iadev->pci_map_size == 0x100000){
        iadev->num_vc = 4096;
        dev->ci_range.vci_bits = NR_VCI_4K_LD;
        iadev->memSize = 4;
    }
    else if (iadev->pci_map_size == 0x40000) {
        iadev->num_vc = 1024;
        iadev->memSize = 1;
    }
    else {
        printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
        return -EINVAL;
    }
    IF_INIT(printk(DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
    /* enable bus mastering */
    pci_set_master(iadev->pci);
    /*
     * Delay at least 1us before doing any mem accesses (how 'bout 10?)
     */
    udelay(10);
    /* map the physical address to a virtual address in address space */
    base = ioremap(real_base, iadev->pci_map_size);
    if (!base)
    {
        printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
               dev->number);
        return -ENOMEM;
    }
    IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
                   dev->number, iadev->pci->revision, base, iadev->irq);)
    /* filling the iphase dev structure */
    iadev->mem = iadev->pci_map_size / 2;
    iadev->real_base = real_base;
    iadev->base = base;
    /* Bus Interface Control Registers */
    iadev->reg = base + REG_BASE;
    /* Segmentation Control Registers */
    iadev->seg_reg = base + SEG_BASE;
    /* Reassembly Control Registers */
    iadev->reass_reg = base + REASS_BASE;
    /* Front end/ DMA control registers */
    iadev->phy = base + PHY_BASE;
    iadev->dma = base + PHY_BASE;
    /* RAM - Segmentation RAM and Reassembly RAM */
    iadev->ram = base + ACTUAL_RAM_BASE;
    iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
    iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
    /* print out the above base addresses */
    IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
                   iadev->reg, iadev->seg_reg, iadev->reass_reg,
                   iadev->phy, iadev->ram, iadev->seg_ram,
                   iadev->reass_ram);)
    /* try reading the MAC address */
    error = get_esi(dev);
    if (error) {
        iounmap(iadev->base);
        return error;
    }
    printk("IA: ");
    for (i = 0; i < ESI_LEN; i++)
        printk("%s%02X", i ? "-" : "", dev->esi[i]);
    printk("\n");
    /* reset SAR */
    if (reset_sar(dev)) {
        iounmap(iadev->base);
        printk("IA: reset SAR failed, please try again\n");
        return 1;
    }
    return 0;
}
static void ia_update_stats(IADEV *iadev) {
    if (!iadev->carrier_detect)
        return;
    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0) & 0xffff;
    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
    iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR) & 0xffff;
    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO) & 0xffff;
    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO) & 0xffff) << 16;
    return;
}
static void ia_led_timer(struct timer_list *unused) {
    unsigned long flags;
    static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    u_char i;
    static u32 ctrl_reg;
    for (i = 0; i < iadev_count; i++) {
        if (ia_dev[i]) {
            ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
            if (blinking[i] == 0) {
                blinking[i]++;
                ctrl_reg &= (~CTRL_LED);
                writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
                ia_update_stats(ia_dev[i]);
            }
            else {
                blinking[i] = 0;
                ctrl_reg |= CTRL_LED;
                writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
                spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
                if (ia_dev[i]->close_pending)
                    wake_up(&ia_dev[i]->close_wait);
                ia_tx_poll(ia_dev[i]);
                spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
            }
        }
    }
    mod_timer(&ia_timer, jiffies + HZ / 4);
    return;
}
static void ia_phy_put(struct atm_dev *dev, unsigned char value,
                       unsigned long addr)
{
    writel(value, INPH_IA_DEV(dev)->phy+addr);
}
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
{
    return readl(INPH_IA_DEV(dev)->phy+addr);
}
static void ia_free_tx(IADEV *iadev)
{
    int i;
    kfree(iadev->desc_tbl);
    for (i = 0; i < iadev->num_vc; i++)
        kfree(iadev->testTable[i]);
    kfree(iadev->testTable);
    for (i = 0; i < iadev->num_tx_desc; i++) {
        struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
        dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
                         sizeof(*desc->cpcs), DMA_TO_DEVICE);
        kfree(desc->cpcs);
    }
    kfree(iadev->tx_buf);
    dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
                      iadev->tx_dle_dma);
}
static void ia_free_rx(IADEV *iadev)
{
    kfree(iadev->rx_open);
    dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                      iadev->rx_dle_dma);
}
static int ia_start(struct atm_dev *dev)
{
    IADEV *iadev;
    int error;
    unsigned char phy;
    u32 ctrl_reg;
    IF_EVENT(printk(">ia_start\n");)
    iadev = INPH_IA_DEV(dev);
    if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
        printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
               dev->number, iadev->irq);
        error = -EAGAIN;
        goto err_out;
    }
    /* @@@ should release IRQ on error */
    /* enabling memory + master */
    if ((error = pci_write_config_word(iadev->pci,
                                       PCI_COMMAND,
                                       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER)))
    {
        printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
               "master (0x%x)\n", dev->number, error);
        error = -EIO;
        goto err_free_irq;
    }
    udelay(10);
    /* Maybe we should reset the front end, initialize Bus Interface Control
       Registers and see. */
    IF_INIT(printk("Bus ctrl reg: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
    ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
               | CTRL_B8
               | CTRL_B16
               | CTRL_B32
               | CTRL_B48
               | CTRL_B64
               | CTRL_B128
               | CTRL_ERRMASK
               | CTRL_DLETMASK /* should be removed later */
               | CTRL_DLERMASK
               | CTRL_SEGMASK
               | CTRL_REASSMASK
               | CTRL_FEMASK
               | CTRL_CSPREEMPT;
    writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
            printk("Bus status reg after init: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
    ia_hw_type(iadev);
    error = tx_init(dev);
    if (error)
        goto err_free_irq;
    error = rx_init(dev);
    if (error)
        goto err_free_tx;
    ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
    phy = 0; /* resolve compiler complaint */
    IF_INIT (
        if ((phy = ia_phy_get(dev, 0)) == 0x30)
            printk("IA: pm5346,rev.%d\n", phy & 0x0f);
        else
            printk("IA: utopia,rev.%0x\n", phy);)
    if (iadev->phy_type & FE_25MBIT_PHY)
        ia_mb25_init(iadev);
    else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
        ia_suni_pm7345_init(iadev);
    else {
        error = suni_init(dev);
        if (error)
            goto err_free_rx;
        if (dev->phy->start) {
            error = dev->phy->start(dev);
            if (error)
                goto err_free_rx;
        }
        /* Get iadev->carrier_detect status */
        ia_frontend_intr(iadev);
    }
    return 0;
err_free_rx:
    ia_free_rx(iadev);
err_free_tx:
    ia_free_tx(iadev);
err_free_irq:
    free_irq(iadev->irq, dev);
err_out:
    return error;
}
static void ia_close(struct atm_vcc *vcc)
{
    DEFINE_WAIT(wait);
    u16 *vc_table;
    IADEV *iadev;
    struct ia_vcc *ia_vcc;
    struct sk_buff *skb = NULL;
    struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
    unsigned long closetime, flags;
    iadev = INPH_IA_DEV(vcc->dev);
    ia_vcc = INPH_IA_VCC(vcc);
    if (!ia_vcc) return;
    IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
                    ia_vcc->vc_desc_cnt, vcc->vci);)
    clear_bit(ATM_VF_READY, &vcc->flags);
    skb_queue_head_init(&tmp_tx_backlog);
    skb_queue_head_init(&tmp_vcc_backlog);
    if (vcc->qos.txtp.traffic_class != ATM_NONE) {
        iadev->close_pending++;
        prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(500));
        finish_wait(&iadev->timeout_wait, &wait);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        while ((skb = skb_dequeue(&iadev->tx_backlog))) {
            if (ATM_SKB(skb)->vcc == vcc){
                if (vcc->pop) vcc->pop(vcc, skb);
                else dev_kfree_skb_any(skb);
            }
            else
                skb_queue_tail(&tmp_tx_backlog, skb);
        }
        while ((skb = skb_dequeue(&tmp_tx_backlog)))
            skb_queue_tail(&iadev->tx_backlog, skb);
        IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);)
        closetime = 300000 / ia_vcc->pcr;
        if (closetime == 0)
            closetime = 1;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
        wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        iadev->close_pending--;
        iadev->testTable[vcc->vci]->lastTime = 0;
        iadev->testTable[vcc->vci]->fract = 0;
        iadev->testTable[vcc->vci]->vc_status = VC_UBR;
        if (vcc->qos.txtp.traffic_class == ATM_ABR) {
            if (vcc->qos.txtp.min_pcr > 0)
                iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
        }
        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
            ia_vcc = INPH_IA_VCC(vcc);
            iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
            ia_cbrVc_close(vcc);
        }
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
    }
    if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
        // reset reass table
        vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
        vc_table += vcc->vci;
        *vc_table = NO_AAL5_PKT;
        // reset vc table
        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        vc_table += vcc->vci;
        *vc_table = (vcc->vci << 6) | 15;
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
            struct abr_vc_table __iomem *abr_vc_table =
                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
            abr_vc_table += vcc->vci;
            abr_vc_table->rdf = 0x0003;
            abr_vc_table->air = 0x5eb1;
        }
        // Drain the packets
        rx_dle_intr(vcc->dev);
        iadev->rx_open[vcc->vci] = NULL;
    }
    kfree(INPH_IA_VCC(vcc));
    ia_vcc = NULL;
    vcc->dev_data = NULL;
    clear_bit(ATM_VF_ADDR, &vcc->flags);
    return;
}
static int ia_open(struct atm_vcc *vcc)
{
    struct ia_vcc *ia_vcc;
    int error;
    if (!test_bit(ATM_VF_PARTIAL, &vcc->flags))
    {
        IF_EVENT(printk("ia: not partially allocated resources\n");)
        vcc->dev_data = NULL;
    }
    /* note: the original constants here were crossed (vci tested against
       ATM_VPI_UNSPEC and vice versa); both are -1, so behavior is the same */
    if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)
    {
        IF_EVENT(printk("iphase open: unspec part\n");)
        set_bit(ATM_VF_ADDR, &vcc->flags);
    }
    if (vcc->qos.aal != ATM_AAL5)
        return -EINVAL;
    IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
                    vcc->dev->number, vcc->vpi, vcc->vci);)
    /* Device dependent initialization */
    ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
    if (!ia_vcc) return -ENOMEM;
    vcc->dev_data = ia_vcc;
    if ((error = open_rx(vcc)))
    {
        IF_EVENT(printk("iadev: error in open_rx, closing\n");)
        ia_close(vcc);
        return error;
    }
    if ((error = open_tx(vcc)))
    {
        IF_EVENT(printk("iadev: error in open_tx, closing\n");)
        ia_close(vcc);
        return error;
    }
    set_bit(ATM_VF_READY, &vcc->flags);
#if 0
    {
        static u8 first = 1;
        if (first) {
            ia_timer.expires = jiffies + 3*HZ;
            add_timer(&ia_timer);
            first = 0;
        }
    }
#endif
    IF_EVENT(printk("ia open returning\n");)
    return 0;
}
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
    IF_EVENT(printk(">ia_change_qos\n");)
    return 0;
}
static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
{
    IA_CMDBUF ia_cmds;
    IADEV *iadev;
    int i, board;
    u16 __user *tmps;
    IF_EVENT(printk(">ia_ioctl\n");)
    if (cmd != IA_CMD) {
        if (!dev->phy->ioctl) return -EINVAL;
        return dev->phy->ioctl(dev, cmd, arg);
    }
    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
    board = ia_cmds.status;
    if ((board < 0) || (board > iadev_count))
        board = 0;
    board = array_index_nospec(board, iadev_count + 1);
    iadev = ia_dev[board];
    switch (ia_cmds.cmd) {
    case MEMDUMP:
    {
        switch (ia_cmds.sub_cmd) {
        case MEMDUMP_DEV:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
                return -EFAULT;
            ia_cmds.status = 0;
            break;
        case MEMDUMP_SEGREG:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            tmps = (u16 __user *)ia_cmds.buf;
            for (i=0; i<0x80; i+=2, tmps++)
                if (put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
            ia_cmds.status = 0;
            ia_cmds.len = 0x80;
            break;
        case MEMDUMP_REASSREG:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            tmps = (u16 __user *)ia_cmds.buf;
            for (i=0; i<0x80; i+=2, tmps++)
                if (put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
            ia_cmds.status = 0;
            ia_cmds.len = 0x80;
            break;
        case MEMDUMP_FFL:
        {
            ia_regs_t *regs_local;
            ffredn_t *ffL;
            rfredn_t *rfL;
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
            if (!regs_local) return -ENOMEM;
            ffL = &regs_local->ffredn;
            rfL = &regs_local->rfredn;
            /* Copy real rfred registers into the local copy */
            for (i=0; i<(sizeof(rfredn_t))/4; i++)
                ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
            /* Copy real ffred registers into the local copy */
            for (i=0; i<(sizeof(ffredn_t))/4; i++)
                ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
            if (copy_to_user(ia_cmds.buf, regs_local, sizeof(ia_regs_t))) {
                kfree(regs_local);
                return -EFAULT;
            }
            kfree(regs_local);
            printk("Board %d registers dumped\n", board);
            ia_cmds.status = 0;
        }
        break;
        case READ_REG:
        {
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            desc_dbg(iadev);
            ia_cmds.status = 0;
        }
        break;
        case 0x6:
        {
            ia_cmds.status = 0;
            printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
            printk("rtn_q: 0x%lx\n", (long)ia_deque_rtn_q(&iadev->tx_return_q));
        }
        break;
        case 0x8:
        {
            struct k_sonet_stats *stats;
            stats = &PRIV(_ia_dev[board])->sonet_stats;
            printk("section_bip: %d\n", atomic_read(&stats->section_bip));
            printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
            printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
            printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
            printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
            printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
            printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
            printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
            printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
        }
        ia_cmds.status = 0;
        break;
        case 0x9:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            for (i = 1; i <= iadev->num_rx_desc; i++)
                free_desc(_ia_dev[board], i);
            writew(~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
                   iadev->reass_reg+REASS_MASK_REG);
            iadev->rxing = 1;
            ia_cmds.status = 0;
            break;
        case 0xb:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            ia_frontend_intr(iadev);
            break;
        case 0xa:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            {
                ia_cmds.status = 0;
                IADebugFlag = ia_cmds.maddr;
                printk("New debug option loaded\n");
            }
            break;
        default:
            ia_cmds.status = 0;
            break;
        }
    }
    break;
    default:
        break;
    }
    return 0;
}
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
                         void __user *optval, int optlen)
{
    IF_EVENT(printk(">ia_getsockopt\n");)
    return -EINVAL;
}
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
                         void __user *optval, unsigned int optlen)
{
    IF_EVENT(printk(">ia_setsockopt\n");)
    return -EINVAL;
}
static int ia_pkt_tx(struct atm_vcc *vcc, struct sk_buff *skb) {
    IADEV *iadev;
    struct dle *wr_ptr;
    struct tx_buf_desc __iomem *buf_desc_ptr;
    int desc;
    int comp_code;
    int total_len;
    struct cpcs_trailer *trailer;
    struct ia_vcc *iavcc;
    iadev = INPH_IA_DEV(vcc->dev);
    iavcc = INPH_IA_VCC(vcc);
    if (!iavcc->txing) {
        printk("discard packet on closed VC\n");
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        return 0;
    }
    if (skb->len > iadev->tx_buf_sz - 8) {
        printk("Transmit size over tx buffer size\n");
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        return 0;
    }
    if ((unsigned long)skb->data & 3) {
        printk("Misaligned SKB\n");
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        return 0;
    }
    /* Get a descriptor number from our free descriptor queue.
       We get the descr number from the TCQ now, since I am using
       the TCQ as a free buffer queue. Initially TCQ will be
       initialized with all the descriptors and is hence, full.
    */
    desc = get_desc(iadev, iavcc);
    if (desc == 0xffff)
        return 1;
    comp_code = desc >> 13;
    desc &= 0x1fff;
    if ((desc == 0) || (desc > iadev->num_tx_desc))
    {
        IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
        atomic_inc(&vcc->stats->tx);
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        return 0; /* return SUCCESS */
    }
    if (comp_code)
    {
        IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
                      desc, comp_code);)
    }
    /* remember the desc and vcc mapping */
    iavcc->vc_desc_cnt++;
    iadev->desc_tbl[desc-1].iavcc = iavcc;
    iadev->desc_tbl[desc-1].txskb = skb;
    IA_SKB_STATE(skb) = 0;
    iadev->ffL.tcq_rd += 2;
    if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
        iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
    writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
    /* Put the descriptor number in the packet ready queue
       and put the updated write pointer in the DLE field
    */
    *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
    iadev->ffL.prq_wr += 2;
    if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
        iadev->ffL.prq_wr = iadev->ffL.prq_st;
    /* Figure out the exact length of the packet and padding required to
       make it aligned on a 48 byte boundary. */
    total_len = skb->len + sizeof(struct cpcs_trailer);
    total_len = ((total_len + 47) / 48) * 48;
  2714. IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
  2715. /* Put the packet in a tx buffer */
  2716. trailer = iadev->tx_buf[desc-1].cpcs;
  2717. IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
  2718. skb, skb->data, skb->len, desc);)
  2719. trailer->control = 0;
  2720. /*big endian*/
  2721. trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
  2722. trailer->crc32 = 0; /* not needed - dummy bytes */
  2723. /* Display the packet */
  2724. IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
  2725. skb->len, tcnter++);
  2726. xdump(skb->data, skb->len, "TX: ");
  2727. printk("\n");)
  2728. /* Build the buffer descriptor */
  2729. buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
  2730. buf_desc_ptr += desc; /* points to the corresponding entry */
  2731. buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
  2732. /* Huh ? p.115 of users guide describes this as a read-only register */
  2733. writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
  2734. buf_desc_ptr->vc_index = vcc->vci;
  2735. buf_desc_ptr->bytes = total_len;
  2736. if (vcc->qos.txtp.traffic_class == ATM_ABR)
  2737. clear_lockup (vcc, iadev);
  2738. /* Build the DLE structure */
  2739. wr_ptr = iadev->tx_dle_q.write;
  2740. memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
  2741. wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
  2742. skb->len, DMA_TO_DEVICE);
  2743. wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
  2744. buf_desc_ptr->buf_start_lo;
  2745. /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
  2746. wr_ptr->bytes = skb->len;
  2747. /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
  2748. if ((wr_ptr->bytes >> 2) == 0xb)
  2749. wr_ptr->bytes = 0x30;
  2750. wr_ptr->mode = TX_DLE_PSI;
  2751. wr_ptr->prq_wr_ptr_data = 0;
  2752. /* end is not to be used for the DLE q */
  2753. if (++wr_ptr == iadev->tx_dle_q.end)
  2754. wr_ptr = iadev->tx_dle_q.start;
  2755. /* Build trailer dle */
  2756. wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
  2757. wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
  2758. buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
  2759. wr_ptr->bytes = sizeof(struct cpcs_trailer);
  2760. wr_ptr->mode = DMA_INT_ENABLE;
  2761. wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
  2762. /* end is not to be used for the DLE q */
  2763. if (++wr_ptr == iadev->tx_dle_q.end)
  2764. wr_ptr = iadev->tx_dle_q.start;
  2765. iadev->tx_dle_q.write = wr_ptr;
  2766. ATM_DESC(skb) = vcc->vci;
  2767. skb_queue_tail(&iadev->tx_dma_q, skb);
  2768. atomic_inc(&vcc->stats->tx);
  2769. iadev->tx_pkt_cnt++;
  2770. /* Increment transaction counter */
  2771. writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
  2772. #if 0
  2773. /* add flow control logic */
  2774. if (atomic_read(&vcc->stats->tx) % 20 == 0) {
  2775. if (iavcc->vc_desc_cnt > 10) {
  2776. vcc->tx_quota = vcc->tx_quota * 3 / 4;
  2777. printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
  2778. iavcc->flow_inc = -1;
  2779. iavcc->saved_tx_quota = vcc->tx_quota;
  2780. } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
  2781. // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
  2782. printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
  2783. iavcc->flow_inc = 0;
  2784. }
  2785. }
  2786. #endif
  2787. IF_TX(printk("ia send done\n");)
  2788. return 0;
  2789. }
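
/*
 * ATM-core send entry point.  Validates the skb, then either passes it
 * straight to ia_pkt_tx() or, if a backlog already exists (or the
 * transmit path is out of descriptors), appends it to tx_backlog so
 * that packet ordering is preserved; the backlog is drained elsewhere
 * as descriptors complete.
 */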
static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	IADEV *iadev;
	unsigned long flags;

	iadev = INPH_IA_DEV(vcc->dev);
	if ((!skb) ||
	    (skb->len > (iadev->tx_buf_sz - sizeof(struct cpcs_trailer)))) {
		if (!skb)
			printk(KERN_CRIT "null skb in ia_send\n");
		else
			dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	spin_lock_irqsave(&iadev->tx_lock, flags);
	if (!test_bit(ATM_VF_READY, &vcc->flags)) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		return -EINVAL;
	}
	ATM_SKB(skb)->vcc = vcc;

	if (skb_peek(&iadev->tx_backlog)) {
		skb_queue_tail(&iadev->tx_backlog, skb);
	} else {
		if (ia_pkt_tx(vcc, skb))
			skb_queue_tail(&iadev->tx_backlog, skb);
	}
	spin_unlock_irqrestore(&iadev->tx_lock, flags);
	return 0;
}
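
/*
 * /proc read callback.  Position 0 describes the board (PHY type, VC
 * table size, on-board memory); position 1 dumps the buffer geometry
 * and the packet/cell counters.  Returns 0 once both lines have been
 * emitted.
 */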
static int ia_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	int left = *pos, n;
	char *tmpPtr;
	IADEV *iadev = INPH_IA_DEV(dev);

	if (!left--) {
		if (iadev->phy_type == FE_25MBIT_PHY) {
			n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
			return n;
		}
		if (iadev->phy_type == FE_DS3_PHY)
			n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
		else if (iadev->phy_type == FE_E3_PHY)
			n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
		else if (iadev->phy_type == FE_UTP_OPTION)
			n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
		else
			n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
		tmpPtr = page + n;
		if (iadev->pci_map_size == 0x40000)
			n += sprintf(tmpPtr, "-1KVC-");
		else
			n += sprintf(tmpPtr, "-4KVC-");
		tmpPtr = page + n;
		if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
			n += sprintf(tmpPtr, "1M  \n");
		else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
			n += sprintf(tmpPtr, "512K\n");
		else
			n += sprintf(tmpPtr, "128K\n");
		return n;
	}
	if (!left) {
		return sprintf(page, "  Number of Tx Buffer:  %u\n"
				     "  Size of Tx Buffer  :  %u\n"
				     "  Number of Rx Buffer:  %u\n"
				     "  Size of Rx Buffer  :  %u\n"
				     "  Packets Received   :  %u\n"
				     "  Packets Transmitted:  %u\n"
				     "  Cells Received     :  %u\n"
				     "  Cells Transmitted  :  %u\n"
				     "  Board Dropped Cells:  %u\n"
				     "  Board Dropped Pkts :  %u\n",
			       iadev->num_tx_desc, iadev->tx_buf_sz,
			       iadev->num_rx_desc, iadev->rx_buf_sz,
			       iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
			       iadev->rx_cell_cnt, iadev->tx_cell_cnt,
			       iadev->drop_rxcell, iadev->drop_rxpkt);
	}
	return 0;
}
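
/* Entry points exported to the ATM core. */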
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.getsockopt	= ia_getsockopt,
	.setsockopt	= ia_setsockopt,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
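
/*
 * PCI probe.  Allocates the per-board IADEV, enables the PCI device,
 * registers an atm_dev with the core, then brings the hardware up via
 * ia_init()/ia_start().  The board is also recorded in the module-wide
 * ia_dev[]/_ia_dev[] tables (indexed by iadev_count), which the ioctl
 * debug commands use, and chained onto the ia_boards list.
 */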
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;
	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		       pdev->bus->number, PCI_SLOT(pdev->devfn),
		       PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL " registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d\n", dev,
		       iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
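
/*
 * PCI remove.  Teardown mirrors probe in reverse: mask PHY interrupts,
 * stop the PHY, release the IRQ, drop the module-wide table entry,
 * deregister from the ATM core, then unmap the registers and free the
 * RX/TX resources and the IADEV itself.
 */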
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable PHY interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
		   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
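
/* PCI IDs this driver binds to; MODULE_DEVICE_TABLE() exports them so
   the module can be autoloaded when a matching board is found. */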
static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);

static struct pci_driver ia_driver = {
	.name     = DEV_LABEL,
	.id_table = ia_pci_tbl,
	.probe    = ia_init_one,
	.remove   = ia_remove_one,
};
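
/*
 * Module init/exit.  Besides registering the PCI driver, init arms the
 * global ia_timer, first firing 3 seconds after load.
 */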
static int __init ia_module_init(void)
{
	int ret;

	ret = pci_register_driver(&ia_driver);
	if (ret >= 0) {
		ia_timer.expires = jiffies + 3 * HZ;
		add_timer(&ia_timer);
	} else
		printk(KERN_ERR DEV_LABEL ": driver registration failed\n");
	return ret;
}

static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	/* make sure a concurrently running timer handler has finished
	   before the module text goes away */
	del_timer_sync(&ia_timer);
}

module_init(ia_module_init);
module_exit(ia_module_exit);