layer2.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223
  1. /*
  2. *
  3. * Author Karsten Keil <kkeil@novell.com>
  4. *
  5. * Copyright 2008 by Karsten Keil <kkeil@novell.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include <linux/mISDNif.h>
  18. #include <linux/slab.h>
  19. #include "core.h"
  20. #include "fsm.h"
  21. #include "layer2.h"
/* module wide mISDN debug mask; set up elsewhere in this file */
static u_int *debug;
/* FSM description shared by all layer2 instances; populated at module init
 * (not visible in this chunk) */
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
/* printable names for the Q.921 states ST_L2_1..ST_L2_8 (FSM debugging) */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/* events handled by the layer2 state machine */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

/* number of events in the enum above */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
/* printable event names, indexed by the event enum (FSM debugging) */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
/*
 * Emit an FSM debug line prefixed with the link's sapi/tei, but only
 * when the DEBUG_L2_FSM bit is set in the module debug mask.
 * Uses the kernel's %pV / struct va_format mechanism.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	struct layer2 *l2 = fi->userdata;
	struct va_format vaf;
	va_list va;

	if (!(*debug & DEBUG_L2_FSM))
		return;
	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
	       l2->sapi, l2->tei, &vaf);
	va_end(va);
}
  101. inline u_int
  102. l2headersize(struct layer2 *l2, int ui)
  103. {
  104. return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  105. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  106. }
  107. inline u_int
  108. l2addrsize(struct layer2 *l2)
  109. {
  110. return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
  111. }
/*
 * Build a fresh id for a PH_DATA_REQ: running counter in the upper
 * 16 bits (wraps from 0x7fff back to 1), tei in bits 8-15, sapi in
 * bits 0-7.  Used to match PH_DATA_CNF against the pending request.
 */
static u_int
l2_newid(struct layer2 *l2)
{
	u_int id;

	id = l2->next_id++;
	if (id == 0x7fff)	/* wrap; 0 is skipped after the wrap */
		l2->next_id = 1;
	id <<= 16;
	id |= l2->tei << 8;
	id |= l2->sapi;
	return id;
}
/*
 * Deliver an skb to the upper layer with the given primitive.
 * Consumes the skb on send error (freed here).
 */
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
	int err;

	if (!l2->up)
		/* NOTE(review): skb is not freed on this path - confirm all
		 * callers tolerate that */
		return;
	mISDN_HEAD_PRIM(skb) = prim;
	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
/*
 * Allocate a new skb with a copy of arg (len bytes, may be 0/NULL)
 * and send it to the upper layer with the given primitive.
 * Silently does nothing when no upper layer is attached or the
 * allocation fails (GFP_ATOMIC context).
 */
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
	struct sk_buff *skb;
	struct mISDNhead *hh;
	int err;

	if (!l2->up)
		return;
	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
  160. static int
  161. l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
  162. int ret;
  163. ret = l2->ch.recv(l2->ch.peer, skb);
  164. if (ret && (*debug & DEBUG_L2_RECV))
  165. printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
  166. return ret;
  167. }
/*
 * Send an skb towards layer 1 with simple flow control: while a
 * previous PH_DATA_REQ has not been confirmed (FLG_L1_NOTREADY set),
 * further data requests are parked on down_queue and drained later by
 * ph_data_confirm().
 */
static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (hh->prim == PH_DATA_REQ) {
		if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
			skb_queue_tail(&l2->down_queue, skb);
			return 0;
		}
		l2->down_id = mISDN_HEAD_ID(skb);	/* id awaiting confirm */
	}
	return l2down_skb(l2, skb);
}
/* Stamp prim/id into the skb's mISDN header and send it downwards. */
static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	hh->prim = prim;
	hh->id = id;
	return l2down_raw(l2, skb);
}
/*
 * Allocate a new skb carrying a copy of arg (len bytes, may be 0) and
 * send it downwards with the given prim/id.  The skb is freed here on
 * send failure; returns 0 or a negative error code.
 */
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
	struct sk_buff *skb;
	int err;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = id;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2down_raw(l2, skb);
	if (err)
		dev_kfree_skb(skb);
	return err;
}
/*
 * Handle PH_DATA_CNF from layer 1.  When the confirm matches the id we
 * are waiting for, push the next frame from down_queue (if any), and
 * once the queue drains clear FLG_L1_NOTREADY and kick the FSM with
 * EV_L2_ACK_PULL so pending I frames can be sent.
 * Returns 0 when the confirm skb was consumed, -EAGAIN otherwise
 * (caller keeps ownership).
 */
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		if (hh->id == l2->down_id) {
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			if (ret) {
				/* ret is still -EAGAIN here, so the matching
				 * confirm skb is consumed and success reported */
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	/* if the tx path is idle again, restart transmission from the queue */
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
  247. static int
  248. l2mgr(struct layer2 *l2, u_int prim, void *arg) {
  249. long c = (long)arg;
  250. printk(KERN_WARNING
  251. "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
  252. if (test_bit(FLG_LAPD, &l2->flag) &&
  253. !test_bit(FLG_FIXED_TEI, &l2->flag)) {
  254. switch (c) {
  255. case 'C':
  256. case 'D':
  257. case 'G':
  258. case 'H':
  259. l2_tei(l2, prim, (u_long)arg);
  260. break;
  261. }
  262. }
  263. return 0;
  264. }
/* Mark the peer as busy (received RNR); if frames are waiting, also
 * block further transmission via FLG_L2BLOCK. */
static void
set_peer_busy(struct layer2 *l2) {
	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
	if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}
/* Clear the peer-busy condition and, if it was set, the tx block too. */
static void
clear_peer_busy(struct layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
/* Clear all slots of the retransmission window. */
static void
InitWin(struct layer2 *l2)
{
	int i;

	for (i = 0; i < MAX_WINDOW; i++)
		l2->windowar[i] = NULL;
}
  283. static int
  284. freewin(struct layer2 *l2)
  285. {
  286. int i, cnt = 0;
  287. for (i = 0; i < MAX_WINDOW; i++) {
  288. if (l2->windowar[i]) {
  289. cnt++;
  290. dev_kfree_skb(l2->windowar[i]);
  291. l2->windowar[i] = NULL;
  292. }
  293. }
  294. return cnt;
  295. }
  296. static void
  297. ReleaseWin(struct layer2 *l2)
  298. {
  299. int cnt = freewin(l2);
  300. if (cnt)
  301. printk(KERN_WARNING
  302. "isdnl2 freed %d skbuffs in release\n", cnt);
  303. }
  304. inline unsigned int
  305. cansend(struct layer2 *l2)
  306. {
  307. unsigned int p1;
  308. if (test_bit(FLG_MOD128, &l2->flag))
  309. p1 = (l2->vs - l2->va) % 128;
  310. else
  311. p1 = (l2->vs - l2->va) % 8;
  312. return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
  313. }
/* Reset all exception conditions (ack pending, reject, own busy,
 * peer busy) on (re)establishment. */
inline void
clear_exception(struct layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}
  322. static int
  323. sethdraddr(struct layer2 *l2, u_char *header, int rsp)
  324. {
  325. u_char *ptr = header;
  326. int crbit = rsp;
  327. if (test_bit(FLG_LAPD, &l2->flag)) {
  328. if (test_bit(FLG_LAPD_NET, &l2->flag))
  329. crbit = !crbit;
  330. *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
  331. *ptr++ = (l2->tei << 1) | 1;
  332. return 2;
  333. } else {
  334. if (test_bit(FLG_ORIG, &l2->flag))
  335. crbit = !crbit;
  336. if (crbit)
  337. *ptr++ = l2->addr.B;
  338. else
  339. *ptr++ = l2->addr.A;
  340. return 1;
  341. }
  342. }
/* Send a supervisory/unnumbered frame downwards; frees the skb if the
 * send fails. */
static inline void
enqueue_super(struct layer2 *l2, struct sk_buff *skb)
{
	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
		dev_kfree_skb(skb);
}
/* Send a UI frame downwards, notifying the TEI manager first when one
 * is attached; frees the skb if the send fails. */
static inline void
enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
{
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
		dev_kfree_skb(skb);
}
  357. inline int
  358. IsUI(u_char *data)
  359. {
  360. return (data[0] & 0xef) == UI;
  361. }
  362. inline int
  363. IsUA(u_char *data)
  364. {
  365. return (data[0] & 0xef) == UA;
  366. }
  367. inline int
  368. IsDM(u_char *data)
  369. {
  370. return (data[0] & 0xef) == DM;
  371. }
  372. inline int
  373. IsDISC(u_char *data)
  374. {
  375. return (data[0] & 0xef) == DISC;
  376. }
  377. inline int
  378. IsRR(u_char *data, struct layer2 *l2)
  379. {
  380. if (test_bit(FLG_MOD128, &l2->flag))
  381. return data[0] == RR;
  382. else
  383. return (data[0] & 0xf) == 1;
  384. }
  385. inline int
  386. IsSFrame(u_char *data, struct layer2 *l2)
  387. {
  388. register u_char d = *data;
  389. if (!test_bit(FLG_MOD128, &l2->flag))
  390. d &= 0xf;
  391. return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
  392. }
  393. inline int
  394. IsSABME(u_char *data, struct layer2 *l2)
  395. {
  396. u_char d = data[0] & ~0x10;
  397. return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
  398. }
  399. inline int
  400. IsREJ(u_char *data, struct layer2 *l2)
  401. {
  402. return test_bit(FLG_MOD128, &l2->flag) ?
  403. data[0] == REJ : (data[0] & 0xf) == REJ;
  404. }
  405. inline int
  406. IsFRMR(u_char *data)
  407. {
  408. return (data[0] & 0xef) == FRMR;
  409. }
  410. inline int
  411. IsRNR(u_char *data, struct layer2 *l2)
  412. {
  413. return test_bit(FLG_MOD128, &l2->flag) ?
  414. data[0] == RNR : (data[0] & 0xf) == RNR;
  415. }
/*
 * Validate a received I frame.  Returns a Q.921 MDL error code
 * character ('L' response where a command is required, 'N' too short,
 * 'O' payload exceeds maxlen) or 0 when the frame is acceptable.
 */
static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int i;
	int rsp = *skb->data & 0x2;	/* C/R bit of the address field */

	i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp)	/* I frames must be commands */
		return 'L';
	if (skb->len < i)
		return 'N';
	if ((skb->len - i) > l2->maxlen)
		return 'O';
	return 0;
}
  432. static int
  433. super_error(struct layer2 *l2, struct sk_buff *skb)
  434. {
  435. if (skb->len != l2addrsize(l2) +
  436. (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
  437. return 'N';
  438. return 0;
  439. }
  440. static int
  441. unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
  442. {
  443. int rsp = (*skb->data & 0x2) >> 1;
  444. if (test_bit(FLG_ORIG, &l2->flag))
  445. rsp = !rsp;
  446. if (rsp != wantrsp)
  447. return 'L';
  448. if (skb->len != l2addrsize(l2) + 1)
  449. return 'N';
  450. return 0;
  451. }
  452. static int
  453. UI_error(struct layer2 *l2, struct sk_buff *skb)
  454. {
  455. int rsp = *skb->data & 0x2;
  456. if (test_bit(FLG_ORIG, &l2->flag))
  457. rsp = !rsp;
  458. if (rsp)
  459. return 'L';
  460. if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
  461. return 'O';
  462. return 0;
  463. }
/*
 * Validate a received FRMR.  It must be a response ('L' otherwise) and
 * carry 5 (extended mode) resp. 3 (basic mode) information bytes
 * ('N' when too short); the information field is logged when DEBUG_L2
 * is enabled.  Returns 0 when acceptable.
 */
static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int headers = l2addrsize(l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;	/* C/R bit */

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &l2->flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x %2x %2x",
				  datap[0], datap[1], datap[2], datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x",
				  datap[0], datap[1], datap[2]);
	}
	return 0;
}
  491. static unsigned int
  492. legalnr(struct layer2 *l2, unsigned int nr)
  493. {
  494. if (test_bit(FLG_MOD128, &l2->flag))
  495. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  496. else
  497. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  498. }
/*
 * Advance the acknowledge state variable va up to nr, releasing each
 * acknowledged frame from the retransmission window.  Freed skbs are
 * first staged on tmp_queue and destroyed at the end.
 */
static void
setva(struct layer2 *l2, unsigned int nr)
{
	struct sk_buff *skb;

	while (l2->va != nr) {
		l2->va++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		if (l2->windowar[l2->sow]) {
			skb_trim(l2->windowar[l2->sow], 0);
			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
			l2->windowar[l2->sow] = NULL;
		}
		l2->sow = (l2->sow + 1) % l2->window;	/* start of window slot */
	}
	skb = skb_dequeue(&l2->tmp_queue);
	while (skb) {
		dev_kfree_skb(skb);
		skb = skb_dequeue(&l2->tmp_queue);
	}
}
  522. static void
  523. send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
  524. {
  525. u_char tmp[MAX_L2HEADER_LEN];
  526. int i;
  527. i = sethdraddr(l2, tmp, cr);
  528. tmp[i++] = cmd;
  529. if (skb)
  530. skb_trim(skb, 0);
  531. else {
  532. skb = mI_alloc_skb(i, GFP_ATOMIC);
  533. if (!skb) {
  534. printk(KERN_WARNING "%s: can't alloc skbuff\n",
  535. __func__);
  536. return;
  537. }
  538. }
  539. memcpy(skb_put(skb, i), tmp, i);
  540. enqueue_super(l2, skb);
  541. }
/* Extract the P/F bit (bit 4 of the control field) from a frame. */
inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
{
	return skb->data[l2addrsize(l2)] & 0x10;
}
  547. inline u_char
  548. get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
  549. {
  550. u_char PF;
  551. PF = get_PollFlag(l2, skb);
  552. dev_kfree_skb(skb);
  553. return PF;
  554. }
/* Arm the T200 retransmission timer and note that it is running;
 * i tags the call site for FSM timer debugging. */
inline void
start_t200(struct layer2 *l2, int i)
{
	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* Restart T200 from zero and note that it is running. */
inline void
restart_t200(struct layer2 *l2, int i)
{
	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* Stop T200 if it is currently running. */
inline void
stop_t200(struct layer2 *l2, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
		mISDN_FsmDelTimer(&l2->t200, i);
}
  573. inline void
  574. st5_dl_release_l2l3(struct layer2 *l2)
  575. {
  576. int pr;
  577. if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
  578. pr = DL_RELEASE_CNF;
  579. else
  580. pr = DL_RELEASE_IND;
  581. l2up_create(l2, pr, 0, NULL);
  582. }
/* Report a release (f = DL_RELEASE_IND/CNF) upwards; LAPB links also
 * deactivate the physical layer. */
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
	if (test_bit(FLG_LAPB, &l2->flag))
		l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
	l2up_create(l2, f, 0, NULL);
}
/*
 * Start link establishment: send SABM(E) with P=1, reset the retry
 * counter and retransmission window, swap T203 for T200 and enter
 * state 5 (awaiting establishment).
 */
static void
establishlink(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;
	u_char cmd;

	clear_exception(l2);
	l2->rc = 0;	/* retransmission counter */
	cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
	send_uframe(l2, NULL, cmd, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 1);
	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_5);
}
  605. static void
  606. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  607. {
  608. struct sk_buff *skb = arg;
  609. struct layer2 *l2 = fi->userdata;
  610. if (get_PollFlagFree(l2, skb))
  611. l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
  612. else
  613. l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
  614. }
/*
 * Unexpected DM: with F=1 report MDL error 'B'; with F=0 report 'E'
 * and reestablish the link (without signalling DL_ESTABLISH to L3,
 * hence FLG_L3_INIT is cleared after establishlink set state 5).
 */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
}
/*
 * DM in state 8: report MDL error 'B' (F=1) or 'E' (F=0), then always
 * reestablish the link, again without an L3 establish confirm.
 */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
  640. static void
  641. l2_go_st3(struct FsmInst *fi, int event, void *arg)
  642. {
  643. dev_kfree_skb((struct sk_buff *)arg);
  644. mISDN_FsmChangeState(fi, ST_L2_3);
  645. }
  646. static void
  647. l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
  648. {
  649. struct layer2 *l2 = fi->userdata;
  650. mISDN_FsmChangeState(fi, ST_L2_3);
  651. dev_kfree_skb((struct sk_buff *)arg);
  652. l2_tei(l2, MDL_ASSIGN_IND, 0);
  653. }
  654. static void
  655. l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
  656. {
  657. struct layer2 *l2 = fi->userdata;
  658. struct sk_buff *skb = arg;
  659. skb_queue_tail(&l2->ui_queue, skb);
  660. mISDN_FsmChangeState(fi, ST_L2_2);
  661. l2_tei(l2, MDL_ASSIGN_IND, 0);
  662. }
  663. static void
  664. l2_queue_ui(struct FsmInst *fi, int event, void *arg)
  665. {
  666. struct layer2 *l2 = fi->userdata;
  667. struct sk_buff *skb = arg;
  668. skb_queue_tail(&l2->ui_queue, skb);
  669. }
/*
 * Transmit all queued UI frames, prepending address + UI control
 * octets.  On a network side (LAPD_NET) interface the second address
 * octet is overwritten with the broadcast TEI 127 ((127 << 1) | 1).
 */
static void
tx_ui(struct layer2 *l2)
{
	struct sk_buff *skb;
	u_char header[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, header, CMD);
	if (test_bit(FLG_LAPD_NET, &l2->flag))
		header[1] = 0xff; /* tei 127 */
	header[i++] = UI;
	while ((skb = skb_dequeue(&l2->ui_queue))) {
		memcpy(skb_push(skb, i), header, i);
		enqueue_ui(l2, skb);
	}
}
  685. static void
  686. l2_send_ui(struct FsmInst *fi, int event, void *arg)
  687. {
  688. struct layer2 *l2 = fi->userdata;
  689. struct sk_buff *skb = arg;
  690. skb_queue_tail(&l2->ui_queue, skb);
  691. tx_ui(l2);
  692. }
/*
 * Received UI frame: strip the layer-2 header and pass the payload up
 * as DL_UNITDATA_IND; notify the TEI manager when one is attached.
 */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(l2, 1));
/*
 * in states 1-3 for broadcast
 */
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	l2up(l2, DL_UNITDATA_IND, skb);
}
  706. static void
  707. l2_establish(struct FsmInst *fi, int event, void *arg)
  708. {
  709. struct sk_buff *skb = arg;
  710. struct layer2 *l2 = fi->userdata;
  711. establishlink(fi);
  712. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  713. dev_kfree_skb(skb);
  714. }
  715. static void
  716. l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
  717. {
  718. struct sk_buff *skb = arg;
  719. struct layer2 *l2 = fi->userdata;
  720. skb_queue_purge(&l2->i_queue);
  721. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  722. test_and_clear_bit(FLG_PEND_REL, &l2->flag);
  723. dev_kfree_skb(skb);
  724. }
  725. static void
  726. l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
  727. {
  728. struct sk_buff *skb = arg;
  729. struct layer2 *l2 = fi->userdata;
  730. skb_queue_purge(&l2->i_queue);
  731. establishlink(fi);
  732. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  733. dev_kfree_skb(skb);
  734. }
  735. static void
  736. l2_release(struct FsmInst *fi, int event, void *arg)
  737. {
  738. struct layer2 *l2 = fi->userdata;
  739. struct sk_buff *skb = arg;
  740. skb_trim(skb, 0);
  741. l2up(l2, DL_RELEASE_CNF, skb);
  742. }
  743. static void
  744. l2_pend_rel(struct FsmInst *fi, int event, void *arg)
  745. {
  746. struct sk_buff *skb = arg;
  747. struct layer2 *l2 = fi->userdata;
  748. test_and_set_bit(FLG_PEND_REL, &l2->flag);
  749. dev_kfree_skb(skb);
  750. }
/*
 * Start a disconnect: drop all pending I frames and the window, send
 * DISC with P=1, enter state 6 (awaiting release) and run T200 for the
 * DISC retransmissions.  arg (optional trigger skb) is freed.
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_6);
	l2->rc = 0;	/* restart the retry counter for DISC */
	send_uframe(l2, NULL, DISC | 0x10, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 2);
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * SABM(E) received while down: reset all sequence variables, answer
 * with UA (echoing the P bit as F), enter state 7 (multiframe
 * established), start T203 and signal DL_ESTABLISH_IND to layer 3,
 * reusing the received skb for the indication.
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	clear_exception(l2);
	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	skb_trim(skb, 0);
	l2up(l2, DL_ESTABLISH_IND, skb);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
  784. static void
  785. l2_send_UA(struct FsmInst *fi, int event, void *arg)
  786. {
  787. struct layer2 *l2 = fi->userdata;
  788. struct sk_buff *skb = arg;
  789. send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
  790. }
  791. static void
  792. l2_send_DM(struct FsmInst *fi, int event, void *arg)
  793. {
  794. struct layer2 *l2 = fi->userdata;
  795. struct sk_buff *skb = arg;
  796. send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
  797. }
/*
 * SABM(E) received while established (peer reestablishment): answer
 * UA, report MDL error 'F', reset all sequence variables and restart
 * T203.  If unacknowledged I frames existed they are lost, so a fresh
 * DL_ESTABLISH_IND is signalled to layer 3.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
	if (l2->vs != l2->va) {
		/* outstanding I frames are lost by the reestablishment */
		skb_queue_purge(&l2->i_queue);
		est = 1;
	}
	clear_exception(l2);
	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	stop_t200(l2, 3);
	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	if (est)
		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *	    0, NULL, 0);
 */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
/*
 * DISC received while established: enter state 4, stop both timers,
 * answer UA (reusing the received skb), drop all pending I frames and
 * the window, and signal DL_RELEASE_IND to layer 3.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	mISDN_FsmChangeState(fi, ST_L2_4);
	mISDN_FsmDelTimer(&l2->t203, 3);
	stop_t200(l2, 4);
	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * UA received in awaiting-establishment (ST 5): the SABME handshake
 * completed.  Reset the sequence state and enter multiframe
 * established (ST 7), confirming or indicating establishment to L3.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;	/* primitive to send upward; -1 = none */

	if (!get_PollFlag(l2, skb)) {
		/* UA with F=0 is a protocol error */
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		/* a DL_RELEASE_REQ arrived while establishing: disconnect now */
		l2_disconnect(fi, event, NULL);
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;	/* establishment was requested by L3 */
	} else if (l2->vs != l2->va) {
		/* re-establishment: unacknowledged I-frames are lost */
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}
	stop_t200(l2, 5);
	/* fresh sequence state for the new multiframe session */
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);

	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);

	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
/*
 * UA received in awaiting-release (ST 6): our DISC was acknowledged.
 * Confirm the release to L3 and fall back to TEI-assigned (ST 4).
 */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(l2, skb)) {
		/* UA with F=0 is a protocol error */
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(l2, 6);
	lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * DM received in TEI-assigned state (ST 4): the peer reports it is
 * disconnected.  For DM with F=0 start establishment on behalf of L3;
 * a DM with F=1 is ignored (get_PollFlagFree also frees the skb).
 */
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlagFree(l2, skb)) {
		establishlink(fi);
		/* mark the establishment as L3-initiated */
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	}
}
/*
 * DM (F=1) received in awaiting-establishment (ST 5): the peer refuses
 * establishment.  Abort, release toward L3 and return to ST 4.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 7);
		if (!test_bit(FLG_L3_INIT, &l2->flag))
			/* establishment was not L3-initiated: drop queued data */
			skb_queue_purge(&l2->i_queue);
		if (test_bit(FLG_LAPB, &l2->flag))
			/* LAPB (X.75): also deactivate the physical layer */
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
  919. static void
  920. l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
  921. {
  922. struct layer2 *l2 = fi->userdata;
  923. struct sk_buff *skb = arg;
  924. if (get_PollFlagFree(l2, skb)) {
  925. stop_t200(l2, 8);
  926. lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
  927. mISDN_FsmChangeState(fi, ST_L2_4);
  928. if (l2->tm)
  929. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  930. }
  931. }
  932. static void
  933. enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
  934. {
  935. struct sk_buff *skb;
  936. u_char tmp[MAX_L2HEADER_LEN];
  937. int i;
  938. i = sethdraddr(l2, tmp, cr);
  939. if (test_bit(FLG_MOD128, &l2->flag)) {
  940. tmp[i++] = typ;
  941. tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
  942. } else
  943. tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
  944. skb = mI_alloc_skb(i, GFP_ATOMIC);
  945. if (!skb) {
  946. printk(KERN_WARNING
  947. "isdnl2 can't alloc sbbuff for enquiry_cr\n");
  948. return;
  949. }
  950. memcpy(skb_put(skb, i), tmp, i);
  951. enqueue_super(l2, skb);
  952. }
  953. inline void
  954. enquiry_response(struct layer2 *l2)
  955. {
  956. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  957. enquiry_cr(l2, RNR, RSP, 1);
  958. else
  959. enquiry_cr(l2, RR, RSP, 1);
  960. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  961. }
  962. inline void
  963. transmit_enquiry(struct layer2 *l2)
  964. {
  965. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  966. enquiry_cr(l2, RNR, CMD, 1);
  967. else
  968. enquiry_cr(l2, RR, CMD, 1);
  969. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  970. start_t200(l2, 9);
  971. }
/*
 * Recover from an invalid N(R) (sequence error): report error 'J' to
 * the management entity and re-establish the data link.
 */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
	establishlink(fi);
	/* the re-establishment is not on behalf of layer 3 */
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * Move every I-frame sent but not yet acknowledged beyond N(R) = nr
 * from the acknowledge window back to the head of the i_queue, so it is
 * retransmitted, then kick the transmit pull.  V(S) is walked backwards
 * down to nr.
 */
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
	u_int p1;

	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			/* vs is unsigned; decrement-then-mod wraps 0 to
			 * modulus-1 because 2^32 is a multiple of 8 and 128 */
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			/* index into the circular acknowledge window */
			p1 = (p1 + l2->sow) % l2->window;
			if (l2->windowar[p1])
				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			else
				printk(KERN_WARNING
				       "%s: windowar[%d] is NULL\n",
				       __func__, p1);
			l2->windowar[p1] = NULL;
		}
		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
	}
}
/*
 * Supervisory frame (RR/RNR/REJ) received in multiframe established
 * state (ST 7): update peer-busy status, acknowledge frames up to N(R)
 * and, for REJ, start retransmission.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;

	/* bit 1 of the address field distinguishes command from response;
	 * its meaning flips depending on which side originated the link */
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, l2))
		typ = REJ;

	/* extract P/F bit and N(R) for the active modulus */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (PollFlag) {
		if (rsp)
			/* unsolicited final bit: protocol error 'A' */
			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
		else
			enquiry_response(l2);
	}
	if (legalnr(l2, nr)) {
		if (typ == REJ) {
			/* peer rejected: ack up to nr and resend from there */
			setva(l2, nr);
			invoke_retransmission(l2, nr);
			stop_t200(l2, 10);
			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 6))
				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything outstanding acknowledged: idle-supervise */
			setva(l2, nr);
			stop_t200(l2, 11);
			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 supervising */
			setva(l2, nr);
			if (typ != RR)
				mISDN_FsmDelTimer(&l2->t203, 9);
			restart_t200(l2, 12);
		}
		if (skb_queue_len(&l2->i_queue) && (typ == RR))
			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	} else
		nrerrorrecovery(fi);
}
  1062. static void
  1063. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  1064. {
  1065. struct layer2 *l2 = fi->userdata;
  1066. struct sk_buff *skb = arg;
  1067. if (!test_bit(FLG_L3_INIT, &l2->flag))
  1068. skb_queue_tail(&l2->i_queue, skb);
  1069. else
  1070. dev_kfree_skb(skb);
  1071. }
  1072. static void
  1073. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  1074. {
  1075. struct layer2 *l2 = fi->userdata;
  1076. struct sk_buff *skb = arg;
  1077. skb_queue_tail(&l2->i_queue, skb);
  1078. mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
  1079. }
  1080. static void
  1081. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  1082. {
  1083. struct layer2 *l2 = fi->userdata;
  1084. struct sk_buff *skb = arg;
  1085. skb_queue_tail(&l2->i_queue, skb);
  1086. }
/*
 * I-frame received (ST 7/8): deliver in-sequence data to L3, reject
 * out-of-sequence frames, and process the piggybacked N(R)
 * acknowledgement.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, i;
	u_int ns, nr;

	i = l2addrsize(l2);
	/* extract P bit, N(S) and N(R) for the active modulus */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* receiver busy: discard the data, but still answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(l2);
	} else {
		if (l2->vr == ns) {
			/* in sequence: advance V(R) and hand the data to L3 */
			l2->vr++;
			if (test_bit(FLG_MOD128, &l2->flag))
				l2->vr %= 128;
			else
				l2->vr %= 8;
			test_and_clear_bit(FLG_REJEXC, &l2->flag);
			if (PollFlag)
				enquiry_response(l2);
			else
				/* delay the ack; it may piggyback on our data */
				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
			skb_pull(skb, l2headersize(l2, 0));
			l2up(l2, DL_DATA_IND, skb);
		} else {
			/* n(s)!=v(r) */
			dev_kfree_skb(skb);
			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
				/* REJ already outstanding: only answer polls */
				if (PollFlag)
					enquiry_response(l2);
			} else {
				/* first out-of-sequence frame: send REJ */
				enquiry_cr(l2, REJ, RSP, PollFlag);
				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
			}
		}
	}
	/* process the piggybacked acknowledgement N(R) */
	if (legalnr(l2, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
		    (fi->state == ST_L2_7)) {
			if (nr == l2->vs) {
				/* all outstanding frames acked */
				stop_t200(l2, 13);
				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
						      EV_L2_T203, NULL, 7);
			} else if (nr != l2->va)
				restart_t200(l2, 14);
		}
		setva(l2, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
		enquiry_cr(l2, RR, RSP, 0);
}
/*
 * MDL_ASSIGN: the TEI manager assigned us a TEI value (passed in arg).
 * Record it, inform L3, continue a pending establishment if one was
 * waiting for the TEI, and flush any queued UI frames.
 */
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	u_int info;

	/* TEI value is smuggled through the pointer argument */
	l2->tei = (signed char)(long)arg;
	set_channel_address(&l2->ch, l2->sapi, l2->tei);
	info = DL_INFO_L2_CONNECT;
	l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
	if (fi->state == ST_L2_3) {
		/* an establish request was waiting for TEI assignment */
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	} else
		mISDN_FsmChangeState(fi, ST_L2_4);
	if (skb_queue_len(&l2->ui_queue))
		tx_ui(l2);
}
/*
 * T200 expired in awaiting-establishment (ST 5): retransmit SABM(E) up
 * to N200 times, then give up and release toward L3 (error 'G').
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel blocked: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry budget exhausted: abandon establishment */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		/* retry: resend SABM(E) with P=1 (0x10) */
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
				       SABME : SABM) | 0x10, CMD);
	}
}
/*
 * T200 expired in awaiting-release (ST 6): retransmit DISC up to N200
 * times, then give up and release anyway (error 'H').
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel blocked: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry budget exhausted: release unilaterally */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		/* retry: resend DISC with P=1 (0x10) */
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
				  NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}
/*
 * T200 expired in multiframe established state (ST 7): no
 * acknowledgement arrived in time.  Enter timer recovery (ST 8) and
 * poll the peer's status.
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel blocked: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc++;
}
/*
 * T200 expired in timer-recovery state (ST 8): re-poll the peer up to
 * N200 times; after that, declare the link dead (error 'I') and
 * re-establish.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel blocked: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
		/* retry budget exhausted: re-establish the link */
		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	} else {
		transmit_enquiry(l2);
		l2->rc++;
	}
}
/*
 * T203 (idle supervision) expired in ST 7: no frame was exchanged for
 * the whole period — enter timer recovery and poll the peer to verify
 * the link is still alive.
 */
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel blocked: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
		return;
	}
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc = 0;
}
  1264. static void
  1265. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1266. {
  1267. struct layer2 *l2 = fi->userdata;
  1268. struct sk_buff *skb, *nskb, *oskb;
  1269. u_char header[MAX_L2HEADER_LEN];
  1270. u_int i, p1;
  1271. if (!cansend(l2))
  1272. return;
  1273. skb = skb_dequeue(&l2->i_queue);
  1274. if (!skb)
  1275. return;
  1276. if (test_bit(FLG_MOD128, &l2->flag))
  1277. p1 = (l2->vs - l2->va) % 128;
  1278. else
  1279. p1 = (l2->vs - l2->va) % 8;
  1280. p1 = (p1 + l2->sow) % l2->window;
  1281. if (l2->windowar[p1]) {
  1282. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1283. p1);
  1284. dev_kfree_skb(l2->windowar[p1]);
  1285. }
  1286. l2->windowar[p1] = skb;
  1287. i = sethdraddr(l2, header, CMD);
  1288. if (test_bit(FLG_MOD128, &l2->flag)) {
  1289. header[i++] = l2->vs << 1;
  1290. header[i++] = l2->vr << 1;
  1291. l2->vs = (l2->vs + 1) % 128;
  1292. } else {
  1293. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1294. l2->vs = (l2->vs + 1) % 8;
  1295. }
  1296. nskb = skb_clone(skb, GFP_ATOMIC);
  1297. p1 = skb_headroom(nskb);
  1298. if (p1 >= i)
  1299. memcpy(skb_push(nskb, i), header, i);
  1300. else {
  1301. printk(KERN_WARNING
  1302. "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
  1303. oskb = nskb;
  1304. nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
  1305. if (!nskb) {
  1306. dev_kfree_skb(oskb);
  1307. printk(KERN_WARNING "%s: no skb mem\n", __func__);
  1308. return;
  1309. }
  1310. memcpy(skb_put(nskb, i), header, i);
  1311. memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
  1312. dev_kfree_skb(oskb);
  1313. }
  1314. l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
  1315. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1316. if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
  1317. mISDN_FsmDelTimer(&l2->t203, 13);
  1318. mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
  1319. }
  1320. }
/*
 * Supervisory frame received in timer-recovery state (ST 8): a final
 * response (F=1) to our enquiry ends the recovery and triggers
 * retransmission; anything else only updates the ack state.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;

	/* command/response bit; its sense flips for the originating side */
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);

	/* extract P/F bit and N(R) for the active modulus */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (rsp && PollFlag) {
		/* the awaited final response: leave timer recovery */
		if (legalnr(l2, nr)) {
			if (rnr) {
				restart_t200(l2, 15);
			} else {
				stop_t200(l2, 16);
				mISDN_FsmAddTimer(&l2->t203, l2->T203,
						  EV_L2_T203, NULL, 5);
				setva(l2, nr);
			}
			invoke_retransmission(l2, nr);
			mISDN_FsmChangeState(fi, ST_L2_7);
			if (skb_queue_len(&l2->i_queue) && cansend(l2))
				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		/* command with P=1 still needs an answer; in any case
		 * process the acknowledgement */
		if (!rsp && PollFlag)
			enquiry_response(l2);
		if (legalnr(l2, nr))
			setva(l2, nr);
		else
			nrerrorrecovery(fi);
	}
}
/*
 * FRMR received: the peer rejected one of our frames.  If the rejected
 * control field was an I/S frame (or a UA in ST 7) the condition is
 * unrecoverable — report error 'K' and re-establish the link.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* skip address plus the FRMR control octet to the rejected field */
	skb_pull(skb, l2addrsize(l2) + 1);

	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
	dev_kfree_skb(skb);
}
/*
 * TEI removed in ST 2/4 (no data link up): drop queued UI frames and
 * fall back to the TEI-unassigned state.
 */
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while awaiting TEI assignment for an establish request
 * (ST 3): abort, tell L3 the link is released, return to ST 1.
 */
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in awaiting-establishment (ST 5): abandon the pending
 * establishment, discard all queued data and return to ST 1.
 */
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in awaiting-release (ST 6): finish the release locally
 * and return to ST 1.
 */
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 18);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while the multiframe link is up (ST 7/8): tear everything
 * down — queues, window, both timers — indicate the release to L3 and
 * return to the TEI-unassigned state.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *	0, NULL, 0);
 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
  1440. static void
  1441. l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
  1442. {
  1443. struct layer2 *l2 = fi->userdata;
  1444. struct sk_buff *skb = arg;
  1445. skb_queue_purge(&l2->i_queue);
  1446. skb_queue_purge(&l2->ui_queue);
  1447. if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
  1448. l2up(l2, DL_RELEASE_IND, skb);
  1449. else
  1450. dev_kfree_skb(skb);
  1451. }
/*
 * Persistent layer-1 deactivation in awaiting-establishment (ST 5):
 * abort the establishment, release toward L3 and drop to ST 4.
 */
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	dev_kfree_skb(skb);
}
/*
 * Persistent layer-1 deactivation in awaiting-release (ST 6): the
 * release completes locally — confirm to L3 (reusing the event skb)
 * and drop to ST 4.
 */
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->ui_queue);
	stop_t200(l2, 20);
	l2up(l2, DL_RELEASE_CNF, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * Persistent layer-1 deactivation while the multiframe link is up
 * (ST 7/8): tear the link down, indicate the release to L3 (reusing
 * the event skb) and drop to ST 4.
 */
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up(l2, DL_RELEASE_IND, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
  1494. static void
  1495. l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
  1496. {
  1497. struct layer2 *l2 = fi->userdata;
  1498. struct sk_buff *skb = arg;
  1499. if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
  1500. enquiry_cr(l2, RNR, RSP, 0);
  1501. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1502. }
  1503. if (skb)
  1504. dev_kfree_skb(skb);
  1505. }
/*
 * Leave own-receiver-busy condition and notify the peer with an RR.
 */
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* NOTE(review): test_and_clear_bit returns the PREVIOUS flag value,
	 * so this sends RR only when FLG_OWN_BUSY was NOT set — the mirror
	 * image of l2_set_own_busy, which looks inverted.  Confirm against
	 * the Q.921 clear-own-busy procedure before changing. */
	if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
		enquiry_cr(l2, RR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	}
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * Report a received-frame error (error code passed in arg) to the
 * management entity; no recovery in these states.
 */
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
}
/*
 * Report a received-frame error to the management entity and
 * re-establish the data link (used while the link is up, ST 7/8).
 */
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
	establishlink(fi);
	/* the re-establishment is not on behalf of layer 3 */
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * Q.921 state machine transition table: one {state, event, handler}
 * entry per valid combination; events not listed for a state are
 * ignored by the FSM core.
 */
static struct FsmNode L2FnList[] =
{
	/* requests from layer 3 */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
	/* TEI management events */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* received frames */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timer expiry */
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	/* internal events */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	/* layer 1 deactivation */
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
/*
 * Parse a frame received from layer 1: validate addressing (SAPI/TEI
 * for LAPD), classify the frame type and feed the matching event into
 * the L2 state machine.
 *
 * Returns 0 when the frame was consumed (or was not for us), a
 * negative error otherwise.
 */
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
	u_char *datap = skb->data;
	int ret = -EINVAL;
	int psapi, ptei;
	u_int l;
	int c = 0;	/* frame-error code; 0 = no error */

	l = l2addrsize(l2);
	if (skb->len <= l) {
		/* too short to even hold address + control field */
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
		return ret;
	}
	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
		psapi = *datap++;
		ptei = *datap++;
		/* EA0 must be clear in the first octet, EA1 set in the second */
		if ((psapi & 1) || !(ptei & 1)) {
			printk(KERN_WARNING
			       "l2 D-channel frame wrong EA0/EA1\n");
			return ret;
		}
		psapi >>= 2;
		ptei >>= 1;
		if (psapi != l2->sapi) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
				       __func__, psapi, l2->sapi);
			dev_kfree_skb(skb);
			return 0;
		}
		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
				       __func__, ptei, l2->tei);
			dev_kfree_skb(skb);
			return 0;
		}
	} else
		datap += l;
	/* dispatch by control-field type */
	if (!(*datap & 1)) {	/* I-Frame */
		c = iframe_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
		c = super_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
	} else if (IsUI(datap)) {
		c = UI_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
	} else if (IsSABME(datap, l2)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
	} else if (IsUA(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
	} else if (IsDISC(datap)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
	} else if (IsDM(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
	} else if (IsFRMR(datap)) {
		c = FRMR_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
	} else
		c = 'L';	/* unrecognized frame type */
	if (c) {
		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
	}
	return ret;
}
/*
 * mISDNchannel send entry point for layer 2: translate primitives from
 * layer 1 (PH_*) and layer 3 (DL_*) into state-machine events.  On
 * error the skb is freed here and 0 is returned, so the caller never
 * needs to release it.
 */
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int ret = -EINVAL;

	if (*debug & DEBUG_L2_RECV)
		printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
		       __func__, hh->prim, hh->id, l2->sapi, l2->tei);
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		/* complete an establishment that was waiting for layer 1 */
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			ret = mISDN_FsmEvent(&l2->l2m,
					     EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		/* forward management information transparently upward */
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		/* LAPB: requesting establishment makes us the originator */
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
						     EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			/* layer 1 not up yet: activate it first and retry
			 * the establishment on PH_ACTIVATE_IND */
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
						 &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
				     skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
				     skb);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
				  hh->prim);
	}
	if (ret) {
		/* skb ownership stays here on failure */
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}
  1779. int
  1780. tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
  1781. {
  1782. int ret = -EINVAL;
  1783. if (*debug & DEBUG_L2_TEI)
  1784. printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
  1785. switch (cmd) {
  1786. case (MDL_ASSIGN_REQ):
  1787. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
  1788. break;
  1789. case (MDL_REMOVE_REQ):
  1790. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
  1791. break;
  1792. case (MDL_ERROR_IND):
  1793. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1794. break;
  1795. case (MDL_ERROR_RSP):
  1796. /* ETS 300-125 5.3.2.1 Test: TC13010 */
  1797. printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
  1798. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1799. break;
  1800. }
  1801. return ret;
  1802. }
/*
 * Tear down and free a layer2 instance: stop both FSM timers, drop all
 * queued skbs, release the transmit window, and for LAPD links also
 * release the TEI binding and close the underlying D-channel.
 * The order matters: timers and queues go first, kfree() is last.
 */
static void
release_l2(struct layer2 *l2)
{
	/* second argument is a debug/where id for timer tracing
	 * (mISDN FsmDelTimer convention) */
	mISDN_FsmDelTimer(&l2->t200, 21);
	mISDN_FsmDelTimer(&l2->t203, 16);
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	skb_queue_purge(&l2->down_queue);
	ReleaseWin(l2);
	if (test_bit(FLG_LAPD, &l2->flag)) {
		TEIrelease(l2);
		/* close the D-channel we opened in create_l2() */
		if (l2->ch.st)
			l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
			    CLOSE_CHANNEL, NULL);
	}
	kfree(l2);
}
  1820. static int
  1821. l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  1822. {
  1823. struct layer2 *l2 = container_of(ch, struct layer2, ch);
  1824. u_int info;
  1825. if (*debug & DEBUG_L2_CTRL)
  1826. printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
  1827. switch (cmd) {
  1828. case OPEN_CHANNEL:
  1829. if (test_bit(FLG_LAPD, &l2->flag)) {
  1830. set_channel_address(&l2->ch, l2->sapi, l2->tei);
  1831. info = DL_INFO_L2_CONNECT;
  1832. l2up_create(l2, DL_INFORMATION_IND,
  1833. sizeof(info), &info);
  1834. }
  1835. break;
  1836. case CLOSE_CHANNEL:
  1837. if (l2->ch.peer)
  1838. l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
  1839. release_l2(l2);
  1840. break;
  1841. }
  1842. return 0;
  1843. }
  1844. struct layer2 *
  1845. create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
  1846. int sapi)
  1847. {
  1848. struct layer2 *l2;
  1849. struct channel_req rq;
  1850. l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
  1851. if (!l2) {
  1852. printk(KERN_ERR "kzalloc layer2 failed\n");
  1853. return NULL;
  1854. }
  1855. l2->next_id = 1;
  1856. l2->down_id = MISDN_ID_NONE;
  1857. l2->up = ch;
  1858. l2->ch.st = ch->st;
  1859. l2->ch.send = l2_send;
  1860. l2->ch.ctrl = l2_ctrl;
  1861. switch (protocol) {
  1862. case ISDN_P_LAPD_NT:
  1863. test_and_set_bit(FLG_LAPD, &l2->flag);
  1864. test_and_set_bit(FLG_LAPD_NET, &l2->flag);
  1865. test_and_set_bit(FLG_MOD128, &l2->flag);
  1866. l2->sapi = sapi;
  1867. l2->maxlen = MAX_DFRAME_LEN;
  1868. if (test_bit(OPTION_L2_PMX, &options))
  1869. l2->window = 7;
  1870. else
  1871. l2->window = 1;
  1872. if (test_bit(OPTION_L2_PTP, &options))
  1873. test_and_set_bit(FLG_PTP, &l2->flag);
  1874. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1875. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1876. l2->tei = tei;
  1877. l2->T200 = 1000;
  1878. l2->N200 = 3;
  1879. l2->T203 = 10000;
  1880. if (test_bit(OPTION_L2_PMX, &options))
  1881. rq.protocol = ISDN_P_NT_E1;
  1882. else
  1883. rq.protocol = ISDN_P_NT_S0;
  1884. rq.adr.channel = 0;
  1885. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1886. break;
  1887. case ISDN_P_LAPD_TE:
  1888. test_and_set_bit(FLG_LAPD, &l2->flag);
  1889. test_and_set_bit(FLG_MOD128, &l2->flag);
  1890. test_and_set_bit(FLG_ORIG, &l2->flag);
  1891. l2->sapi = sapi;
  1892. l2->maxlen = MAX_DFRAME_LEN;
  1893. if (test_bit(OPTION_L2_PMX, &options))
  1894. l2->window = 7;
  1895. else
  1896. l2->window = 1;
  1897. if (test_bit(OPTION_L2_PTP, &options))
  1898. test_and_set_bit(FLG_PTP, &l2->flag);
  1899. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1900. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1901. l2->tei = tei;
  1902. l2->T200 = 1000;
  1903. l2->N200 = 3;
  1904. l2->T203 = 10000;
  1905. if (test_bit(OPTION_L2_PMX, &options))
  1906. rq.protocol = ISDN_P_TE_E1;
  1907. else
  1908. rq.protocol = ISDN_P_TE_S0;
  1909. rq.adr.channel = 0;
  1910. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1911. break;
  1912. case ISDN_P_B_X75SLP:
  1913. test_and_set_bit(FLG_LAPB, &l2->flag);
  1914. l2->window = 7;
  1915. l2->maxlen = MAX_DATA_SIZE;
  1916. l2->T200 = 1000;
  1917. l2->N200 = 4;
  1918. l2->T203 = 5000;
  1919. l2->addr.A = 3;
  1920. l2->addr.B = 1;
  1921. break;
  1922. default:
  1923. printk(KERN_ERR "layer2 create failed prt %x\n",
  1924. protocol);
  1925. kfree(l2);
  1926. return NULL;
  1927. }
  1928. skb_queue_head_init(&l2->i_queue);
  1929. skb_queue_head_init(&l2->ui_queue);
  1930. skb_queue_head_init(&l2->down_queue);
  1931. skb_queue_head_init(&l2->tmp_queue);
  1932. InitWin(l2);
  1933. l2->l2m.fsm = &l2fsm;
  1934. if (test_bit(FLG_LAPB, &l2->flag) ||
  1935. test_bit(FLG_PTP, &l2->flag) ||
  1936. test_bit(FLG_LAPD_NET, &l2->flag))
  1937. l2->l2m.state = ST_L2_4;
  1938. else
  1939. l2->l2m.state = ST_L2_1;
  1940. l2->l2m.debug = *debug;
  1941. l2->l2m.userdata = l2;
  1942. l2->l2m.userint = 0;
  1943. l2->l2m.printdebug = l2m_debug;
  1944. mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
  1945. mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
  1946. return l2;
  1947. }
  1948. static int
  1949. x75create(struct channel_req *crq)
  1950. {
  1951. struct layer2 *l2;
  1952. if (crq->protocol != ISDN_P_B_X75SLP)
  1953. return -EPROTONOSUPPORT;
  1954. l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
  1955. if (!l2)
  1956. return -ENOMEM;
  1957. crq->ch = &l2->ch;
  1958. crq->protocol = ISDN_P_B_HDLC;
  1959. return 0;
  1960. }
/* B-protocol descriptor for X.75 SLP, registered/unregistered by
 * Isdnl2_Init()/Isdnl2_cleanup(); x75create() builds each instance. */
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};
  1966. int
  1967. Isdnl2_Init(u_int *deb)
  1968. {
  1969. debug = deb;
  1970. mISDN_register_Bprotocol(&X75SLP);
  1971. l2fsm.state_count = L2_STATE_COUNT;
  1972. l2fsm.event_count = L2_EVENT_COUNT;
  1973. l2fsm.strEvent = strL2Event;
  1974. l2fsm.strState = strL2State;
  1975. mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
  1976. TEIInit(deb);
  1977. return 0;
  1978. }
/*
 * Module teardown: unregister the X.75 B-protocol, shut down the TEI
 * manager, then free the layer2 FSM jump table — reverse of the
 * initialization order in Isdnl2_Init().
 */
void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}