stack.c

/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/cputime.h>
#include <linux/signal.h>
#include "core.h"

static u_int	*debug;
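
/*
 * Append an skb to the stack message queue; unless the stack is stopped,
 * flag pending work and wake the mISDNStackd thread.
 */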
static inline void
_queue_message(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	skb_queue_tail(&st->msgq, skb);
	if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
		test_and_set_bit(mISDN_STACK_WORK, &st->status);
		wake_up_interruptible(&st->workq);
	}
}

static int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
	_queue_message(ch->st, skb);
	return 0;
}
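
/* look up a layer2 channel on this stack by its channel number (ch->nr) */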
static struct mISDNchannel *
get_channel4id(struct mISDNstack *st, u_int id)
{
	struct mISDNchannel *ch;

	mutex_lock(&st->lmutex);
	list_for_each_entry(ch, &st->layer2, list) {
		if (id == ch->nr)
			goto unlock;
	}
	ch = NULL;
unlock:
	mutex_unlock(&st->lmutex);
	return ch;
}
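
/*
 * Deliver a private copy of the skb to every socket in the list that is in
 * MISDN_BOUND state; any copy left unconsumed is freed at the end.
 */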
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *cskb = NULL;

	read_lock(&sl->lock);
	sk_for_each(sk, &sl->head) {
		if (sk->sk_state != MISDN_BOUND)
			continue;
		if (!cskb)
			cskb = skb_copy(skb, GFP_ATOMIC);
		if (!cskb) {
			printk(KERN_WARNING "%s no skb\n", __func__);
			break;
		}
		if (!sock_queue_rcv_skb(sk, cskb))
			cskb = NULL;
	}
	read_unlock(&sl->lock);
	if (cskb)
		dev_kfree_skb(cskb);
}
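
/*
 * Pass a message to layer2: broadcast it to all layer2 channels when the id
 * is MISDN_ID_ANY, otherwise send it to the channel whose address matches;
 * frames matching no channel are handed to the TEI manager via CHECK_DATA.
 */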
static void
send_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
	struct sk_buff *cskb;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int ret;

	if (!st)
		return;
	mutex_lock(&st->lmutex);
	if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
		list_for_each_entry(ch, &st->layer2, list) {
			if (list_is_last(&ch->list, &st->layer2)) {
				cskb = skb;
				skb = NULL;
			} else {
				cskb = skb_copy(skb, GFP_KERNEL);
			}
			if (cskb) {
				ret = ch->send(ch, cskb);
				if (ret) {
					if (*debug & DEBUG_SEND_ERR)
						printk(KERN_DEBUG
						       "%s ch%d prim(%x) addr(%x) err %d\n",
						       __func__, ch->nr,
						       hh->prim, ch->addr, ret);
					dev_kfree_skb(cskb);
				}
			} else {
				printk(KERN_WARNING "%s ch%d addr %x no mem\n",
				       __func__, ch->nr, ch->addr);
				goto out;
			}
		}
	} else {
		list_for_each_entry(ch, &st->layer2, list) {
			if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
				ret = ch->send(ch, skb);
				if (!ret)
					skb = NULL;
				goto out;
			}
		}
		ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
		if (!ret)
			skb = NULL;
		else if (*debug & DEBUG_SEND_ERR)
			printk(KERN_DEBUG
			       "%s mgr prim(%x) err %d\n",
			       __func__, hh->prim, ret);
	}
out:
	mutex_unlock(&st->lmutex);
	if (skb)
		dev_kfree_skb(skb);
}
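
/*
 * Dispatch a dequeued message by the layer mask of its primitive: 0x1 goes
 * to the layer1 channel (and any bound L1 sockets), 0x2 to send_layer2(),
 * 0x4/0x8 to the channel matching the message id. Returns -ESRCH when no
 * receiver handled the message.
 */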
static inline int
send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int lm;

	lm = hh->prim & MISDN_LAYERMASK;
	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	if (lm == 0x1) {
		if (!hlist_empty(&st->l1sock.head)) {
			__net_timestamp(skb);
			send_socklist(&st->l1sock, skb);
		}
		return st->layer1->send(st->layer1, skb);
	} else if (lm == 0x2) {
		if (!hlist_empty(&st->l1sock.head))
			send_socklist(&st->l1sock, skb);
		send_layer2(st, skb);
		return 0;
	} else if (lm == 0x4) {
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, dev_name(&st->dev->dev), hh->prim,
			       hh->id);
	} else if (lm == 0x8) {
		WARN_ON(lm == 0x8);
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, dev_name(&st->dev->dev), hh->prim,
			       hh->id);
	} else {
		/* broadcast not handled yet */
		printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
		       __func__, dev_name(&st->dev->dev), hh->prim);
	}
	return -ESRCH;
}
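
/* clear the stack - currently a no-op */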
static void
do_clear_stack(struct mISDNstack *st)
{
}
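
/*
 * The per-stack kernel thread: dequeues messages and hands them to the
 * proper layer, and services the CLEARING/RESTART/STOPPED/ABORT status
 * bits. It sleeps on st->workq until new work or a state change arrives.
 */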
static int
mISDNStackd(void *data)
{
	struct mISDNstack *st = data;
#ifdef MISDN_MSG_STATS
	u64 utime, stime;
#endif
	int err = 0;

	sigfillset(&current->blocked);
	if (*debug & DEBUG_MSG_THREAD)
		printk(KERN_DEBUG "mISDNStackd %s started\n",
		       dev_name(&st->dev->dev));

	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}

	for (;;) {
		struct sk_buff *skb;

		if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
			test_and_clear_bit(mISDN_STACK_WORK, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
		} else
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
		while (test_bit(mISDN_STACK_WORK, &st->status)) {
			skb = skb_dequeue(&st->msgq);
			if (!skb) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				/* test if a race happens */
				skb = skb_dequeue(&st->msgq);
				if (!skb)
					continue;
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
			}
#ifdef MISDN_MSG_STATS
			st->msg_cnt++;
#endif
			err = send_msg_to_layer(st, skb);
			if (unlikely(err)) {
				if (*debug & DEBUG_SEND_ERR)
					printk(KERN_DEBUG
					       "%s: %s prim(%x) id(%x) send call(%d)\n",
					       __func__, dev_name(&st->dev->dev),
					       mISDN_HEAD_PRIM(skb),
					       mISDN_HEAD_ID(skb), err);
				dev_kfree_skb(skb);
				continue;
			}
			if (unlikely(test_bit(mISDN_STACK_STOPPED,
					      &st->status))) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				test_and_clear_bit(mISDN_STACK_RUNNING,
						   &st->status);
				break;
			}
		}
		if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
			test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
			do_clear_stack(st);
			test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
			test_and_set_bit(mISDN_STACK_RESTART, &st->status);
		}
		if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
			test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
			if (!skb_queue_empty(&st->msgq))
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
		}
		if (test_bit(mISDN_STACK_ABORT, &st->status))
			break;
		if (st->notify != NULL) {
			complete(st->notify);
			st->notify = NULL;
		}
#ifdef MISDN_MSG_STATS
		st->sleep_cnt++;
#endif
		test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
		wait_event_interruptible(st->workq, (st->status &
						     mISDN_STACK_ACTION_MASK));
		if (*debug & DEBUG_MSG_THREAD)
			printk(KERN_DEBUG "%s: %s wake status %08lx\n",
			       __func__, dev_name(&st->dev->dev), st->status);
		test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
		test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
		if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
			st->stopped_cnt++;
#endif
		}
	}
#ifdef MISDN_MSG_STATS
	printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
	       "msg %d sleep %d stopped\n",
	       dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
	       st->stopped_cnt);
	task_cputime(st->thread, &utime, &stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
	       dev_name(&st->dev->dev), utime, stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
	       dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
	printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
	       dev_name(&st->dev->dev));
#endif
	test_and_set_bit(mISDN_STACK_KILLED, &st->status);
	test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
	test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
	test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
	skb_queue_purge(&st->msgq);
	st->thread = NULL;
	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}
	return 0;
}
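
/* receive path from layer1: timestamp the skb and queue it on the stack */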
static int
l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
{
	if (!ch->st)
		return -ENODEV;
	__net_timestamp(skb);
	_queue_message(ch->st, skb);
	return 0;
}
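
/* encode SAPI (low byte) and TEI (next byte) into the channel address */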
void
set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
{
	ch->addr = sapi | (tei << 8);
}

void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	list_add_tail(&ch->list, &st->layer2);
}

void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	mutex_lock(&st->lmutex);
	__add_layer2(ch, st);
	mutex_unlock(&st->lmutex);
}
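
/* control requests on the stack's own channel are forwarded to layer1 */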
static int
st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	if (!ch->st || !ch->st->layer1)
		return -EINVAL;
	return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
}
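
/*
 * Allocate and initialise the stack for a device, create its TEI manager
 * and start the mISDNStackd thread; waits until the thread has signalled
 * that it is up and running.
 */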
int
create_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *newst;
	int err;
	DECLARE_COMPLETION_ONSTACK(done);

	newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
	if (!newst) {
		printk(KERN_ERR "kmalloc mISDN_stack failed\n");
		return -ENOMEM;
	}
	newst->dev = dev;
	INIT_LIST_HEAD(&newst->layer2);
	INIT_HLIST_HEAD(&newst->l1sock.head);
	rwlock_init(&newst->l1sock.lock);
	init_waitqueue_head(&newst->workq);
	skb_queue_head_init(&newst->msgq);
	mutex_init(&newst->lmutex);
	dev->D.st = newst;
	err = create_teimanager(dev);
	if (err) {
		printk(KERN_ERR "kmalloc teimanager failed\n");
		kfree(newst);
		return err;
	}
	dev->teimgr->peer = &newst->own;
	dev->teimgr->recv = mISDN_queue_message;
	dev->teimgr->st = newst;
	newst->layer1 = &dev->D;
	dev->D.recv = l1_receive;
	dev->D.peer = &newst->own;
	newst->own.st = newst;
	newst->own.ctrl = st_own_ctrl;
	newst->own.send = mISDN_queue_message;
	newst->own.recv = mISDN_queue_message;
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       dev_name(&newst->dev->dev));
	newst->notify = &done;
	newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
				    dev_name(&newst->dev->dev));
	if (IS_ERR(newst->thread)) {
		err = PTR_ERR(newst->thread);
		printk(KERN_ERR
		       "mISDN:cannot create kernel thread for %s (%d)\n",
		       dev_name(&newst->dev->dev), err);
		delete_teimanager(dev->teimgr);
		kfree(newst);
	} else
		wait_for_completion(&done);
	return err;
}
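
/*
 * Attach a socket channel directly to layer1 (TE/NT, S0/E1): open the
 * requested channel on the device's D interface and add the socket to the
 * stack's layer1 socket list.
 */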
int
connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol, adr->dev,
		       adr->channel, adr->sapi, adr->tei);
	switch (protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_S0:
	case ISDN_P_TE_E1:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.protocol = protocol;
		rq.adr.channel = adr->channel;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
		       dev->id);
		if (err)
			return err;
		write_lock_bh(&dev->D.st->l1sock.lock);
		sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
		write_unlock_bh(&dev->D.st->l1sock.lock);
		break;
	default:
		return -ENOPROTOOPT;
	}
	return 0;
}
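
/*
 * Connect a channel to a B-channel: either directly to a B-protocol the
 * hardware supports, or through a registered B-protocol module stacked on
 * top of the hardware channel.
 */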
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq, rq2;
	int pmask, err;
	struct Bprotocol *bp;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	ch->st = dev->D.st;
	pmask = 1 << (protocol & ISDN_P_B_MASK);
	if (pmask & dev->Bprotocols) {
		rq.protocol = protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err)
			return err;
		ch->recv = rq.ch->send;
		ch->peer = rq.ch;
		rq.ch->recv = ch->send;
		rq.ch->peer = ch;
		rq.ch->st = dev->D.st;
	} else {
		bp = get_Bprotocol4mask(pmask);
		if (!bp)
			return -ENOPROTOOPT;
		rq2.protocol = protocol;
		rq2.adr = *adr;
		rq2.ch = ch;
		err = bp->create(&rq2);
		if (err)
			return err;
		ch->recv = rq2.ch->send;
		ch->peer = rq2.ch;
		rq2.ch->st = dev->D.st;
		rq.protocol = rq2.protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err) {
			rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
			return err;
		}
		rq2.ch->recv = rq.ch->send;
		rq2.ch->peer = rq.ch;
		rq.ch->recv = rq2.ch->send;
		rq.ch->peer = rq2.ch;
		rq.ch->st = dev->D.st;
	}
	ch->protocol = protocol;
	ch->nr = rq.ch->nr;
	return 0;
}
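
/*
 * Create a layer2 entity (LAPD TE or NT): open the D-channel, let the TEI
 * manager create the layer2 channel and link it into the stack's layer2
 * list.
 */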
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
		u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	rq.protocol = ISDN_P_TE_S0;
	if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
		rq.protocol = ISDN_P_TE_E1;
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		rq.protocol = ISDN_P_NT_S0;
		if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
			rq.protocol = ISDN_P_NT_E1;
		/* fall through */
	case ISDN_P_LAPD_TE:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.adr.channel = 0;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
		if (err)
			break;
		rq.protocol = protocol;
		rq.adr = *adr;
		rq.ch = ch;
		err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
		if (!err) {
			if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
				break;
			add_layer2(rq.ch, dev->D.st);
			rq.ch->recv = mISDN_queue_message;
			rq.ch->peer = &dev->D.st->own;
			rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
		}
		break;
	default:
		err = -EPROTONOSUPPORT;
	}
	return err;
}
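
/*
 * Detach a channel from its stack and close it; the exact cleanup depends
 * on the channel protocol (B-channel peer, layer1 socket or LAPD layer2
 * entity handled via the TEI manager).
 */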
void
delete_channel(struct mISDNchannel *ch)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct mISDNchannel *pch;

	if (!ch->st) {
		printk(KERN_WARNING "%s: no stack\n", __func__);
		return;
	}
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
		       dev_name(&ch->st->dev->dev), ch->protocol);
	if (ch->protocol >= ISDN_P_B_START) {
		if (ch->peer) {
			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
			ch->peer = NULL;
		}
		return;
	}
	switch (ch->protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_TE_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_E1:
		write_lock_bh(&ch->st->l1sock.lock);
		sk_del_node_init(&msk->sk);
		write_unlock_bh(&ch->st->l1sock.lock);
		ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
		break;
	case ISDN_P_LAPD_TE:
		pch = get_channel4id(ch->st, ch->nr);
		if (pch) {
			mutex_lock(&ch->st->lmutex);
			list_del(&pch->list);
			mutex_unlock(&ch->st->lmutex);
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
			pch = ch->st->dev->teimgr;
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	case ISDN_P_LAPD_NT:
		pch = ch->st->dev->teimgr;
		if (pch) {
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	default:
		break;
	}
	return;
}
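
/*
 * Tear down a stack: remove the TEI manager, tell the stack thread to abort
 * and wait for it to exit, then free the stack structure.
 */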
void
delete_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *st = dev->D.st;
	DECLARE_COMPLETION_ONSTACK(done);

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       dev_name(&st->dev->dev));
	if (dev->teimgr)
		delete_teimanager(dev->teimgr);
	if (st->thread) {
		if (st->notify) {
			printk(KERN_WARNING "%s: notifier in use\n",
			       __func__);
			complete(st->notify);
		}
		st->notify = &done;
		test_and_set_bit(mISDN_STACK_ABORT, &st->status);
		test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
		wake_up_interruptible(&st->workq);
		wait_for_completion(&done);
	}
	if (!list_empty(&st->layer2))
		printk(KERN_WARNING "%s: layer2 list not empty\n",
		       __func__);
	if (!hlist_empty(&st->l1sock.head))
		printk(KERN_WARNING "%s: layer1 list not empty\n",
		       __func__);
	kfree(st);
}
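
/* record the pointer to the debug flags used throughout this file */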
void
mISDN_initstack(u_int *dp)
{
	debug = dp;
}