sch_atm.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691
  1. /* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
  2. /* Written 1998-2000 by Werner Almesberger, EPFL ICA */
  3. #include <linux/module.h>
  4. #include <linux/slab.h>
  5. #include <linux/init.h>
  6. #include <linux/string.h>
  7. #include <linux/errno.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/atmdev.h>
  10. #include <linux/atmclip.h>
  11. #include <linux/rtnetlink.h>
  12. #include <linux/file.h> /* for fput */
  13. #include <net/netlink.h>
  14. #include <net/pkt_sched.h>
  15. extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
  16. /*
  17. * The ATM queuing discipline provides a framework for invoking classifiers
  18. * (aka "filters"), which in turn select classes of this queuing discipline.
  19. * Each class maps the flow(s) it is handling to a given VC. Multiple classes
  20. * may share the same VC.
  21. *
  22. * When creating a class, VCs are specified by passing the number of the open
  23. * socket descriptor by which the calling process references the VC. The kernel
  24. * keeps the VC open at least until all classes using it are removed.
  25. *
  26. * In this file, most functions are named atm_tc_* to avoid confusion with all
  27. * the atm_* in net/atm. This naming convention differs from what's used in the
  28. * rest of net/sched.
  29. *
  30. * Known bugs:
  31. * - sometimes messes up the IP stack
* - any manipulations besides the few operations described in the README are
*   untested and likely to crash the system
  34. * - should lock the flow while there is data in the queue (?)
  35. */
  36. #define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
/*
 * One class ("flow") of the ATM qdisc.  Packets classified into a flow are
 * queued on flow->q and transmitted on flow->vcc.  The structure is allocated
 * with hdr_len extra bytes so the encapsulation header lives inline in hdr[].
 */
struct atm_flow_data {
	struct Qdisc		*q;		/* FIFO, TBF, etc. */
	struct tcf_proto	*filter_list;	/* classifiers attached to this class */
	struct atm_vcc		*vcc;		/* VCC; NULL if VCC is closed */
	void (*old_pop)(struct atm_vcc *vcc,
			struct sk_buff *skb);	/* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	u32			classid;	/* x:y type ID */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;	/* byte/packet counters */
	struct gnet_stats_queue	qstats;		/* queue/drop counters */
	struct list_head	list;		/* membership in parent->flows */
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;	/* length of hdr[] below */
	unsigned char		hdr[0];		/* header data; MUST BE LAST */
};
/* Per-qdisc private data (returned by qdisc_priv()). */
struct atm_qdisc_data {
	struct atm_flow_data	link;	/* unclassified skbs go here */
	struct list_head	flows;	/* NB: "link" is also on this
					   list */
	struct tasklet_struct	task;	/* dequeue tasklet */
};
  61. /* ------------------------- Class/flow operations ------------------------- */
  62. static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
  63. {
  64. struct atm_qdisc_data *p = qdisc_priv(sch);
  65. struct atm_flow_data *flow;
  66. list_for_each_entry(flow, &p->flows, list) {
  67. if (flow->classid == classid)
  68. return flow;
  69. }
  70. return NULL;
  71. }
  72. static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
  73. struct Qdisc *new, struct Qdisc **old)
  74. {
  75. struct atm_qdisc_data *p = qdisc_priv(sch);
  76. struct atm_flow_data *flow = (struct atm_flow_data *)arg;
  77. pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
  78. sch, p, flow, new, old);
  79. if (list_empty(&flow->list))
  80. return -EINVAL;
  81. if (!new)
  82. new = &noop_qdisc;
  83. *old = flow->q;
  84. flow->q = new;
  85. if (*old)
  86. qdisc_reset(*old);
  87. return 0;
  88. }
  89. static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
  90. {
  91. struct atm_flow_data *flow = (struct atm_flow_data *)cl;
  92. pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
  93. return flow ? flow->q : NULL;
  94. }
  95. static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
  96. {
  97. struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
  98. struct atm_flow_data *flow;
  99. pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
  100. flow = lookup_flow(sch, classid);
  101. if (flow)
  102. flow->ref++;
  103. pr_debug("atm_tc_get: flow %p\n", flow);
  104. return (unsigned long)flow;
  105. }
/* Binding a filter to a class is just another reference on it, so this is
 * simply atm_tc_get(); the unbind side is atm_tc_put() (see atm_class_ops). */
static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return atm_tc_get(sch, classid);
}
/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;		/* still referenced elsewhere */
	pr_debug("atm_tc_put: destroying\n");
	/* Unlink first so concurrent lookups can no longer find the flow. */
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_destroy(flow->q);
	tcf_destroy_chain(&flow->filter_list);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		/* Restore the pop handler we intercepted in atm_tc_change. */
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	/* Drop the reference we took on the excess flow (may recurse and
	 * destroy it in turn). */
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);	/* p->link is embedded, never freed here */
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}
  143. static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
  144. {
  145. struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
  146. pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
  147. VCC2FLOW(vcc)->old_pop(vcc, skb);
  148. tasklet_schedule(&p->task);
  149. }
/* Default RFC 1483 LLC/SNAP encapsulation header for IP, used when the
 * class is created without an explicit TCA_ATM_HDR attribute. */
static const u8 llc_oui_ip[] = {
	0xaa,		/* DSAP: non-ISO */
	0xaa,		/* SSAP: non-ISO */
	0x03,		/* Ctrl: Unnumbered Information Command PDU */
	0x00,		/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};			/* Ethertype IP (0800) */
/* Netlink attribute policy for atm_tc_change(); TCA_ATM_HDR is deliberately
 * unconstrained here and validated by length in atm_tc_change(). */
static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};
/*
 * Create a new class bound to the ATM VC referenced by the TCA_ATM_FD socket
 * descriptor.  Existing classes cannot be modified (returns -EBUSY).  On
 * success the socket reference is kept until the class is destroyed, and the
 * VCC's pop handler is intercepted by sch_atm_pop().
 *
 * Returns 0 on success or a negative errno.
 */
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API. In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;
	error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy);
	if (error < 0)
		return error;
	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		/* Caller-supplied encapsulation header, copied verbatim. */
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		/* Takes a reference on the excess flow; dropped in err_out
		 * or, on success, when this class is destroyed. */
		excess = (struct atm_flow_data *)
			atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		/* Auto-allocate a free minor in the 0x8000+ range.
		 * NOTE(review): if all 0x7fff ids are taken, the loop falls
		 * through and reuses the last (busy) classid — confirm this
		 * is acceptable before relying on it. */
		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_get(sch, classid);
			if (!cl)
				break;
			atm_tc_put(sch, cl);
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	/* hdr_len extra bytes for the inline encapsulation header (hdr[]). */
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}
	flow->filter_list = NULL;
	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;	/* for VCC2FLOW() in sch_atm_pop() */
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	/* Intercept the VCC's pop handler so transmit completions can kick
	 * the dequeue tasklet; restored by atm_tc_put(). */
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	if (excess)
		atm_tc_put(sch, (unsigned long)excess);
	sockfd_put(sock);
	return error;
}
  278. static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
  279. {
  280. struct atm_qdisc_data *p = qdisc_priv(sch);
  281. struct atm_flow_data *flow = (struct atm_flow_data *)arg;
  282. pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
  283. if (list_empty(&flow->list))
  284. return -EINVAL;
  285. if (flow->filter_list || flow == &p->link)
  286. return -EBUSY;
  287. /*
  288. * Reference count must be 2: one for "keepalive" (set at class
  289. * creation), and one for the reference held when calling delete.
  290. */
  291. if (flow->ref < 2) {
  292. pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
  293. return -EINVAL;
  294. }
  295. if (flow->ref > 2)
  296. return -EBUSY; /* catch references via excess, etc. */
  297. atm_tc_put(sch, arg);
  298. return 0;
  299. }
  300. static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
  301. {
  302. struct atm_qdisc_data *p = qdisc_priv(sch);
  303. struct atm_flow_data *flow;
  304. pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
  305. if (walker->stop)
  306. return;
  307. list_for_each_entry(flow, &p->flows, list) {
  308. if (walker->count >= walker->skip &&
  309. walker->fn(sch, (unsigned long)flow, walker) < 0) {
  310. walker->stop = 1;
  311. break;
  312. }
  313. walker->count++;
  314. }
  315. }
  316. static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
  317. {
  318. struct atm_qdisc_data *p = qdisc_priv(sch);
  319. struct atm_flow_data *flow = (struct atm_flow_data *)cl;
  320. pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
  321. return flow ? &flow->filter_list : &p->link.filter_list;
  322. }
  323. /* --------------------------- Qdisc operations ---------------------------- */
/*
 * Classify an skb to a flow (via skb->priority or the attached filters) and
 * enqueue it on that flow's inner qdisc.  Packets destined for a VC are
 * reported to the caller as __NET_XMIT_BYPASS so the outer qdisc does not
 * try to dequeue them later; only packets on the default "link" flow count
 * toward sch->q.qlen.
 */
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_POLICED;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_POLICE_OK;	/* be nice to gcc */
	flow = NULL;
	/* Fast path: skb->priority may directly name one of our classids. */
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
		/* Slow path: run the first flow's filter chain that exists. */
		list_for_each_entry(flow, &p->flows, list) {
			if (flow->filter_list) {
				result = tc_classify_compat(skb,
							    flow->filter_list,
							    &res);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;	/* no filters matched / none attached */
done:
		;
	}
	if (!flow) {
		flow = &p->link;	/* unclassified traffic */
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			kfree_skb(skb);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			kfree_skb(skb);
			goto drop;
		case TC_POLICE_RECLASSIFY:
			/* Over-limit traffic: divert to the excess flow, or
			 * tag the cell with CLP so the network drops it
			 * first. */
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}
	ret = qdisc_enqueue(skb, flow->q);
	if (ret != NET_XMIT_SUCCESS) {
		/* NB: TC_ACT_SHOT above jumps here with skb already freed. */
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	qdisc_bstats_update(sch, skb);
	bstats_update(&flow->bstats, skb);
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qdisc needs to reflect whether
	 * there is a packet egligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */
static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		/* The link flow is drained by atm_tc_dequeue(), not here. */
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			/* Stop as soon as the VC has no send room left;
			 * sch_atm_pop() reschedules us when room frees up. */
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;	/* drop on OOM */
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			/* Prepend the class's encapsulation header. */
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			/* Account the buffer against the VC's send budget;
			 * released again via the VCC pop path. */
			atomic_add(skb->truesize,
				   &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}
  454. static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
  455. {
  456. struct atm_qdisc_data *p = qdisc_priv(sch);
  457. struct sk_buff *skb;
  458. pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
  459. tasklet_schedule(&p->task);
  460. skb = qdisc_dequeue_peeked(p->link.q);
  461. if (skb)
  462. sch->q.qlen--;
  463. return skb;
  464. }
  465. static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
  466. {
  467. struct atm_qdisc_data *p = qdisc_priv(sch);
  468. pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
  469. return p->link.q->ops->peek(p->link.q);
  470. }
  471. static unsigned int atm_tc_drop(struct Qdisc *sch)
  472. {
  473. struct atm_qdisc_data *p = qdisc_priv(sch);
  474. struct atm_flow_data *flow;
  475. unsigned int len;
  476. pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
  477. list_for_each_entry(flow, &p->flows, list) {
  478. if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
  479. return len;
  480. }
  481. return 0;
  482. }
  483. static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
  484. {
  485. struct atm_qdisc_data *p = qdisc_priv(sch);
  486. pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
  487. INIT_LIST_HEAD(&p->flows);
  488. INIT_LIST_HEAD(&p->link.list);
  489. list_add(&p->link.list, &p->flows);
  490. p->link.q = qdisc_create_dflt(sch->dev_queue,
  491. &pfifo_qdisc_ops, sch->handle);
  492. if (!p->link.q)
  493. p->link.q = &noop_qdisc;
  494. pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
  495. p->link.filter_list = NULL;
  496. p->link.vcc = NULL;
  497. p->link.sock = NULL;
  498. p->link.classid = sch->handle;
  499. p->link.ref = 1;
  500. tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
  501. return 0;
  502. }
/* Reset every flow's inner qdisc and zero the visible queue length. */
static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
	/* Only link-flow packets are counted in sch->q.qlen (see enqueue). */
	sch->q.qlen = 0;
}
/* Tear down the qdisc: destroy all filter chains first (they hold class
 * references via bind_tcf), then drop every flow's remaining reference. */
static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		tcf_destroy_chain(&flow->filter_list);
	/* _safe: atm_tc_put() unlinks and may free the flow we stand on. */
	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}
/*
 * Dump one class to a netlink message: the encapsulation header, and, if a
 * VC is attached, its PVC address and state.  The NLA_PUT* macros jump to
 * nla_put_failure when the skb runs out of room.
 */
static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		 sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
		state = ATM_VF2VS(flow->vcc->flags);
		NLA_PUT_U32(skb, TCA_ATM_STATE, state);
	}
	/* NOTE(review): this dumps flow->classid, not flow->excess->classid —
	 * looks suspicious, but it is the long-standing behavior here;
	 * confirm against userspace tc before changing. */
	if (flow->excess)
		NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
	else
		NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
  563. static int
  564. atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
  565. struct gnet_dump *d)
  566. {
  567. struct atm_flow_data *flow = (struct atm_flow_data *)arg;
  568. flow->qstats.qlen = flow->q->q.qlen;
  569. if (gnet_stats_copy_basic(d, &flow->bstats) < 0 ||
  570. gnet_stats_copy_queue(d, &flow->qstats) < 0)
  571. return -1;
  572. return 0;
  573. }
/* The qdisc itself has no options to dump; per-class data is reported by
 * atm_tc_dump_class(). */
static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}
/* Class operations table; note unbind_tcf reuses atm_tc_put since binding
 * a filter (atm_tc_bind_filter) just takes a class reference. */
static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.get		= atm_tc_get,
	.put		= atm_tc_put,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_chain	= atm_tc_find_tcf,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};
/* Qdisc operations table registered as "atm". */
static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.drop		= atm_tc_drop,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};
/* Module init: register the "atm" qdisc with the packet scheduler core. */
static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}
/* Module exit: unregister the "atm" qdisc. */
static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");