/*
 * sch_plug.c	Queue traffic until an explicit release command
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * There are two ways to use this qdisc:
 * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating
 *    sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands.
 *
 * 2. For network output buffering (a.k.a. output commit) functionality.
 *    The output commit property is commonly used by applications that rely on
 *    checkpoint-based fault-tolerance to ensure that the checkpoint from which
 *    a system is being restored is consistent w.r.t. the outside world.
 *
 *    Consider, for example, Remus, a virtual machine checkpointing system,
 *    wherein a VM is checkpointed, say, every 50ms. The checkpoint is
 *    replicated asynchronously to the backup host, while the VM continues
 *    executing the next epoch speculatively.
 *
 *    The following is a typical sequence of output buffer operations:
 *       1. At epoch i, start_buffer(i)
 *       2. At end of epoch i (i.e. after 50ms):
 *          2.1 Stop VM and take checkpoint(i).
 *          2.2 start_buffer(i+1) and resume VM.
 *       3. While speculatively executing epoch(i+1), asynchronously replicate
 *          checkpoint(i) to the backup host.
 *       4. When checkpoint_ack(i) is received from the backup, release_buffer(i).
 *    Thus, this qdisc would receive the following sequence of commands:
 *       TCQ_PLUG_BUFFER (epoch i)
 *       .. TCQ_PLUG_BUFFER (epoch i+1)
 *       ....TCQ_PLUG_RELEASE_ONE (epoch i)
 *       ......TCQ_PLUG_BUFFER (epoch i+2)
 *       ........
 */
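
/*
 * Illustrative sketch (not part of the scheduler logic itself): the command
 * sequence above is issued from user space through this qdisc's netlink
 * change operation (plug_change() below), with a struct tc_plug_qopt from
 * linux/pkt_sched.h as the payload. Roughly, a checkpointing daemon might do
 * the following, where send_plug_cmd() is a hypothetical helper that wraps
 * the tc netlink plumbing (building TCA_OPTIONS and sending the message):
 *
 *	// at every epoch boundary (step 2.2): start buffering the new epoch
 *	struct tc_plug_qopt plug = { .action = TCQ_PLUG_BUFFER };
 *	send_plug_cmd(ifindex, &plug);		// hypothetical helper
 *
 *	// once checkpoint_ack(i) arrives (step 4): release epoch i's packets
 *	struct tc_plug_qopt release = { .action = TCQ_PLUG_RELEASE_ONE };
 *	send_plug_cmd(ifindex, &release);	// hypothetical helper
 */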

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/*
 * State of the queue, when used for network output buffering:
 *
 *                 plug(i+1)            plug(i)          head
 * ------------------+--------------------+---------------->
 *                   |                    |
 *                   |                    |
 * pkts_current_epoch| pkts_last_epoch    |pkts_to_release
 * ----------------->|<--------+--------->|+--------------->
 *                   v                    v
 *
 */

struct plug_sched_data {
	/* If true, the dequeue function releases all packets
	 * from head to end of the queue. The queue turns into
	 * a pass-through queue for newly arriving packets.
	 */
	bool unplug_indefinite;

	/* Queue limit in bytes */
	u32 limit;

	/* Number of packets (output) from the current speculatively
	 * executing epoch.
	 */
	u32 pkts_current_epoch;

	/* Number of packets corresponding to the recently finished
	 * epoch. These will be released when we receive a
	 * TCQ_PLUG_RELEASE_ONE command. This command is typically
	 * issued after committing a checkpoint at the target.
	 */
	u32 pkts_last_epoch;

	/* Number of packets from the head of the queue that can
	 * be released (their checkpoint has been committed).
	 */
	u32 pkts_to_release;
};
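
/*
 * Worked example (illustrative): with plugs for epochs i and i+1 in the
 * queue, as in the diagram above, suppose epoch i emitted 3 packets and
 * epoch i+1 has emitted 2 so far. Then pkts_last_epoch == 3 (epoch i,
 * waiting for checkpoint_ack(i)), pkts_current_epoch == 2 (epoch i+1,
 * still speculative), and pkts_to_release counts any packets from earlier
 * epochs that have been released but not yet pulled by the device.
 * TCQ_PLUG_RELEASE_ONE then adds the 3 packets to pkts_to_release and
 * zeroes pkts_last_epoch; the next TCQ_PLUG_BUFFER rolls pkts_current_epoch
 * over into pkts_last_epoch.
 */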

static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
		if (!q->unplug_indefinite)
			q->pkts_current_epoch++;
		return qdisc_enqueue_tail(skb, sch);
	}

	/* Over the byte limit: qdisc_reshape_fail() counts a drop and
	 * frees the skb.
	 */
	return qdisc_reshape_fail(skb, sch);
}

static struct sk_buff *plug_dequeue(struct Qdisc *sch)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	if (qdisc_is_throttled(sch))
		return NULL;

	if (!q->unplug_indefinite) {
		if (!q->pkts_to_release) {
			/* No more packets to dequeue. Block the queue
			 * and wait for the next release command.
			 */
			qdisc_throttled(sch);
			return NULL;
		}
		q->pkts_to_release--;
	}

	return qdisc_dequeue_head(sch);
}

static int plug_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	q->pkts_current_epoch = 0;
	q->pkts_last_epoch = 0;
	q->pkts_to_release = 0;
	q->unplug_indefinite = false;

	if (opt == NULL) {
		/* We will set a default limit of 100 pkts (~150kB)
		 * in case tx_queue_len is not available. The
		 * default value is completely arbitrary.
		 */
		u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
		q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
	} else {
		struct tc_plug_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	}

	/* Start out plugged: nothing is dequeued until a release command. */
	qdisc_throttled(sch);
	return 0;
}

/* Receives 4 types of messages:
 * TCQ_PLUG_BUFFER: Insert a plug into the queue and
 *  buffer any incoming packets.
 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
 *  to the beginning of the next plug.
 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
 *  Stop buffering packets until the next TCQ_PLUG_BUFFER
 *  command is received (just act as a pass-through queue).
 * TCQ_PLUG_LIMIT: Increase/decrease queue size.
 * (A sketch of the netlink payload carrying these commands follows
 *  plug_change() below.)
 */
static int plug_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct plug_sched_data *q = qdisc_priv(sch);
	struct tc_plug_qopt *msg;

	if (opt == NULL)
		return -EINVAL;

	msg = nla_data(opt);
	if (nla_len(opt) < sizeof(*msg))
		return -EINVAL;

	switch (msg->action) {
	case TCQ_PLUG_BUFFER:
		/* Save size of the current buffer */
		q->pkts_last_epoch = q->pkts_current_epoch;
		q->pkts_current_epoch = 0;
		if (q->unplug_indefinite)
			qdisc_throttled(sch);
		q->unplug_indefinite = false;
		break;
	case TCQ_PLUG_RELEASE_ONE:
		/* Add packets from the last complete buffer to the
		 * packets to be released set.
		 */
		q->pkts_to_release += q->pkts_last_epoch;
		q->pkts_last_epoch = 0;
		qdisc_unthrottled(sch);
		netif_schedule_queue(sch->dev_queue);
		break;
	case TCQ_PLUG_RELEASE_INDEFINITE:
		q->unplug_indefinite = true;
		q->pkts_to_release = 0;
		q->pkts_last_epoch = 0;
		q->pkts_current_epoch = 0;
		qdisc_unthrottled(sch);
		netif_schedule_queue(sch->dev_queue);
		break;
	case TCQ_PLUG_LIMIT:
		/* Limit is supplied in bytes */
		q->limit = msg->limit;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
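
/*
 * Illustrative only: each of the four commands above reaches plug_change()
 * as a struct tc_plug_qopt (the same structure plug_init() parses) nested in
 * the qdisc's TCA_OPTIONS netlink attribute; only TCQ_PLUG_LIMIT makes use of
 * the limit field. A minimal sketch of the payload a user-space tool might
 * build, with the byte value chosen purely as an example:
 *
 *	struct tc_plug_qopt grow = {
 *		.action	= TCQ_PLUG_LIMIT,
 *		.limit	= 1 << 20,	// raise the queue limit to 1 MiB
 *	};
 */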

static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
	.id          = "plug",
	.priv_size   = sizeof(struct plug_sched_data),
	.enqueue     = plug_enqueue,
	.dequeue     = plug_dequeue,
	.peek        = qdisc_peek_head,
	.init        = plug_init,
	.change      = plug_change,
	.owner       = THIS_MODULE,
};

static int __init plug_module_init(void)
{
	return register_qdisc(&plug_qdisc_ops);
}

static void __exit plug_module_exit(void)
{
	unregister_qdisc(&plug_qdisc_ops);
}
module_init(plug_module_init)
module_exit(plug_module_exit)

MODULE_LICENSE("GPL");