gen_stats.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400
  1. /*
  2. * net/core/gen_stats.c
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Authors: Thomas Graf <tgraf@suug.ch>
  10. * Jamal Hadi Salim
  11. * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  12. *
  13. * See Documentation/networking/gen_stats.txt
  14. */
  15. #include <linux/types.h>
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/socket.h>
  20. #include <linux/rtnetlink.h>
  21. #include <linux/gen_stats.h>
  22. #include <net/netlink.h>
  23. #include <net/gen_stats.h>
/* Append one netlink attribute of @size bytes from @buf to d->skb.
 *
 * Returns 0 on success.  On failure (no room left in the skb) the
 * statistics lock is released and the compat xstats duplicate saved by
 * gnet_stats_copy_app() is discarded before returning -1, matching the
 * error contract of all gnet_stats_*() dump helpers.
 */
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	/* Error path: drop the lock taken in gnet_stats_start_copy*()
	 * and free any saved xstats copy.
	 */
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}
/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use a container for all
 * other statistic TLVS.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	/* Remember where the container TLV starts; its length is patched
	 * up later by gnet_stats_finish_copy().
	 */
	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use a container for all
 * other statistic TLVS.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	/* Compat TLV types of 0 leave backward compatibility mode off. */
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
/* Sum the per-CPU basic counters into @bstats.
 *
 * Each CPU's (bytes, packets) pair is read inside a u64_stats fetch
 * loop so a consistent snapshot of both values is accumulated.
 */
static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		/* Retry the read if the writer updated the counters
		 * while we were fetching them.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}
/* Snapshot basic stats into @bstats.
 *
 * If a per-CPU copy (@cpu) exists it takes precedence and is summed;
 * otherwise @b is copied under the @running seqcount (when provided)
 * so bytes and packets form a consistent pair.
 */
void
__gnet_stats_copy_basic(const seqcount_t *running,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	unsigned int seq;

	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
		return;
	}
	do {
		/* seq is only read back below when running != NULL,
		 * so it is never used uninitialized.
		 */
		if (running)
			seq = read_seqcount_begin(running);
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	} while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	/* In compat mode also mirror the values into the local
	 * struct tc_stats emitted by gnet_stats_finish_copy().
	 */
	if (d->compat_tc_stats) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		/* Clear padding before copying to user-visible skb. */
		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb),
				       TCA_STATS_PAD);
	}
	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @b: basic statistics
 * @r: rate estimator statistics
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 const struct gnet_stats_basic_packed *b,
			 struct gnet_stats_rate_est64 *r)
{
	struct gnet_stats_rate_est est;
	int res;

	/* Nothing to report if no estimator is running for @b. */
	if (b && !gen_estimator_active(b, r))
		return 0;

	/* Legacy 32-bit attribute: clamp bps to what fits. */
	est.bps = min_t(u64, UINT_MAX, r->bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = r->pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		/* If bps fit in 32 bits (no clamping happened), the
		 * legacy attribute is sufficient.
		 */
		if (res < 0 || est.bps == r->bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r),
				       TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
  225. static void
  226. __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
  227. const struct gnet_stats_queue __percpu *q)
  228. {
  229. int i;
  230. for_each_possible_cpu(i) {
  231. const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
  232. qstats->qlen = 0;
  233. qstats->backlog += qcpu->backlog;
  234. qstats->drops += qcpu->drops;
  235. qstats->requeues += qcpu->requeues;
  236. qstats->overlimits += qcpu->overlimits;
  237. }
  238. }
/* Fill @qstats either from the per-CPU copy @cpu (summed) or from the
 * plain copy @q, then force qlen to the caller-supplied @qlen.
 */
static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
				    const struct gnet_stats_queue __percpu *cpu,
				    const struct gnet_stats_queue *q,
				    __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	/* qlen always comes from the caller, overriding whatever was
	 * copied from the stats above.
	 */
	qstats->qlen = qlen;
}
/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Using per cpu queue statistics if
 * they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	/* In compat mode also mirror the values into the local
	 * struct tc_stats emitted by gnet_stats_finish_copy().
	 */
	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		/* Duplicate for the compat TLV emitted later by
		 * gnet_stats_finish_copy().  GFP_ATOMIC because the
		 * statistics lock may be held here.
		 */
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	/* Same error contract as gnet_stats_copy(): release the lock.
	 * d->xstats is already NULL here (kmemdup failed).
	 */
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	/* Patch the container TLV length to cover everything appended
	 * to the skb since gnet_stats_start_copy*() recorded d->tail.
	 */
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	/* On failure gnet_stats_copy() already released the lock and
	 * freed d->xstats, so we can return straight away.
	 */
	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);