/* ordered-events.c */
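/*
 * Maintain a queue of perf events sorted by timestamp, and flush them in
 * time order through a deliver callback. perf uses this to merge events
 * that arrive out of order from per-CPU ring buffers.
 */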

#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

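/*
 * Insert a new event into the time-sorted list. The search starts at
 * oe->last (the most recently queued event) and walks forward or backward
 * from there, so queueing a nearly-sorted stream costs close to O(1) per
 * event rather than a full list scan.
 */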
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

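/*
 * With copy_on_queue set, queued events are duplicated so the caller's
 * buffer may be reused immediately; cur_alloc_size accounts the copies
 * against max_alloc_size. Without it, the caller's pointer is stored
 * as-is and free_dup_event() is a no-op.
 */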
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	/* Tolerate NULL: buffer head entries carry no duplicated event. */
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

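/*
 * ordered_event nodes are carved out of ~64KB arrays allocated in bulk
 * and threaded onto oe->to_free via their (otherwise unused) first entry.
 * Nodes released by ordered_events__delete() are recycled through
 * oe->cache before a new array is allocated.
 */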
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))

static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/*
		 * First entry is abused to maintain the to_free list; it
		 * never carries an event, so clear the pointer to keep
		 * ordered_events__free() from freeing a garbage pointer.
		 */
		oe->buffer->event = NULL;
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/* 'new' is still NULL here: drop the copy and fail. */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

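/*
 * Return a delivered event's node to the cache for reuse and release its
 * duplicated payload, if any.
 */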
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}

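/*
 * Queue one event keyed by its sample time. Events without a valid
 * timestamp are rejected with -ETIME. If allocation fails, a HALF flush
 * is attempted to reclaim memory and the allocation is retried once.
 */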
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  struct perf_sample *sample, u64 file_offset)
{
	u64 timestamp = sample->time;
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

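/*
 * Deliver and release every queued event with a timestamp up to
 * oe->next_flush, advancing oe->last_flush as events go out. A progress
 * bar is shown only for an unbounded (final) flush.
 */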
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

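/*
 * Set the flush limit according to 'how', then flush:
 *   FINAL - flush everything (next_flush = ULLONG_MAX)
 *   HALF  - flush up to the midpoint between the first and last queued
 *           timestamps, to relieve memory pressure
 *   ROUND - flush up to the max_timestamp recorded by the previous round;
 *           on success, next_flush advances to the current max_timestamp
 *   NONE  - keep whatever next_flush already holds
 */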
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

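/*
 * A minimal usage sketch. The deliver callback's exact signature is
 * declared in ordered-events.h; 'deliver_event' below is illustrative,
 * shaped after the oe->deliver(oe, iter) call above:
 *
 *	static int deliver_event(struct ordered_events *oe,
 *				 struct ordered_event *event)
 *	{
 *		... process event->event in event->timestamp order ...
 *		return 0;
 *	}
 *
 *	ordered_events__init(&oe, deliver_event);
 *	while (have_events)
 *		ordered_events__queue(&oe, event, &sample, file_offset);
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */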
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
}

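/*
 * Free the bulk arrays linked on to_free. Every node lives inside one of
 * these arrays (the cache list points into the same storage), so no
 * per-node freeing is needed.
 */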
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}