/* block-range.c */
  1. #include "block-range.h"
  2. #include "annotate.h"
/*
 * File-global accounting of all known basic-block ranges:
 *  @root:   rb-tree of sorted, non-overlapping block_range entries
 *  @blocks: count of basic blocks recorded via block_range__create()
 */
struct {
	struct rb_root root;
	u64 blocks;
} block_ranges;
  7. static void block_range__debug(void)
  8. {
  9. /*
  10. * XXX still paranoid for now; see if we can make this depend on
  11. * DEBUG=1 builds.
  12. */
  13. #if 1
  14. struct rb_node *rb;
  15. u64 old = 0; /* NULL isn't executable */
  16. for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
  17. struct block_range *entry = rb_entry(rb, struct block_range, node);
  18. assert(old < entry->start);
  19. assert(entry->start <= entry->end); /* single instruction block; jump to a jump */
  20. old = entry->end;
  21. }
  22. #endif
  23. }
  24. struct block_range *block_range__find(u64 addr)
  25. {
  26. struct rb_node **p = &block_ranges.root.rb_node;
  27. struct rb_node *parent = NULL;
  28. struct block_range *entry;
  29. while (*p != NULL) {
  30. parent = *p;
  31. entry = rb_entry(parent, struct block_range, node);
  32. if (addr < entry->start)
  33. p = &parent->rb_left;
  34. else if (addr > entry->end)
  35. p = &parent->rb_right;
  36. else
  37. return entry;
  38. }
  39. return NULL;
  40. }
  41. static inline void rb_link_left_of_node(struct rb_node *left, struct rb_node *node)
  42. {
  43. struct rb_node **p = &node->rb_left;
  44. while (*p) {
  45. node = *p;
  46. p = &node->rb_right;
  47. }
  48. rb_link_node(left, node, p);
  49. }
  50. static inline void rb_link_right_of_node(struct rb_node *right, struct rb_node *node)
  51. {
  52. struct rb_node **p = &node->rb_right;
  53. while (*p) {
  54. node = *p;
  55. p = &node->rb_left;
  56. }
  57. rb_link_node(right, node, p);
  58. }
/**
 * block_range__create
 * @start: branch target starting this basic block
 * @end: branch ending this basic block
 *
 * Create all the required block ranges to precisely span the given range.
 * Existing overlapping ranges are split at @start/@end and holes between
 * them are filled, so that afterwards [@start..@end] is covered by a
 * contiguous chain of ranges.
 *
 * Returns an iterator {start, end} over that chain. On allocation failure
 * a partially-filled iterator is returned (one or both pointers NULL);
 * the tree is left consistent up to the point of failure.
 */
struct block_range_iter block_range__create(u64 start, u64 end)
{
	struct rb_node **p = &block_ranges.root.rb_node;
	struct rb_node *n, *parent = NULL;
	struct block_range *next, *entry = NULL;
	struct block_range_iter iter = { NULL, NULL };

	/*
	 * Find the range containing @start; if none exists, @p ends up at
	 * the empty slot where such a range would be inserted and @parent
	 * is the last node visited.
	 */
	while (*p != NULL) {
		parent = *p;
		entry = rb_entry(parent, struct block_range, node);

		if (start < entry->start)
			p = &parent->rb_left;
		else if (start > entry->end)
			p = &parent->rb_right;
		else
			break;
	}

	/*
	 * Didn't find anything.. there's a hole at @start, however @end might
	 * be inside/behind the next range.
	 */
	if (!*p) {
		if (!entry) /* tree empty */
			goto do_whole;

		/*
		 * If the last node is before, advance one to find the next.
		 */
		n = parent;
		if (entry->end < start) {
			n = rb_next(n);
			if (!n)
				goto do_whole;
		}
		next = rb_entry(n, struct block_range, node);

		if (next->start <= end) { /* add head: [start...][n->start...] */
			struct block_range *head = malloc(sizeof(struct block_range));
			if (!head)
				return iter;

			/* New head covers the hole up to @next, marked as a target. */
			*head = (struct block_range){
				.start = start,
				.end = next->start - 1,
				.is_target = 1,
				.is_branch = 0,
			};

			rb_link_left_of_node(&head->node, &next->node);
			rb_insert_color(&head->node, &block_ranges.root);
			block_range__debug();

			iter.start = head;
			goto do_tail;
		}

do_whole:
		/*
		 * The whole [start..end] range is non-overlapping.
		 */
		entry = malloc(sizeof(struct block_range));
		if (!entry)
			return iter;

		*entry = (struct block_range){
			.start = start,
			.end = end,
			.is_target = 1,
			.is_branch = 1,
		};

		rb_link_node(&entry->node, parent, p);
		rb_insert_color(&entry->node, &block_ranges.root);
		block_range__debug();

		iter.start = entry;
		iter.end = entry;
		goto done;
	}

	/*
	 * We found a range that overlapped with ours, split if needed.
	 */
	if (entry->start < start) { /* split: [e->start...][start...] */
		struct block_range *head = malloc(sizeof(struct block_range));
		if (!head)
			return iter;

		/* Head inherits the stats of the range it was split from. */
		*head = (struct block_range){
			.start = entry->start,
			.end = start - 1,
			.is_target = entry->is_target,
			.is_branch = 0,
			.coverage = entry->coverage,
			.entry = entry->entry,
		};

		/* @entry shrinks to begin at @start and becomes a target. */
		entry->start = start;
		entry->is_target = 1;
		entry->entry = 0;

		rb_link_left_of_node(&head->node, &entry->node);
		rb_insert_color(&head->node, &block_ranges.root);
		block_range__debug();

	} else if (entry->start == start)
		entry->is_target = 1;

	iter.start = entry;

do_tail:
	/*
	 * At this point we've got: @iter.start = [@start...] but @end can still be
	 * inside or beyond it.
	 */
	entry = iter.start;
	for (;;) {
		/*
		 * If @end is inside @entry, split.
		 */
		if (end < entry->end) { /* split: [...end][...e->end] */
			struct block_range *tail = malloc(sizeof(struct block_range));
			if (!tail)
				return iter;

			/* Tail keeps the original stats; @entry ends at @end. */
			*tail = (struct block_range){
				.start = end + 1,
				.end = entry->end,
				.is_target = 0,
				.is_branch = entry->is_branch,
				.coverage = entry->coverage,
				.taken = entry->taken,
				.pred = entry->pred,
			};

			entry->end = end;
			entry->is_branch = 1;
			entry->taken = 0;
			entry->pred = 0;

			rb_link_right_of_node(&tail->node, &entry->node);
			rb_insert_color(&tail->node, &block_ranges.root);
			block_range__debug();

			iter.end = entry;
			goto done;
		}

		/*
		 * If @end matches @entry, done
		 */
		if (end == entry->end) {
			entry->is_branch = 1;
			iter.end = entry;
			goto done;
		}

		next = block_range__next(entry);
		if (!next)
			goto add_tail;

		/*
		 * If @end is in beyond @entry but not inside @next, add tail.
		 */
		if (end < next->start) { /* add tail: [...e->end][...end] */
			struct block_range *tail;
add_tail:
			tail = malloc(sizeof(struct block_range));
			if (!tail)
				return iter;

			*tail = (struct block_range){
				.start = entry->end + 1,
				.end = end,
				.is_target = 0,
				.is_branch = 1,
			};

			rb_link_right_of_node(&tail->node, &entry->node);
			rb_insert_color(&tail->node, &block_ranges.root);
			block_range__debug();

			iter.end = tail;
			goto done;
		}

		/*
		 * If there is a hole between @entry and @next, fill it.
		 */
		if (entry->end + 1 != next->start) {
			struct block_range *hole = malloc(sizeof(struct block_range));
			if (!hole)
				return iter;

			*hole = (struct block_range){
				.start = entry->end + 1,
				.end = next->start - 1,
				.is_target = 0,
				.is_branch = 0,
			};

			rb_link_left_of_node(&hole->node, &next->node);
			rb_insert_color(&hole->node, &block_ranges.root);
			block_range__debug();
		}

		entry = next;
	}

done:
	/* The chain must begin exactly at @start and end exactly at @end. */
	assert(iter.start->start == start && iter.start->is_target);
	assert(iter.end->end == end && iter.end->is_branch);
	block_ranges.blocks++;
	return iter;
}
  249. /*
  250. * Compute coverage as:
  251. *
  252. * br->coverage / br->sym->max_coverage
  253. *
  254. * This ensures each symbol has a 100% spot, to reflect that each symbol has a
  255. * most covered section.
  256. *
  257. * Returns [0-1] for coverage and -1 if we had no data what so ever or the
  258. * symbol does not exist.
  259. */
  260. double block_range__coverage(struct block_range *br)
  261. {
  262. struct symbol *sym;
  263. if (!br) {
  264. if (block_ranges.blocks)
  265. return 0;
  266. return -1;
  267. }
  268. sym = br->sym;
  269. if (!sym)
  270. return -1;
  271. return (double)br->coverage / symbol__annotation(sym)->max_coverage;
  272. }