symbol.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100
  1. #include <dirent.h>
  2. #include <errno.h>
  3. #include <stdlib.h>
  4. #include <stdio.h>
  5. #include <string.h>
  6. #include <sys/types.h>
  7. #include <sys/stat.h>
  8. #include <sys/param.h>
  9. #include <fcntl.h>
  10. #include <unistd.h>
  11. #include <inttypes.h>
  12. #include "annotate.h"
  13. #include "build-id.h"
  14. #include "util.h"
  15. #include "debug.h"
  16. #include "machine.h"
  17. #include "symbol.h"
  18. #include "strlist.h"
  19. #include "intlist.h"
  20. #include "header.h"
  21. #include <elf.h>
  22. #include <limits.h>
  23. #include <symbol/kallsyms.h>
  24. #include <sys/utsname.h>
/* Forward declarations for loaders defined later in this file. */
static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

/* Candidate vmlinux paths, filled in by the symbol initialization code. */
int vmlinux_path__nr_entries;
char **vmlinux_path;

/* Global symbol-handling configuration and its defaults. */
struct symbol_conf symbol_conf = {
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.annotate_src		= true,
	.demangle		= true,
	.demangle_kernel	= false,
	.cumulate_callchain	= true,
	.show_hist_headers	= true,
	.symfs			= "",
	.event_group		= true,
};
/*
 * Binary types probed, in order, when looking for a symbol table for a
 * dso.  The list is terminated by DSO_BINARY_TYPE__NOT_FOUND.
 */
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
  59. bool symbol_type__is_a(char symbol_type, enum map_type map_type)
  60. {
  61. symbol_type = toupper(symbol_type);
  62. switch (map_type) {
  63. case MAP__FUNCTION:
  64. return symbol_type == 'T' || symbol_type == 'W';
  65. case MAP__VARIABLE:
  66. return symbol_type == 'D';
  67. default:
  68. return false;
  69. }
  70. }
  71. static int prefix_underscores_count(const char *str)
  72. {
  73. const char *tail = str;
  74. while (*tail == '_')
  75. tail++;
  76. return tail - str;
  77. }
  78. int __weak arch__choose_best_symbol(struct symbol *syma,
  79. struct symbol *symb __maybe_unused)
  80. {
  81. /* Avoid "SyS" kernel syscall aliases */
  82. if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
  83. return SYMBOL_B;
  84. if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
  85. return SYMBOL_B;
  86. return SYMBOL_A;
  87. }
  88. static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
  89. {
  90. s64 a;
  91. s64 b;
  92. size_t na, nb;
  93. /* Prefer a symbol with non zero length */
  94. a = syma->end - syma->start;
  95. b = symb->end - symb->start;
  96. if ((b == 0) && (a > 0))
  97. return SYMBOL_A;
  98. else if ((a == 0) && (b > 0))
  99. return SYMBOL_B;
  100. /* Prefer a non weak symbol over a weak one */
  101. a = syma->binding == STB_WEAK;
  102. b = symb->binding == STB_WEAK;
  103. if (b && !a)
  104. return SYMBOL_A;
  105. if (a && !b)
  106. return SYMBOL_B;
  107. /* Prefer a global symbol over a non global one */
  108. a = syma->binding == STB_GLOBAL;
  109. b = symb->binding == STB_GLOBAL;
  110. if (a && !b)
  111. return SYMBOL_A;
  112. if (b && !a)
  113. return SYMBOL_B;
  114. /* Prefer a symbol with less underscores */
  115. a = prefix_underscores_count(syma->name);
  116. b = prefix_underscores_count(symb->name);
  117. if (b > a)
  118. return SYMBOL_A;
  119. else if (a > b)
  120. return SYMBOL_B;
  121. /* Choose the symbol with the longest name */
  122. na = strlen(syma->name);
  123. nb = strlen(symb->name);
  124. if (na > nb)
  125. return SYMBOL_A;
  126. else if (na < nb)
  127. return SYMBOL_B;
  128. return arch__choose_best_symbol(syma, symb);
  129. }
/*
 * Remove duplicate entries (same start address) from @symbols, keeping
 * only the one choose_best_symbol() prefers.  No-op when the user asked
 * to keep aliases.
 */
void symbols__fixup_duplicate(struct rb_root *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	if (symbol_conf.allow_aliases)
		return;

	nd = rb_first(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		/* rb_entry() is pointer arithmetic only; nd is checked below. */
		next = rb_entry(nd, struct symbol, rb_node);
		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			/* curr wins: drop next and re-compare curr vs. the new next. */
			rb_erase(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			/* next wins: drop curr; the outer loop resumes at next. */
			nd = rb_next(&curr->rb_node);
			rb_erase(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}
/*
 * Give zero-length symbols an end address: each ends where the next
 * begins; the last one is extended past the next page boundary since
 * its real size is unknown.
 */
void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}
/*
 * Make the maps of the given type contiguous: each map ends where the
 * next one starts.  Takes the maps lock for writing.
 */
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct maps *maps = &mg->maps[type];
	struct map *next, *curr;

	pthread_rwlock_wrlock(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		curr->end = next->start;
		curr = next;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	curr->end = ~0ULL;

out_unlock:
	pthread_rwlock_unlock(&maps->lock);
}
/*
 * Allocate and initialize a struct symbol.  The single allocation holds
 * symbol_conf.priv_size bytes of private data *before* the symbol and
 * the NUL-terminated copy of @name right after it; symbol__delete()
 * undoes the offset when freeing.  A @len of 0 produces an empty symbol
 * (end == start) to be fixed up later by symbols__fixup_end().
 * Returns NULL on allocation failure.
 */
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			/* The private area starts with the annotation state. */
			struct annotation *notes = (void *)sym;
			pthread_mutex_init(&notes->lock, NULL);
		}
		/* Callers get a pointer past the private area. */
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}
  217. void symbol__delete(struct symbol *sym)
  218. {
  219. free(((void *)sym) - symbol_conf.priv_size);
  220. }
  221. void symbols__delete(struct rb_root *symbols)
  222. {
  223. struct symbol *pos;
  224. struct rb_node *next = rb_first(symbols);
  225. while (next) {
  226. pos = rb_entry(next, struct symbol, rb_node);
  227. next = rb_next(&pos->rb_node);
  228. rb_erase(&pos->rb_node, symbols);
  229. symbol__delete(pos);
  230. }
  231. }
/*
 * Insert @sym into @symbols, ordered by start address.  For @kernel
 * symbols also set ->idle when the name is a known idle-loop function,
 * after stripping the leading '.' that ppc64 function descriptors add.
 */
void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and appends a '.' to the
		 * start of every instruction address. Remove it.
		 */
		if (name[0] == '.')
			name++;
		sym->idle = symbol__is_idle(name);
	}

	/* Standard rb-tree descent; duplicates go to the right. */
	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, symbols);
}
/* Insert a non-kernel symbol; see __symbols__insert(). */
void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}
  263. static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
  264. {
  265. struct rb_node *n;
  266. if (symbols == NULL)
  267. return NULL;
  268. n = symbols->rb_node;
  269. while (n) {
  270. struct symbol *s = rb_entry(n, struct symbol, rb_node);
  271. if (ip < s->start)
  272. n = n->rb_left;
  273. else if (ip > s->end || (ip == s->end && ip != s->start))
  274. n = n->rb_right;
  275. else
  276. return s;
  277. }
  278. return NULL;
  279. }
  280. static struct symbol *symbols__first(struct rb_root *symbols)
  281. {
  282. struct rb_node *n = rb_first(symbols);
  283. if (n)
  284. return rb_entry(n, struct symbol, rb_node);
  285. return NULL;
  286. }
  287. static struct symbol *symbols__last(struct rb_root *symbols)
  288. {
  289. struct rb_node *n = rb_last(symbols);
  290. if (n)
  291. return rb_entry(n, struct symbol, rb_node);
  292. return NULL;
  293. }
  294. static struct symbol *symbols__next(struct symbol *sym)
  295. {
  296. struct rb_node *n = rb_next(&sym->rb_node);
  297. if (n)
  298. return rb_entry(n, struct symbol, rb_node);
  299. return NULL;
  300. }
/*
 * Insert @sym into the name-sorted tree @symbols.  @sym must be
 * embedded in a struct symbol_name_rb_node, whose rb_node gets linked.
 */
static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, symbols);
}
  318. static void symbols__sort_by_name(struct rb_root *symbols,
  319. struct rb_root *source)
  320. {
  321. struct rb_node *nd;
  322. for (nd = rb_first(source); nd; nd = rb_next(nd)) {
  323. struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
  324. symbols__insert_by_name(symbols, pos);
  325. }
  326. }
/*
 * Look @name up in the name-sorted tree @symbols (nodes are struct
 * symbol_name_rb_node).  When several entries compare equal, rewind so
 * the first of them is returned.  Returns NULL when absent.
 */
static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = arch__compare_symbol_names(name, s->sym.name);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	/* return first symbol that has same name (if any) */
	for (n = rb_prev(n); n; n = rb_prev(n)) {
		struct symbol_name_rb_node *tmp;

		tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);

		/* Stop at the first predecessor with a different name. */
		if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
			break;

		s = tmp;
	}

	return &s->sym;
}
  358. void dso__reset_find_symbol_cache(struct dso *dso)
  359. {
  360. enum map_type type;
  361. for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
  362. dso->last_find_result[type].addr = 0;
  363. dso->last_find_result[type].symbol = NULL;
  364. }
  365. }
/*
 * Insert @sym into @dso's tree for @type and refresh the one-entry
 * lookup cache when the cached address falls inside the new symbol
 * (or matches it exactly for empty symbols).
 */
void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
{
	__symbols__insert(&dso->symbols[type], sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result[type].addr >= sym->start &&
	    (dso->last_find_result[type].addr < sym->end ||
	     sym->start == sym->end)) {
		dso->last_find_result[type].symbol = sym;
	}
}
  376. struct symbol *dso__find_symbol(struct dso *dso,
  377. enum map_type type, u64 addr)
  378. {
  379. if (dso->last_find_result[type].addr != addr) {
  380. dso->last_find_result[type].addr = addr;
  381. dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
  382. }
  383. return dso->last_find_result[type].symbol;
  384. }
/* Lowest-address symbol of @dso for @type, or NULL. */
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}
/* Highest-address symbol of @dso for @type, or NULL. */
struct symbol *dso__last_symbol(struct dso *dso, enum map_type type)
{
	return symbols__last(&dso->symbols[type]);
}
/* Next symbol after @sym in address order, or NULL. */
struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}
/*
 * Next symbol after @sym in name order; @sym must be embedded in a
 * struct symbol_name_rb_node.  Returns NULL at the end of the tree.
 */
struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}
/*
 * Returns the first symbol that matches @name.
 */
/* First symbol named @name in @dso's name-sorted tree, or NULL. */
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	return symbols__find_by_name(&dso->symbol_names[type], name);
}
  411. void dso__sort_by_name(struct dso *dso, enum map_type type)
  412. {
  413. dso__set_sorted_by_name(dso, type);
  414. return symbols__sort_by_name(&dso->symbol_names[type],
  415. &dso->symbols[type]);
  416. }
  417. int modules__parse(const char *filename, void *arg,
  418. int (*process_module)(void *arg, const char *name,
  419. u64 start))
  420. {
  421. char *line = NULL;
  422. size_t n;
  423. FILE *file;
  424. int err = 0;
  425. file = fopen(filename, "r");
  426. if (file == NULL)
  427. return -1;
  428. while (1) {
  429. char name[PATH_MAX];
  430. u64 start;
  431. char *sep;
  432. ssize_t line_len;
  433. line_len = getline(&line, &n, file);
  434. if (line_len < 0) {
  435. if (feof(file))
  436. break;
  437. err = -1;
  438. goto out;
  439. }
  440. if (!line) {
  441. err = -1;
  442. goto out;
  443. }
  444. line[--line_len] = '\0'; /* \n */
  445. sep = strrchr(line, 'x');
  446. if (sep == NULL)
  447. continue;
  448. hex2u64(sep + 1, &start);
  449. sep = strchr(line, ' ');
  450. if (sep == NULL)
  451. continue;
  452. *sep = '\0';
  453. scnprintf(name, sizeof(name), "[%s]", line);
  454. err = process_module(arg, name, start);
  455. if (err)
  456. break;
  457. }
  458. out:
  459. free(line);
  460. fclose(file);
  461. return err;
  462. }
/* Carries the target map and dso through kallsyms__parse() callbacks. */
struct process_kallsyms_args {
	struct map *map;
	struct dso *dso;
};
/*
 * The idle-loop symbols below live in the kernel image, so only call
 * this for names coming from a kernel DSO.
 */
  471. static bool symbol__is_idle(const char *name)
  472. {
  473. const char * const idle_symbols[] = {
  474. "cpu_idle",
  475. "cpu_startup_entry",
  476. "intel_idle",
  477. "default_idle",
  478. "native_safe_halt",
  479. "enter_idle",
  480. "exit_idle",
  481. "mwait_idle",
  482. "mwait_idle_with_hints",
  483. "poll_idle",
  484. "ppc64_runlatch_off",
  485. "pseries_dedicated_idle_sleep",
  486. NULL
  487. };
  488. int i;
  489. for (i = 0; idle_symbols[i]; i++) {
  490. if (!strcmp(idle_symbols[i], name))
  491. return true;
  492. }
  493. return false;
  494. }
/*
 * kallsyms__parse() callback: turn one kallsyms line into a struct
 * symbol in the dso tree for the map's type.  Lines whose type char
 * does not fit the map type are skipped (not an error).
 * Returns 0 or -ENOMEM.
 */
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module.
	 * Module symbols carry a "[module]" suffix, hence the strchr().
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 * Returns the number of symbols read, or a negative error.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };

	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
/*
 * Re-home the symbols loaded from kallsyms into the maps already set up
 * for a kcore image: each symbol moves into the map covering its start
 * address and is rebased to that map's file offset.  Symbols falling
 * outside every map are dropped.  Returns the number of symbols kept,
 * or -1 when @map has no kmaps.
 */
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root old_root = dso->symbols[map->type];
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	if (!kmaps)
		return -1;

	/* Detach all symbols; they are re-inserted per target map below. */
	*root = RB_ROOT;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_init(&pos->rb_node, &old_root);

		/* Strip the "\t[module]" suffix kallsyms adds. */
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}

		/* Rebase into the target map's address space. */
		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 *
 * @delta is the boot-time kernel relocation offset, subtracted from
 * non-module symbols.  Returns the number of symbols kept (in the
 * original map plus those moved to other maps), or -1 on failure.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct machine *machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		/* kallsyms appends "\t[module]" to module symbols. */
		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in * kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
						map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			/*
			 * Back to plain kernel symbols after a run of module
			 * symbols: open a new [kernel].N (or [guest.kernel].N)
			 * map for the remaining kernel range.
			 */
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					"[guest.kernel].%d",
					kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					"[kernel].%d",
					kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != map) {
			/* Move the symbol into the map it belongs to. */
			rb_erase(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		/* Mark the trailing module map as loaded, as above. */
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}
  681. bool symbol__restricted_filename(const char *filename,
  682. const char *restricted_filename)
  683. {
  684. bool restricted = false;
  685. if (symbol_conf.kptr_restrict) {
  686. char *r = realpath(filename, NULL);
  687. if (r != NULL) {
  688. restricted = strcmp(r, restricted_filename) == 0;
  689. free(r);
  690. return restricted;
  691. }
  692. }
  693. return restricted;
  694. }
/* One /proc/modules entry: module name and its load address. */
struct module_info {
	struct rb_node rb_node;
	char *name;	/* strdup()ed; freed by delete_modules() */
	u64 start;
};
  700. static void add_module(struct module_info *mi, struct rb_root *modules)
  701. {
  702. struct rb_node **p = &modules->rb_node;
  703. struct rb_node *parent = NULL;
  704. struct module_info *m;
  705. while (*p != NULL) {
  706. parent = *p;
  707. m = rb_entry(parent, struct module_info, rb_node);
  708. if (strcmp(mi->name, m->name) < 0)
  709. p = &(*p)->rb_left;
  710. else
  711. p = &(*p)->rb_right;
  712. }
  713. rb_link_node(&mi->rb_node, parent, p);
  714. rb_insert_color(&mi->rb_node, modules);
  715. }
  716. static void delete_modules(struct rb_root *modules)
  717. {
  718. struct module_info *mi;
  719. struct rb_node *next = rb_first(modules);
  720. while (next) {
  721. mi = rb_entry(next, struct module_info, rb_node);
  722. next = rb_next(&mi->rb_node);
  723. rb_erase(&mi->rb_node, modules);
  724. zfree(&mi->name);
  725. free(mi);
  726. }
  727. }
  728. static struct module_info *find_module(const char *name,
  729. struct rb_root *modules)
  730. {
  731. struct rb_node *n = modules->rb_node;
  732. while (n) {
  733. struct module_info *m;
  734. int cmp;
  735. m = rb_entry(n, struct module_info, rb_node);
  736. cmp = strcmp(name, m->name);
  737. if (cmp < 0)
  738. n = n->rb_left;
  739. else if (cmp > 0)
  740. n = n->rb_right;
  741. else
  742. return m;
  743. }
  744. return NULL;
  745. }
  746. static int __read_proc_modules(void *arg, const char *name, u64 start)
  747. {
  748. struct rb_root *modules = arg;
  749. struct module_info *mi;
  750. mi = zalloc(sizeof(struct module_info));
  751. if (!mi)
  752. return -ENOMEM;
  753. mi->name = strdup(name);
  754. mi->start = start;
  755. if (!mi->name) {
  756. free(mi);
  757. return -ENOMEM;
  758. }
  759. add_module(mi, modules);
  760. return 0;
  761. }
/*
 * Read a /proc/modules-style file into @modules.  Refuses restricted
 * files under kptr_restrict.  On parse failure the partially built tree
 * is freed.  Returns 0 on success, -1 otherwise.
 */
static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}
/*
 * Compare the module lists in two /proc/modules-style files.
 * Returns 0 when both list the same modules at the same addresses,
 * -1 otherwise (including on read failure).
 */
int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	/* Both trees are name-sorted, so walk them in lock-step. */
	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);

	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	/* Equal only when both walks finished together with no mismatch. */
	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}
/*
 * Validate that every module map currently in @kmaps still matches the
 * modules file @filename (a /proc/modules-format file): each non-kernel map
 * must name a module present in the file at exactly the same start address.
 *
 * Returns 0 when all module maps match, a negative error from reading the
 * file, or -EINVAL when a module moved or disappeared.
 */
static int do_validate_kcore_modules(const char *filename, struct map *map,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		/* Grab the successor first; the loop body may skip ahead. */
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (old_map == map || old_map->start == map->start) {
			/* The kernel map */
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}
  833. /*
  834. * If kallsyms is referenced by name then we look for filename in the same
  835. * directory.
  836. */
  837. static bool filename_from_kallsyms_filename(char *filename,
  838. const char *base_name,
  839. const char *kallsyms_filename)
  840. {
  841. char *name;
  842. strcpy(filename, kallsyms_filename);
  843. name = strrchr(filename, '/');
  844. if (!name)
  845. return false;
  846. name += 1;
  847. if (!strcmp(name, "kallsyms")) {
  848. strcpy(name, base_name);
  849. return true;
  850. }
  851. return false;
  852. }
  853. static int validate_kcore_modules(const char *kallsyms_filename,
  854. struct map *map)
  855. {
  856. struct map_groups *kmaps = map__kmaps(map);
  857. char modules_filename[PATH_MAX];
  858. if (!kmaps)
  859. return -EINVAL;
  860. if (!filename_from_kallsyms_filename(modules_filename, "modules",
  861. kallsyms_filename))
  862. return -EINVAL;
  863. if (do_validate_kcore_modules(modules_filename, map, kmaps))
  864. return -EINVAL;
  865. return 0;
  866. }
/*
 * Verify that @kallsyms_filename describes the kernel behind @map: the
 * relocation reference symbol (if one is set) must resolve to its recorded
 * address, and all module maps must match (validate_kcore_modules()).
 * Returns 0 on success, -EINVAL on any mismatch.
 */
static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		/* A different address means a relocated/different kernel. */
		start = kallsyms__get_function_start(kallsyms_filename,
						     kmap->ref_reloc_sym->name);
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}
/*
 * Context handed to kcore_mapfn() while reading kcore program headers:
 * the dso and map type for newly created maps, plus a temporary list
 * (linked via map->node) collecting them before insertion into kmaps.
 */
struct kcore_mapfn_data {
	struct dso *dso;
	enum map_type type;
	struct list_head maps;
};
  887. static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
  888. {
  889. struct kcore_mapfn_data *md = data;
  890. struct map *map;
  891. map = map__new2(start, md->dso, md->type);
  892. if (map == NULL)
  893. return -ENOMEM;
  894. map->end = map->start + len;
  895. map->pgoff = pgoff;
  896. list_add(&map->node, &md->maps);
  897. return 0;
  898. }
/*
 * Replace the kernel and module maps of @map's machine with maps derived
 * from a kcore file found next to @kallsyms_filename, so that object code
 * can later be read from kcore via dso__data_read_addr().
 *
 * Preconditions: @map must be the machine's kernel map, and the kcore file
 * must describe the currently loaded kernel/modules (validated against
 * kallsyms).  Returns 0 on success, -EINVAL/-ENOMEM on failure; on failure
 * the kmaps are left with the old maps removed only if an error occurs
 * after the removal loop below never runs (errors before it leave kmaps
 * untouched).
 */
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct machine *machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	struct symbol *sym;

	if (!kmaps)
		return -EINVAL;

	machine = kmaps->machine;

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	/* Look for "kcore" in the same directory as the kallsyms file. */
	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		/* Keep the kernel map itself; it is updated in place below. */
		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	/* Fall back to the first kcore map when no symbol pinpoints one. */
	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del_init(&new_map->node);
		if (new_map == replacement_map) {
			/*
			 * Update the existing kernel map in place rather than
			 * inserting new_map, so external references stay valid.
			 */
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
			map__get(map);
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
			map__put(map);
		} else {
			map_groups__insert(kmaps, new_map);
		}

		/* Drop the list's reference; kmaps holds its own now. */
		map__put(new_map);
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	/* Free any maps queued before the failure. */
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del_init(&map->node);
		map__put(map);
	}
	close(fd);
	return -EINVAL;
}
  1004. /*
  1005. * If the kernel is relocated at boot time, kallsyms won't match. Compute the
  1006. * delta based on the relocation reference symbol.
  1007. */
  1008. static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
  1009. {
  1010. struct kmap *kmap = map__kmap(map);
  1011. u64 addr;
  1012. if (!kmap)
  1013. return -1;
  1014. if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
  1015. return 0;
  1016. addr = kallsyms__get_function_start(filename,
  1017. kmap->ref_reloc_sym->name);
  1018. if (!addr)
  1019. return -1;
  1020. *delta = addr - kmap->ref_reloc_sym->addr;
  1021. return 0;
  1022. }
/*
 * Load kernel symbols for @dso/@map from a kallsyms-format file.  When
 * @no_kcore is false and a matching kcore is found next to @filename, split
 * the symbols across the kcore-derived maps; otherwise split by module
 * markers with the boot-relocation @delta applied.  Returns the symbol
 * count (>= 0) on success, -1 on failure.
 */
int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (dso__load_all_kallsyms(dso, filename, map) < 0)
		return -1;

	if (kallsyms__delta(map, filename, &delta))
		return -1;

	symbols__fixup_end(&dso->symbols[map->type]);
	symbols__fixup_duplicate(&dso->symbols[map->type]);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	/* Prefer kcore-backed maps when available and not forbidden. */
	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return dso__split_kallsyms_for_kcore(dso, map);
	else
		return dso__split_kallsyms(dso, map, delta);
}
  1044. int dso__load_kallsyms(struct dso *dso, const char *filename,
  1045. struct map *map)
  1046. {
  1047. return __dso__load_kallsyms(dso, filename, map, false);
  1048. }
/*
 * Load symbols from a /tmp/perf-<pid>.map style file: one symbol per line
 * in the form "<start-hex> <size-hex> <name>".  Malformed lines are
 * skipped.  Returns the number of symbols added, or -1 on failure.
 */
static int dso__load_perf_map(struct dso *dso, struct map *map)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(dso->long_name, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		/* Defensive: getline() returning >= 0 implies line != NULL. */
		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;	/* skip the separating space */
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;	/* skip the separating space */
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(&dso->symbols[map->type], sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}
/*
 * Decide whether a candidate binary @type is worth probing for this @dso:
 * user-space debug formats only apply to user dsos that are not kernel
 * modules, vmlinux/kallsyms/kcore variants must match the dso's kernel
 * flavour (host vs guest), and module formats must match the symtab type
 * recorded when the module dso was created.
 */
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__findnew_module_map().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		/* The build-id cache can hold any of the above. */
		return true;

	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}
/*
 * Top-level symbol loader for a dso.  Under dso->lock:
 *   - kernel dsos are delegated to the kernel/guest-kernel loaders;
 *   - "/tmp/perf-" JIT map files are parsed directly (with an ownership
 *     check unless symbol_conf.force is set);
 *   - otherwise iterate the candidate binary types, pick one source for
 *     the symtab and one for runtime sections (plt), and load from those.
 *
 * Returns the number of symbols loaded (> 0), 0/negative on failure, or 1
 * when the dso was already loaded.  Always marks the dso loaded on exit so
 * the work is not retried.
 */
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	unsigned char build_id[BUILD_ID_SIZE];

	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso, map->type)) {
		ret = 1;
		goto out;
	}

	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			ret = dso__load_guest_kernel_sym(dso, map);
		goto out;
	}

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	/* Per-process JIT map files are parsed directly, not via ELF. */
	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
		struct stat st;

		if (lstat(dso->name, &st) < 0)
			goto out;

		/* Refuse other users' map files unless forced (-f). */
		if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it (use -f to override).\n", dso->name);
			goto out;
		}

		ret = dso__load_perf_map(dso, map);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso->has_build_id &&
	    is_regular_file(dso->long_name) &&
	    filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
		dso__set_build_id(dso, build_id);

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (!is_regular_file(name))
			continue;

		/* Name is now the name of the next image to try */
		if (symsrc__init(ss, dso, name, symtab_type) < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			/* Both sources found: no need to probe further. */
			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	/* A build-id-cache hit may actually be a kernel module image. */
	if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		if (dso__build_id_is_kmod(dso, name, PATH_MAX))
			kmod = true;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);

	/* Deleted binaries cannot be loaded; treat as "no symbols". */
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso, map->type);
	pthread_mutex_unlock(&dso->lock);

	return ret;
}
  1256. struct map *map_groups__find_by_name(struct map_groups *mg,
  1257. enum map_type type, const char *name)
  1258. {
  1259. struct maps *maps = &mg->maps[type];
  1260. struct map *map;
  1261. pthread_rwlock_rdlock(&maps->lock);
  1262. for (map = maps__first(maps); map; map = map__next(map)) {
  1263. if (map->dso && strcmp(map->dso->short_name, name) == 0)
  1264. goto out_unlock;
  1265. }
  1266. map = NULL;
  1267. out_unlock:
  1268. pthread_rwlock_unlock(&maps->lock);
  1269. return map;
  1270. }
/*
 * Load kernel symbols from a specific vmlinux image.  Relative paths are
 * resolved under the configured symfs.  On success the dso's binary type
 * and long name are updated (taking ownership of @vmlinux when
 * @vmlinux_allocated is true) and the map is marked loaded.
 * Returns the number of symbols (> 0) on success, <= 0 on failure.
 */
int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	/* Absolute paths bypass the symfs prefix. */
	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	/* vmlinux serves as both symbol and runtime source. */
	err = dso__load_sym(dso, map, &ss, &ss, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso, map->type);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}
  1301. int dso__load_vmlinux_path(struct dso *dso, struct map *map)
  1302. {
  1303. int i, err = 0;
  1304. char *filename = NULL;
  1305. pr_debug("Looking at the vmlinux_path (%d entries long)\n",
  1306. vmlinux_path__nr_entries + 1);
  1307. for (i = 0; i < vmlinux_path__nr_entries; ++i) {
  1308. err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
  1309. if (err > 0)
  1310. goto out;
  1311. }
  1312. if (!symbol_conf.ignore_vmlinux_buildid)
  1313. filename = dso__build_id_filename(dso, NULL, 0);
  1314. if (filename != NULL) {
  1315. err = dso__load_vmlinux(dso, map, filename, true);
  1316. if (err > 0)
  1317. goto out;
  1318. free(filename);
  1319. }
  1320. out:
  1321. return err;
  1322. }
  1323. static bool visible_dir_filter(const char *name, struct dirent *d)
  1324. {
  1325. if (d->d_type != DT_DIR)
  1326. return false;
  1327. return lsdir_no_dot_filter(name, d);
  1328. }
/*
 * Scan the subdirectories of @dir (a per-build-id kcore cache directory)
 * for a kallsyms file whose addresses match the running kernel @map.
 * On success the matching kallsyms path is written back into @dir (which
 * holds @dir_sz bytes) and 0 is returned; otherwise -1.
 */
static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	int ret = -1;
	struct strlist *dirs;
	struct str_node *nd;

	dirs = lsdir(dir, visible_dir_filter);
	if (!dirs)
		return -1;

	strlist__for_each_entry(nd, dirs) {
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, nd->s);
		/* First subdirectory that validates wins. */
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	strlist__delete(dirs);

	return ret;
}
  1350. /*
  1351. * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
  1352. * since access(R_OK) only checks with real UID/GID but open() use effective
  1353. * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
  1354. */
  1355. static bool filename__readable(const char *file)
  1356. {
  1357. int fd = open(file, O_RDONLY);
  1358. if (fd < 0)
  1359. return false;
  1360. close(fd);
  1361. return true;
  1362. }
/*
 * Choose the best kallsyms source for @dso and return a freshly allocated
 * path (caller frees), or NULL when nothing usable exists.  Preference
 * order: /proc/kallsyms when the dso's build-id matches the running kernel
 * (fast path, kcore readable), then a kcore-backed kallsyms in the build-id
 * cache, then plain /proc/kallsyms for the host, then a cached kallsyms
 * copy by build-id.
 */
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	/* Does this dso's build-id match the running kernel's? */
	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
		/* NOTE: label inside the if is also the target of earlier gotos. */
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}
/*
 * Load symbols for the host kernel dso: user-specified kallsyms or vmlinux
 * first, then the vmlinux search path, then an automatically chosen
 * kallsyms source.  Returns the symbol count (> 0) on success, <= 0 on
 * failure.
 */
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fallback to another vmlinux (a
	 * x86_86 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	/* kcore already set its own binary type/name; only adjust otherwise. */
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
/*
 * Load symbols for a guest kernel dso.  The "default guest" uses the
 * vmlinux/kallsyms files given on the command line; a specific guest uses
 * <root_dir>/proc/kallsyms.  Returns symbol count (> 0) on success.
 */
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map hasn't the point to groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use file guest_kallsyms inputted by user on commandline
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		/*
		 * NOTE(review): unbounded sprintf — assumes root_dir plus
		 * "/proc/kallsyms" fits in PATH_MAX; confirm callers bound
		 * machine->root_dir.
		 */
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		machine__mmap_name(machine, path, sizeof(path));
		dso__set_long_name(dso, strdup(path), true);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
  1505. static void vmlinux_path__exit(void)
  1506. {
  1507. while (--vmlinux_path__nr_entries >= 0)
  1508. zfree(&vmlinux_path[vmlinux_path__nr_entries]);
  1509. vmlinux_path__nr_entries = 0;
  1510. zfree(&vmlinux_path);
  1511. }
/* Fixed vmlinux locations that do not depend on the kernel release. */
static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

/* Release-dependent locations; each "%s" is filled with the kernel version. */
static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};
  1523. static int vmlinux_path__add(const char *new_entry)
  1524. {
  1525. vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
  1526. if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
  1527. return -1;
  1528. ++vmlinux_path__nr_entries;
  1529. return 0;
  1530. }
/*
 * Build the global vmlinux_path list: the fixed candidates first, then the
 * kernel-release-dependent ones using env->os_release (or uname) — the
 * latter only when no symfs was configured.  Returns 0 on success, -1 on
 * failure (with any partial list freed).
 */
static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	/* Room for both candidate tables; entries are filled incrementally. */
	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		/* Use the recorded session's kernel version, not the host's. */
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}
  1564. int setup_list(struct strlist **list, const char *list_str,
  1565. const char *list_name)
  1566. {
  1567. if (list_str == NULL)
  1568. return 0;
  1569. *list = strlist__new(list_str, NULL);
  1570. if (!*list) {
  1571. pr_err("problems parsing %s list\n", list_name);
  1572. return -1;
  1573. }
  1574. symbol_conf.has_filter = true;
  1575. return 0;
  1576. }
  1577. int setup_intlist(struct intlist **list, const char *list_str,
  1578. const char *list_name)
  1579. {
  1580. if (list_str == NULL)
  1581. return 0;
  1582. *list = intlist__new(list_str);
  1583. if (!*list) {
  1584. pr_err("problems parsing %s list\n", list_name);
  1585. return -1;
  1586. }
  1587. return 0;
  1588. }
  1589. static bool symbol__read_kptr_restrict(void)
  1590. {
  1591. bool value = false;
  1592. FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
  1593. if (fp != NULL) {
  1594. char line[8];
  1595. if (fgets(line, sizeof(line), fp) != NULL)
  1596. value = (geteuid() != 0) ?
  1597. (atoi(line) != 0) :
  1598. (atoi(line) == 2);
  1599. fclose(fp);
  1600. }
  1601. return value;
  1602. }
/*
 * Reserve per-symbol private space for annotation data.  Must be called
 * before symbol__init() (priv_size is consumed there); calling it again
 * afterwards is an error, and repeated calls before are a harmless no-op.
 */
int symbol__annotation_init(void)
{
	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be init before symbol__init()\n");
		return -1;
	}

	if (symbol_conf.init_annotation) {
		pr_warning("Annotation being initialized multiple times\n");
		return 0;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}
/*
 * One-time initialization of the symbol subsystem: per-symbol private
 * space, ELF backend, vmlinux search path, and the dso/comm/pid/tid/symbol
 * filter lists from symbol_conf.  Idempotent (returns 0 if already
 * initialized).  On failure the lists allocated so far are torn down via
 * the goto chain and -1 is returned.
 */
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	/*
	 * A path to symbols of "/" is identical to ""
	 * reset here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	/* Free only the realpath() allocation, never the original string. */
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}
  1672. void symbol__exit(void)
  1673. {
  1674. if (!symbol_conf.initialized)
  1675. return;
  1676. strlist__delete(symbol_conf.sym_list);
  1677. strlist__delete(symbol_conf.dso_list);
  1678. strlist__delete(symbol_conf.comm_list);
  1679. intlist__delete(symbol_conf.tid_list);
  1680. intlist__delete(symbol_conf.pid_list);
  1681. vmlinux_path__exit();
  1682. symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
  1683. symbol_conf.initialized = false;
  1684. }
  1685. int symbol__config_symfs(const struct option *opt __maybe_unused,
  1686. const char *dir, int unset __maybe_unused)
  1687. {
  1688. char *bf = NULL;
  1689. int ret;
  1690. symbol_conf.symfs = strdup(dir);
  1691. if (symbol_conf.symfs == NULL)
  1692. return -ENOMEM;
  1693. /* skip the locally configured cache if a symfs is given, and
  1694. * config buildid dir to symfs/.debug
  1695. */
  1696. ret = asprintf(&bf, "%s/%s", dir, ".debug");
  1697. if (ret < 0)
  1698. return -ENOMEM;
  1699. set_buildid_dir(bf);
  1700. free(bf);
  1701. return 0;
  1702. }