dso.c

#include <asm/bug.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
#include "vdso.h"

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}
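
/*
 * Illustration (editor's note, using a hypothetical build id): for
 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO and a dso whose build id prints as
 * "abcdef0123456789abcdef0123456789abcdef01", the code above produces,
 * relative to symbol_conf.symfs:
 *
 *	/usr/lib/debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01.debug
 *
 * i.e. the first two hex characters become the directory and the rest the
 * file name, matching the layout used by distro debuginfo packages.
 */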

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

/*
 * Parses the kernel module specified in @path and updates
 * the @m argument as follows:
 *
 *    @comp - true if @path ends with a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *            base name of the kernel module without suffixes, otherwise the
 *            strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *            compression suffix
 *
 * Returns 0 if there was no strdup error, -ENOMEM otherwise.
 * (A worked example follows this function.)
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for a module name. For example,
	 * [aaa.bbb] is a valid module name, so '[' must take priority
	 * over the '.ko' suffix.
	 *
	 * The bracketed kernel names come from machine__mmap_name. Such a
	 * name belongs to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;
		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
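
/*
 * Worked example (editor's sketch; the paths are hypothetical and the
 * compression handling assumes HAVE_ZLIB_SUPPORT is built in): parsing
 * "/lib/modules/4.8.0/kernel/fs/ext4/ext4.ko.gz" with alloc_name and
 * alloc_ext set gives
 *
 *	m->comp = true		("gz" is a supported compression suffix)
 *	m->kmod = true		(".ko" sits right before the suffix)
 *	m->name = "[ext4]"	(bracketed base name, '-' mapped to '_')
 *	m->ext  = "gz"
 *
 * while a non-module path such as "/usr/lib64/libc-2.23.so" gives
 * m->kmod = false and m->name = "libc_2.23.so".
 */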

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	if (!is_regular_file(name)) {
		free(name);	/* don't leak the filename buffer on this error path */
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should need to change this
 * during a standard run.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the least recently opened dso if we crossed the allowed
 * limit of open dso file descriptors. The limit is half of RLIMIT_NOFILE,
 * e.g. with a soft limit of 1024 open files, at most 512 dso fds are kept.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find the dso's file, open it and
 * return its file descriptor. It must be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
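
/*
 * Typical get/put pairing (editor's sketch; dso__type() later in this file
 * follows the same pattern). The open lock stays held between the two
 * calls, so the critical section should stay short:
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... use fd, e.g. read the ELF header ...
 *		dso__data_put_fd(dso);
 *	}
 */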

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}

static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if another thread opened
		 * another file (dso) due to the open file limit
		 * (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks kept
 * in the rb_tree. Any read of already cached data is served from the
 * cache without touching the file again.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_read() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso's mapped virtual address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}
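
/*
 * Example (editor's sketch; the variable names are hypothetical): reading a
 * few raw bytes, e.g. instruction bytes, that a map says live at address ip:
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_addr(dso, map, machine, ip,
 *					buf, sizeof(buf));
 *	if (n > 0)
 *		... the first n bytes of buf are valid ...
 *
 * map->map_ip() translates the virtual address into a dso file offset, so
 * the caller only has to deal in addresses.
 */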

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either one of the dso or name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning and put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Found matching dso */
			/*
			 * Core kernel DSOs may share the same long name.
			 * In that case the short names should differ, so
			 * compare the short names to tell the DSOs apart.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;

		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}
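
/*
 * Reference counting in a nutshell (editor's note): dso__new() returns a
 * dso with refcnt == 1, dso__get() takes an extra reference, and dso__put()
 * drops one, calling dso__delete() when the count reaches zero. A typical
 * standalone use, not tied to a struct dsos list, looks like:
 *
 *	struct dso *dso = dso__new("/tmp/example.so");	// hypothetical path
 *
 *	if (dso) {
 *		... use dso ...
 *		dso__put(dso);		// drops the last reference
 *	}
 *
 * When a dso is added to a dsos list, __dsos__add() grabs its own reference
 * (see the comment in __dsos__add() below).
 */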

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit && !dso__is_vdso(pos))
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;

	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put the dso here because __dsos__add() already got a reference */
		dso__put(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;

		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}