sortextable.c

/*
 * sortextable.c: Sort the kernel's exception table
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recordmcount.c which is:
 *
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 * Licensed under the GNU General Public License, version 2 (GPLv2).
 *
 * Restructured to fit Linux format, as well as other updates:
 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */
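
/*
 * The image is mapped MAP_SHARED and patched directly; the real work of
 * locating the __ex_table section and sorting it is done by do32()/do64(),
 * which are generated from sortextable.h further down.
 */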

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>
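
/*
 * Fallback definitions for e_machine values that an older <elf.h> may not
 * provide yet.
 */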
#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT	93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA	94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64	183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE	189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2	195
#endif

static int fd_map;	/* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
static struct stat sb;	/* Remember .st_size, etc. */
static jmp_buf jmpenv;	/* setjmp/longjmp per-file error escape */

/* setjmp() return values */
enum {
	SJ_SETJMP = 0,	/* hardwired first return */
	SJ_FAIL,
	SJ_SUCCEED
};

/* Per-file resource cleanup when multiple files. */
static void
cleanup(void)
{
	if (!mmap_failed)
		munmap(ehdr_curr, sb.st_size);
	close(fd_map);
}

static void __attribute__((noreturn))
fail_file(void)
{
	cleanup();
	longjmp(jmpenv, SJ_FAIL);
}

/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces. If successful, then mmap
 * avoids copying unused pieces; else just read the whole file.
 * Open for both read and write.
 */
static void *mmap_file(char const *fname)
{
	void *addr;

	fd_map = open(fname, O_RDWR);
	if (fd_map < 0 || fstat(fd_map, &sb) < 0) {
		perror(fname);
		fail_file();
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		fail_file();
	}
	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED,
		    fd_map, 0);
	if (addr == MAP_FAILED) {
		mmap_failed = 1;
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		fail_file();
	}
	return addr;
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}
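
/*
 * Endian-neutral accessors. do_file() points these at the be/le helpers
 * above according to the file's e_ident[EI_DATA], so the rest of the code
 * can read and write header fields without caring about byte order.
 */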
static uint64_t (*r8)(const uint64_t *);
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);

typedef void (*table_sort_t)(char *, int);

/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
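/* For example, SHN_ABS (0xfff1) becomes -15 and SHN_HIRESERVE (0xffff) becomes -1. */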

static inline int is_shndx_special(unsigned int i)
{
	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}

/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
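/*
 * When st_shndx is SHN_XINDEX, the real index does not fit in the 16-bit
 * field and is stored in the SHT_SYMTAB_SHNDX table instead, which is what
 * symtab_shndx_start points into.
 */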
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx != SHN_XINDEX)
		return shndx;
	return r(&symtab_shndx_start[sym_offs]);
}
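
/*
 * sortextable.h is included twice: the first pass generates the 32-bit
 * do32(), and the second pass, with SORTEXTABLE_64 defined, generates the
 * 64-bit do64(); the header keys its ElfXX types off that macro.
 */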
/* 32 bit and 64 bit are very similar */
#include "sortextable.h"
#define SORTEXTABLE_64
#include "sortextable.h"

static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	if (av < bv)
		return -1;
	if (av > bv)
		return 1;
	return 0;
}
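
/*
 * compare_relative_table() orders entries by their first 32-bit word,
 * compared as a signed value once the self-relative offsets have been
 * rebased (below). The x86 exception table uses three 32-bit self-relative
 * fields per 12-byte entry (on x86 these are the insn, fixup and handler
 * offsets), so each field is rebased by its own position before qsort()
 * moves entries around, then rebased back afterwards.
 */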
static void x86_sort_relative_table(char *extab_image, int image_size)
{
	int i;

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) + i, loc);
		w(r(loc + 1) + i + 4, loc + 1);
		w(r(loc + 2) + i + 8, loc + 2);

		i += sizeof(uint32_t) * 3;
	}

	qsort(extab_image, image_size / 12, 12, compare_relative_table);

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) - i, loc);
		w(r(loc + 1) - (i + 4), loc + 1);
		w(r(loc + 2) - (i + 8), loc + 2);

		i += sizeof(uint32_t) * 3;
	}
}
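
/*
 * Generic sorter for architectures whose exception table entries are a pair
 * of 32-bit self-relative offsets (8 bytes per entry), conceptually (a
 * sketch, not the kernel's literal declaration):
 *
 *	struct exception_table_entry {
 *		int insn;	// offset to the faulting instruction
 *		int fixup;	// offset to the fixup code
 *	};
 *
 * Both words are rebased by their own position before qsort() and restored
 * afterwards, mirroring what the kernel's runtime sort does.
 */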
static void sort_relative_table(char *extab_image, int image_size)
{
	int i;

	/*
	 * Do the same thing the runtime sort does, first normalize to
	 * being relative to the start of the section.
	 */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) + i, loc);
		i += 4;
	}

	qsort(extab_image, image_size / 8, 8, compare_relative_table);

	/* Now denormalize. */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) - i, loc);
		i += 4;
	}
}
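
/*
 * do_file(): map one vmlinux image, bind the endian accessors from
 * e_ident[EI_DATA], sanity-check the ELF header, pick a per-architecture
 * sort routine (leaving custom_sort NULL lets sortextable.h fall back to
 * its default sort), then hand off to do32() or do64() based on EI_CLASS.
 */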
static void
do_file(char const *const fname)
{
	table_sort_t custom_sort;
	Elf32_Ehdr *ehdr = mmap_file(fname);

	ehdr_curr = ehdr;
	switch (ehdr->e_ident[EI_DATA]) {
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		fail_file();
		break;
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	}  /* end switch */

	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
	||  (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
	||  ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		fail_file();
	}

	custom_sort = NULL;
	switch (r2(&ehdr->e_machine)) {
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		fail_file();
		break;
	case EM_386:
	case EM_X86_64:
		custom_sort = x86_sort_relative_table;
		break;
	case EM_S390:
	case EM_AARCH64:
	case EM_PARISC:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	}  /* end switch */

	switch (ehdr->e_ident[EI_CLASS]) {
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		fail_file();
		break;
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
		||  r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			fail_file();
		}
		do32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64: {
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
		||  r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			fail_file();
		}
		do64(ghdr, fname, custom_sort);
		break;
	}
	}  /* end switch */

	cleanup();
}
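
/*
 * Each file is processed inside its own setjmp() frame: fail_file()
 * longjmp()s back with SJ_FAIL, so a bad file only bumps n_error and the
 * remaining arguments are still processed. The exit status is non-zero if
 * any file failed.
 */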
int
main(int argc, char *argv[])
{
	int n_error = 0;	/* gcc-4.3.0 false positive complaint */
	int i;

	if (argc < 2) {
		fprintf(stderr, "usage: sortextable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		char *file = argv[i];
		int const sjval = setjmp(jmpenv);

		switch (sjval) {
		default:
			fprintf(stderr, "internal error: %s\n", file);
			exit(1);
			break;
		case SJ_SETJMP:    /* normal sequence */
			/* Avoid problems if early cleanup() */
			fd_map = -1;
			ehdr_curr = NULL;
			mmap_failed = 1;
			do_file(file);
			break;
		case SJ_FAIL:      /* error in do_file or below */
			++n_error;
			break;
		case SJ_SUCCEED:   /* premature success */
			/* do nothing */
			break;
		}  /* end switch */
	}
	return !!n_error;
}