kcov.c

// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};
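
/*
 * Example (illustrative sketch, not part of this file): the descriptor state
 * machine above as driven from user space. This assumes a kernel built with
 * CONFIG_KCOV, debugfs mounted at /sys/kernel/debug, and the uapi
 * <linux/kcov.h> header for the ioctl and mode constants; the traced
 * read(-1, NULL, 0) call is an arbitrary placeholder syscall.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kcov.h>
 *
 *	#define COVER_SIZE (64 << 10)	// buffer size in unsigned longs
 *
 *	int main(void)
 *	{
 *		unsigned long *cover, n, i;
 *		int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *
 *		if (fd == -1)
 *			exit(1);
 *		// Set the buffer size; must precede mmap() and KCOV_ENABLE.
 *		if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
 *			exit(1);
 *		// Map the coverage buffer shared with the kernel.
 *		cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if (cover == MAP_FAILED)
 *			exit(1);
 *		// Start collecting PCs for this task only.
 *		if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
 *			exit(1);
 *		__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *		read(-1, NULL, 0);	// code under test
 *		// cover[0] holds the PC count, PCs follow in cover[1..n].
 *		n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *		for (i = 0; i < n; i++)
 *			printf("0x%lx\n", cover[i + 1]);
 *		// Stop collection so the fd can be enabled for another task.
 *		if (ioctl(fd, KCOV_DISABLE, 0))
 *			exit(1);
 *		munmap(cover, COVER_SIZE * sizeof(unsigned long));
 *		close(fd);
 *		return 0;
 *	}
 */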

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	kcov_debug("size = %u\n", size);
	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			kcov_debug("rv = %px\n", area);
			return area;
		}
	}
	kcov_debug("rv = NULL\n");
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
					unsigned int size)
{
	kcov_debug("area = %px, size = %u\n", area, size);
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts; the paired barrier()/WRITE_ONCE() are in kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);
	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
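
/*
 * Example (illustrative sketch, not part of this file): how user space can
 * walk the buffer filled by write_comp_data() above after enabling the
 * device with KCOV_ENABLE(KCOV_TRACE_CMP). Each record occupies
 * KCOV_WORDS_PER_CMP (4) 64-bit words - type, arg1, arg2, caller ip - and
 * cover[0] holds the record count. KCOV_CMP_MASK and KCOV_CMP_CONST come
 * from the uapi <linux/kcov.h> header; dump_cmps() is a hypothetical helper
 * name.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <linux/kcov.h>
 *
 *	static void dump_cmps(const uint64_t *cover)
 *	{
 *		uint64_t i, n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *
 *		for (i = 0; i < n; i++) {
 *			uint64_t type = cover[i * 4 + 1];
 *			uint64_t arg1 = cover[i * 4 + 2];
 *			uint64_t arg2 = cover[i * 4 + 3];
 *			uint64_t ip   = cover[i * 4 + 4];
 *			// Operand size in bytes and constant-operand flag,
 *			// as encoded by KCOV_CMP_SIZE()/KCOV_CMP_CONST.
 *			unsigned int size = 1 << ((type & KCOV_CMP_MASK) >> 1);
 *			int is_const = type & KCOV_CMP_CONST;
 *
 *			printf("ip 0x%llx: cmp%u%s 0x%llx 0x%llx\n",
 *			       (unsigned long long)ip, size,
 *			       is_const ? " (const)" : "",
 *			       (unsigned long long)arg1,
 *			       (unsigned long long)arg2);
 *		}
 *	}
 */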

static void kcov_start(struct task_struct *t, unsigned int size,
			void *area, enum kcov_mode mode, int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
	t->kcov_sequence = sequence;
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov = NULL;
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;

	spin_lock(&kcov_remote_lock);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		kcov_debug("removing handle %llx\n", remote->handle);
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock(&kcov_remote_lock);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock(&kcov->lock);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we can make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		kcov_debug("KCOV_INIT_TRACE\n");
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		kcov_debug("KCOV_ENABLE\n");
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have already enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov->size, kcov->area, kcov->mode,
				kcov->sequence);
		t->kcov = kcov;
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		kcov_debug("KCOV_DISABLE\n");
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		kcov_debug("KCOV_REMOTE_ENABLE\n");
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock(&kcov_remote_lock);
		for (i = 0; i < remote_arg->num_handles; i++) {
			kcov_debug("handle %llx\n", remote_arg->handles[i]);
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			kcov_debug("common handle %llx\n",
					remote_arg->common_handle);
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock(&kcov_remote_lock);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock(&kcov_remote_lock);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;

	if (cmd == KCOV_REMOTE_ENABLE) {
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = sizeof(*remote_arg) +
			sizeof(remote_arg->handles[0]) * remote_num_handles;
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
	}

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);

	kfree(remote_arg);

	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread to allow kcov to be used to collect
 * coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD), each
 * instance must be assigned a unique 4-byte instance id. The instance id is
 * then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, this function looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
void kcov_remote_start(u64 handle)
{
	struct kcov_remote *remote;
	void *area;
	struct task_struct *t;
	unsigned int size;
	enum kcov_mode mode;
	int sequence;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (WARN_ON(!in_task()))
		return;
	t = current;
	/*
	 * Check that kcov_remote_start is not called twice
	 * nor called by user tasks (with enabled kcov).
	 */
	if (WARN_ON(t->kcov))
		return;

	kcov_debug("handle = %llx\n", handle);

	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		kcov_debug("no remote found");
		spin_unlock(&kcov_remote_lock);
		return;
	}
	/* Put in kcov_remote_stop(). */
	kcov_get(remote->kcov);
	t->kcov = remote->kcov;
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	size = remote->kcov->remote_size;
	mode = remote->kcov->mode;
	sequence = remote->kcov->sequence;
	area = kcov_remote_area_get(size);
	spin_unlock(&kcov_remote_lock);

	if (!area) {
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			t->kcov = NULL;
			kcov_put(remote->kcov);
			return;
		}
	}
	/* Reset coverage size. */
	*(u64 *)area = 0;

	kcov_debug("area = %px, size = %u", area, size);

	kcov_start(t, size, area, mode, sequence);
}
EXPORT_SYMBOL(kcov_remote_start);
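
/*
 * Example (illustrative sketch, not part of this file): how the two kinds of
 * handles described in the comment above kcov_remote_start() might be used
 * from kernel code. KCOV_SUBSYSTEM_USB, kcov_remote_handle(),
 * kcov_remote_start()/kcov_remote_stop() and kcov_common_handle() are the
 * real interfaces; struct my_dev, dev->instance_id and my_dev_handle_event()
 * are hypothetical names, and the kthread-based worker shape is only an
 * assumption for illustration.
 *
 *	// Global background thread: one instance per device, so the handle
 *	// is derived from a fixed subsystem id plus a unique per-device id.
 *	static int my_event_thread(void *data)
 *	{
 *		struct my_dev *dev = data;
 *		u64 handle = kcov_remote_handle(KCOV_SUBSYSTEM_USB,
 *						dev->instance_id);
 *
 *		while (!kthread_should_stop()) {
 *			kcov_remote_start(handle);
 *			my_dev_handle_event(dev);  // coverage attributed here
 *			kcov_remote_stop();
 *		}
 *		return 0;
 *	}
 *
 *	// Local worker spawned on behalf of a user process: the spawning
 *	// syscall/ioctl path saves the caller's common handle ...
 *	dev->kcov_handle = kcov_common_handle();
 *
 *	// ... and the spawned worker wraps its work in that handle.
 *	kcov_remote_start(dev->kcov_handle);
 *	my_dev_handle_event(dev);
 *	kcov_remote_stop();
 */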

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* As arm can't divide u64 integers use log of entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov = t->kcov;
	void *area = t->kcov_area;
	unsigned int size = t->kcov_size;
	int sequence = t->kcov_sequence;

	if (!kcov) {
		kcov_debug("no kcov found\n");
		return;
	}

	kcov_stop(t);
	t->kcov = NULL;

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the check.
	 */
	kcov_debug("move if: %d == %d && %d\n",
		sequence, kcov->sequence, (int)kcov->remote);
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	spin_lock(&kcov_remote_lock);
	kcov_remote_area_put(area, size);
	spin_unlock(&kcov_remote_lock);

	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);