#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of the arena (in unsigned longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
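
/*
 * For orientation, a minimal sketch of the user-space protocol described
 * above (not part of this file). It assumes the KCOV_* ioctl codes from
 * include/uapi/linux/kcov.h, quoted after kcov_ioctl_locked() below;
 * COVER_SIZE is an arbitrary illustrative buffer size in unsigned longs:
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;				// reset the collected-PC count
 *	read(-1, NULL, 0);			// the syscall under test
 *	unsigned long n = cover[0];		// number of PCs collected
 *	for (unsigned long i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 */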
/*
 * Entry point from instrumented code.
 * This is called once per basic block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts, there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
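
/*
 * Illustration (a sketch, not literal compiler output): CONFIG_KCOV builds
 * the kernel with -fsanitize-coverage=trace-pc, which makes the compiler
 * emit a call to __sanitizer_cov_trace_pc() at the start of every basic
 * block. Code such as
 *
 *	if (x)
 *		bar();
 *
 * therefore behaves roughly as if written:
 *
 *	__sanitizer_cov_trace_pc();		// block ending in the branch
 *	if (x) {
 *		__sanitizer_cov_trace_pc();	// taken-branch block
 *		bar();
 *	}
 *
 * _RET_IP_ above then identifies the covered block via the address the
 * instrumented call returns to.
 */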

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
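
/*
 * The matching user-space call, for reference (a sketch; per the checks
 * above, the length must equal the KCOV_INIT_TRACE size multiplied by
 * sizeof(unsigned long) and the offset must be 0, else -EINVAL):
 *
 *	cover = mmap(NULL, size * sizeof(unsigned long),
 *		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */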

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, and that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
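
/*
 * The command codes dispatched above are defined in
 * include/uapi/linux/kcov.h as:
 *
 *	#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
 *	#define KCOV_ENABLE		_IO('c', 100)
 *	#define KCOV_DISABLE		_IO('c', 101)
 */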

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);
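
/*
 * The collected values are raw kernel code addresses; offline they can be
 * mapped back to source lines with addr2line against the matching vmlinux.
 * The address and output below are made-up placeholders:
 *
 *	$ addr2line -e vmlinux 0xffffffff8100a7c9
 *	fs/read_write.c:446
 */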