/* lkdtm_bugs.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * This is for all the tests related to logic bugs (e.g. bad dereferences,
  4. * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
  5. * lockups) along with other things that don't fit well into existing LKDTM
  6. * test source files.
  7. */
  8. #include "lkdtm.h"
  9. #include <linux/list.h>
  10. #include <linux/sched.h>
  11. #include <linux/sched/signal.h>
  12. #include <linux/sched/task_stack.h>
  13. #include <linux/uaccess.h>
  14. struct lkdtm_list {
  15. struct list_head node;
  16. };
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
  22. #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
  23. #define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
  24. #else
  25. #define REC_STACK_SIZE (THREAD_SIZE / 8)
  26. #endif
  27. #define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
  28. static int recur_count = REC_NUM_DEFAULT;
  29. static DEFINE_SPINLOCK(lock_me_up);
  30. static int recursive_loop(int remaining)
  31. {
  32. char buf[REC_STACK_SIZE];
  33. /* Make sure compiler does not optimize this away. */
  34. memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
  35. if (!remaining)
  36. return 0;
  37. else
  38. return recursive_loop(remaining - 1);
  39. }
  40. /* If the depth is negative, use the default, otherwise keep parameter. */
  41. void __init lkdtm_bugs_init(int *recur_param)
  42. {
  43. if (*recur_param < 0)
  44. *recur_param = recur_count;
  45. else
  46. recur_count = *recur_param;
  47. }
  48. void lkdtm_PANIC(void)
  49. {
  50. panic("dumptest");
  51. }
  52. void lkdtm_BUG(void)
  53. {
  54. BUG();
  55. }
  56. void lkdtm_WARNING(void)
  57. {
  58. WARN_ON(1);
  59. }
  60. void lkdtm_EXCEPTION(void)
  61. {
  62. *((volatile int *) 0) = 0;
  63. }
  64. void lkdtm_LOOP(void)
  65. {
  66. for (;;)
  67. ;
  68. }
  69. void lkdtm_OVERFLOW(void)
  70. {
  71. (void) recursive_loop(recur_count);
  72. }
  73. static noinline void __lkdtm_CORRUPT_STACK(void *stack)
  74. {
  75. memset(stack, '\xff', 64);
  76. }
  77. /* This should trip the stack canary, not corrupt the return address. */
  78. noinline void lkdtm_CORRUPT_STACK(void)
  79. {
  80. /* Use default char array length that triggers stack protection. */
  81. char data[8] __aligned(sizeof(void *));
  82. __lkdtm_CORRUPT_STACK(&data);
  83. pr_info("Corrupted stack containing char array ...\n");
  84. }
  85. /* Same as above but will only get a canary with -fstack-protector-strong */
  86. noinline void lkdtm_CORRUPT_STACK_STRONG(void)
  87. {
  88. union {
  89. unsigned short shorts[4];
  90. unsigned long *ptr;
  91. } data __aligned(sizeof(void *));
  92. __lkdtm_CORRUPT_STACK(&data);
  93. pr_info("Corrupted stack containing union ...\n");
  94. }
  95. void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
  96. {
  97. static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
  98. u32 *p;
  99. u32 val = 0x12345678;
  100. p = (u32 *)(data + 1);
  101. if (*p == 0)
  102. val = 0x87654321;
  103. *p = val;
  104. }
  105. void lkdtm_SOFTLOCKUP(void)
  106. {
  107. preempt_disable();
  108. for (;;)
  109. cpu_relax();
  110. }
  111. void lkdtm_HARDLOCKUP(void)
  112. {
  113. local_irq_disable();
  114. for (;;)
  115. cpu_relax();
  116. }
  117. void lkdtm_SPINLOCKUP(void)
  118. {
  119. /* Must be called twice to trigger. */
  120. spin_lock(&lock_me_up);
  121. /* Let sparse know we intended to exit holding the lock. */
  122. __release(&lock_me_up);
  123. }
  124. void lkdtm_HUNG_TASK(void)
  125. {
  126. set_current_state(TASK_UNINTERRUPTIBLE);
  127. schedule();
  128. }
  129. void lkdtm_CORRUPT_LIST_ADD(void)
  130. {
  131. /*
  132. * Initially, an empty list via LIST_HEAD:
  133. * test_head.next = &test_head
  134. * test_head.prev = &test_head
  135. */
  136. LIST_HEAD(test_head);
  137. struct lkdtm_list good, bad;
  138. void *target[2] = { };
  139. void *redirection = &target;
  140. pr_info("attempting good list addition\n");
  141. /*
  142. * Adding to the list performs these actions:
  143. * test_head.next->prev = &good.node
  144. * good.node.next = test_head.next
  145. * good.node.prev = test_head
  146. * test_head.next = good.node
  147. */
  148. list_add(&good.node, &test_head);
  149. pr_info("attempting corrupted list addition\n");
  150. /*
  151. * In simulating this "write what where" primitive, the "what" is
  152. * the address of &bad.node, and the "where" is the address held
  153. * by "redirection".
  154. */
  155. test_head.next = redirection;
  156. list_add(&bad.node, &test_head);
  157. if (target[0] == NULL && target[1] == NULL)
  158. pr_err("Overwrite did not happen, but no BUG?!\n");
  159. else
  160. pr_err("list_add() corruption not detected!\n");
  161. }
  162. void lkdtm_CORRUPT_LIST_DEL(void)
  163. {
  164. LIST_HEAD(test_head);
  165. struct lkdtm_list item;
  166. void *target[2] = { };
  167. void *redirection = &target;
  168. list_add(&item.node, &test_head);
  169. pr_info("attempting good list removal\n");
  170. list_del(&item.node);
  171. pr_info("attempting corrupted list removal\n");
  172. list_add(&item.node, &test_head);
  173. /* As with the list_add() test above, this corrupts "next". */
  174. item.node.next = redirection;
  175. list_del(&item.node);
  176. if (target[0] == NULL && target[1] == NULL)
  177. pr_err("Overwrite did not happen, but no BUG?!\n");
  178. else
  179. pr_err("list_del() corruption not detected!\n");
  180. }
  181. /* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
  182. void lkdtm_CORRUPT_USER_DS(void)
  183. {
  184. pr_info("setting bad task size limit\n");
  185. set_fs(KERNEL_DS);
  186. /* Make sure we do not keep running with a KERNEL_DS! */
  187. force_sig(SIGKILL, current);
  188. }
  189. /* Test that VMAP_STACK is actually allocating with a leading guard page */
  190. void lkdtm_STACK_GUARD_PAGE_LEADING(void)
  191. {
  192. const unsigned char *stack = task_stack_page(current);
  193. const unsigned char *ptr = stack - 1;
  194. volatile unsigned char byte;
  195. pr_info("attempting bad read from page below current stack\n");
  196. byte = *ptr;
  197. pr_err("FAIL: accessed page before stack!\n");
  198. }
  199. /* Test that VMAP_STACK is actually allocating with a trailing guard page */
  200. void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
  201. {
  202. const unsigned char *stack = task_stack_page(current);
  203. const unsigned char *ptr = stack + THREAD_SIZE;
  204. volatile unsigned char byte;
  205. pr_info("attempting bad read from page above current stack\n");
  206. byte = *ptr;
  207. pr_err("FAIL: accessed page after stack!\n");
  208. }