// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to the previous buffer by
 * address. Symbol [ means the start of a buffer, ] means
 * the end of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	LOOP_END,
};
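
/* Print the tested allocation sizes and buffer free order on failure. */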
static void pr_err_size_seq(size_t *sizes, int *seq)
{
	int i;

	pr_err("alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	pr_err("free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}
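
/*
 * Check that every page spanned by the buffer is allocated and not
 * sitting on the binder_alloc_lru list.
 */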
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	void __user *page_addr;
	void __user *end;
	int page_index;

	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}
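
/* Allocate BUFFER_NUM buffers of the given sizes and verify their pages. */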
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}
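
/*
 * Free all BUFFER_NUM buffers in the order given by seq, then check
 * that every page below end was moved to the lru rather than freed.
 */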
static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i < end / PAGE_SIZE; i++) {
		/*
		 * An error message on a free page can be a false
		 * positive if the binder shrinker ran during the
		 * binder_alloc_free_buf() calls above.
		 */
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}
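
/*
 * Reclaim all lru pages through the shrinker callback
 * binder_alloc_free_page(), then check that no page of this
 * alloc is still resident.
 */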
static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_alloc_lru))) {
		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}
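
/*
 * One full cycle: allocate, free (pages land on the lru), allocate
 * again (pages are reused from the lru), free, then reclaim all pages.
 */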
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_alloc_lru))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}
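
/* Return true if val already appears among the first index entries of seq. */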
static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}
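
/*
 * Derive the buffer sizes from the chosen end offsets and exercise
 * them both front-aligned and back-aligned within the buffer space.
 */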
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}
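
/*
 * Recursively choose an end offset for each buffer so that all
 * buf_end_align_type cases are covered, then test each combination.
 */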
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page() is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");
done:
	mutex_unlock(&binder_selftest_lock);
}
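
/*
 * Note: in mainline this file is built only when
 * CONFIG_ANDROID_BINDER_IPC_SELFTEST is enabled, and (as of the kernel
 * version this file matches) binder_selftest_alloc() is invoked from
 * binder_open(). binder_selftest_run ensures the test body executes at
 * most once, while the !alloc->vma check above defers it until the
 * process has mmap'ed its binder buffer space.
 */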