/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
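
/*
 * kasan_enable_current()/kasan_disable_current() adjust the per-task
 * kasan_depth counter; the report path consults it to decide whether
 * KASAN reports are currently suppressed for this task.
 */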
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}
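
/*
 * Unpoisons the shadow for 'size' bytes starting from 'address'. If size
 * is not a multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow byte
 * encodes how many bytes of the final granule are accessible.
 */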
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}
/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}
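
/*
 * The two helpers below scan a shadow range for any non-zero byte.
 * memory_is_nonzero() checks a leading unaligned prefix byte by byte,
 * then the bulk of the range in 64-bit words, then the trailing bytes,
 * returning the address of the first non-zero shadow byte (or 0).
 */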
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
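
/*
 * Main access check: report accesses whose address lies below the range
 * covered by the shadow, or whose shadow bytes mark the memory as poisoned.
 */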
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);
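
/*
 * KASAN provides its own memset/memmove/memcpy so that the source and
 * destination ranges are checked before the underlying __mem*() routines
 * touch the memory.
 */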
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}
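
/*
 * Page allocator hooks: freshly allocated pages are unpoisoned, freed
 * pages are marked with KASAN_FREE_PAGE so later accesses are reported.
 */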
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
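
/*
 * Reserve space in each object for the alloc/free metadata and the
 * adaptive redzone. The resulting size is capped at KMALLOC_MAX_SIZE;
 * if the metadata would not fit, KASAN is left disabled for this cache.
 */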
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}
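
/*
 * A new slab page is poisoned as a redzone; individual objects are
 * unpoisoned as they are initialized and handed out by the allocator.
 */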
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
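
/*
 * Stack trace handling: traces are trimmed at the first IRQ/softirq entry
 * frame and stored in the stack depot; set_track() records the saved
 * trace together with the current pid for later reporting.
 */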
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}
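
/*
 * Free path: reject frees of pointers that don't point at the start of an
 * object or whose shadow already marks the object as freed, then poison
 * the object and (optionally) defer its reuse via the quarantine.
 */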
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}
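
/*
 * Allocation path: unpoison exactly 'size' bytes of the object, poison the
 * remainder up to object_size as a right redzone, and record the
 * allocation stack trace.
 */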
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				    KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
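
/*
 * Allocate and map shadow memory covering the address range passed in.
 * On success the vm area is flagged VM_KASAN so kasan_free_shadow() can
 * release the shadow when the area itself is freed.
 */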
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
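
/*
 * The compiler emits calls to __asan_register_globals() for each
 * compilation unit's global variables; the redzone placed after every
 * global is poisoned here.
 */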
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
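
/*
 * __asan_load*()/__asan_store*() are the entry points the compiler inserts
 * before every instrumented memory access; the *_noabort variants are
 * aliases, since the kernel never aborts on a report.
 */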
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

#ifdef CONFIG_MEMORY_HOTPLUG
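/*
 * Walk the kernel page tables to see whether shadow memory for 'addr' is
 * already populated; used to avoid mapping the same shadow twice when a
 * memory block is onlined again.
 */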
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first is arch-specific
	 * and the latter depends on HUGETLB_PAGE. So we abuse pud_bad(): if a
	 * pud is bad here, it is bad because it is huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when we online
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it was
		 * indeed the latter case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);

#endif