kasan.c

/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}
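/*
 * Unpoisoning is the inverse operation: the shadow for 'size' bytes is
 * cleared to 0 (fully accessible). If 'size' is not a multiple of
 * KASAN_SHADOW_SCALE_SIZE, the last shadow byte instead records how many
 * bytes of the final granule are accessible (a value in 1..7).
 */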
void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        __kasan_unpoison_stack(current, watermark);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;

        kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */
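/*
 * Each shadow byte describes an 8-byte granule of memory: 0 means the whole
 * granule is accessible, 1..7 means only the first N bytes are accessible,
 * and a negative value marks the granule as poisoned (redzone, freed object,
 * out-of-scope stack object, etc.).
 */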
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}
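/*
 * The two helpers below scan a range of shadow memory and return the address
 * of the first non-zero shadow byte (i.e. the first granule that is not fully
 * accessible), or 0 if the whole range is clean. memory_is_zero() walks the
 * range a word at a time once the start pointer is 8-byte aligned.
 */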
static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}
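/*
 * Variable-size check: the shadow of the whole region must be zero, with one
 * exception - the very last shadow byte may hold a partial-granule value, as
 * long as that value still covers the last byte of the access.
 */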
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}
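/*
 * Main access check. Addresses below the start of the shadow-mapped range
 * cannot be valid kernel pointers and are reported right away; otherwise the
 * access is reported only if its shadow marks the region as poisoned.
 */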
static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);
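/*
 * KASAN provides its own memset/memmove/memcpy so that the source and
 * destination ranges are checked before the underlying __mem* routines
 * (which are not instrumented) touch the memory.
 */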
#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}
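/*
 * Reserve extra space in every object of the cache: allocation metadata
 * (allocation and free tracks), free metadata when the object itself cannot
 * be reused to store it (RCU caches, caches with constructors, or objects
 * smaller than the free metadata), and a redzone sized by optimal_redzone().
 */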
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        int orig_size = *size;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);

        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}
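/*
 * Stack traces saved in the alloc/free metadata are cut at the first
 * irq/softirq entry function: the frames of the interrupted task below it
 * are unrelated to the allocation or free being recorded.
 */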
static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
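/*
 * Called when an object is freed. A shadow byte that is negative (already
 * poisoned) or outside the valid 0..7 range indicates a double-free or an
 * invalid free and is reported. Otherwise the object is poisoned and put
 * into the quarantine instead of being released immediately, so that
 * use-after-free accesses can still be detected. Returns true when the slab
 * allocator must not free the object right away.
 */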
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
        s8 shadow_byte;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
                kasan_report_double_free(cache, object, shadow_byte);
                return true;
        }

        kasan_poison_slab_free(cache, object);

        if (unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);
        return true;
}
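/*
 * Mark [object, object + size) as accessible and poison the unused tail of
 * the object as a kmalloc redzone; the space past object_size (metadata and
 * the per-cache redzone) remains poisoned from slab setup.
 */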
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}
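/*
 * Modules live in a vmalloc area that has no shadow by default, so shadow
 * pages covering the module mapping are allocated here and released again
 * in kasan_free_shadow() when the module area goes away.
 */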
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}
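/*
 * The compiler emits a call to __asan_register_globals() for each compilation
 * unit; every global variable gets its data unpoisoned and the redzone that
 * follows it poisoned with KASAN_GLOBAL_REDZONE.
 */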
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
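/*
 * The compiler instruments memory accesses with calls to
 * __asan_loadN()/__asan_storeN() (or the *_noabort variants); the macro
 * below generates these entry points for the fixed access sizes
 * 1, 2, 4, 8 and 16.
 */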
#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
        /*
         * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
         * by redzones, so we simply round up size to simplify logic.
         */
        kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
                            KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
        kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_info("WARNING: KASAN doesn't support memory hot-add\n");
        pr_info("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif