/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file was copied from include/asm-generic/uaccess.h
 */
#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/asm.h>
/*
 * User-memory access is gated by the SSTATUS.SUM bit: set it around each
 * access window so supervisor loads/stores to user virtual addresses are
 * permitted, and clear it again immediately afterwards.  The "memory"
 * clobber keeps the compiler from hoisting user accesses outside the
 * enabled window.
 */
#define __enable_user_access()						\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	(~0UL)		/* no limit: entire address space */
#define USER_DS		(TASK_SIZE)	/* accesses capped at TASK_SIZE */

#define get_ds()	(KERNEL_DS)
/* Current task's user-access address limit, stored in thread_info. */
#define get_fs()	(current_thread_info()->addr_limit)

/* Set the current task's address limit (normally KERNEL_DS or USER_DS). */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b) ((a) == (b))

#define user_addr_max()	(get_fs())
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})
  71. /*
  72. * Ensure that the range [addr, addr+size) is within the process's
  73. * address space
  74. */
  75. static inline int __access_ok(unsigned long addr, unsigned long size)
  76. {
  77. const mm_segment_t fs = get_fs();
  78. return (size <= fs) && (addr <= (fs - size));
  79. }
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;	/* faulting insn VA, fixup target VA */
};

/* Apply an exception-table fixup for the faulting state, if one exists;
 * returns nonzero when a fixup was applied (implemented in the fault code). */
extern int fixup_exception(struct pt_regs *state);
/*
 * Word indices of the most/least significant 32-bit halves of a 64-bit
 * value, used by the !CONFIG_64BIT get/put paths below to split a u64
 * into two endian-correct lw/sw accesses.
 */
#if defined(__LITTLE_ENDIAN)
#define __MSW	1
#define __LSW	0
#elif defined(__BIG_ENDIAN)
#define __MSW	0
#define __LSW	1
#else
#error "Unknown endianness"
#endif
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */

/*
 * Load one 1/2/4/8-byte value from user memory using @insn.  Label 1 is
 * registered in __ex_table: on a fault, execution resumes at the .fixup
 * stub (label 3), which sets @err to -EFAULT, zeroes the result, and
 * jumps back to label 2 (the instruction after the load; the "jump"
 * pseudo-op uses %2/__tmp as its scratch register).
 */
#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %3\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	li %1, 0\n"				\
		"	jump 2b, %2\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	__disable_user_access();				\
	(x) = __x;						\
} while (0)
#ifdef CONFIG_64BIT
/* 64-bit: a single ld covers the whole value. */
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
/*
 * 32-bit: fetch the two 32-bit halves with separate lw instructions
 * (__LSW/__MSW select the endian-correct word order) and reassemble
 * them into a u64.  Either load may fault; the shared fixup (label 4)
 * sets @err and zeroes both halves, so @x is fully zeroed on error just
 * like the single-insn path.  The __typeof__((x)-(x)) cast picks an
 * arithmetic type for the intermediate value without evaluating x.
 */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %4\n"				\
		"2:\n"						\
		"	lw %2, %5\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	li %1, 0\n"				\
		"	li %2, 0\n"				\
		"	jump 3b, %3\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
		  "=r" (__tmp)					\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
		  "i" (-EFAULT));				\
	__disable_user_access();				\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	register long __gu_err = 0;				\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	__chk_user_ptr(__gu_ptr);				\
	/* Dispatch on pointee size; any other size fails the build. */	\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__gu_err;						\
})
/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	/* On a failed range check, still zero @x per the contract. */ \
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?		\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
})
/*
 * Store one 1/2/4/8-byte value to user memory using @insn.  The "rJ"
 * constraint plus the %z output modifier allows a constant zero to be
 * emitted as the x0 register.  On a fault, the .fixup stub (label 3)
 * sets @err to -EFAULT and jumps back past the store (label 2), using
 * %1/__tmp as the jump scratch register.
 */
#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z3, %2\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	jump 2b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
	__disable_user_access();				\
} while (0)
#ifdef CONFIG_64BIT
/* 64-bit: a single sd covers the whole value. */
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
/*
 * 32-bit: store the value as two endian-ordered sw instructions
 * (__LSW/__MSW word indices).  Either store may fault; the shared
 * fixup (label 4) sets @err to -EFAULT and resumes after both stores.
 * The __typeof__((x)-(x)) cast widens @x arithmetically before the
 * u64 conversion without evaluating it twice.
 */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z4, %2\n"				\
		"2:\n"						\
		"	sw %z5, %3\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	jump 3b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp),			\
		  "=m" (__ptr[__LSW]),				\
		  "=m" (__ptr[__MSW])				\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
	__disable_user_access();				\
} while (0)
#endif /* CONFIG_64BIT */
  298. /**
  299. * __put_user: - Write a simple value into user space, with less checking.
  300. * @x: Value to copy to user space.
  301. * @ptr: Destination address, in user space.
  302. *
  303. * Context: User context only. This function may sleep.
  304. *
  305. * This macro copies a single simple value from kernel space to user
  306. * space. It supports simple types like char and int, but not larger
  307. * data types like structures or arrays.
  308. *
  309. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  310. * to the result of dereferencing @ptr.
  311. *
  312. * Caller must check the pointer with access_ok() before calling this
  313. * function.
  314. *
  315. * Returns zero on success, or -EFAULT on error.
  316. */
  317. #define __put_user(x, ptr) \
  318. ({ \
  319. register long __pu_err = 0; \
  320. __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
  321. __chk_user_ptr(__gu_ptr); \
  322. switch (sizeof(*__gu_ptr)) { \
  323. case 1: \
  324. __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
  325. break; \
  326. case 2: \
  327. __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
  328. break; \
  329. case 4: \
  330. __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
  331. break; \
  332. case 8: \
  333. __put_user_8((x), __gu_ptr, __pu_err); \
  334. break; \
  335. default: \
  336. BUILD_BUG(); \
  337. } \
  338. __pu_err; \
  339. })
/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?		\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
/* Assembly copy routines (lib/uaccess.S); raw_copy_* below wrap them. */
extern unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
extern unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

/*
 * Copy @n bytes from user @from to kernel @to without range checking;
 * thin wrapper around the asm implementation, whose return value
 * (by the raw_copy contract, the number of bytes not copied) is
 * passed through unchanged.
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}
/*
 * Copy @n bytes from kernel @from to user @to without range checking;
 * thin wrapper passing through the asm routine's result (by the
 * raw_copy contract, the number of bytes not copied).
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}
/* User-string helpers, implemented out of line. */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

/* Zero @n bytes of user memory; no range check (see clear_user()). */
extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
  383. static inline
  384. unsigned long __must_check clear_user(void __user *to, unsigned long n)
  385. {
  386. might_fault();
  387. return access_ok(VERIFY_WRITE, to, n) ?
  388. __clear_user(to, n) : n;
  389. }
  390. /*
  391. * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
  392. * will set "err" to -EFAULT, while successful accesses return the previous
  393. * value.
  394. */
  395. #define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
  396. ({ \
  397. __typeof__(ptr) __ptr = (ptr); \
  398. __typeof__(*(ptr)) __old = (old); \
  399. __typeof__(*(ptr)) __new = (new); \
  400. __typeof__(*(ptr)) __ret; \
  401. __typeof__(err) __err = 0; \
  402. register unsigned int __rc; \
  403. __enable_user_access(); \
  404. switch (size) { \
  405. case 4: \
  406. __asm__ __volatile__ ( \
  407. "0:\n" \
  408. " lr.w" #scb " %[ret], %[ptr]\n" \
  409. " bne %[ret], %z[old], 1f\n" \
  410. " sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
  411. " bnez %[rc], 0b\n" \
  412. "1:\n" \
  413. ".section .fixup,\"ax\"\n" \
  414. ".balign 4\n" \
  415. "2:\n" \
  416. " li %[err], %[efault]\n" \
  417. " jump 1b, %[rc]\n" \
  418. ".previous\n" \
  419. ".section __ex_table,\"a\"\n" \
  420. ".balign " RISCV_SZPTR "\n" \
  421. " " RISCV_PTR " 1b, 2b\n" \
  422. ".previous\n" \
  423. : [ret] "=&r" (__ret), \
  424. [rc] "=&r" (__rc), \
  425. [ptr] "+A" (*__ptr), \
  426. [err] "=&r" (__err) \
  427. : [old] "rJ" (__old), \
  428. [new] "rJ" (__new), \
  429. [efault] "i" (-EFAULT)); \
  430. break; \
  431. case 8: \
  432. __asm__ __volatile__ ( \
  433. "0:\n" \
  434. " lr.d" #scb " %[ret], %[ptr]\n" \
  435. " bne %[ret], %z[old], 1f\n" \
  436. " sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
  437. " bnez %[rc], 0b\n" \
  438. "1:\n" \
  439. ".section .fixup,\"ax\"\n" \
  440. ".balign 4\n" \
  441. "2:\n" \
  442. " li %[err], %[efault]\n" \
  443. " jump 1b, %[rc]\n" \
  444. ".previous\n" \
  445. ".section __ex_table,\"a\"\n" \
  446. ".balign " RISCV_SZPTR "\n" \
  447. " " RISCV_PTR " 1b, 2b\n" \
  448. ".previous\n" \
  449. : [ret] "=&r" (__ret), \
  450. [rc] "=&r" (__rc), \
  451. [ptr] "+A" (*__ptr), \
  452. [err] "=&r" (__err) \
  453. : [old] "rJ" (__old), \
  454. [new] "rJ" (__new), \
  455. [efault] "i" (-EFAULT)); \
  456. break; \
  457. default: \
  458. BUILD_BUG(); \
  459. } \
  460. __disable_user_access(); \
  461. (err) = __err; \
  462. __ret; \
  463. })
#endif /* _ASM_RISCV_UACCESS_H */