/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT	0x40000000UL
#else
#define __UA_LIMIT	0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */
#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * eva_kernel_access() - determine whether kernel memory access on an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
	if (!IS_ENABLED(CONFIG_EVA))
		return false;

	return uaccess_kernel();
}

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *	    enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;

	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size)))
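
/*
 * Illustrative sketch, not part of the original header: access_ok() only
 * validates the range against the current addr_limit; the access itself may
 * still fault.  Names such as ubuf and len are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	// the unchecked accessors may now be used on [ubuf, ubuf + len)
 */
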
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
	__put_user_check((x), (ptr), sizeof(*(ptr)))
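
/*
 * Illustrative sketch (hypothetical names): put_user() performs its own
 * range check, so no separate access_ok() call is required.
 *
 *	int __user *uptr = (int __user *)arg;
 *
 *	if (put_user(status, uptr))
 *		return -EFAULT;
 */
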
/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)						\
	__get_user_check((x), (ptr), sizeof(*(ptr)))
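
/*
 * Illustrative sketch (hypothetical names): fetching one value with
 * get_user().  On a fault, val is zeroed and -EFAULT is returned.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
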
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
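
/*
 * Illustrative sketch (hypothetical names): the unchecked variants suit hot
 * paths where a single access_ok() covers several accesses.
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	if (__put_user(lo, uptr) || __put_user(hi, uptr + 1))
 *		return -EFAULT;
 */
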
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA.  We need to use normal load instructions
 * to read data from kernel when operating in EVA mode.  We use these macros to
 * avoid redefining __get_user_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (eva_kernel_access()) {					\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {		\
		if (eva_kernel_access())				\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1: "insn("%1", "%3")"			\n"			\
	"2:					\n"			\
	"   .insn				\n"			\
	"   .section .fixup,\"ax\"		\n"			\
	"3: li %0, %4				\n"			\
	"   move %1, $0				\n"			\
	"   j 2b				\n"			\
	"   .previous				\n"			\
	"   .section __ex_table,\"a\"		\n"			\
	"   "__UA_ADDR "\t1b, 3b		\n"			\
	"   .previous				\n"			\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1: " insn("%1", "(%3)")"		\n"			\
	"2: " insn("%D1", "4(%3)")"		\n"			\
	"3:					\n"			\
	"   .insn				\n"			\
	"   .section .fixup,\"ax\"		\n"			\
	"4: li %0, %4				\n"			\
	"   move %1, $0				\n"			\
	"   move %D1, $0			\n"			\
	"   j 3b				\n"			\
	"   .previous				\n"			\
	"   .section __ex_table,\"a\"		\n"			\
	"   " __UA_ADDR " 1b, 4b		\n"			\
	"   " __UA_ADDR " 2b, 4b		\n"			\
	"   .previous				\n"			\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA.  We need to use normal store
 * instructions to write data to kernel memory when operating in EVA mode.
 * We use these macros to avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif
#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (eva_kernel_access()) {					\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {	\
		if (eva_kernel_access())				\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1: "insn("%z2", "%3")" # __put_data_asm	\n"		\
	"2:					\n"			\
	"   .insn				\n"			\
	"   .section .fixup,\"ax\"		\n"			\
	"3: li %0, %4				\n"			\
	"   j 2b				\n"			\
	"   .previous				\n"			\
	"   .section __ex_table,\"a\"		\n"			\
	"   " __UA_ADDR " 1b, 3b		\n"			\
	"   .previous				\n"			\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1: "insn("%2", "(%3)")" # __put_data_asm_ll32	\n"		\
	"2: "insn("%D2", "4(%3)")"		\n"			\
	"3:					\n"			\
	"   .insn				\n"			\
	"   .section .fixup,\"ax\"		\n"			\
	"4: li %0, %4				\n"			\
	"   j 3b				\n"			\
	"   .previous				\n"			\
	"   .section __ex_table,\"a\"		\n"			\
	"   " __UA_ADDR " 1b, 4b		\n"			\
	"   " __UA_ADDR " 2b, 4b		\n"			\
	"   .previous"							\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from(func, to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to(func, to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

/*
 * Source or destination address is in userland.  We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_to_kernel(to, from, n);
	else
		return __invoke_copy_to_user(to, from, n);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return __invoke_copy_from_kernel(to, from, n);
	else
		return __invoke_copy_from_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (eva_kernel_access())
		return ___invoke_copy_in_kernel(to, from, n);
	else
		return ___invoke_copy_in_user(to, from, n);
}
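
/*
 * Note: the raw_copy_*() routines above are the arch backend for the generic
 * copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h>, which add
 * the access_ok() check.  Illustrative sketch (hypothetical names):
 *
 *	struct foo kfoo;
 *
 *	if (copy_from_user(&kfoo, ufoo, sizeof(kfoo)))
 *		return -EFAULT;
 */
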
extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: bzero_clobbers);
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: bzero_clobbers);
	}

	return res;
}

#define clear_user(addr, n)						\
({									\
	void __user *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
									\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
				   __cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
									\
	__cl_size;							\
})
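
/*
 * Illustrative sketch (hypothetical names): clear_user() checks the range
 * itself and returns the number of bytes NOT cleared, so zero means complete
 * success.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
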
extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from,
				      long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from,
				    long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *	    least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
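
/*
 * Illustrative sketch (hypothetical names): copying a short string from
 * userspace.  A return equal to the buffer size means the string may be
 * truncated and unterminated; a negative return means a fault.
 *
 *	char buf[64];
 *	long len = strncpy_from_user(buf, ustr, sizeof(buf));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(buf))
 *		return -ENAMETOOLONG;
 */
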
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *	    enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}
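
/*
 * Illustrative sketch (hypothetical names): strnlen_user() counts the
 * terminating NUL, returns 0 on a fault, and returns a value greater than
 * the limit when no NUL is found within it.
 *
 *	long len = strnlen_user(ustr, maxlen);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > maxlen)
 *		return -E2BIG;
 */
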
#endif /* _ASM_UACCESS_H */