#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_fs() (current_thread_info()->addr_limit)
#define get_ds() (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size, segment) \
        (((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type, addr, size) \
({ \
        __chk_user_ptr(addr); \
        __access_ok(((unsigned long)(addr)), (size), get_fs()); \
})
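
/*
 * Worked example (editorial note, not part of the original header):
 * USER_DS is -0x40000000000UL, i.e. 0xfffffc0000000000, a mask of bits
 * 42..63.  With that limit, __access_ok() accepts a range only if "addr",
 * "size" and "addr+size" all stay below 2^42, confining user accesses to
 * the low 4 TB of the address space.  KERNEL_DS is 0, so every address
 * passes once set_fs(KERNEL_DS) is in effect.
 */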
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size) \
({ \
        long __gu_err = 0; \
        unsigned long __gu_val; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: __get_user_8(ptr); break; \
        case 2: __get_user_16(ptr); break; \
        case 4: __get_user_32(ptr); break; \
        case 8: __get_user_64(ptr); break; \
        default: __get_user_unknown(); break; \
        } \
        (x) = (__force __typeof__(*(ptr))) __gu_val; \
        __gu_err; \
})

#define __get_user_check(x, ptr, size, segment) \
({ \
        long __gu_err = -EFAULT; \
        unsigned long __gu_val = 0; \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
        if (__access_ok((unsigned long)__gu_addr, size, segment)) { \
                __gu_err = 0; \
                switch (size) { \
                case 1: __get_user_8(__gu_addr); break; \
                case 2: __get_user_16(__gu_addr); break; \
                case 4: __get_user_32(__gu_addr); break; \
                case 8: __get_user_64(__gu_addr); break; \
                default: __get_user_unknown(); break; \
                } \
        } \
        (x) = (__force __typeof__(*(ptr))) __gu_val; \
        __gu_err; \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
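
/*
 * Editorial note: casting the user pointer to the oversized __large_struct
 * via __m() lets the asm below take an "m" operand for a __user address
 * without dereferencing it at its real type, and the generous size keeps
 * gcc from assuming the asm touches only a small object.
 */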
#define __get_user_64(addr) \
        __asm__("1: ldq %0,%2\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda %0, 2b-1b(%1)\n" \
        ".previous" \
                : "=r"(__gu_val), "=r"(__gu_err) \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr) \
        __asm__("1: ldl %0,%2\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda %0, 2b-1b(%1)\n" \
        ".previous" \
                : "=r"(__gu_val), "=r"(__gu_err) \
                : "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __get_user_16(addr) \
        __asm__("1: ldwu %0,%2\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda %0, 2b-1b(%1)\n" \
        ".previous" \
                : "=r"(__gu_val), "=r"(__gu_err) \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr) \
        __asm__("1: ldbu %0,%2\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda %0, 2b-1b(%1)\n" \
        ".previous" \
                : "=r"(__gu_val), "=r"(__gu_err) \
                : "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation. */

#define __get_user_16(addr) \
{ \
        long __gu_tmp; \
        __asm__("1: ldq_u %0,0(%3)\n" \
        "2: ldq_u %1,1(%3)\n" \
        "   extwl %0,%3,%0\n" \
        "   extwh %1,%3,%1\n" \
        "   or %0,%1,%0\n" \
        "3:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda %0, 3b-1b(%2)\n" \
        "   .long 2b - .\n" \
        "   lda %0, 3b-2b(%2)\n" \
        ".previous" \
                : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
                : "r"(addr), "2"(__gu_err)); \
}

#define __get_user_8(addr) \
        __asm__("1: ldq_u %0,0(%2)\n" \
        "   extbl %0,%2,%0\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda %0, 2b-1b(%1)\n" \
        ".previous" \
                : "=&r"(__gu_val), "=r"(__gu_err) \
                : "r"(addr), "1"(__gu_err))
#endif
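
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): get_user() performs the access_ok() check itself, while the
 * __get_user() form relies on one explicit check covering several reads
 * from the same buffer.  The function and parameter names below are
 * hypothetical.
 */
static inline long example_get_two_ints(const int __user *uptr, int *a, int *b)
{
        long err;

        if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
                return -EFAULT;
        err = __get_user(*a, uptr);             /* unchecked, range verified above */
        if (err == 0)
                err = __get_user(*b, uptr + 1);
        return err;
}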
extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size) \
({ \
        long __pu_err = 0; \
        __chk_user_ptr(ptr); \
        switch (size) { \
        case 1: __put_user_8(x, ptr); break; \
        case 2: __put_user_16(x, ptr); break; \
        case 4: __put_user_32(x, ptr); break; \
        case 8: __put_user_64(x, ptr); break; \
        default: __put_user_unknown(); break; \
        } \
        __pu_err; \
})

#define __put_user_check(x, ptr, size, segment) \
({ \
        long __pu_err = -EFAULT; \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
        if (__access_ok((unsigned long)__pu_addr, size, segment)) { \
                __pu_err = 0; \
                switch (size) { \
                case 1: __put_user_8(x, __pu_addr); break; \
                case 2: __put_user_16(x, __pu_addr); break; \
                case 4: __put_user_32(x, __pu_addr); break; \
                case 8: __put_user_64(x, __pu_addr); break; \
                default: __put_user_unknown(); break; \
                } \
        } \
        __pu_err; \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr) \
        __asm__ __volatile__("1: stq %r2,%1\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda $31,2b-1b(%0)\n" \
        ".previous" \
                : "=r"(__pu_err) \
                : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr) \
        __asm__ __volatile__("1: stl %r2,%1\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda $31,2b-1b(%0)\n" \
        ".previous" \
                : "=r"(__pu_err) \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __put_user_16(x, addr) \
        __asm__ __volatile__("1: stw %r2,%1\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda $31,2b-1b(%0)\n" \
        ".previous" \
                : "=r"(__pu_err) \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr) \
        __asm__ __volatile__("1: stb %r2,%1\n" \
        "2:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda $31,2b-1b(%0)\n" \
        ".previous" \
                : "=r"(__pu_err) \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation. */

#define __put_user_16(x, addr) \
{ \
        long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
        __asm__ __volatile__( \
        "1: ldq_u %2,1(%5)\n" \
        "2: ldq_u %1,0(%5)\n" \
        "   inswh %6,%5,%4\n" \
        "   inswl %6,%5,%3\n" \
        "   mskwh %2,%5,%2\n" \
        "   mskwl %1,%5,%1\n" \
        "   or %2,%4,%2\n" \
        "   or %1,%3,%1\n" \
        "3: stq_u %2,1(%5)\n" \
        "4: stq_u %1,0(%5)\n" \
        "5:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda $31, 5b-1b(%0)\n" \
        "   .long 2b - .\n" \
        "   lda $31, 5b-2b(%0)\n" \
        "   .long 3b - .\n" \
        "   lda $31, 5b-3b(%0)\n" \
        "   .long 4b - .\n" \
        "   lda $31, 5b-4b(%0)\n" \
        ".previous" \
                : "=r"(__pu_err), "=&r"(__pu_tmp1), \
                  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
                  "=&r"(__pu_tmp4) \
                : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr) \
{ \
        long __pu_tmp1, __pu_tmp2; \
        __asm__ __volatile__( \
        "1: ldq_u %1,0(%4)\n" \
        "   insbl %3,%4,%2\n" \
        "   mskbl %1,%4,%1\n" \
        "   or %1,%2,%1\n" \
        "2: stq_u %1,0(%4)\n" \
        "3:\n" \
        ".section __ex_table,\"a\"\n" \
        "   .long 1b - .\n" \
        "   lda $31, 3b-1b(%0)\n" \
        "   .long 2b - .\n" \
        "   lda $31, 3b-2b(%0)\n" \
        ".previous" \
                : "=r"(__pu_err), \
                  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
                : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
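
/*
 * Illustrative sketch (editorial addition): the store-side counterpart of
 * the example above, pairing one access_ok() check with unchecked
 * __put_user() stores.  Names are hypothetical.
 */
static inline long example_put_two_ints(int __user *uptr, int a, int b)
{
        if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(int)))
                return -EFAULT;
        if (__put_user(a, uptr))                /* returns 0 or -EFAULT */
                return -EFAULT;
        return __put_user(b, uptr + 1);
}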
/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized by the linker. */
#ifdef MODULE
#define __module_address(sym) "r"(sym),
#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
#endif

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
        register void * __cu_to __asm__("$6") = to;
        register const void * __cu_from __asm__("$7") = from;
        register long __cu_len __asm__("$0") = len;

        __asm__ __volatile__(
                __module_call(28, 3, __copy_user)
                : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
                : __module_address(__copy_user)
                  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
                : "$1", "$2", "$3", "$4", "$5", "$28", "memory");

        return __cu_len;
}

#define __copy_to_user(to, from, n) \
({ \
        __chk_user_ptr(to); \
        __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
})
#define __copy_from_user(to, from, n) \
({ \
        __chk_user_ptr(from); \
        __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
        if (likely(__access_ok((unsigned long)to, n, get_fs())))
                n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
        return n;
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
        long res = n;

        if (likely(__access_ok((unsigned long)from, n, get_fs())))
                res = __copy_from_user_inatomic(to, from, n);
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
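
/*
 * Illustrative sketch (editorial addition): a handler moving a whole
 * structure across the user/kernel boundary.  copy_from_user() and
 * copy_to_user() return the number of bytes that could NOT be copied,
 * so a non-zero result is treated as -EFAULT.  The struct and function
 * names are hypothetical.
 */
struct example_report { unsigned long count; long status; };

static inline long
example_exchange_report(struct example_report __user *ubuf,
                        struct example_report *kbuf)
{
        if (copy_from_user(kbuf, ubuf, sizeof(*kbuf)))
                return -EFAULT;
        kbuf->count++;                  /* some kernel-side update */
        if (copy_to_user(ubuf, kbuf, sizeof(*kbuf)))
                return -EFAULT;
        return 0;
}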
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
        register void __user * __cl_to __asm__("$6") = to;
        register long __cl_len __asm__("$0") = len;

        __asm__ __volatile__(
                __module_call(28, 2, __do_clear_user)
                : "=r"(__cl_len), "=r"(__cl_to)
                : __module_address(__do_clear_user)
                  "0"(__cl_len), "1"(__cl_to)
                : "$1", "$2", "$3", "$4", "$5", "$28", "memory");

        return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
        if (__access_ok((unsigned long)to, len, get_fs()))
                len = __clear_user(to, len);
        return len;
}
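
/*
 * Editorial note: like the copy routines above, clear_user() and
 * __clear_user() return the number of bytes that could not be zeroed,
 * so 0 means the whole range was cleared.
 */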
#undef __module_address
#undef __module_call

#define user_addr_max() \
        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
        signed int insn;
        union exception_fixup {
                unsigned unit;
                struct {
                        signed int nextinsn : 16;
                        unsigned int errreg : 5;
                        unsigned int valreg : 5;
                } bits;
        } fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc) \
({ \
        if ((_fixup)->fixup.bits.valreg != 31) \
                map_reg((_fixup)->fixup.bits.valreg) = 0; \
        if ((_fixup)->fixup.bits.errreg != 31) \
                map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
        (pc) + (_fixup)->fixup.bits.nextinsn; \
})
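
/*
 * Worked example (editorial addition): in __get_user_32 above the fixup is
 * emitted as
 *
 *      .long 1b - .
 *      lda %0, 2b-1b(%1)
 *
 * so insn records where the faulting ldl sits, nextinsn is the 4-byte
 * distance from the ldl to label 2, valreg is the register holding
 * __gu_val and errreg the register holding __gu_err.  On a fault,
 * fixup_exception() zeroes __gu_val, stores -EFAULT in __gu_err, and
 * resumes at the instruction following the load.
 */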
#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta) \
        do { \
                (a)->fixup.unit = (b)->fixup.unit; \
                (b)->fixup.unit = (tmp).fixup.unit; \
        } while (0)

#endif /* __ALPHA_UACCESS_H */