/* arch/parisc/include/asm/uaccess.h */
  1. #ifndef __PARISC_UACCESS_H
  2. #define __PARISC_UACCESS_H
  3. /*
  4. * User space memory access functions
  5. */
  6. #include <asm/page.h>
  7. #include <asm/cache.h>
  8. #include <asm/errno.h>
  9. #include <asm-generic/uaccess-unaligned.h>
  10. #include <linux/bug.h>
  11. #include <linux/string.h>
  12. #include <linux/thread_info.h>
  13. #define VERIFY_READ 0
  14. #define VERIFY_WRITE 1
  15. #define KERNEL_DS ((mm_segment_t){0})
  16. #define USER_DS ((mm_segment_t){1})
  17. #define segment_eq(a, b) ((a).seg == (b).seg)
  18. #define get_ds() (KERNEL_DS)
  19. #define get_fs() (current_thread_info()->addr_limit)
  20. #define set_fs(x) (current_thread_info()->addr_limit = (x))
  21. /*
  22. * Note that since kernel addresses are in a separate address space on
  23. * parisc, we don't need to do anything for access_ok().
  24. * We just let the page fault handler do the right thing. This also means
  25. * that put_user is the same as __put_user, etc.
  26. */
  27. static inline long access_ok(int type, const void __user * addr,
  28. unsigned long size)
  29. {
  30. return 1;
  31. }
  32. #define put_user __put_user
  33. #define get_user __get_user
  34. #if !defined(CONFIG_64BIT)
  35. #define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
  36. #define STD_USER(x, ptr) __put_user_asm64(x, ptr)
  37. #else
  38. #define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
  39. #define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
  40. #endif
  41. /*
  42. * The exception table contains two values: the first is the relative offset to
  43. * the address of the instruction that is allowed to fault, and the second is
  44. * the relative offset to the address of the fixup routine. Since relative
  45. * addresses are used, 32bit values are sufficient even on 64bit kernel.
  46. */
  47. #define ARCH_HAS_RELATIVE_EXTABLE
  48. struct exception_table_entry {
  49. int insn; /* relative address of insn that is allowed to fault. */
  50. int fixup; /* relative address of fixup routine */
  51. };
  52. #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
  53. ".section __ex_table,\"aw\"\n" \
  54. ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
  55. ".previous\n"
  56. /*
  57. * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
  58. * (with lowest bit set) for which the fault handler in fixup_exception() will
  59. * load -EFAULT into %r8 for a read or write fault, and zeroes the target
  60. * register in case of a read fault in get_user().
  61. */
  62. #define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
  63. ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
  64. /*
  65. * The page fault handler stores, in a per-cpu area, the following information
  66. * if a fixup routine is available.
  67. */
  68. struct exception_data {
  69. unsigned long fault_ip;
  70. unsigned long fault_gp;
  71. unsigned long fault_space;
  72. unsigned long fault_addr;
  73. };
  74. /*
  75. * load_sr2() preloads the space register %%sr2 - based on the value of
  76. * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
  77. * is 0), or with the current value of %%sr3 to access user space (USER_DS)
  78. * memory. The following __get_user_asm() and __put_user_asm() functions have
  79. * %%sr2 hard-coded to access the requested memory.
  80. */
  81. #define load_sr2() \
  82. __asm__(" or,= %0,%%r0,%%r0\n\t" \
  83. " mfsp %%sr3,%0\n\t" \
  84. " mtsp %0,%%sr2\n\t" \
  85. : : "r"(get_fs()) : )
  86. #define __get_user_internal(val, ptr) \
  87. ({ \
  88. register long __gu_err __asm__ ("r8") = 0; \
  89. \
  90. switch (sizeof(*(ptr))) { \
  91. case 1: __get_user_asm(val, "ldb", ptr); break; \
  92. case 2: __get_user_asm(val, "ldh", ptr); break; \
  93. case 4: __get_user_asm(val, "ldw", ptr); break; \
  94. case 8: LDD_USER(val, ptr); break; \
  95. default: BUILD_BUG(); \
  96. } \
  97. \
  98. __gu_err; \
  99. })
  100. #define __get_user(val, ptr) \
  101. ({ \
  102. load_sr2(); \
  103. __get_user_internal(val, ptr); \
  104. })
  105. #define __get_user_asm(val, ldx, ptr) \
  106. { \
  107. register long __gu_val; \
  108. \
  109. __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
  110. "9:\n" \
  111. ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
  112. : "=r"(__gu_val), "=r"(__gu_err) \
  113. : "r"(ptr), "1"(__gu_err)); \
  114. \
  115. (val) = (__force __typeof__(*(ptr))) __gu_val; \
  116. }
  117. #if !defined(CONFIG_64BIT)
  118. #define __get_user_asm64(val, ptr) \
  119. { \
  120. union { \
  121. unsigned long long l; \
  122. __typeof__(*(ptr)) t; \
  123. } __gu_tmp; \
  124. \
  125. __asm__(" copy %%r0,%R0\n" \
  126. "1: ldw 0(%%sr2,%2),%0\n" \
  127. "2: ldw 4(%%sr2,%2),%R0\n" \
  128. "9:\n" \
  129. ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
  130. ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
  131. : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
  132. : "r"(ptr), "1"(__gu_err)); \
  133. \
  134. (val) = __gu_tmp.t; \
  135. }
  136. #endif /* !defined(CONFIG_64BIT) */
  137. #define __put_user_internal(x, ptr) \
  138. ({ \
  139. register long __pu_err __asm__ ("r8") = 0; \
  140. __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
  141. \
  142. switch (sizeof(*(ptr))) { \
  143. case 1: __put_user_asm("stb", __x, ptr); break; \
  144. case 2: __put_user_asm("sth", __x, ptr); break; \
  145. case 4: __put_user_asm("stw", __x, ptr); break; \
  146. case 8: STD_USER(__x, ptr); break; \
  147. default: BUILD_BUG(); \
  148. } \
  149. \
  150. __pu_err; \
  151. })
  152. #define __put_user(x, ptr) \
  153. ({ \
  154. load_sr2(); \
  155. __put_user_internal(x, ptr); \
  156. })
  157. /*
  158. * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  159. * instead of writing. This is because they do not write to any memory
  160. * gcc knows about, so there are no aliasing issues. These macros must
  161. * also be aware that fixups are executed in the context of the fault,
  162. * and any registers used there must be listed as clobbers.
  163. * r8 is already listed as err.
  164. */
  165. #define __put_user_asm(stx, x, ptr) \
  166. __asm__ __volatile__ ( \
  167. "1: " stx " %2,0(%%sr2,%1)\n" \
  168. "9:\n" \
  169. ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
  170. : "=r"(__pu_err) \
  171. : "r"(ptr), "r"(x), "0"(__pu_err))
  172. #if !defined(CONFIG_64BIT)
  173. #define __put_user_asm64(__val, ptr) do { \
  174. __asm__ __volatile__ ( \
  175. "1: stw %2,0(%%sr2,%1)\n" \
  176. "2: stw %R2,4(%%sr2,%1)\n" \
  177. "9:\n" \
  178. ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
  179. ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
  180. : "=r"(__pu_err) \
  181. : "r"(ptr), "r"(__val), "0"(__pu_err)); \
  182. } while (0)
  183. #endif /* !defined(CONFIG_64BIT) */
  184. /*
  185. * Complex access routines -- external declarations
  186. */
  187. extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
  188. extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
  189. extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
  190. extern long strncpy_from_user(char *, const char __user *, long);
  191. extern unsigned lclear_user(void __user *, unsigned long);
  192. extern long lstrnlen_user(const char __user *, long);
  193. /*
  194. * Complex access routines -- macros
  195. */
  196. #define user_addr_max() (~0UL)
  197. #define strnlen_user lstrnlen_user
  198. #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
  199. #define clear_user lclear_user
  200. #define __clear_user lclear_user
  201. unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
  202. unsigned long len);
  203. unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
  204. unsigned long len);
  205. unsigned long copy_in_user(void __user *dst, const void __user *src,
  206. unsigned long len);
  207. #define __copy_in_user copy_in_user
  208. #define __copy_to_user_inatomic __copy_to_user
  209. #define __copy_from_user_inatomic __copy_from_user
  210. extern void __compiletime_error("usercopy buffer size is too small")
  211. __bad_copy_user(void);
  212. static inline void copy_user_overflow(int size, unsigned long count)
  213. {
  214. WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
  215. }
  216. static __always_inline unsigned long __must_check
  217. copy_from_user(void *to, const void __user *from, unsigned long n)
  218. {
  219. int sz = __compiletime_object_size(to);
  220. unsigned long ret = n;
  221. if (likely(sz < 0 || sz >= n)) {
  222. check_object_size(to, n, false);
  223. ret = __copy_from_user(to, from, n);
  224. } else if (!__builtin_constant_p(n))
  225. copy_user_overflow(sz, n);
  226. else
  227. __bad_copy_user();
  228. if (unlikely(ret))
  229. memset(to + (n - ret), 0, ret);
  230. return ret;
  231. }
  232. static __always_inline unsigned long __must_check
  233. copy_to_user(void __user *to, const void *from, unsigned long n)
  234. {
  235. int sz = __compiletime_object_size(from);
  236. if (likely(sz < 0 || sz >= n)) {
  237. check_object_size(from, n, true);
  238. n = __copy_to_user(to, from, n);
  239. } else if (!__builtin_constant_p(n))
  240. copy_user_overflow(sz, n);
  241. else
  242. __bad_copy_user();
  243. return n;
  244. }
  245. struct pt_regs;
  246. int fixup_exception(struct pt_regs *regs);
  247. #endif /* __PARISC_UACCESS_H */