#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define __addr_ok(addr) \
	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
/*
 * __access_ok: Check that the range [addr, addr + size) fits below the
 * current address limit.
 *
 * Strictly, this needs 33-bit arithmetic, since addr + size can carry
 * out of 32 bits:
 *
 * sum := addr + size; carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __access_ok(addr, size)		\
	(__addr_ok((addr) + (size)))
#define access_ok(type, addr, size)	\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long __force)(addr), (size)))

#define user_addr_max()	(current_thread_info()->addr_limit.seg)
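
/*
 * Worked example (added for illustration; the limit value 0x80000000 is
 * hypothetical): with addr_limit.seg == 0x80000000, addr = 0x7ffffffc
 * and size = 8 give addr + size = 0x80000004 >= 0x80000000, so the
 * range is rejected even though addr itself is below the limit.
 */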

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
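
/*
 * Example (illustrative only; "uptr" is a hypothetical int __user *):
 * both macros return 0 on success and -EFAULT on a bad pointer.
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */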

/*
 * The "__xxx" versions do no address space checking; they are useful
 * when doing multiple accesses to the same area (the caller has to do
 * the checks by hand with "access_ok()").
 */
#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
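
/*
 * Example (illustrative only; "uptr" is a hypothetical u32 __user *
 * pointing at two adjacent words): one explicit access_ok() covers the
 * whole range, so the individual fetches can skip the recheck.
 *
 *	u32 a, b;
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(a, &uptr[0]) || __get_user(b, &uptr[1]))
 *		return -EFAULT;
 */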

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	if (likely(access_ok(VERIFY_READ, __gu_addr, (size))))	\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __put_user_nocheck(x,ptr,size)				\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = x;			\
	__chk_user_ptr(ptr);					\
	__put_user_size(__pu_val, __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = x;			\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))	\
		__put_user_size(__pu_val, __pu_addr, (size),	\
				__pu_err);			\
	__pu_err;						\
})

#ifdef CONFIG_SUPERH32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
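
/*
 * Example (illustrative only; "ubuf" is a hypothetical user string).
 * A negative return means a fault; a return equal to the buffer size
 * means the source string did not fit (and was not NUL-terminated).
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, ubuf, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */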

/* Generic arbitrary sized copy. Returns the number of bytes NOT copied. */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user((__force void *)to, from, n);
}

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user

/*
 * Clear the area and return the number of bytes NOT cleared
 * (zero unless a fault occurred part-way).
 */
__kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n)					\
({								\
	void __user * __cl_addr = (addr);			\
	unsigned long __cl_size = (n);				\
								\
	if (__cl_size && access_ok(VERIFY_WRITE,		\
		((unsigned long)(__cl_addr)), __cl_size))	\
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
								\
	__cl_size;						\
})
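
/*
 * Example (illustrative only; "ubuf" and "len" are hypothetical): zero
 * a user buffer, failing if any bytes were left unwritten.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */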

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long __copy_from = (unsigned long) from;
	__kernel_size_t __copy_size = (__kernel_size_t) n;

	if (__copy_size && __access_ok(__copy_from, __copy_size))
		__copy_size = __copy_user(to, from, __copy_size);

	/* Zero the uncopied tail so the caller never sees uninitialized
	 * kernel memory in the destination buffer. */
	if (unlikely(__copy_size))
		memset(to + (n - __copy_size), 0, __copy_size);

	return __copy_size;
}

static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long __copy_to = (unsigned long) to;
	__kernel_size_t __copy_size = (__kernel_size_t) n;

	if (__copy_size && __access_ok(__copy_to, __copy_size))
		return __copy_user(to, from, __copy_size);

	return __copy_size;
}
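
/*
 * Example (illustrative only; "uarg" is a hypothetical __user pointer
 * and "struct foo" a hypothetical type): a non-zero return means some
 * bytes could not be transferred.
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */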

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything goes well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or TLB entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
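
/*
 * Sketch (illustrative only, not the exact asm this port emits): a
 * faultable load at 1: paired with fixup code at 3: via a hand-written
 * __ex_table entry. On a fault, the fault handler looks up 1: in the
 * table and rewrites the PC to 3:, which substitutes 0 and resumes
 * at 2:.
 *
 *	1:	mov.l	@r4, r0		! may fault
 *	2:	...			! execution resumes here
 *		.section .fixup, "ax"
 *	3:	mov	#0, r0		! fixup: substitute 0
 *		bra	2b
 *		 nop
 *		.previous
 *		.section __ex_table, "a"
 *		.long	1b, 3b		! faulting insn, fixup address
 *		.previous
 */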

#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif

int fixup_exception(struct pt_regs *regs);
const struct exception_table_entry *search_exception_tables(unsigned long addr);

extern void *set_exception_table_vec(unsigned int vec, void *handler);
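
/*
 * Note (added for clarity): SuperH exception event codes are spaced
 * 0x20 apart, so "evt >> 5" below converts an EXPEVT/INTEVT event code
 * into the corresponding exception vector index.
 */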
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

struct mem_access {
	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int, unsigned long address);

#endif /* __ASM_SH_UACCESS_H */