uaccess.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445
// SPDX-License-Identifier: GPL-2.0
/*
 * Standard user space access functions based on mvcp/mvcs and doing
 * interesting things in the secondary space mode.
 *
 * Copyright IBM Corp. 2006,2014
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
  10. #include <linux/jump_label.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/export.h>
  13. #include <linux/errno.h>
  14. #include <linux/mm.h>
  15. #include <asm/mmu_context.h>
  16. #include <asm/facility.h>
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
/* Jump label: enabled once at boot if facility 27 (MVCOS) is installed. */
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

/* Probe for the move-with-optional-specifications facility early in boot. */
static int __init uaccess_init(void)
{
        if (test_facility(27))
                static_branch_enable(&have_mvcos);
        return 0;
}
early_initcall(uaccess_init);

/*
 * Returns non-zero if user copies should use the MVCOS instruction,
 * zero if the mvcp/mvcs based fallback paths must be used instead.
 */
static inline int copy_with_mvcos(void)
{
        if (static_branch_likely(&have_mvcos))
                return 1;
        return 0;
}
#else
/* On z10 and newer machines MVCOS is always available - no runtime check. */
static inline int copy_with_mvcos(void)
{
        return 1;
}
#endif
/*
 * Set the current task's address space limit and load the matching
 * address space control elements (ASCEs) into control registers 1
 * (primary space) and, for the SACF variants, 7 (secondary space).
 */
void set_fs(mm_segment_t fs)
{
        current->thread.mm_segment = fs;
        if (fs == USER_DS) {
                /* Primary space addresses user memory. */
                __ctl_load(S390_lowcore.user_asce, 1, 1);
                clear_cpu_flag(CIF_ASCE_PRIMARY);
        } else {
                /* Primary space addresses kernel memory. */
                __ctl_load(S390_lowcore.kernel_asce, 1, 1);
                set_cpu_flag(CIF_ASCE_PRIMARY);
        }
        if (fs & 1) {
                /* Low bit marks a SACF mode: also set up the secondary ASCE. */
                if (fs == USER_DS_SACF)
                        __ctl_load(S390_lowcore.user_asce, 7, 7);
                else
                        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
                set_cpu_flag(CIF_ASCE_SECONDARY);
        }
}
EXPORT_SYMBOL(set_fs);
/*
 * Switch the current task into a SACF uaccess mode: mark mm_segment with
 * the SACF bit and make sure CR1 (primary) and CR7 (secondary) hold the
 * ASCEs the sacf-based copy routines expect. Returns the previous
 * mm_segment so the caller can restore it via disable_sacf_uaccess().
 */
mm_segment_t enable_sacf_uaccess(void)
{
        mm_segment_t old_fs;
        unsigned long asce, cr;

        old_fs = current->thread.mm_segment;
        if (old_fs & 1)
                return old_fs;  /* already in a SACF mode, nothing to do */
        current->thread.mm_segment |= 1;
        asce = S390_lowcore.kernel_asce;
        if (likely(old_fs == USER_DS)) {
                /* Primary space must address the kernel while in SACF mode. */
                __ctl_store(cr, 1, 1);
                if (cr != S390_lowcore.kernel_asce) {
                        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
                        set_cpu_flag(CIF_ASCE_PRIMARY);
                }
                /* ... and secondary space addresses the user. */
                asce = S390_lowcore.user_asce;
        }
        /* Only reload CR7 if it does not already hold the wanted ASCE. */
        __ctl_store(cr, 7, 7);
        if (cr != asce) {
                __ctl_load(asce, 7, 7);
                set_cpu_flag(CIF_ASCE_SECONDARY);
        }
        return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);
/*
 * Leave SACF uaccess mode again: restore the mm_segment returned by
 * enable_sacf_uaccess() and, when returning to plain USER_DS on a
 * machine with facility 27, point the primary space back at user memory.
 */
void disable_sacf_uaccess(mm_segment_t old_fs)
{
        current->thread.mm_segment = old_fs;
        if (old_fs == USER_DS && test_facility(27)) {
                __ctl_load(S390_lowcore.user_asce, 1, 1);
                clear_cpu_flag(CIF_ASCE_PRIMARY);
        }
}
EXPORT_SYMBOL(disable_sacf_uaccess);
/*
 * Copy from user space using the MVCOS instruction (emitted via .insn
 * because old assemblers lack the mnemonic). Register 0 = 0x01 selects
 * the access mode for the source operand. On a fault the exception
 * table entries redirect to code that retries the copy page-wise up to
 * the faulting page boundary. Returns the number of bytes NOT copied
 * (0 on complete success).
 */
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
                                                 unsigned long size)
{
        register unsigned long reg0 asm("0") = 0x01UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;  /* page mask, also used to step in 4K chunks */
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
                "6: jz 4f\n"
                "1: algr %0,%3\n"
                " slgr %1,%3\n"
                " slgr %2,%3\n"
                " j 0b\n"
                "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
                " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
                " slgr %4,%1\n"
                " clgr %0,%4\n" /* copy crosses next page boundary? */
                " jnh 5f\n"
                "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
                "7: slgr %0,%4\n"
                " j 5f\n"
                "4: slgr %0,%0\n"
                "5:\n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}
/*
 * Fallback copy from user space for machines without MVCOS: switch to
 * the secondary-space SACF mode and use MVCP (move to primary) in
 * 256-byte chunks. On a fault the copy is retried byte-accurately up
 * to the faulting page boundary. Returns the number of bytes NOT
 * copied (0 on complete success).
 */
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
                                                unsigned long size)
{
        unsigned long tmp1, tmp2;
        mm_segment_t old_fs;

        old_fs = enable_sacf_uaccess();
        tmp1 = -256UL;  /* chunk step; reloaded with -4096 in the fixup path */
        asm volatile(
                " sacf 0\n"
                "0: mvcp 0(%0,%2),0(%1),%3\n"
                "7: jz 5f\n"
                "1: algr %0,%3\n"
                " la %1,256(%1)\n"
                " la %2,256(%2)\n"
                "2: mvcp 0(%0,%2),0(%1),%3\n"
                "8: jnz 1b\n"
                " j 5f\n"
                "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
                " lghi %3,-4096\n"
                " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
                " slgr %4,%1\n"
                " clgr %0,%4\n" /* copy crosses next page boundary? */
                " jnh 6f\n"
                "4: mvcp 0(%4,%2),0(%1),%3\n"
                "9: slgr %0,%4\n"
                " j 6f\n"
                "5: slgr %0,%0\n"
                "6: sacf 768\n"
                EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
                EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : : "cc", "memory");
        disable_sacf_uaccess(old_fs);
        return size;
}
  154. unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
  155. {
  156. if (copy_with_mvcos())
  157. return copy_from_user_mvcos(to, from, n);
  158. return copy_from_user_mvcp(to, from, n);
  159. }
  160. EXPORT_SYMBOL(raw_copy_from_user);
/*
 * Copy to user space using the MVCOS instruction. Register 0 =
 * 0x010000 selects the access mode for the destination operand.
 * Structure mirrors copy_from_user_mvcos(): on a fault, retry up to
 * the faulting page boundary. Returns the number of bytes NOT copied
 * (0 on complete success).
 */
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
                                               unsigned long size)
{
        register unsigned long reg0 asm("0") = 0x010000UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;  /* page mask, also used to step in 4K chunks */
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
                "6: jz 4f\n"
                "1: algr %0,%3\n"
                " slgr %1,%3\n"
                " slgr %2,%3\n"
                " j 0b\n"
                "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
                " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
                " slgr %4,%1\n"
                " clgr %0,%4\n" /* copy crosses next page boundary? */
                " jnh 5f\n"
                "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
                "7: slgr %0,%4\n"
                " j 5f\n"
                "4: slgr %0,%0\n"
                "5:\n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}
/*
 * Fallback copy to user space for machines without MVCOS: switch to
 * the secondary-space SACF mode and use MVCS (move to secondary) in
 * 256-byte chunks; on fault retry byte-accurately up to the faulting
 * page boundary. Returns the number of bytes NOT copied.
 */
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
                                              unsigned long size)
{
        unsigned long tmp1, tmp2;
        mm_segment_t old_fs;

        old_fs = enable_sacf_uaccess();
        tmp1 = -256UL;  /* chunk step; reloaded with -4096 in the fixup path */
        asm volatile(
                " sacf 0\n"
                "0: mvcs 0(%0,%1),0(%2),%3\n"
                "7: jz 5f\n"
                "1: algr %0,%3\n"
                " la %1,256(%1)\n"
                " la %2,256(%2)\n"
                "2: mvcs 0(%0,%1),0(%2),%3\n"
                "8: jnz 1b\n"
                " j 5f\n"
                "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
                " lghi %3,-4096\n"
                " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
                " slgr %4,%1\n"
                " clgr %0,%4\n" /* copy crosses next page boundary? */
                " jnh 6f\n"
                "4: mvcs 0(%4,%1),0(%2),%3\n"
                "9: slgr %0,%4\n"
                " j 6f\n"
                "5: slgr %0,%0\n"
                "6: sacf 768\n"
                EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
                EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : : "cc", "memory");
        disable_sacf_uaccess(old_fs);
        return size;
}
  224. unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
  225. {
  226. if (copy_with_mvcos())
  227. return copy_to_user_mvcos(to, from, n);
  228. return copy_to_user_mvcs(to, from, n);
  229. }
  230. EXPORT_SYMBOL(raw_copy_to_user);
/*
 * Copy between two user space buffers with MVCOS. Register 0 =
 * 0x010001 selects the access mode for both operands. Unlike the
 * other mvcos helpers there is no page-accurate retry: on a fault the
 * remaining byte count is simply returned.
 */
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
                                               unsigned long size)
{
        register unsigned long reg0 asm("0") = 0x010001UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        /* FIXME: copy with reduced length. */
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
                " jz 2f\n"
                "1: algr %0,%3\n"
                " slgr %1,%3\n"
                " slgr %2,%3\n"
                " j 0b\n"
                "2:slgr %0,%0\n"
                "3: \n"
                EX_TABLE(0b,3b)
                : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}
/*
 * Fallback copy between two user space buffers: run in SACF mode with
 * both operands in the secondary space and copy with MVC, 256 bytes at
 * a time plus an EX-executed MVC for the remainder. The "bras %3,3f"
 * trick loads the address of the 1-byte MVC template used by "ex".
 * Returns the number of bytes NOT copied.
 */
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
                                             unsigned long size)
{
        mm_segment_t old_fs;
        unsigned long tmp1;

        old_fs = enable_sacf_uaccess();
        asm volatile(
                " sacf 256\n"
                " aghi %0,-1\n"
                " jo 5f\n"
                " bras %3,3f\n"
                "0: aghi %0,257\n"
                "1: mvc 0(1,%1),0(%2)\n"
                " la %1,1(%1)\n"
                " la %2,1(%2)\n"
                " aghi %0,-1\n"
                " jnz 1b\n"
                " j 5f\n"
                "2: mvc 0(256,%1),0(%2)\n"
                " la %1,256(%1)\n"
                " la %2,256(%2)\n"
                "3: aghi %0,-256\n"
                " jnm 2b\n"
                "4: ex %0,1b-0b(%3)\n"
                "5: slgr %0,%0\n"
                "6: sacf 768\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
                : : "cc", "memory");
        disable_sacf_uaccess(old_fs);
        return size;
}
  284. unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
  285. {
  286. if (copy_with_mvcos())
  287. return copy_in_user_mvcos(to, from, n);
  288. return copy_in_user_mvc(to, from, n);
  289. }
  290. EXPORT_SYMBOL(raw_copy_in_user);
/*
 * Clear user space memory with MVCOS by copying from empty_zero_page.
 * Register 0 = 0x010000 selects the access mode for the destination.
 * On a fault, retry up to the faulting page boundary. Returns the
 * number of bytes NOT cleared (0 on complete success).
 */
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
        register unsigned long reg0 asm("0") = 0x010000UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;  /* page mask, also used to step in 4K chunks */
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
                " jz 4f\n"
                "1: algr %0,%2\n"
                " slgr %1,%2\n"
                " j 0b\n"
                "2: la %3,4095(%1)\n"/* %3 = to + 4095 */
                " nr %3,%2\n" /* %3 = (to + 4095) & -4096 */
                " slgr %3,%1\n"
                " clgr %0,%3\n" /* copy crosses next page boundary? */
                " jnh 5f\n"
                "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
                " slgr %0,%3\n"
                " j 5f\n"
                "4: slgr %0,%0\n"
                "5:\n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b)
                : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
                : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
        return size;
}
/*
 * Fallback clear of user space memory: run in SACF mode and zero the
 * buffer with XC (exclusive-or with itself), 256 bytes at a time plus
 * an EX-executed XC for the remainder. The "bras %3,3f" loads the
 * address of the 1-byte XC template used by "ex". On a fault, retry
 * up to the faulting page boundary. Returns the number of bytes NOT
 * cleared.
 */
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
        mm_segment_t old_fs;
        unsigned long tmp1, tmp2;

        old_fs = enable_sacf_uaccess();
        asm volatile(
                " sacf 256\n"
                " aghi %0,-1\n"
                " jo 5f\n"
                " bras %3,3f\n"
                " xc 0(1,%1),0(%1)\n"
                "0: aghi %0,257\n"
                " la %2,255(%1)\n" /* %2 = ptr + 255 */
                " srl %2,12\n"
                " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
                " slgr %2,%1\n"
                " clgr %0,%2\n" /* clear crosses next page boundary? */
                " jnh 5f\n"
                " aghi %2,-1\n"
                "1: ex %2,0(%3)\n"
                " aghi %2,1\n"
                " slgr %0,%2\n"
                " j 5f\n"
                "2: xc 0(256,%1),0(%1)\n"
                " la %1,256(%1)\n"
                "3: aghi %0,-256\n"
                " jnm 2b\n"
                "4: ex %0,0(%3)\n"
                "5: slgr %0,%0\n"
                "6: sacf 768\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
                : : "cc", "memory");
        disable_sacf_uaccess(old_fs);
        return size;
}
  353. unsigned long __clear_user(void __user *to, unsigned long size)
  354. {
  355. if (copy_with_mvcos())
  356. return clear_user_mvcos(to, size);
  357. return clear_user_xc(to, size);
  358. }
  359. EXPORT_SYMBOL(__clear_user);
/*
 * Scan a user space string with SRST (search string) for the
 * terminating zero byte (the search character is taken from register
 * 0, preset to 0). Expects the caller to have set up SACF uaccess
 * mode already (enable_sacf_uaccess()). Returns the string length
 * including the terminator, or 0 if a fault occurred before the scan
 * finished.
 */
static inline unsigned long strnlen_user_srst(const char __user *src,
                                              unsigned long size)
{
        register unsigned long reg0 asm("0") = 0;
        unsigned long tmp1, tmp2;

        asm volatile(
                " la %2,0(%1)\n"
                " la %3,0(%0,%1)\n"
                " slgr %0,%0\n"
                " sacf 256\n"
                "0: srst %3,%2\n"
                " jo 0b\n"
                " la %0,1(%3)\n" /* strnlen_user results includes \0 */
                " slgr %0,%1\n"
                "1: sacf 768\n"
                EX_TABLE(0b,1b)
                : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}
  380. unsigned long __strnlen_user(const char __user *src, unsigned long size)
  381. {
  382. mm_segment_t old_fs;
  383. unsigned long len;
  384. if (unlikely(!size))
  385. return 0;
  386. old_fs = enable_sacf_uaccess();
  387. len = strnlen_user_srst(src, size);
  388. disable_sacf_uaccess(old_fs);
  389. return len;
  390. }
  391. EXPORT_SYMBOL(__strnlen_user);
  392. long __strncpy_from_user(char *dst, const char __user *src, long size)
  393. {
  394. size_t done, len, offset, len_str;
  395. if (unlikely(size <= 0))
  396. return 0;
  397. done = 0;
  398. do {
  399. offset = (size_t)src & (L1_CACHE_BYTES - 1);
  400. len = min(size - done, L1_CACHE_BYTES - offset);
  401. if (copy_from_user(dst, src, len))
  402. return -EFAULT;
  403. len_str = strnlen(dst, len);
  404. done += len_str;
  405. src += len_str;
  406. dst += len_str;
  407. } while ((len_str == len) && (done < size));
  408. return done;
  409. }
  410. EXPORT_SYMBOL(__strncpy_from_user);