maccess.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *  Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

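/*
 * Write a chunk of at most eight bytes that lies within one aligned
 * eight byte word at @dst: read the original word, merge in the
 * requested bytes at the proper offset and store the result back with
 * the sturg instruction, which bypasses DAT and page table write
 * protection. Returns the number of bytes written.
 */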
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
        unsigned long aligned, offset, count;
        char tmp[8];

        aligned = (unsigned long) dst & ~7UL;
        offset = (unsigned long) dst & 7UL;
        size = min(8UL - offset, size);
        count = size - 1;
        asm volatile(
                " bras 1,0f\n"
                " mvc 0(1,%4),0(%5)\n"
                "0: mvc 0(8,%3),0(%0)\n"
                " ex %1,0(1)\n"
                " lg %1,0(%3)\n"
                " lra %0,0(%0)\n"
                " sturg %1,%0\n"
                : "+&a" (aligned), "+&a" (count), "=m" (tmp)
                : "a" (&tmp), "a" (&tmp[offset]), "a" (src)
                : "cc", "memory", "1");
        return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 *
 * Note: this means that this function may not be called concurrently on
 * several cpus with overlapping words, since this may potentially
 * cause data corruption.
 */
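/*
 * Illustrative call only (hypothetical variable names), e.g. from a code
 * patching path that needs to modify write-protected kernel text:
 *
 *	s390_kernel_write(insn_ptr, &new_insn, sizeof(new_insn));
 */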
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
        long copied;

        while (size) {
                copied = s390_kernel_write_odd(dst, src, size);
                dst += copied;
                src += copied;
                size -= copied;
        }
}

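/*
 * Low level helper for memcpy_real(): copy memory with the mvcle
 * instruction. The caller (memcpy_real() below) runs this with DAT
 * disabled, so source and destination are treated as real addresses.
 * Returns 0 on success or -EFAULT if an exception occurs during the copy.
 */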
static int __memcpy_real(void *dest, void *src, size_t count)
{
        register unsigned long _dest asm("2") = (unsigned long) dest;
        register unsigned long _len1 asm("3") = (unsigned long) count;
        register unsigned long _src asm("4") = (unsigned long) src;
        register unsigned long _len2 asm("5") = (unsigned long) count;
        int rc = -EFAULT;

        asm volatile (
                "0: mvcle %1,%2,0x0\n"
                "1: jo 0b\n"
                " lhi %0,0x0\n"
                "2:\n"
                EX_TABLE(1b,2b)
                : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
                  "+d" (_len2), "=m" (*((long *) dest))
                : "m" (*((long *) src))
                : "cc", "memory");
        return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
        int irqs_disabled, rc;
        unsigned long flags;

        if (!count)
                return 0;
        flags = __arch_local_irq_stnsm(0xf8UL); /* disable DAT and interrupts */
        irqs_disabled = arch_irqs_disabled_flags(flags);
        if (!irqs_disabled)
                trace_hardirqs_off();
        rc = __memcpy_real(dest, src, count);
        if (!irqs_disabled)
                trace_hardirqs_on();
        __arch_local_irq_ssm(flags);
        return rc;
}

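/*
 * Note on memcpy_real() above: interrupts are disabled together with DAT
 * (via the 0xf8 mask) for the duration of the copy, since interrupt
 * handlers cannot run with address translation turned off. The irq
 * tracing state (trace_hardirqs_off/on) is only updated if interrupts
 * were not already disabled by the caller.
 */
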
/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
        unsigned long cr0, flags, prefix;

        flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        prefix = store_prefix();
        if (prefix) {
                local_mcck_disable();
                set_prefix(0);  /* lowcore range now accesses absolute memory */
                memcpy(dest, src, count);
                set_prefix(prefix);
                local_mcck_enable();
        } else {
                memcpy(dest, src, count);
        }
        __ctl_load(cr0, 0, 0);
        arch_local_irq_restore(flags);
}

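/*
 * Note on memcpy_absolute() above: machine checks are disabled while the
 * prefix register is temporarily zero, because the CPU's lowcore is not
 * reachable at its usual location during that window.
 */
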
/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
        int offs = 0, size, rc;
        char *buf;

        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (memcpy_real(buf, src + offs, size))
                        goto out;
                if (copy_to_user(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}

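/*
 * Note on copy_to_user_real() above: the bounce buffer is needed because
 * memcpy_real() runs with DAT disabled and therefore cannot write to the
 * user address space directly; data is staged one page at a time and then
 * copied to user space with copy_to_user().
 */
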
/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
        unsigned long lc;
        int cpu;

        if (addr < sizeof(struct lowcore))
                return 1;
        for_each_online_cpu(cpu) {
                lc = (unsigned long) lowcore_ptr[cpu];
                if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
                        continue;
                return 1;
        }
        return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
        void *bounce = (void *) addr;
        unsigned long size;

        get_online_cpus();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
        return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
        if ((void *) addr != buf)
                free_page((unsigned long) buf);
}