ldt.c

/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
        mm_context_t *pc;

        if (current->active_mm != current_mm)
                return;

        pc = &current->active_mm->context;
        set_ldt(pc->ldt->entries, pc->ldt->size);
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(int size)
{
        struct ldt_struct *new_ldt;
        int alloc_size;

        if (size > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = size * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        new_ldt->size = size;
        return new_ldt;
}
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->size);
}
/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
                        struct ldt_struct *ldt)
{
        /* Synchronizes with smp_read_barrier_depends in load_mm_ldt. */
        barrier();
        ACCESS_ONCE(current_mm->context.ldt) = ldt;

        /* Activate the LDT for all CPUs using current_mm. */
        smp_call_function_many(mm_cpumask(current_mm), flush_ldt, current_mm,
                               true);
        local_irq_disable();
        flush_ldt(current_mm);
        local_irq_enable();
}
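
/* Mirrors alloc_ldt_struct(): entry arrays larger than PAGE_SIZE came from vzalloc(). */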
static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->size);
        if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree(ldt->entries);
        else
                kfree(ldt->entries);
        kfree(ldt);
}
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        struct mm_struct *old_mm;
        int retval = 0;

        mutex_init(&mm->context.lock);

        old_mm = current->mm;
        if (!old_mm) {
                mm->context.ldt = NULL;
                return 0;
        }

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt) {
                mm->context.ldt = NULL;
                goto out_unlock;
        }

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->size * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}
/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}
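
/*
 * Handler for modify_ldt() func 0: copy up to bytecount bytes of the
 * current LDT to userspace, zero-fill anything past the table's end,
 * and return bytecount (or 0 if this mm has no LDT).
 */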
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        int retval;
        unsigned long size;
        struct mm_struct *mm = current->mm;

        mutex_lock(&mm->context.lock);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        size = mm->context.ldt->size * LDT_ENTRY_SIZE;
        if (size > bytecount)
                size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + size, bytecount - size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        mutex_unlock(&mm->context.lock);
        return retval;
}
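
/* Handler for modify_ldt() func 2: the "default" LDT reads back as zeros. */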
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif

        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}
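
/*
 * Handler for modify_ldt() func 1 (oldmode=1) and func 0x11 (oldmode=0):
 * an empty descriptor (or, in old mode, zero base and limit) clears the
 * entry; otherwise the user_desc is converted with fill_ldt() and, in old
 * mode, the AVL bit is forced to zero.  The updated table is built as a
 * copy, published with install_ldt(), and the previous one is freed.
 */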
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct desc_struct ldt;
        int error;
        struct user_desc ldt_info;
        int oldsize, newsize;
        struct ldt_struct *new_ldt, *old_ldt;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        mutex_lock(&mm->context.lock);

        old_ldt = mm->context.ldt;
        oldsize = old_ldt ? old_ldt->size : 0;
        newsize = max((int)(ldt_info.entry_number + 1), oldsize);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(newsize);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        install_ldt(mm, new_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        mutex_unlock(&mm->context.lock);
out:
        return error;
}
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
                              unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        return ret;
}
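
The switch in sys_modify_ldt() above is the whole user-facing API: func 0 reads the LDT, func 2 reads the (all-zero) default LDT, and func 1 / 0x11 write one entry in the old and new descriptor formats. As a rough illustration of the caller's side, here is a minimal userspace sketch; it is not part of ldt.c, and the helper name modify_ldt_call plus the chosen descriptor values are illustrative only. modify_ldt has no glibc wrapper, so it is invoked through syscall(2).

/*
 * Userspace sketch: install one LDT entry via func 0x11, then read the
 * table back via func 0.  Error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>        /* struct user_desc, LDT_ENTRIES, LDT_ENTRY_SIZE */

static long modify_ldt_call(int func, void *ptr, unsigned long bytecount)
{
        return syscall(SYS_modify_ldt, func, ptr, bytecount);
}

int main(void)
{
        static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
        struct user_desc desc;
        long ret;

        /* func 0x11: install a small 32-bit data segment at LDT slot 0. */
        memset(&desc, 0, sizeof(desc));
        desc.entry_number   = 0;
        desc.base_addr      = 0;        /* illustrative values only */
        desc.limit          = 0xfff;
        desc.seg_32bit      = 1;
        desc.contents       = MODIFY_LDT_CONTENTS_DATA;
        desc.useable        = 1;

        ret = modify_ldt_call(0x11, &desc, sizeof(desc));
        if (ret != 0)
                perror("modify_ldt(write)");

        /* func 0: read the LDT back; the kernel zero-fills past its size. */
        ret = modify_ldt_call(0, buf, sizeof(buf));
        printf("read_ldt returned %ld bytes\n", ret);

        return 0;
}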