tlb.c

/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
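
/*
 * A host_vm_change batches up pending host mmap/munmap/mprotect requests so
 * that contiguous, compatible operations can be merged before do_ops()
 * issues them against the host address space.
 */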
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops = { { .type = NONE } }, \
	   .id = &mm->context.id, \
	   .data = NULL, \
	   .index = 0, \
	   .force = force })
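
/*
 * Flush up to 'end' queued operations out to the host, passing 'finished'
 * through to the low-level map()/unmap()/protect() helpers.
 */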
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	return ret;
}
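
/*
 * Queue a host mmap of the physical memory backing 'phys' at virtual address
 * 'virt', merging with the previous operation when it maps an adjacent
 * region from the same fd/offset with the same protection.
 */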
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr = virt,
						     .len = len,
						     .prot = prot,
						     .fd = fd,
						     .offset = offset }
					 } });
	return ret;
}
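
/*
 * Queue a host munmap, refusing to touch the stub pages and merging with a
 * preceding munmap of an adjacent region.
 */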
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if ((addr >= STUB_START) && (addr < STUB_END))
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len = len } } });
	return ret;
}
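
/*
 * Queue a host mprotect, merging with a preceding mprotect of an adjacent
 * region that requests the same protection.
 */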
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len = len,
							 .prot = prot } } });
	return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
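
/*
 * Walk the PTEs in [addr, end), queueing host operations for pages that are
 * marked new or need a protection change, and mark each PTE up to date.
 */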
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
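
/*
 * Walk the page tables for [start_addr, end_addr) and bring the host
 * mappings into line with them.  On failure the current process is killed,
 * since its host address space can no longer be trusted.
 */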
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		/* We are under mmap_sem, release it such that current can terminate */
		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
		do_signal();
	}
}
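
/*
 * Synchronize the host mappings for a kernel (init_mm) address range,
 * unmapping, remapping and reprotecting pages directly instead of batching.
 */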
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
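
/*
 * Flush a single user page: push the current PTE state for 'address' out to
 * the host, killing the process if that fails.
 */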
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}