pmap_64.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * 64-bit pmap:
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff : user process
 * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff : unused
 * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : mmio region
 * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : direct map
 * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : KVA
 */
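
/*
 * Illustrative example: under this layout a physical address such as
 * 0x2000_0000 would be reachable through the direct map at
 * 0xc000_0000_2000_0000, i.e. at the direct-map base plus the PA.
 */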

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define PRI0ptrX "016lx"

/**************************************************************************/
/* PMAP */
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;

/*
 * Base of the pmap_mapdev() region. On 32-bit it immediately follows the
 * userspace address range. On 64-bit it's far above, at (1 << 63), and
 * ranges up to the DMAP, giving 62 bits of PA allowed. This is far larger
 * than the widest Book-E address bus; the e6500 has a 40-bit PA space. This
 * allows us to map akin to the DMAP, with addresses identical to the PA,
 * offset by the base.
 */
#define VM_MAPDEV_BASE 0x8000000000000000
#define VM_MAPDEV_PA_MAX 0x4000000000000000 /* Don't encroach on DMAP */
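
/*
 * Illustrative example: with the scheme described above, a device at
 * physical address 0xf100_0000 maps at VM_MAPDEV_BASE + 0xf100_0000 =
 * 0x8000_0000_f100_0000, its VA identical to the PA offset by the base.
 */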

static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define PMAP_ROOT_SIZE (sizeof(pte_t****) * PG_ROOT_NENTRIES)
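
/*
 * The 64-bit pmap keeps a four-level tree of page table pages:
 *
 *	pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]
 *
 * pte_find() below walks exactly this chain and returns NULL as soon as an
 * intermediate level is missing.
 */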

static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
	vm_page_t m;
	int req;

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
		if (nosleep)
			return (0);

		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}

	if (!(m->flags & PG_ZERO))
		/* Zero whole ptbl. */
		mmu_booke_zero_page(m);

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

/* Initialize pool of kva ptbl buffers. */
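/*
 * Nothing to do here for the 64-bit pmap: page table pages are accessed
 * through the direct map (see mmu_booke_alloc_page() above), so no KVA
 * buffer pool is needed.
 */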
static void
ptbl_init(void)
{
}

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	pte_t ***pdir_l1;
	pte_t **pdir;
	pte_t *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
	if (pdir_l1 == NULL)
		return (NULL);
	pdir = pdir_l1[PDIR_L1_IDX(va)];
	if (pdir == NULL)
		return (NULL);
	ptbl = pdir[PDIR_IDX(va)];

	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t va;
	pte_t ****pm_root;
	pte_t *pte;
	unsigned long i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/* truncate the VA for later. */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}
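
/*
 * Drop a wiring reference on a page table page and free it if that was the
 * last reference.  Returns true when the page was freed.
 */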
static bool
unhold_free_page(pmap_t pmap, vm_page_t m)
{
	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (true);
	}

	return (false);
}
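
/*
 * Look up the page table page at ptr_tbl[index], allocating and installing a
 * zeroed page if none is present.  If another thread installed a page while
 * we slept in the allocator, the freshly allocated page is released and the
 * existing one is used.  *isnew reports whether this call installed the
 * entry; hold_parent requests an extra reference on the parent table's page.
 */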
static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t page;
	vm_page_t m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}
	*isnew = false;

	return (page);
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int pg_root_idx = PG_ROOT_IDX(va);
	unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int pdir_idx = PDIR_IDX(va);
	vm_offset_t pdir_l1, pdir, ptbl;

	/* When holding a parent, no need to hold the root index pages. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);

	return ((pte_t *)ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
	pte_t *ptbl;
	vm_page_t m;
	u_int pg_root_idx;
	pte_t ***pdir_l1;
	u_int pdir_l1_idx;
	pte_t **pdir;
	u_int pdir_idx;

	pg_root_idx = PG_ROOT_IDX(va);
	pdir_l1_idx = PDIR_L1_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	pdir_l1 = pmap->pm_root[pg_root_idx];
	pdir = pdir_l1[pdir_l1_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));

	if (!unhold_free_page(pmap, m))
		return (0);

	pdir[pdir_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));

	if (!unhold_free_page(pmap, m))
		return (1);

	pdir_l1[pdir_l1_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));

	if (!unhold_free_page(pmap, m))
		return (1);
	pmap->pm_root[pg_root_idx] = NULL;

	return (1);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->ref_count++;
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t m;
	pte_t *pte;

	pte = pte_find(pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
	    __func__, (uintmax_t)va, pmap));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(pmap, va));
	}
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;
	bool is_new;

	/* Get the page directory pointer. */
	ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
	if (ptbl == NULL) {
		KASSERT(nosleep, ("nosleep and NULL ptbl"));
		return (ENOMEM);
	}
	if (is_new) {
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, ptbl);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Allocate pte entries to manage (addr & mask) to (addr & mask) + size */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t *pte;
	vm_size_t kva_size;
	int kernel_pdirs, kernel_pgtbls, pdir_l1s;
	vm_offset_t va, l1_va, pdir_va, ptbl_va;
	int i, j, k;

	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_root = kernel_ptbl_root;
	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);

	/* Initialize kernel pdir */
	l1_va = (vm_offset_t)kernel_ptbl_root +
	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;
	if (bootverbose) {
		printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
		printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
		printf("pdir_va: %#lx (%d entries)\n", pdir_va, kernel_pdirs);
		printf("ptbl_va: %#lx (%d entries)\n", ptbl_va, kernel_pgtbls);
	}

	va = VM_MIN_KERNEL_ADDRESS;
	for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s;
	    i++, l1_va += PAGE_SIZE) {
		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
		for (j = 0;
		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
		    j++, pdir_va += PAGE_SIZE) {
			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
			for (k = 0;
			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
		}
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but they are needed for pte_vatopa() to work correctly
	 * with kernel area addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}
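
/*
 * Set aside, starting at data_end, the physically contiguous space that
 * kernel_pte_alloc() later carves into the root table, the L1 directory
 * pages, the pdir pages and the ptbl pages covering the whole KVA range.
 * Returns the updated data_end.
 */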
static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

	kernel_ptbl_root = (pte_t ****)data_end;
	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;

	return (data_end);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static int
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&kernel_pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);

	return (1);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{
	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
#ifdef INVARIANTS
	/*
	 * Verify that all page directories are gone.
	 * Protects against reference count leakage.
	 */
	for (int i = 0; i < PG_ROOT_NENTRIES; i++)
		KASSERT(pmap->pm_root[i] == 0,
		    ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root));
#endif
	uma_zfree(ptbl_root_zone, pmap->pm_root);
}
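
/*
 * Walk the range one page at a time, look up the backing physical page under
 * the pmap lock, and synchronize the instruction cache for each mapped
 * portion through the direct map.
 */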
static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */
	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
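
	/* dcbz clears a full data cache line, hence the cacheline_size step. */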
	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}
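
/*
 * Copy xfersize bytes between two (possibly unaligned) page arrays, clamping
 * each chunk so it stays within a single source page and a single
 * destination page, again going through the direct map.
 */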
static inline void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;
	vm_page_t pa, pb;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}
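
/*
 * Quick-mapping a page needs no temporary KVA here: the direct map already
 * provides a usable address, so the matching "remove" is a no-op.
 */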
static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Return the largest uint value log such that 2^log <= num.
 */
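/* For example, ilog2(4096) == 12 and ilog2(4097) == 12. */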
static unsigned long
ilog2(unsigned long num)
{
	long lz;

	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
	return (63 - lz);
}

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't broadcast,
	 * so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
	__asm __volatile("isync; .long 0x7c200024; isync; msync");

	__asm __volatile("wrtee %0" :: "r"(msr));
}