rset.c

/*
 * Copyright (c) 2024 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <errno.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/kmem.h>
#include <kern/rcu.h>
#include <kern/slist.h>
#include <kern/spinlock.h>
#include <kern/thread.h>
#include <kern/work.h>
#include <kern/xcall.h>

#include <machine/cpu.h>
#include <machine/pmap.h>

#include <vm/page.h>
#include <vm/rset.h>
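
// Cache of reverse-set entries, one per PTE that maps a physical page.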
static struct kmem_cache vm_rset_cache;
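
// Work callback: free an entry once its RCU grace period has elapsed.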
static void
vm_rset_entry_fini (struct work *wp)
{
  kmem_cache_free (&vm_rset_cache, structof (wp, struct vm_rset_entry, work));
}
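
// Allocate and initialize an entry describing a mapping of a page:
// the PTE's address, the virtual address and the owning CPU.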
static struct vm_rset_entry*
vm_rset_entry_create (void *pte, uintptr_t va, uint32_t cpu)
{
  struct vm_rset_entry *ret = kmem_cache_alloc (&vm_rset_cache);
  if (! ret)
    return (ret);

  work_init (&ret->work, vm_rset_entry_fini);
  ret->pte = pte;
  ret->va = va;
  ret->cpu = cpu;
  return (ret);
}
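
// Link a new mapping into PAGE's reverse set and mark the page dirty.
// Returns ENOMEM if an entry couldn't be allocated.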
int
vm_rset_page_link (struct vm_page *page, void *pte, uintptr_t va, uint32_t cpu)
{
  _Auto entry = vm_rset_entry_create (pte, va, cpu);
  if (! entry)
    return (ENOMEM);

  spinlock_lock (&page->rset_lock);
  slist_rcu_insert_tail (&page->rset, &entry->link);
  spinlock_unlock (&page->rset_lock);
  vm_page_mark_dirty (page);
  return (0);
}
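
// Remove the entry matching PTE from PAGE's reverse set. The entry is
// only freed once all current RCU readers are done with it.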
void
vm_rset_del (struct vm_page *page, void *pte)
{
  struct slist_node *prev = NULL;
  struct vm_rset_entry *entry;

  SPINLOCK_GUARD (&page->rset_lock);
  slist_rcu_for_each_entry (&page->rset, entry, link)
    {
      if (pte != entry->pte)
        {
          prev = &entry->link;
          continue;
        }

      slist_rcu_remove (&page->rset, prev);
      // Invalidate the CPU so concurrent lockless walkers skip this entry.
      entry->cpu = ~0u;
      rcu_defer (&entry->work);
      return;
    }
}
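
// Make every mapping of PAGE read-only by cleaning each PTE on the CPU
// that owns it, either locally or through a cross-call.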
void
vm_rset_mark_ro (struct vm_page *page)
{
  struct pmap_clean_data cdata = { .pa = vm_page_to_pa (page) };
  struct vm_rset_entry *entry;

  slist_rcu_for_each_entry (&page->rset, entry, link)
    {
      cdata.va = entry->va;
      cdata.pte = entry->pte;
      cdata.cpu = entry->cpu;

      // Skip entries that have been logically removed.
      if (cdata.cpu == ~0u)
        continue;

      // Pin the thread so cpu_id() remains stable for the comparison below.
      THREAD_PIN_GUARD ();
      if (cdata.cpu == cpu_id ())
        pmap_xcall_clean (&cdata);
      else
        xcall_call (pmap_xcall_clean, &cdata, cdata.cpu);
    }
}
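
// Set up the entry cache at boot, once the kmem allocator is available.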
static int __init
vm_rset_setup (void)
{
  kmem_cache_init (&vm_rset_cache, "vm_rset_entry",
                   sizeof (struct vm_rset_entry), 0, NULL, 0);
  return (0);
}

INIT_OP_DEFINE (vm_rset_setup,
                INIT_OP_DEP (kmem_setup, true));