cspace.h

/*
 * Copyright (c) 2023 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Capability spaces.
 */

#ifndef KERN_CAP_SPACE_H
#define KERN_CAP_SPACE_H

#include <errno.h>
#include <stdint.h>

#include <kern/capability.h>
#include <kern/cspace_types.h>
#include <kern/rcu.h>

#define CSPACE_WEAK 0x02 // Use a weak reference for a capability.
#define CSPACE_MASK (CSPACE_WEAK)
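
// Initialize a capability space.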
static inline void
cspace_init (struct cspace *sp)
{
  rdxtree_init (&sp->tree, RDXTREE_KEY_ALLOC);
  adaptive_lock_init (&sp->lock);
}
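
// Release the reference on a tree entry, unless it's a weak (marked) one.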
static inline void
cspace_maybe_rel (void *ptr)
{
  if (!((uintptr_t)ptr & RDXTREE_XBIT))
    cap_base_rel (ptr);
}
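
/*
 * Look up the capability at a given index, acquire a reference on it and
 * report whether the entry is marked as weak. Returns NULL if the index
 * is invalid or empty.
 */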
static inline struct cap_base*
cspace_get_all (struct cspace *sp, int capx, int *marked)
{
  if (capx < 0)
    return (NULL);

  CPU_INTR_GUARD ();
  RCU_GUARD ();

  void *ptr = rdxtree_lookup (&sp->tree, capx);
  if (! ptr)
    return (ptr);

  *marked = ((uintptr_t)ptr & RDXTREE_XBIT) != 0;
  struct cap_base *cap = (void *)((uintptr_t)ptr & ~RDXTREE_XBIT);
  cap_base_acq (cap);
  return (cap);
}
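
// Same as above, only without reporting the weak mark.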
static inline struct cap_base*
cspace_get (struct cspace *sp, int capx)
{
  int marked;
  return (cspace_get_all (sp, capx, &marked));
}
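
/*
 * Insert a capability at a free index, the cspace already being locked.
 * Returns the new index, or -ENOMEM if the insertion failed.
 */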
static inline int
cspace_add_free_locked (struct cspace *sp, struct cap_base *cap,
                        uint32_t flags)
{
  rdxtree_key_t cap_idx;
  void *ptr = (void *)((uintptr_t)cap |
                       ((flags & CSPACE_WEAK) ? RDXTREE_XBIT : 0));

  int rv = rdxtree_insert_alloc (&sp->tree, ptr, &cap_idx);
  if (rv)
    return (-ENOMEM);

  cap_base_acq (cap);
  return ((int)cap_idx);
}
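
// Same as above, only taking the lock.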
static inline int
cspace_add_free (struct cspace *sp, struct cap_base *cap, uint32_t flags)
{
  ADAPTIVE_LOCK_GUARD (&sp->lock);
  return (cspace_add_free_locked (sp, cap, flags));
}
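
/*
 * Remove the capability at an index, the cspace already being locked.
 * Returns zero on success, or EBADF if the index is invalid.
 */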
static inline int
cspace_rem_locked (struct cspace *sp, int cap_idx)
{
  if (cap_idx < 0)
    return (EBADF);

  void *ptr = rdxtree_remove (&sp->tree, cap_idx);
  if (! ptr)
    return (EBADF);
  else if (!((uintptr_t)ptr & RDXTREE_XBIT))
    {
      CPU_INTR_GUARD ();
      cap_base_rel (ptr);
    }

  return (0);
}
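
// Same as above, only taking the lock.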
static inline int
cspace_rem (struct cspace *sp, int cap_idx)
{
  ADAPTIVE_LOCK_GUARD (&sp->lock);
  return (cspace_rem_locked (sp, cap_idx));
}
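
/*
 * Duplicate a capability at a newly allocated index.
 * Returns the new index, or a negative errno value on error.
 */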
static inline int
cspace_dup (struct cspace *sp, int cap_idx)
{
  _Auto cap = cspace_get (sp, cap_idx);
  if (! cap)
    return (-EBADF);

  int new_idx = cspace_add_free (sp, cap, 0);
  cap_base_rel (cap);
  return (new_idx);
}
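
/*
 * Duplicate a capability at a specific index, replacing any capability
 * previously stored there.
 */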
static inline int
cspace_dup3 (struct cspace *sp, int cap_idx, int new_idx,
             uint32_t flags __unused)
{
  if (cap_idx < 0)
    return (EBADF);

  ADAPTIVE_LOCK_GUARD (&sp->lock);
  struct cap_base *cap = rdxtree_lookup (&sp->tree, cap_idx);
  if (! cap)
    return (EBADF);

  void **slot;
  int rv = rdxtree_insert_slot (&sp->tree, new_idx, cap, &slot);
  if (rv == EBUSY)
    // Replace the older capability.
    cspace_maybe_rel (rdxtree_replace_slot (slot, cap));
  else if (rv)
    return (ENOMEM);

  cap_base_acq (cap);
  return (0);
}
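
// Destroy a capability space, releasing every non-weak capability stored in it.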
static inline void
cspace_destroy (struct cspace *sp)
{
  struct rdxtree_iter iter;
  void *cap;

  rdxtree_for_each (&sp->tree, &iter, cap)
    cspace_maybe_rel (cap);

  rdxtree_remove_all (&sp->tree);
}
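
// Capability space of the calling thread's task.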
#define cspace_self()   ((struct cspace *)&thread_self()->xtask->caps)

#endif