/*	$OpenBSD: kern_srp.c,v 1.1 2015/07/02 01:34:00 dlg Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/srp.h>
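
/*
 * srp ("shared reference pointers") lets readers follow a pointer
 * without taking a lock while writers replace it.  On MULTIPROCESSOR
 * kernels readers advertise the value they are using in a per-cpu
 * hazard record; a replaced value is only passed to the destructor
 * once no cpu advertises it.  srp_gc counts the values still
 * outstanding so srp_finalize() can wait for all of them.
 */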

void	srp_v_gc_start(struct srp_gc *, struct srp *, void *);

void
srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
{
	srp_gc->srp_gc_dtor = dtor;
	srp_gc->srp_gc_cookie = cookie;
	srp_gc->srp_gc_refcount = 1;
}
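
/*
 * A minimal destructor sketch; the frob names below are hypothetical
 * and only illustrate the (cookie, value) callback shape:
 *
 *	void
 *	frob_dtor(void *cookie, void *v)
 *	{
 *		pool_put(&frob_pool, v);	(* free the retired value *)
 *	}
 *
 *	srp_gc_init(&frob_gc, frob_dtor, NULL);
 */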

void
srp_init(struct srp *srp)
{
	srp->ref = NULL;
}

void
srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *nv)
{
	void *ov;

	if (nv != NULL)
		atomic_inc_int(&srp_gc->srp_gc_refcount);

	/*
	 * this doesn't have to be as careful as the caller has already
	 * prevented concurrent updates, eg. by holding the kernel lock.
	 * can't be mixed with non-locked updates though.
	 */

	ov = srp->ref;
	srp->ref = nv;
	if (ov != NULL)
		srp_v_gc_start(srp_gc, srp, ov);
}

void *
srp_get_locked(struct srp *srp)
{
	return (srp->ref);
}
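
/*
 * Hedged sketch of the locked update path, assuming the caller
 * already serialises writers (e.g. under the kernel lock); frob_gc,
 * frob_srp and "new" are hypothetical:
 *
 *	KERNEL_LOCK();
 *	srp_update_locked(&frob_gc, &frob_srp, new);
 *	KERNEL_UNLOCK();
 */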

#ifdef MULTIPROCESSOR
#include <machine/cpu.h>
#include <sys/pool.h>

struct srp_gc_ctx {
	struct srp_gc		*srp_gc;
	struct timeout		tick;
	struct srp_hazard	hzrd;
};

int	srp_v_referenced(struct srp *, void *);
void	srp_v_gc(void *);

struct pool srp_gc_ctx_pool;

void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 0,
	    PR_WAITOK, "srpgc", NULL);

	/* items are allocated in a process, but freed from a timeout */
	pool_setipl(&srp_gc_ctx_pool, IPL_SOFTCLOCK);
}
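
/*
 * srp_v_referenced() checks whether any cpu still advertises (srp, v)
 * in its hazard records.  The membar_consumer() orders the sh_p load
 * before the sh_v load, pairing with the membar_producer() between
 * the two stores in srp_enter().
 */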

int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}

void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	if (atomic_dec_int_nv(&srp_gc->srp_gc_refcount) == 0)
		wakeup_one(&srp_gc->srp_gc_refcount);
}

void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}
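
/*
 * srp_v_gc() runs from the timeout armed above: it polls the hazard
 * records again and either destroys the value or re-arms itself one
 * tick later.  A busy reader therefore delays destruction but never
 * blocks the updater.
 */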

void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}

void
srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		atomic_inc_int(&srp_gc->srp_gc_refcount);

	v = atomic_swap_ptr(&srp->ref, v);
	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}
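
/*
 * Unlike srp_update_locked(), srp_update() tolerates concurrent
 * updaters: atomic_swap_ptr() hands each previous value to exactly
 * one caller, which then passes it to the gc.
 */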

void
srp_finalize(struct srp_gc *srp_gc)
{
	struct sleep_state sls;
	u_int r;

	r = atomic_dec_int_nv(&srp_gc->srp_gc_refcount);
	while (r > 0) {
		sleep_setup(&sls, &srp_gc->srp_gc_refcount, PWAIT, "srpfini");
		r = srp_gc->srp_gc_refcount;
		sleep_finish(&sls, r);
	}
}
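
/*
 * Hedged read-side sketch (frob_srp and frob_use() are hypothetical).
 * The hazard record lives on the current cpu, so the caller should
 * not sleep between srp_enter() and srp_leave():
 *
 *	f = srp_enter(&frob_srp);
 *	if (f != NULL)
 *		frob_use(f);
 *	srp_leave(&frob_srp, f);
 */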

void *
srp_enter(struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	void *v;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL)
			break;
	}
	if (__predict_false(i == nitems(ci->ci_srp_hazards)))
		panic("%s: not enough srp hazard records", __func__);

	hzrd->sh_p = srp;
	membar_producer();

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}

void
srp_leave(struct srp *srp, void *v)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == srp) {
			hzrd->sh_p = NULL;
			hzrd->sh_v = NULL;
			return;
		}
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);
}

#else /* MULTIPROCESSOR */
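
/*
 * On uniprocessor kernels no other cpu can hold a reference, so
 * hazard records and the deferred gc are unnecessary: replaced
 * values can be destroyed immediately.
 */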

void
srp_startup(void)
{
}

void
srp_finalize(struct srp_gc *srp_gc)
{
	KASSERT(srp_gc->srp_gc_refcount == 1);
	srp_gc->srp_gc_refcount--;
}

void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	srp_gc->srp_gc_refcount--;
}
#endif /* MULTIPROCESSOR */