- /*
- * Copyright (c) 2014-2019 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * This implementation is based on the paper "RadixVM: Scalable address
- * spaces for multithreaded applications" by Austin T. Clements,
- * M. Frans Kaashoek, and Nickolai Zeldovich. Specifically, it implements
- * the Refcache component described in the paper, with a few differences
- * outlined below.
- *
- * Refcache flushes delta caches directly from an interrupt handler, and
- * disables interrupts and preemption on cache access. That behavior is
- * realtime-unfriendly because of the potentially large number of deltas
- * in a cache. This module uses dedicated manager threads to perform
- * cache flushes and queue reviews, and only disables preemption on
- * individual delta access.
- *
- * Locking protocol: cache -> counter -> global data
- */
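- /*
- * Illustrative usage sketch (not part of this file). The obj type and
- * obj_noref function are hypothetical; the sref_* calls are the interface
- * implemented by this module:
- *
- *   struct obj { struct sref_counter sref; };
- *
- *   static void
- *   obj_noref (struct sref_counter *counter)
- *   {
- *     struct obj *obj = structof (counter, struct obj, sref);
- *     // Safe to release the object: no references remain.
- *   }
- *
- *   sref_counter_init (&obj->sref, 1, NULL, obj_noref);
- *   sref_counter_inc (&obj->sref);
- *   sref_counter_dec (&obj->sref);
- *   sref_counter_dec (&obj->sref);   // obj_noref() eventually runs
- */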
- #include <assert.h>
- #include <errno.h>
- #include <stdbool.h>
- #include <stddef.h>
- #include <stdint.h>
- #include <stdio.h>
- #include <kern/atomic.h>
- #include <kern/clock.h>
- #include <kern/cpumap.h>
- #include <kern/init.h>
- #include <kern/list.h>
- #include <kern/log.h>
- #include <kern/macros.h>
- #include <kern/panic.h>
- #include <kern/percpu.h>
- #include <kern/slist.h>
- #include <kern/spinlock.h>
- #include <kern/sref.h>
- #include <kern/syscnt.h>
- #include <kern/thread.h>
- #include <machine/cpu.h>
- // Counter flags.
- #define SREF_CNTF_QUEUED 0x1 // Queued for review
- #define SREF_CNTF_DIRTY 0x2 // Dirty zero seen
- #define SREF_CNTF_UNREF 0x4 // Unreferenced, for debugging only
- // Per-cache delta table size.
- #define SREF_CACHE_DELTA_TABLE_SIZE 4096
- #if !ISP2 (SREF_CACHE_DELTA_TABLE_SIZE)
- #error "delta table size must be a power-of-two"
- #endif
- #ifdef __LP64__
- #define SREF_HASH_SHIFT 3
- #else
- #define SREF_HASH_SHIFT 2
- #endif
- // Start close to the wrap-around point so that epoch ID overflow occurs early.
- #define SREF_EPOCH_ID_INIT_VALUE ((uint32_t)-500)
- // Weakref flags.
- #define SREF_WEAKREF_DYING ((uintptr_t)1)
- #define SREF_WEAKREF_MASK (~SREF_WEAKREF_DYING)
- /*
- * Since review queues are processor-local, at least two local epochs
- * must have passed before a zero is considered a true zero. As a result,
- * three queues are required: one for the current epoch and two for the epochs preceding it.
- * The queues are stored in an array used as a ring buffer that moves
- * forward with each new local epoch. Indexing in this array is done
- * with a binary mask instead of a modulo, for performance reasons, and
- * consequently, the array size must be the smallest power of two that can
- * hold three queues, i.e. four.
- */
- #define SREF_NR_QUEUES P2ROUND (3, 2)
- // Number of counters in review queue beyond which to issue a warning.
- #define SREF_NR_COUNTERS_WARN 10000
- /*
- * Global data.
- *
- * Processors regularly check the global epoch ID against their own,
- * locally cached epoch ID. If they're the same, a processor flushes
- * its cached deltas, acknowledges the flush by decrementing the pending
- * acknowledgments counter, and increments its local epoch ID,
- * preventing additional flushes during the same epoch.
- *
- * The last processor to acknowledge starts the next epoch.
- *
- * The epoch ID and the pending acknowledgments counter fill an entire
- * cache line each in order to avoid false sharing on SMP. Whenever
- * multiple processors may access them, they must use atomic operations
- * to avoid data races.
- *
- * Atomic operations on the pending acknowledgments counter are done
- * with acquire-release ordering to enforce the memory ordering
- * guarantees required by both the implementation and the interface.
- */
- struct sref_data
- {
- __cacheline_aligned uint32_t epoch_id;
- __cacheline_aligned uint32_t nr_pending_acks;
- uint64_t start_ts;
- struct syscnt sc_epochs;
- struct syscnt sc_dirty_zeroes;
- struct syscnt sc_true_zeroes;
- struct syscnt sc_revives;
- struct syscnt sc_last_epoch_ms;
- struct syscnt sc_longest_epoch_ms;
- };
- /*
- * Temporary difference to apply on a reference counter.
- *
- * Deltas are stored in per-processor caches and added to their global
- * counter when flushed. A delta is valid if and only if the counter it
- * points to isn't NULL.
- *
- * On cache flush, a valid delta must be flushed regardless of its value,
- * because a zero-valued delta may still report a dirty zero. By flushing
- * all valid deltas, and clearing them all after a flush, activity on a
- * counter is reliably reported.
- */
- struct sref_delta
- {
- struct list node;
- struct sref_counter *counter;
- unsigned long value;
- };
- struct sref_queue
- {
- struct slist counters;
- unsigned long size;
- };
- /*
- * Per-processor cache of deltas.
- *
- * A cache is dirty if there is at least one delta that requires flushing.
- * It may only be flushed once per epoch.
- *
- * Delta caches are implemented with hash tables for quick counter-to-delta
- * lookups. For now, a very simple replacement policy, similar to
- * that described in the RadixVM paper, is used. Improve with an LRU-like
- * algorithm if this turns out to be a problem.
- *
- * Periodic events (normally the system timer tick) trigger cache checks.
- * A cache check may wake up the manager thread if the cache needs management,
- * i.e. if it's dirty or if there are counters to review. Otherwise, the
- * flush acknowledgment is done directly to avoid the cost of a thread
- * wake-up.
- *
- * Interrupts and preemption must be disabled when accessing a delta cache.
- */
- struct sref_cache
- {
- struct sref_data *data;
- bool dirty;
- bool flushed;
- uint32_t epoch_id;
- struct sref_delta deltas[SREF_CACHE_DELTA_TABLE_SIZE];
- struct list valid_deltas;
- struct sref_queue queues[SREF_NR_QUEUES];
- struct thread *manager;
- struct syscnt sc_collisions;
- struct syscnt sc_flushes;
- };
- static struct sref_data sref_data;
- static struct sref_cache sref_cache __percpu;
- static uint32_t
- sref_data_get_epoch_id (const struct sref_data *data)
- {
- return (data->epoch_id);
- }
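- /*
- * Return true if the global epoch ID has caught up with the given locally
- * cached ID. The acquire fence pairs with the release store performed in
- * sref_data_start_epoch.
- */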
- static bool
- sref_data_check_epoch_id (const struct sref_data *data, uint32_t epoch_id)
- {
- if (likely (atomic_load_rlx (&data->epoch_id) != epoch_id))
- return (false);
- atomic_fence_acq ();
- return (true);
- }
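- /*
- * Start a new global epoch: update the epoch duration counters, reset the
- * number of pending acknowledgments to the number of processors, and
- * publish the incremented epoch ID with release ordering.
- */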
- static void
- sref_data_start_epoch (struct sref_data *data)
- {
- uint64_t now = clock_get_time (),
- duration = clock_ticks_to_ms (now - data->start_ts);
- syscnt_set (&data->sc_last_epoch_ms, duration);
- if (duration > syscnt_read (&data->sc_longest_epoch_ms))
- syscnt_set (&data->sc_longest_epoch_ms, duration);
- assert (data->nr_pending_acks == 0);
- data->nr_pending_acks = cpu_count ();
- data->start_ts = now;
- uint32_t epoch_id = atomic_load_rlx (&data->epoch_id);
- atomic_store_rel (&data->epoch_id, epoch_id + 1);
- }
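- // Acknowledge the current epoch; the last processor starts the next one.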
- static void
- sref_data_ack_cpu (struct sref_data *data)
- {
- uint32_t prev = atomic_sub_acq_rel (&data->nr_pending_acks, 1);
- if (prev != 1)
- {
- assert (prev != 0);
- return;
- }
- syscnt_inc (&data->sc_epochs);
- sref_data_start_epoch (data);
- }
- static void
- sref_data_update_stats (struct sref_data *data, int64_t nr_dirty_zeroes,
- int64_t nr_true_zeroes, int64_t nr_revives)
- {
- syscnt_add (&data->sc_dirty_zeroes, nr_dirty_zeroes);
- syscnt_add (&data->sc_true_zeroes, nr_true_zeroes);
- syscnt_add (&data->sc_revives, nr_revives);
- }
- static bool
- sref_counter_aligned (const struct sref_counter *counter)
- {
- return (((uintptr_t)counter & ~SREF_WEAKREF_MASK) == 0);
- }
- static void
- sref_weakref_init (struct sref_weakref *weakref, struct sref_counter *counter)
- {
- assert (sref_counter_aligned (counter));
- weakref->addr = (uintptr_t)counter;
- }
- static void
- sref_weakref_mark_dying (struct sref_weakref *weakref)
- {
- atomic_or_rlx (&weakref->addr, SREF_WEAKREF_DYING);
- }
- static void
- sref_weakref_clear_dying (struct sref_weakref *weakref)
- {
- atomic_and_rlx (&weakref->addr, SREF_WEAKREF_MASK);
- }
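- /*
- * Attempt to clear a dying weak reference. Return 0 if the weak reference
- * was still marked dying and has been cleared, EBUSY if it was revived
- * (the dying bit was cleared by a concurrent sref_weakref_tryget).
- */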
- static int
- sref_weakref_kill (struct sref_weakref *weakref)
- {
- uintptr_t addr = atomic_load_rlx (&weakref->addr) | SREF_WEAKREF_DYING,
- oldval = atomic_cas_rlx (&weakref->addr, addr, 0);
- if (oldval == addr)
- return (0);
- assert ((oldval & SREF_WEAKREF_MASK) == (addr & SREF_WEAKREF_MASK));
- return (EBUSY);
- }
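- /*
- * Clear the dying bit and return the counter address stored in the weak
- * reference, or NULL if the weak reference was already killed.
- */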
- static struct sref_counter*
- sref_weakref_tryget (struct sref_weakref *weakref)
- {
- uintptr_t addr, oldval, newval;
- do
- {
- addr = atomic_load_rlx (&weakref->addr);
- newval = addr & SREF_WEAKREF_MASK;
- oldval = atomic_cas_rlx (&weakref->addr, addr, newval);
- }
- while (oldval != addr);
- return ((struct sref_counter *)newval);
- }
- static uintptr_t
- sref_counter_hash (const struct sref_counter *counter)
- {
- uintptr_t va = (uintptr_t)counter;
- assert (P2ALIGNED (va, 1UL << SREF_HASH_SHIFT));
- return (va >> SREF_HASH_SHIFT);
- }
- static bool
- sref_counter_is_queued (const struct sref_counter *counter)
- {
- return ((counter->flags & SREF_CNTF_QUEUED) != 0);
- }
- static void
- sref_counter_mark_queued (struct sref_counter *counter)
- {
- counter->flags |= SREF_CNTF_QUEUED;
- }
- static void
- sref_counter_clear_queued (struct sref_counter *counter)
- {
- counter->flags &= ~SREF_CNTF_QUEUED;
- }
- static bool
- sref_counter_is_dirty (const struct sref_counter *counter)
- {
- return ((counter->flags & SREF_CNTF_DIRTY) != 0);
- }
- static void
- sref_counter_mark_dirty (struct sref_counter *counter)
- {
- counter->flags |= SREF_CNTF_DIRTY;
- }
- static void
- sref_counter_clear_dirty (struct sref_counter *counter)
- {
- counter->flags &= ~SREF_CNTF_DIRTY;
- }
- #ifdef SREF_VERIFY
- static bool
- sref_counter_is_unreferenced (const struct sref_counter *counter)
- {
- return ((counter->flags & SREF_CNTF_UNREF) != 0);
- }
- static void
- sref_counter_mark_unreferenced (struct sref_counter *counter)
- {
- counter->flags |= SREF_CNTF_UNREF;
- }
- #endif
- static void
- sref_counter_mark_dying (struct sref_counter *counter)
- {
- if (counter->weakref)
- sref_weakref_mark_dying (counter->weakref);
- }
- static void
- sref_counter_clear_dying (struct sref_counter *counter)
- {
- if (counter->weakref)
- sref_weakref_clear_dying (counter->weakref);
- }
- static int
- sref_counter_kill_weakref (struct sref_counter *counter)
- {
- return (counter->weakref ? sref_weakref_kill (counter->weakref) : 0);
- }
- static void __init
- sref_queue_init (struct sref_queue *queue)
- {
- slist_init (&queue->counters);
- queue->size = 0;
- }
- static bool
- sref_queue_empty (const struct sref_queue *queue)
- {
- return (queue->size == 0);
- }
- static void
- sref_queue_push (struct sref_queue *queue, struct sref_counter *counter)
- {
- slist_insert_tail (&queue->counters, &counter->node);
- ++queue->size;
- }
- static struct sref_counter*
- sref_queue_pop (struct sref_queue *queue)
- {
- struct sref_counter *counter = slist_first_entry (&queue->counters,
- typeof (*counter), node);
- slist_remove (&queue->counters, NULL);
- --queue->size;
- return (counter);
- }
- static void
- sref_queue_move (struct sref_queue *dest, const struct sref_queue *src)
- {
- slist_set_head (&dest->counters, &src->counters);
- dest->size = src->size;
- }
- static struct sref_queue*
- sref_cache_get_queue (struct sref_cache *cache, size_t index)
- {
- assert (index < ARRAY_SIZE (cache->queues));
- return (&cache->queues[index]);
- }
- static struct sref_queue*
- sref_cache_get_queue_by_epoch_id (struct sref_cache *cache, uint32_t epoch_id)
- {
- size_t mask = ARRAY_SIZE (cache->queues) - 1;
- return (sref_cache_get_queue (cache, epoch_id & mask));
- }
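- /*
- * Queue a counter for review on the current local epoch's queue, marking
- * it queued and, if it has a weak reference, dying.
- */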
- static void
- sref_cache_schedule_review (struct sref_cache *cache,
- struct sref_counter *counter)
- {
- assert (!sref_counter_is_queued (counter));
- assert (!sref_counter_is_dirty (counter));
- sref_counter_mark_queued (counter);
- sref_counter_mark_dying (counter);
- _Auto queue = sref_cache_get_queue_by_epoch_id (cache, cache->epoch_id);
- sref_queue_push (queue, counter);
- }
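- /*
- * Apply a flushed delta to its global counter. If the value drops to zero,
- * the counter is either marked dirty (when already queued) or scheduled
- * for review.
- */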
- static void
- sref_counter_add (struct sref_counter *counter, unsigned long delta,
- struct sref_cache *cache)
- {
- assert (!cpu_intr_enabled ());
- SPINLOCK_GUARD (&counter->lock);
- counter->value += delta;
- if (counter->value)
- ;
- else if (sref_counter_is_queued (counter))
- sref_counter_mark_dirty (counter);
- else
- sref_cache_schedule_review (cache, counter);
- }
- static void
- sref_counter_noref (struct work *work)
- {
- _Auto counter = structof (work, struct sref_counter, work);
- counter->noref_fn (counter);
- }
- static void __init
- sref_delta_init (struct sref_delta *delta)
- {
- delta->counter = NULL;
- delta->value = 0;
- }
- static struct sref_counter*
- sref_delta_counter (struct sref_delta *delta)
- {
- return (delta->counter);
- }
- static void
- sref_delta_set_counter (struct sref_delta *delta, struct sref_counter *counter)
- {
- assert (!delta->value);
- delta->counter = counter;
- }
- static void
- sref_delta_clear (struct sref_delta *delta)
- {
- assert (!delta->value);
- delta->counter = NULL;
- }
- static void
- sref_delta_inc (struct sref_delta *delta)
- {
- ++delta->value;
- }
- static void
- sref_delta_dec (struct sref_delta *delta)
- {
- --delta->value;
- }
- static bool
- sref_delta_is_valid (const struct sref_delta *delta)
- {
- return (delta->counter != NULL);
- }
- static void
- sref_delta_flush (struct sref_delta *delta, struct sref_cache *cache)
- {
- sref_counter_add (delta->counter, delta->value, cache);
- delta->value = 0;
- }
- static void
- sref_delta_evict (struct sref_delta *delta, struct sref_cache *cache)
- {
- sref_delta_flush (delta, cache);
- sref_delta_clear (delta);
- }
- static struct sref_cache*
- sref_get_local_cache (void)
- {
- return (cpu_local_ptr (sref_cache));
- }
- static uintptr_t
- sref_cache_compute_counter_index (const struct sref_cache *cache,
- const struct sref_counter *counter)
- {
- return (sref_counter_hash (counter) & (ARRAY_SIZE (cache->deltas) - 1));
- }
- static struct sref_delta*
- sref_cache_get_delta (struct sref_cache *cache, size_t index)
- {
- assert (index < ARRAY_SIZE (cache->deltas));
- return (&cache->deltas[index]);
- }
- static struct sref_cache*
- sref_cache_acquire (cpu_flags_t *flags)
- {
- thread_preempt_disable_intr_save (flags);
- return (sref_get_local_cache ());
- }
- static void
- sref_cache_release (cpu_flags_t flags)
- {
- thread_preempt_enable_intr_restore (flags);
- }
- static bool
- sref_cache_is_dirty (const struct sref_cache *cache)
- {
- return (cache->dirty);
- }
- static void
- sref_cache_set_dirty (struct sref_cache *cache)
- {
- cache->dirty = true;
- }
- static void
- sref_cache_clear_dirty (struct sref_cache *cache)
- {
- cache->dirty = false;
- }
- static bool
- sref_cache_is_flushed (const struct sref_cache *cache)
- {
- return (cache->flushed);
- }
- static void
- sref_cache_set_flushed (struct sref_cache *cache)
- {
- cache->flushed = true;
- }
- static void
- sref_cache_clear_flushed (struct sref_cache *cache)
- {
- cache->flushed = false;
- }
- static void
- sref_cache_add_delta (struct sref_cache *cache, struct sref_delta *delta,
- struct sref_counter *counter)
- {
- assert (!sref_delta_is_valid (delta));
- assert (counter);
- sref_delta_set_counter (delta, counter);
- list_insert_tail (&cache->valid_deltas, &delta->node);
- }
- static void
- sref_cache_remove_delta (struct sref_cache *cache, struct sref_delta *delta)
- {
- assert (sref_delta_is_valid (delta));
- sref_delta_evict (delta, cache);
- list_remove (&delta->node);
- }
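- /*
- * Return the cache delta bound to the given counter, binding a free slot
- * or evicting a colliding delta (flush and rebind) as needed.
- */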
- static struct sref_delta*
- sref_cache_take_delta (struct sref_cache *cache, struct sref_counter *counter)
- {
- size_t index = sref_cache_compute_counter_index (cache, counter);
- _Auto delta = sref_cache_get_delta (cache, index);
- if (!sref_delta_is_valid (delta))
- sref_cache_add_delta (cache, delta, counter);
- else if (sref_delta_counter (delta) != counter)
- {
- sref_cache_remove_delta (cache, delta);
- sref_cache_add_delta (cache, delta, counter);
- syscnt_inc (&cache->sc_collisions);
- }
- return (delta);
- }
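- /*
- * A cache needs its manager thread if it's dirty or if the review queue
- * from two epochs ago isn't empty.
- */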
- static bool
- sref_cache_needs_management (struct sref_cache *cache)
- {
- assert (!cpu_intr_enabled ());
- assert (!thread_preempt_enabled ());
- const _Auto queue = sref_cache_get_queue_by_epoch_id (cache,
- cache->epoch_id - 2);
- return (sref_cache_is_dirty (cache) || !sref_queue_empty (queue));
- }
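- // Acknowledge the global epoch and advance the local epoch ID.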
- static void
- sref_cache_end_epoch (struct sref_cache *cache)
- {
- assert (!sref_cache_needs_management (cache));
- sref_data_ack_cpu (cache->data);
- ++cache->epoch_id;
- }
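- /*
- * Flush a cache: evict all valid deltas, briefly re-enabling preemption and
- * interrupts between deltas to keep latency bounded, then hand the review
- * queue from two epochs ago to the caller and end the local epoch. Note that
- * the eviction loop exits with preemption and interrupts disabled; they are
- * restored only after the epoch has ended.
- */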
- static void
- sref_cache_flush (struct sref_cache *cache, struct sref_queue *queue)
- {
- cpu_flags_t flags;
- while (1)
- {
- thread_preempt_disable_intr_save (&flags);
- if (list_empty (&cache->valid_deltas))
- break;
- struct sref_delta *delta = list_first_entry (&cache->valid_deltas,
- typeof (*delta), node);
- sref_cache_remove_delta (cache, delta);
- thread_preempt_enable_intr_restore (flags);
- }
- sref_cache_clear_dirty (cache);
- sref_cache_set_flushed (cache);
- _Auto prev_queue = sref_cache_get_queue_by_epoch_id (cache,
- cache->epoch_id - 2);
- sref_queue_move (queue, prev_queue);
- sref_queue_init (prev_queue);
- sref_cache_end_epoch (cache);
- thread_preempt_enable_intr_restore (flags);
- syscnt_inc (&cache->sc_flushes);
- }
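- /*
- * Review the counters that reached zero two epochs ago. Counters that became
- * nonzero again are dropped from the queue, dirty zeroes and revived weak
- * references are requeued, and true zeroes have their noref work scheduled.
- */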
- static void
- sref_queue_review (struct sref_queue *queue, struct sref_cache *cache)
- {
- int64_t nr_dirty_zeroes = 0, nr_true_zeroes = 0, nr_revives = 0;
- struct work_queue works;
- work_queue_init (&works);
- while (!sref_queue_empty (queue))
- {
- _Auto counter = sref_queue_pop (queue);
- cpu_flags_t flags;
- spinlock_lock_intr_save (&counter->lock, &flags);
- #ifdef SREF_VERIFY
- assert (!sref_counter_is_unreferenced (counter));
- #endif
- assert (sref_counter_is_queued (counter));
- sref_counter_clear_queued (counter);
- bool requeue;
- if (counter->value)
- {
- sref_counter_clear_dirty (counter);
- sref_counter_clear_dying (counter);
- spinlock_unlock_intr_restore (&counter->lock, flags);
- continue;
- }
- else if (sref_counter_is_dirty (counter))
- {
- requeue = true;
- ++nr_dirty_zeroes;
- sref_counter_clear_dirty (counter);
- }
- else
- {
- if (sref_counter_kill_weakref (counter) == 0)
- requeue = false;
- else
- {
- requeue = true;
- ++nr_revives;
- }
- }
- if (requeue)
- {
- sref_cache_schedule_review (cache, counter);
- spinlock_unlock_intr_restore (&counter->lock, flags);
- }
- else
- {
- /*
- * Keep in mind that the work structure shares memory with
- * the counter data.
- */
- #ifdef SREF_VERIFY
- sref_counter_mark_unreferenced (counter);
- #endif
- /*
- * Unlocking isn't needed here, since this counter is now
- * really at 0, but do it for consistency.
- */
- spinlock_unlock_intr_restore (&counter->lock, flags);
- ++nr_true_zeroes;
- work_init (&counter->work, sref_counter_noref);
- work_queue_push (&works, &counter->work);
- }
- }
- if (work_queue_nr_works (&works) != 0)
- work_queue_schedule (&works, WORK_HIGHPRIO);
- sref_data_update_stats (cache->data, nr_dirty_zeroes,
- nr_true_zeroes, nr_revives);
- }
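- /*
- * Manager thread function: sleep until the local cache requires management,
- * then flush it and review the counters handed over by the flush.
- */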
- static void
- sref_cache_manage (void *arg)
- {
- struct sref_cache *cache = arg;
- cpu_flags_t flags;
- thread_preempt_disable_intr_save (&flags);
- while (1)
- {
- while (sref_cache_is_flushed (cache))
- thread_sleep (NULL, cache, "sref");
- thread_preempt_enable_intr_restore (flags);
- struct sref_queue queue;
- sref_cache_flush (cache, &queue);
- sref_queue_review (&queue, cache);
- thread_preempt_disable_intr_save (&flags);
- }
- }
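- /*
- * Periodic check, called from sref_report_periodic_event. If the global
- * epoch has caught up with the local one, either acknowledge it directly
- * when no management is needed, or wake up the manager thread.
- */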
- static void
- sref_cache_check (struct sref_cache *cache)
- {
- if (!sref_data_check_epoch_id (&sref_data, cache->epoch_id))
- return;
- else if (!sref_cache_needs_management (cache))
- {
- sref_cache_end_epoch (cache);
- return;
- }
- sref_cache_clear_flushed (cache);
- thread_wakeup (cache->manager);
- }
- static void __init
- sref_cache_init (struct sref_cache *cache, unsigned int cpu,
- struct sref_data *data)
- {
- cache->data = data;
- cache->dirty = false;
- cache->flushed = true;
- cache->epoch_id = sref_data_get_epoch_id (data) + 1;
- for (size_t i = 0; i < ARRAY_SIZE (cache->deltas); i++)
- sref_delta_init (sref_cache_get_delta (cache, i));
- list_init (&cache->valid_deltas);
- for (size_t i = 0; i < ARRAY_SIZE (cache->queues); i++)
- sref_queue_init (sref_cache_get_queue (cache, i));
- char name[SYSCNT_NAME_SIZE];
- snprintf (name, sizeof (name), "sref_collisions/%u", cpu);
- syscnt_register (&cache->sc_collisions, name);
- snprintf (name, sizeof (name), "sref_flushes/%u", cpu);
- syscnt_register (&cache->sc_flushes, name);
- cache->manager = NULL;
- }
- static void __init
- sref_cache_init_manager (struct sref_cache *cache, uint32_t cpu)
- {
- struct cpumap *cpumap;
- int error = cpumap_create (&cpumap);
- if (error)
- panic ("sref: unable to create manager thread CPU map");
- cpumap_zero (cpumap);
- cpumap_set (cpumap, cpu);
- char name[THREAD_NAME_SIZE];
- snprintf (name, sizeof (name),
- THREAD_KERNEL_PREFIX "sref_cache_manage/%u", cpu);
- struct thread_attr attr;
- thread_attr_init (&attr, name);
- thread_attr_set_cpumap (&attr, cpumap);
- thread_attr_set_priority (&attr, THREAD_SCHED_FS_PRIO_MAX);
- struct thread *manager;
- error = thread_create (&manager, &attr, sref_cache_manage, cache);
- cpumap_destroy (cpumap);
- if (error)
- panic ("sref: unable to create manager thread");
- cache->manager = manager;
- }
- static void __init
- sref_data_init (struct sref_data *data)
- {
- data->epoch_id = SREF_EPOCH_ID_INIT_VALUE;
- data->nr_pending_acks = 0;
- data->start_ts = clock_get_time ();
- syscnt_register (&data->sc_epochs, "sref_epochs");
- syscnt_register (&data->sc_dirty_zeroes, "sref_dirty_zeroes");
- syscnt_register (&data->sc_true_zeroes, "sref_true_zeroes");
- syscnt_register (&data->sc_revives, "sref_revives");
- syscnt_register (&data->sc_last_epoch_ms, "sref_last_epoch_ms");
- syscnt_register (&data->sc_longest_epoch_ms, "sref_longest_epoch_ms");
- }
- static int __init
- sref_bootstrap (void)
- {
- sref_data_init (&sref_data);
- sref_cache_init (sref_get_local_cache (), 0, &sref_data);
- return (0);
- }
- INIT_OP_DEFINE (sref_bootstrap,
- INIT_OP_DEP (cpu_setup, true),
- INIT_OP_DEP (spinlock_setup, true),
- INIT_OP_DEP (syscnt_setup, true),
- INIT_OP_DEP (thread_bootstrap, true));
- static int __init
- sref_setup (void)
- {
- for (uint32_t i = 1; i < cpu_count (); i++)
- sref_cache_init (percpu_ptr (sref_cache, i), i, &sref_data);
- for (uint32_t i = 0; i < cpu_count (); i++)
- sref_cache_init_manager (percpu_ptr (sref_cache, i), i);
- sref_data_start_epoch (&sref_data);
- return (0);
- }
- INIT_OP_DEFINE (sref_setup,
- INIT_OP_DEP (cpu_mp_probe, true),
- INIT_OP_DEP (cpumap_setup, true),
- INIT_OP_DEP (log_setup, true),
- INIT_OP_DEP (sref_bootstrap, true),
- INIT_OP_DEP (thread_setup, true));
- void
- sref_report_periodic_event (void)
- {
- sref_cache_check (sref_get_local_cache ());
- }
- void
- sref_counter_init (struct sref_counter *counter,
- unsigned long init_value,
- struct sref_weakref *weakref,
- sref_noref_fn_t noref_fn)
- {
- assert (init_value);
- counter->noref_fn = noref_fn;
- spinlock_init (&counter->lock);
- counter->flags = 0;
- counter->value = init_value;
- counter->weakref = weakref;
- if (weakref)
- sref_weakref_init (weakref, counter);
- }
- static void
- sref_counter_inc_common (struct sref_counter *counter, struct sref_cache *cache)
- {
- sref_cache_set_dirty (cache);
- sref_delta_inc (sref_cache_take_delta (cache, counter));
- }
- void
- sref_counter_inc (struct sref_counter *counter)
- {
- cpu_flags_t flags;
- _Auto cache = sref_cache_acquire (&flags);
- sref_counter_inc_common (counter, cache);
- sref_cache_release (flags);
- }
- void
- sref_counter_dec (struct sref_counter *counter)
- {
- cpu_flags_t flags;
- _Auto cache = sref_cache_acquire (&flags);
- sref_cache_set_dirty (cache);
- sref_delta_dec (sref_cache_take_delta (cache, counter));
- sref_cache_release (flags);
- }
- struct sref_counter*
- sref_weakref_get (struct sref_weakref *weakref)
- {
- cpu_flags_t flags;
- _Auto cache = sref_cache_acquire (&flags);
- _Auto counter = sref_weakref_tryget (weakref);
- if (counter)
- sref_counter_inc_common (counter, cache);
- sref_cache_release (flags);
- return (counter);
- }