/*
 * Sleepable Read-Copy Update (SRCU) mechanism for mutual exclusion,
 * tree (hierarchical) variant.
 */
- #define pr_fmt(fmt) "rcu: " fmt
- #include <linux/export.h>
- #include <linux/mutex.h>
- #include <linux/percpu.h>
- #include <linux/preempt.h>
- #include <linux/rcupdate_wait.h>
- #include <linux/sched.h>
- #include <linux/smp.h>
- #include <linux/delay.h>
- #include <linux/module.h>
- #include <linux/srcu.h>
- #include "rcu.h"
- #include "rcu_segcblist.h"
- #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
- static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
- module_param(exp_holdoff, ulong, 0444);
- static ulong counter_wrap_check = (ULONG_MAX >> 2);
- module_param(counter_wrap_check, ulong, 0444);
- static void srcu_invoke_callbacks(struct work_struct *work);
- static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
- static void process_srcu(struct work_struct *work);
- #define spin_lock_rcu_node(p) \
- do { \
- spin_lock(&ACCESS_PRIVATE(p, lock)); \
- smp_mb__after_unlock_lock(); \
- } while (0)
- #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
- #define spin_lock_irq_rcu_node(p) \
- do { \
- spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
- smp_mb__after_unlock_lock(); \
- } while (0)
- #define spin_unlock_irq_rcu_node(p) \
- spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
- #define spin_lock_irqsave_rcu_node(p, flags) \
- do { \
- spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
- smp_mb__after_unlock_lock(); \
- } while (0)
/*
 * Release the specified srcu_struct/srcu_node/srcu_data ->lock and
 * restore the saved interrupt state.  Note: the original definition
 * ended with a dangling line-continuation backslash, which silently
 * splices the following source line into the macro body; it is
 * removed here.
 */
#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
/*
 * Initialize the combining tree of srcu_node structures and the
 * per-CPU srcu_data structures for the specified srcu_struct.
 * When is_static, the per-CPU data was allocated by the static
 * initializer, so the reader counters are left untouched (they may
 * already be in use).
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;	/* No CPUs assigned to this node yet. */
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case: no parent. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node: link to the appropriate parent one level up. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.  Note that ARRAY_SIZE() does not
	 * evaluate its operand, so the as-yet-unassigned sdp is harmless
	 * in this WARN_ON_ONCE().
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		/* Record this CPU's span on its leaf and all ancestors. */
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, so zero the reader counters. */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
- static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
- {
- mutex_init(&sp->srcu_cb_mutex);
- mutex_init(&sp->srcu_gp_mutex);
- sp->srcu_idx = 0;
- sp->srcu_gp_seq = 0;
- sp->srcu_barrier_seq = 0;
- mutex_init(&sp->srcu_barrier_mutex);
- atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
- INIT_DELAYED_WORK(&sp->work, process_srcu);
- if (!is_static)
- sp->sda = alloc_percpu(struct srcu_data);
- init_srcu_struct_nodes(sp, is_static);
- sp->srcu_gp_seq_needed_exp = 0;
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- smp_store_release(&sp->srcu_gp_seq_needed, 0);
- return sp->sda ? 0 : -ENOMEM;
- }
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * __init_srcu_struct - initialize a sleep-RCU structure (lockdep variant)
 * @sp: structure to initialize.
 * @name: lock-class name, also used by lockdep.
 * @key: lockdep class key.
 *
 * Must invoke this on a given srcu_struct before passing that structure
 * to any other function.  Returns 0 on success, or -ENOMEM on
 * per-CPU allocation failure.
 */
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that structure
 * to any other function.  Returns 0 on success, or -ENOMEM on
 * per-CPU allocation failure.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/*
 * First-use initialization of statically allocated srcu_struct
 * structures.  A structure whose ->srcu_gp_seq_needed still has its
 * low-order state bits set has not yet been through
 * init_srcu_struct_fields(); the smp_load_acquire() here pairs with
 * the smp_store_release() there, so an already-initialized structure
 * is detected without taking the lock.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with init's smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed)))
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		/* Lost the initialization race: someone else did it. */
		spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore_rcu_node(sp, flags);
}
- static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
- {
- int cpu;
- unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
- sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
- }
- return sum;
- }
- static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
- {
- int cpu;
- unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
- sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
- }
- return sum;
- }
/*
 * Return true if the number of pre-existing readers for the specified
 * rank (idx) is determined to be zero.  The unlock counters are summed
 * first; the smp_mb() then orders those reads before the lock-counter
 * reads, so a lock sum that still equals the earlier unlock sum cannot
 * have been inflated by a reader this check needs to wait for.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/* Order unlock-count reads before the lock-count reads below. */
	smp_mb();

	return srcu_readers_lock_idx(sp, idx) == unlocks;
}
- static bool srcu_readers_active(struct srcu_struct *sp)
- {
- int cpu;
- unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
- sum += READ_ONCE(cpuc->srcu_lock_count[0]);
- sum += READ_ONCE(cpuc->srcu_lock_count[1]);
- sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
- sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
- }
- return sum;
- }
- #define SRCU_INTERVAL 1
- static unsigned long srcu_get_delay(struct srcu_struct *sp)
- {
- if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
- READ_ONCE(sp->srcu_gp_seq_needed_exp)))
- return 0;
- return SRCU_INTERVAL;
- }
/**
 * _cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 * @quiesced: true if the caller guarantees that no SRCU activity (work
 *	items included) remains; pending work is then treated as a
 *	caller bug (WARN and leak) instead of being flushed.
 *
 * Must invoke this only after you are finished using a given
 * srcu_struct.  On any detected misuse the structure is deliberately
 * leaked rather than freed out from under active users.
 */
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Expedited GP in flight: just leak it! */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Readers still active: just leak it! */
	if (quiesced) {
		if (WARN_ON(delayed_work_pending(&sp->work)))
			return; /* Caller lied about quiescence: leak it! */
	} else {
		flush_delayed_work(&sp->work);
	}
	/* Same treatment for each CPU's callback-invocation work. */
	for_each_possible_cpu(cpu)
		if (quiesced) {
			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
				return; /* Just leak it! */
		} else {
			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
		}
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, using the rank selected by the current low-order bit of
 * ->srcu_idx.  Returns that index, which must later be passed to
 * __srcu_read_unlock().  The smp_mb() orders the counter increment
 * before the reader's critical-section accesses, so the grace-period
 * machinery cannot miss this reader.
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* Keep the critical section after the increment. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  The smp_mb() orders the critical
 * section's accesses before the unlock-counter increment.  Note that
 * the unlock may run on a different CPU than the matching lock; only
 * the global sums must balance.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* Keep the critical section before the increment. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
- #define SRCU_RETRY_CHECK_DELAY 5
/*
 * Start an SRCU grace period.  Caller must hold sp's ->lock (asserted
 * below); the non-_irq spin_lock_rcu_node() on sdp suggests the caller
 * is also expected to have interrupts disabled — all current callers
 * hold the lock via the _irq/_irqsave variants.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	/* Bring this CPU's callback list up to date before starting. */
	spin_lock_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);
	smp_mb(); /* Order callback handling before grace-period start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
/* Per-CPU flag tracking whether SRCU may queue work on that CPU. */
DEFINE_PER_CPU(bool, srcu_online);

/* Mark the specified CPU as usable for SRCU work-queueing. */
void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

/* Mark the specified CPU as not usable for SRCU work-queueing. */
void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}
/*
 * Place the workqueue handler on the specified CPU if that CPU is
 * marked online, otherwise queue it without a CPU preference.
 * preempt_disable() keeps the online check and the enqueue on the
 * same CPU, limiting the race with CPU-hotplug transitions.
 * Returns whatever the underlying queue_delayed_work*() returned.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}
/*
 * Schedule callback invocation for the specified srcu_data structure,
 * preferably on the corresponding CPU, after the specified delay.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
}
- static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
- unsigned long mask, unsigned long delay)
- {
- int cpu;
- for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
- if (!(mask & (1 << (cpu - snp->grplo))))
- continue;
- srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
- }
- }
/*
 * Note the end of an SRCU grace period.  Initiates callback
 * invocation on the relevant CPUs and starts a new grace period if
 * one is needed.  The ->srcu_cb_mutex keeps callback handling from
 * one grace period from racing with the next; it is acquired before
 * the grace period is marked ended and released only after the
 * combining-tree traversal completes.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point, but only one. */

	/* Initiate callback invocation as needed throughout the tree. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		srcu_reschedule(sp, 0);
	} else {
		spin_unlock_irq_rcu_node(sp);
	}
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  Walks from the specified leaf srcu_node up
 * to the root, stopping early if the request (sequence number s) has
 * already been recorded or has already completed, and finally records
 * the expedited request on the srcu_struct itself.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		/* Lock-free early exit: done, or already recorded here. */
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			/* Someone beat us to it under the lock. */
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	/* Reached the root: record the request on the srcu_struct. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent
 * grace-period requests.  Walks up the combining tree from sdp's leaf,
 * recording the need for grace period s; if some other CPU has already
 * recorded it at a node, the walk stops there.  If no grace period is
 * in progress and one is needed, it is started here.
 *
 * Note that this function also does the work of srcu_funnel_exp_start()
 * (sometimes by calling it directly) when !do_norm.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				/* GP already ended; invoke our callbacks. */
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s);
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
	}
	spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
 * Wait until all readers counted by array index idx complete, but loop
 * at most @trycount times, sleeping SRCU_RETRY_CHECK_DELAY microseconds
 * between tries.  The "+ !srcu_get_delay(sp)" term grants one extra
 * retry when the grace period is expedited (delay of zero).  Returns
 * true if readers drained, false if retries were exhausted.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
/*
 * Increment ->srcu_idx so that future SRCU readers use the other rank
 * of counters.  Full barriers on both sides of the update keep the
 * flip ordered against the counter reads performed by the grace-period
 * scans before and after it, pairing with the smp_mb() calls in
 * __srcu_read_lock() and __srcu_read_unlock().
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/* Order prior counter reads before the index update. */
	smp_mb();
	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/* Order the index update before subsequent counter reads. */
	smp_mb();
}
/*
 * Heuristically decide whether the specified srcu_struct is currently
 * idle, in which case an expedited first grace period is cheap.  This
 * is best-effort: false negatives merely lose an optimization, and the
 * checks below explain the false-positive hazards.  Returns true if
 * the structure appears idle.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If this CPU has pending callbacks, we are definitely not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * Too soon after the last grace period?  Respect the holdoff so
	 * a burst of synchronize_srcu() calls is not all expedited.
	 */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness via sequence numbers. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq read before ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq_needed read before re-read below. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}
/*
 * Deliberately empty callback.  __call_srcu() substitutes this as the
 * ->func of a duplicate-queued rcu_head so that whichever copy is
 * eventually invoked does nothing.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}
/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and, if needed, propagate the need for a (possibly
 * expedited, per !do_norm) grace period up the combining tree.
 *
 * @sp: srcu_struct to post the callback against.
 * @rhp: callback structure; @func will be invoked after a full
 *	grace period elapses.
 * @func: callback function.
 * @do_norm: true for a normal grace period, false to also request an
 *	expedited one.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock_rcu_node(sdp);	/* Interrupts disabled above. */
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;	/* First request for GP s on this CPU. */
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;	/* First expedited request for GP s. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}
/**
 * call_srcu - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * Always requests a normal (non-expedited) grace period.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
/*
 * Helper function for synchronize_srcu() and
 * synchronize_srcu_expedited(): posts an on-stack callback and waits
 * for it.  @do_norm selects a normal (true) or expedited (false)
 * grace period.
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return; /* Single-CPU early boot: GPs are implicit. */
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Order this task's subsequent accesses after the grace period,
	 * even if the wakeup raced with the completion.
	 */
	smp_mb();
}
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period via the expedited path.  Note that the
 * argument is NOT inverted by mistake: rcu_gp_is_normal() returns true
 * when normal grace periods are being forced system-wide, in which
 * case do_norm is set and the expedited behavior is suppressed.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
- void synchronize_srcu(struct srcu_struct *sp)
- {
- if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
- synchronize_srcu_expedited(sp);
- else
- __synchronize_srcu(sp, true);
- }
- EXPORT_SYMBOL_GPL(synchronize_srcu);
/*
 * Callback function for srcu_barrier() use: decrements the outstanding
 * per-CPU callback count and completes the barrier when it reaches
 * zero.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}
/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 *
 * Entrains a srcu_barrier_cb() callback behind the existing callbacks
 * on each CPU and waits until all of them have been invoked.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		/* Someone else's barrier already covered our callbacks. */
		smp_mb(); /* Force ordering following a previous barrier. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only on
	 * CPUs already having callbacks; entrain failure means that
	 * CPU's list was empty, so nothing to wait for there.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Reports the current reader-index value (->srcu_idx), which advances
 * as grace periods flip the reader rank; used by torture testing as a
 * coarse progress indicator.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
/*
 * Core SRCU grace-period state machine: IDLE -> SCAN1 -> flip ->
 * SCAN2 -> end.  ->srcu_gp_mutex serializes state advancement; every
 * return path below releases it (srcu_gp_end() does so internally).
 * Returns without advancing when readers for the scanned index have
 * not yet drained, leaving the remaining work to a later invocation.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * If idle, start a grace period if one is needed; the acquire
	 * load pairs with rcu_seq_start()'s sequence update.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq));
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			/* No grace period needed: nothing to do. */
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			/* Someone else started the GP; let them drive it. */
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
		/* SCAN2 must wait out the now-inactive index's readers. */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
	}
}
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  The ->srcu_cblist_invoking flag keeps concurrent
 * workqueue invocations from extracting the same callbacks twice.
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;	/* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running
 * state (no work re-queued).
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP: start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
}
- static void process_srcu(struct work_struct *work)
- {
- struct srcu_struct *sp;
- sp = container_of(work, struct srcu_struct, work.work);
- srcu_advance_state(sp);
- srcu_reschedule(sp, srcu_get_delay(sp));
- }
- void srcutorture_get_gp_data(enum rcutorture_type test_type,
- struct srcu_struct *sp, int *flags,
- unsigned long *gp_seq)
- {
- if (test_type != SRCU_FLAVOR)
- return;
- *flags = 0;
- *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
- }
- EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
/*
 * Print per-CPU and aggregate SRCU reader-counter statistics for
 * torture testing.  The sampled counts are racy snapshots; the
 * smp_rmb() merely orders the unlock-count reads before the
 * lock-count reads, mirroring srcu_readers_active_idx_check().
 */
void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(sp->sda, cpu);
		u0 = sdp->srcu_unlock_count[!idx];
		u1 = sdp->srcu_unlock_count[idx];

		/* Make sure that a lock is always counted if the
		 * corresponding unlock is counted. */
		smp_rmb();

		l0 = sdp->srcu_lock_count[!idx];
		l1 = sdp->srcu_lock_count[idx];
		c0 = l0 - u0;	/* Approximate in-flight readers, old rank. */
		c1 = l1 - u1;	/* Approximate in-flight readers, current rank. */
		pr_cont(" %d(%ld,%ld %1p)",
			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
/*
 * Boot-time announcement of the SRCU flavor in use, plus any
 * non-default exp_holdoff module-parameter setting.
 */
static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);
|