Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  123
1 file changed, 80 insertions, 43 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9d0e2ac9356e..1c4add096078 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -483,7 +483,6 @@ module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next
 module_param(rcu_kick_kthreads, bool, 0644);
 
 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
-static void force_quiescent_state(void);
 static int rcu_pending(void);
 
 /*
@@ -508,15 +507,6 @@ unsigned long rcu_exp_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
 
 /*
- * Force a quiescent state.
- */
-void rcu_force_quiescent_state(void)
-{
-	force_quiescent_state();
-}
-EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-
-/*
  * Return the root node of the rcu_state structure.
  */
 static struct rcu_node *rcu_get_root(void)
@@ -610,8 +600,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 {
 	switch (test_type) {
 	case RCU_FLAVOR:
-	case RCU_BH_FLAVOR:
-	case RCU_SCHED_FLAVOR:
 		*flags = READ_ONCE(rcu_state.gp_flags);
 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
 		break;
@@ -1346,7 +1334,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 
 	panic_on_rcu_stall();
 
-	force_quiescent_state();  /* Kick them all. */
+	rcu_force_quiescent_state();  /* Kick them all. */
 }
 
 static void print_cpu_stall(void)
@@ -2625,7 +2613,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
  * Force quiescent states on reluctant CPUs, and also detect which
  * CPUs are in dyntick-idle mode.
  */
-static void force_quiescent_state(void)
+void rcu_force_quiescent_state(void)
 {
 	unsigned long flags;
 	bool ret;
@@ -2657,6 +2645,7 @@ static void force_quiescent_state(void)
 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	rcu_gp_kthread_wake();
 }
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
  * This function checks for grace-period requests that fail to motivate
@@ -2843,9 +2832,9 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
 
 	/*
 	 * Force the grace period if too many callbacks or too long waiting.
-	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
 	 * if some other CPU has recently done so.  Also, don't bother
-	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
 	 * is the only one waiting for a grace period to complete.
 	 */
 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
@@ -2862,7 +2851,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
 			rdp->blimit = LONG_MAX;
 			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
-				force_quiescent_state();
+				rcu_force_quiescent_state();
 			rdp->n_force_qs_snap = rcu_state.n_force_qs;
 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
 		}
@@ -2931,9 +2920,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 			rcu_segcblist_init(&rdp->cblist);
 	}
 	rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
-	if (!lazy)
-		rcu_idle_count_callbacks_posted();
-
 	if (__is_kfree_rcu_offset((unsigned long)func))
 		trace_rcu_kfree_callback(rcu_state.name, head,
 					 (unsigned long)func,
@@ -3003,6 +2989,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
+/*
+ * During early boot, any blocking grace-period wait automatically
+ * implies a grace period.  Later on, this is never the case for PREEMPT.
+ *
+ * However, because a context switch is a grace period for !PREEMPT, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point in time during execution of
+ * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds some
+ * overhead: RCU still operates correctly.
+ */
+static int rcu_blocking_is_gp(void)
+{
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
+	might_sleep();  /* Check for RCU read-side critical section. */
+	preempt_disable();
+	ret = num_online_cpus() <= 1;
+	preempt_enable();
+	return ret;
+}
+
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting.  RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ * In addition, regions of code across which interrupts, preemption, or
+ * softirqs have been disabled also serve as RCU read-side critical
+ * sections.  This includes hardware interrupt handlers, softirq handlers,
+ * and NMI handlers.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last RCU read-side critical section whose beginning
+ * preceded the call to synchronize_rcu().  In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_rcu() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_rcu() and before the beginning of
+ * that RCU read-side critical section.  Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
+ */
+void synchronize_rcu(void)
+{
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_rcu() in RCU read-side critical section");
+	if (rcu_blocking_is_gp())
+		return;
+	if (rcu_gp_is_expedited())
+		synchronize_rcu_expedited();
+	else
+		wait_rcu_gp(call_rcu);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
  *
@@ -3091,28 +3150,6 @@ static int rcu_pending(void)
 }
 
 /*
- * Return true if the specified CPU has any callback.  If all_lazy is
- * non-NULL, store an indication of whether all callbacks are lazy.
- * (If there are no callbacks, all of them are deemed to be lazy.)
- */
-static bool rcu_cpu_has_callbacks(bool *all_lazy)
-{
-	bool al = true;
-	bool hc = false;
-	struct rcu_data *rdp;
-
-	rdp = this_cpu_ptr(&rcu_data);
-	if (!rcu_segcblist_empty(&rdp->cblist)) {
-		hc = true;
-		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
-			al = false;
-	}
-	if (all_lazy)
-		*all_lazy = al;
-	return hc;
-}
-
-/*
  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
  * the compiler is expected to optimize this away.
  */
@@ -3341,7 +3378,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	rcu_prepare_kthreads(cpu);
-	rcu_spawn_all_nocb_kthreads(cpu);
+	rcu_spawn_cpu_nocb_kthread(cpu);
 
 	return 0;
 }
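For context, the kernel-doc added by this patch describes the guarantee synchronize_rcu() gives an updater. The sketch below is not part of the patch; it is a minimal, hedged illustration of the usual publish/wait/free pattern that relies on that guarantee. The demo_* names are hypothetical; the RCU and locking primitives (rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), rcu_dereference_protected(), synchronize_rcu()) are the standard kernel APIs.

/*
 * Illustrative only, not from the patch: readers access a config object
 * under rcu_read_lock(); the updater publishes a replacement, waits for a
 * grace period with synchronize_rcu(), and only then frees the old copy.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_conf {
	int threshold;
};

static struct demo_conf __rcu *demo_conf_ptr;
static DEFINE_SPINLOCK(demo_lock);	/* serializes updaters */

/* Reader side: runs entirely inside an RCU read-side critical section. */
static int demo_read_threshold(void)
{
	struct demo_conf *c;
	int val = 0;

	rcu_read_lock();
	c = rcu_dereference(demo_conf_ptr);
	if (c)
		val = c->threshold;
	rcu_read_unlock();
	return val;
}

/* Updater side: publish new version, wait a grace period, free the old one. */
static int demo_update_threshold(int threshold)
{
	struct demo_conf *newc, *oldc;

	newc = kmalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->threshold = threshold;

	spin_lock(&demo_lock);
	oldc = rcu_dereference_protected(demo_conf_ptr,
					 lockdep_is_held(&demo_lock));
	rcu_assign_pointer(demo_conf_ptr, newc);
	spin_unlock(&demo_lock);

	/*
	 * Per the guarantee documented above: once synchronize_rcu() returns,
	 * every reader that could still hold a reference to oldc has left its
	 * rcu_read_lock()/rcu_read_unlock() section, so freeing it is safe.
	 * call_rcu() or kfree_rcu() would be the non-blocking alternatives.
	 */
	synchronize_rcu();
	kfree(oldc);
	return 0;
}

Note that this updater may sleep in synchronize_rcu(), which is exactly why the patch's rcu_blocking_is_gp() fast path matters: during early boot, or on a !PREEMPT kernel with a single online CPU, the wait degenerates to an immediate return.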
