Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8eff357b0436..8293bae1dec1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -573,7 +573,7 @@ void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
 }
 EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
 
-#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
 /*
  * An empty function that will trigger a reschedule on
  * IRQ tail once IRQs get re-enabled on userspace/guest resume.
@@ -602,7 +602,7 @@ noinstr void rcu_irq_work_resched(void)
 	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
 		return;
 
-	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
+	if (IS_ENABLED(CONFIG_VIRT_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
 		return;
 
 	instrumentation_begin();
@@ -611,7 +611,7 @@ noinstr void rcu_irq_work_resched(void)
 	}
 	instrumentation_end();
 }
-#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
+#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK)) */
 
 #ifdef CONFIG_PROVE_RCU
 /**
@@ -3800,6 +3800,11 @@ static void rcu_barrier_handler(void *cpu_in)
  * to complete.  For example, if there are no RCU callbacks queued anywhere
  * in the system, then rcu_barrier() is within its rights to return
  * immediately, without waiting for anything, much less an RCU grace period.
+ * In fact, rcu_barrier() will normally not result in any RCU grace periods
+ * beyond those that were already destined to be executed.
+ *
+ * In kernels built with CONFIG_RCU_LAZY=y, this function also hurries all
+ * pending lazy RCU callbacks.
  */
 void rcu_barrier(void)
 {
@@ -4885,10 +4890,10 @@ void __init rcu_init(void)
 	rcutree_online_cpu(cpu);
 
 	/* Create workqueue for Tree SRCU and for expedited GPs. */
-	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
+	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	WARN_ON(!rcu_gp_wq);
-	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
+	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	WARN_ON(!sync_wq);
 
 	/* Respect if explicitly disabled via a boot parameter. */
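
For context on the rcu_barrier() comment added above: the function's contract is to wait until all previously queued RCU callbacks have been invoked, not to start extra grace periods. Below is a minimal, hypothetical sketch of the usual call_rcu()/rcu_barrier() pairing on a module-unload path; all demo_* names are invented for illustration and are not part of this commit.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only; the demo_* names are hypothetical. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_obj {
	int payload;
	struct rcu_head rcu;
};

static struct demo_obj __rcu *demo_ptr;

/* RCU callback: runs after a grace period has elapsed. */
static void demo_obj_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_obj, rcu));
}

static int __init demo_init(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	rcu_assign_pointer(demo_ptr, obj);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Single-threaded exit path, hence the trivial "1" condition. */
	struct demo_obj *obj = rcu_replace_pointer(demo_ptr, NULL, 1);

	if (obj)
		call_rcu(&obj->rcu, demo_obj_free);	/* queue the free */
	/*
	 * Block until every callback queued above has been invoked.
	 * Per the updated comment, this normally adds no grace periods
	 * beyond those already in flight, and with CONFIG_RCU_LAZY=y it
	 * also hurries callbacks that were queued lazily.
	 */
	rcu_barrier();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");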

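The alloc_workqueue() hunk at the end of the diff distinguishes an explicitly per-CPU workqueue (rcu_gp_wq) from an unbound one (sync_wq). The sketch below shows the two flag combinations in a self-contained module; it assumes a tree that already carries the WQ_PERCPU flag (as this commit's tree does), and every demo_* name is made up for illustration.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch of WQ_PERCPU vs. WQ_UNBOUND; demo_* names are hypothetical. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct workqueue_struct *demo_percpu_wq;
static struct workqueue_struct *demo_unbound_wq;

static void demo_work_fn(struct work_struct *work)
{
	/* raw_ variant: work items run preemptible, the CPU is informational. */
	pr_info("demo work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_wq_init(void)
{
	/* Explicitly per-CPU: work is executed on the CPU it was queued from. */
	demo_percpu_wq = alloc_workqueue("demo_percpu",
					 WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (!demo_percpu_wq)
		return -ENOMEM;

	/* Unbound: the workqueue code may run the work on any CPU. */
	demo_unbound_wq = alloc_workqueue("demo_unbound",
					  WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!demo_unbound_wq) {
		destroy_workqueue(demo_percpu_wq);
		return -ENOMEM;
	}

	queue_work(demo_unbound_wq, &demo_work);
	return 0;
}

static void __exit demo_wq_exit(void)
{
	/* destroy_workqueue() drains any remaining work before freeing. */
	destroy_workqueue(demo_unbound_wq);
	destroy_workqueue(demo_percpu_wq);
}

module_init(demo_wq_init);
module_exit(demo_wq_exit);
MODULE_LICENSE("GPL");

WQ_PERCPU appears to spell out what has historically been the implicit default for alloc_workqueue(), while WQ_UNBOUND lets sync_wq's work items migrate off the submitting CPU.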