| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /kernel/sched/core.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'kernel/sched/core.c')
| -rw-r--r-- | kernel/sched/core.c | 59 | 
1 file changed, 24 insertions, 35 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 211890edf37e..78d8facba456 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10,6 +10,8 @@
 #include <linux/kthread.h>
 #include <linux/nospec.h>

+#include <linux/kcov.h>
+
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -1191,6 +1193,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p);
 		p->se.nr_migrations++;
+		rseq_migrate(p);
 		perf_event_task_migrate(p);
 	}
@@ -2194,27 +2197,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif

-#ifdef CONFIG_NUMA_BALANCING
-	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
-		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
-		p->mm->numa_scan_seq = 0;
-	}
-
-	if (clone_flags & CLONE_VM)
-		p->numa_preferred_nid = current->numa_preferred_nid;
-	else
-		p->numa_preferred_nid = -1;
-
-	p->node_stamp = 0ULL;
-	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
-	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
-	p->numa_work.next = &p->numa_work;
-	p->numa_faults = NULL;
-	p->last_task_numa_placement = 0;
-	p->last_sum_exec_runtime = 0;
-
-	p->numa_group = NULL;
-#endif /* CONFIG_NUMA_BALANCING */
+	init_numa_balancing(clone_flags, p);
 }

 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
@@ -2652,8 +2635,10 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	kcov_prepare_switch(prev);
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
+	rseq_preempt(prev);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_task(next);
 	prepare_arch_switch(next);
@@ -2720,6 +2705,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	finish_task(prev);
 	finish_lock_switch(rq);
 	finish_arch_post_lock_switch();
+	kcov_finish_switch(current);
 	fire_sched_in_preempt_notifiers(current);

 	/*
@@ -4050,6 +4036,23 @@ int idle_cpu(int cpu)
 }

 /**
+ * available_idle_cpu - is a given CPU idle for enqueuing work.
+ * @cpu: the CPU in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int available_idle_cpu(int cpu)
+{
+	if (!idle_cpu(cpu))
+		return 0;
+
+	if (vcpu_is_preempted(cpu))
+		return 0;
+
+	return 1;
+}
+
+/**
  * idle_task - return the idle task for a given CPU.
  * @cpu: the processor in question.
  *
@@ -5025,20 +5028,6 @@ int __cond_resched_lock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_lock);

-int __sched __cond_resched_softirq(void)
-{
-	BUG_ON(!in_softirq());
-
-	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
-		local_bh_enable();
-		preempt_schedule_common();
-		local_bh_disable();
-		return 1;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(__cond_resched_softirq);
-
 /**
  * yield - yield the current processor to other threads.
  *
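A few notes on the scheduler-visible pieces this merge pulls into kernel/sched/core.c.

The rseq_migrate(p) call in set_task_cpu() and rseq_preempt(prev) in prepare_task_switch() feed the restartable-sequences machinery: they record that a task was migrated or preempted so that, on its next return to user space, any rseq critical section it was inside gets aborted and restarted. The sketch below is a standalone userspace model of that event-flag pattern, not kernel code; the struct, the simplified signatures, and the notify-resume function here are illustrative stand-ins for the kernel's per-task rseq bookkeeping.

```c
#include <stdio.h>

/* Hypothetical per-task bookkeeping; the real kernel keeps an event
 * mask in task_struct and reports back via the user's struct rseq. */
struct task {
	const char *name;
	unsigned int rseq_event_mask;	/* pending events to report */
};

#define RSEQ_EVENT_PREEMPT	(1u << 0)
#define RSEQ_EVENT_MIGRATE	(1u << 1)

/* Called from the context-switch path, as rseq_preempt(prev) is. */
static void rseq_preempt(struct task *t)
{
	t->rseq_event_mask |= RSEQ_EVENT_PREEMPT;
}

/* Called when the task changes CPU, as rseq_migrate(p) is. */
static void rseq_migrate(struct task *t)
{
	t->rseq_event_mask |= RSEQ_EVENT_MIGRATE;
}

/* On return to user space the kernel checks the pending events and,
 * if the task sits inside an rseq critical section, restarts it. */
static void rseq_notify_resume(struct task *t)
{
	if (t->rseq_event_mask) {
		printf("%s: abort critical section (events 0x%x)\n",
		       t->name, t->rseq_event_mask);
		t->rseq_event_mask = 0;
	}
}

int main(void)
{
	struct task t = { "worker", 0 };

	rseq_preempt(&t);	/* task was switched out */
	rseq_migrate(&t);	/* and requeued on another CPU */
	rseq_notify_resume(&t);	/* fix-up on return to user space */
	return 0;
}
```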
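The kcov_prepare_switch(prev) / kcov_finish_switch(current) pair brackets the window in which `current` is ambiguous, so that KCOV coverage events (emitted by the compiler-inserted coverage hook) are not written into the wrong task's buffer mid-switch. A minimal model of that flagging scheme, with made-up names and a plain counter standing in for the real mmap'd coverage area:

```c
#include <stdio.h>
#include <stdbool.h>

struct task {
	const char *name;
	bool in_ctxsw;		/* models "suppress coverage: mid-switch" */
	int cover_count;	/* events attributed to this task */
};

static struct task *current_task;

/* Set the flag on the outgoing task before the switch begins... */
static void kcov_prepare_switch(struct task *prev) { prev->in_ctxsw = true; }
/* ...and clear it on the incoming task once the switch is done (it
 * was set when that task was itself switched out earlier). */
static void kcov_finish_switch(struct task *next) { next->in_ctxsw = false; }

/* Model of the coverage hook: drop events that fire while the
 * scheduler is between tasks. */
static void trace_pc(void)
{
	struct task *t = current_task;

	if (t && !t->in_ctxsw)
		t->cover_count++;
}

int main(void)
{
	struct task a = { "A", false, 0 }, b = { "B", false, 0 };

	current_task = &a;
	trace_pc();			/* counted for A */

	kcov_prepare_switch(&a);	/* switch begins */
	trace_pc();			/* dropped: mid-switch */
	current_task = &b;
	kcov_finish_switch(&b);		/* switch complete */

	trace_pc();			/* counted for B */
	printf("A=%d B=%d\n", a.cover_count, b.cover_count);
	return 0;
}
```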
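The new available_idle_cpu() helper tightens what "idle" means when picking a CPU to enqueue work on: under virtualization, a guest CPU can look idle while its backing host CPU has been preempted away, making it a poor wake-up target, so the helper requires both idle_cpu() and !vcpu_is_preempted(). Below is a runnable userspace model of that logic; idle_cpu() and vcpu_is_preempted() are stubbed with made-up state purely for illustration.

```c
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* Stub state standing in for the scheduler's per-CPU runqueues and
 * the paravirt preemption hint; the values are invented. */
static bool cpu_idle[NR_CPUS]       = { false, true, true,  false };
static bool vcpu_preempted[NR_CPUS] = { false, true, false, false };

static int idle_cpu(int cpu)          { return cpu_idle[cpu]; }
static int vcpu_is_preempted(int cpu) { return vcpu_preempted[cpu]; }

/* Mirrors the helper added by this merge: idle AND not preempted. */
static int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

int main(void)
{
	/* With the stub state above, only cpu2 is both idle and not
	 * preempted, so only it is a good target for enqueuing work. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: idle=%d available=%d\n",
		       cpu, idle_cpu(cpu), available_idle_cpu(cpu));
	return 0;
}
```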
