summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c     5
-rw-r--r--  kernel/sched/cputime.c  13
-rw-r--r--  kernel/sched/sched.h    3
3 files changed, 19 insertions, 2 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 23406f037dde..7c8b769c0d0d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -778,6 +778,11 @@ struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
* RQ-clock updating methods:
*/
+/* Use CONFIG_PARAVIRT as this will avoid more #ifdef in arch code. */
+#ifdef CONFIG_PARAVIRT
+struct static_key paravirt_steal_rq_enabled;
+#endif
+
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ff0dfca95420..fbf31db0d2f3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -252,6 +252,19 @@ void __account_forceidle_time(struct task_struct *p, u64 delta)
* ticks are not redelivered later. Due to that, this function may on
* occasion account more time than the calling functions think elapsed.
*/
+#ifdef CONFIG_PARAVIRT
+struct static_key paravirt_steal_enabled;
+
+#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+#endif
+#endif
+
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 62f9278b1663..e51bfa3586fa 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -86,9 +86,8 @@ struct rt_rq;
struct sched_group;
struct cpuidle_state;
-#ifdef CONFIG_PARAVIRT
+#if defined(CONFIG_PARAVIRT) && !defined(CONFIG_HAVE_PV_STEAL_CLOCK_GEN)
# include <asm/paravirt.h>
-# include <asm/paravirt_api_clock.h>
#endif
#include <asm/barrier.h>