From 6fec833b9d70c54ceacbf7d07665215fbd0cddef Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 28 Mar 2025 21:42:48 +0100 Subject: cpufreq: Add and use cpufreq policy locking guards Introduce "read" and "write" locking guards for cpufreq policies and use them where applicable in the cpufreq core. No intentional functional impact. Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Reviewed-by: Mario Limonciello Acked-by: Sudeep Holla Tested-by: Sudeep Holla Link: https://patch.msgid.link/8518682.T7Z3S40VBb@rjwysocki.net --- include/linux/cpufreq.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 400fee6427a5..cb972d2aa8df 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -170,6 +170,12 @@ struct cpufreq_policy { struct notifier_block nb_max; }; +DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *, + down_write(&_T->rwsem), up_write(&_T->rwsem)) + +DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *, + down_read(&_T->rwsem), up_read(&_T->rwsem)) + /* * Used for passing new cpufreq policy data to the cpufreq driver's ->verify() * callback for sanitization. That callback is only expected to modify the min -- cgit v1.2.3 From c7282dce257480f0f22ed3db69cfb400a18709f4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 28 Mar 2025 21:45:38 +0100 Subject: cpufreq: Drop cpufreq_cpu_acquire() and cpufreq_cpu_release() Since cpufreq_cpu_acquire() and cpufreq_cpu_release() have no more users in the tree, remove them. No intentional functional impact. Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Reviewed-by: Mario Limonciello Acked-by: Sudeep Holla Tested-by: Sudeep Holla Link: https://patch.msgid.link/3880470.kQq0lBPeGt@rjwysocki.net --- drivers/cpufreq/cpufreq.c | 45 --------------------------------------------- include/linux/cpufreq.h | 2 -- 2 files changed, 47 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 40244ff620b6..2ed873777fb5 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -255,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); -/** - * cpufreq_cpu_release - Unlock a policy and decrement its usage counter. - * @policy: cpufreq policy returned by cpufreq_cpu_acquire(). - */ -void cpufreq_cpu_release(struct cpufreq_policy *policy) -{ - if (WARN_ON(!policy)) - return; - - lockdep_assert_held(&policy->rwsem); - - up_write(&policy->rwsem); - - cpufreq_cpu_put(policy); -} - -/** - * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it. - * @cpu: CPU to find the policy for. - * - * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and - * if the policy returned by it is not NULL, acquire its rwsem for writing. - * Return the policy if it is active or release it and return NULL otherwise. - * - * The policy returned by this function has to be released with the help of - * cpufreq_cpu_release() in order to release its rwsem and balance its usage - * counter properly. 
- */ -struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - - if (!policy) - return NULL; - - down_write(&policy->rwsem); - - if (policy_is_inactive(policy)) { - cpufreq_cpu_release(policy); - return NULL; - } - - return policy; -} - /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index cb972d2aa8df..a33a094ef755 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -241,8 +241,6 @@ void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); -struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu); -void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); void refresh_frequency_limits(struct cpufreq_policy *policy); void cpufreq_update_policy(unsigned int cpu); -- cgit v1.2.3 From eaff6b62d3439ca6ee00dba4f77673a8c37dac20 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 28 Mar 2025 21:48:54 +0100 Subject: cpufreq: Pass policy pointer to ->update_limits() Since cpufreq_update_limits() obtains a cpufreq policy pointer for the given CPU and reference counts the corresponding policy object, it may as well pass the policy pointer to the cpufreq driver's ->update_limits() callback which allows that callback to avoid invoking cpufreq_cpu_get() for the same CPU. Accordingly, redefine ->update_limits() to take a policy pointer instead of a CPU number and update both drivers implementing it, intel_pstate and amd-pstate, as needed. Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Reviewed-by: Mario Limonciello Acked-by: Srinivas Pandruvada Acked-by: Sudeep Holla Tested-by: Sudeep Holla Link: https://patch.msgid.link/8560367.NyiUUSuA9g@rjwysocki.net --- drivers/cpufreq/amd-pstate.c | 7 ++----- drivers/cpufreq/cpufreq.c | 2 +- drivers/cpufreq/intel_pstate.c | 29 ++++++++++++++++++----------- include/linux/cpufreq.h | 2 +- 4 files changed, 22 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 6789eed1bb5b..b9d59c7425f5 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -821,19 +821,16 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) schedule_work(&sched_prefcore_work); } -static void amd_pstate_update_limits(unsigned int cpu) +static void amd_pstate_update_limits(struct cpufreq_policy *policy) { - struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); struct amd_cpudata *cpudata; u32 prev_high = 0, cur_high = 0; bool highest_perf_changed = false; + unsigned int cpu = policy->cpu; if (!amd_pstate_prefcore) return; - if (!policy) - return; - if (amd_get_highest_perf(cpu, &cur_high)) return; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index c885e0ec174f..2b91ba503b32 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2769,7 +2769,7 @@ void cpufreq_update_limits(unsigned int cpu) return; if (cpufreq_driver->update_limits) - cpufreq_driver->update_limits(cpu); + cpufreq_driver->update_limits(policy); else cpufreq_policy_refresh(policy); } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 108e4c6a371e..f5ca04b98b92 100644 --- a/drivers/cpufreq/intel_pstate.c +++ 
b/drivers/cpufreq/intel_pstate.c @@ -1353,14 +1353,9 @@ static void intel_pstate_update_policies(void) cpufreq_update_policy(cpu); } -static bool intel_pstate_update_max_freq(struct cpudata *cpudata) +static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy, + struct cpudata *cpudata) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpudata->cpu); - if (!policy) - return false; - guard(cpufreq_policy_write)(policy); if (hwp_active) @@ -1370,16 +1365,28 @@ static bool intel_pstate_update_max_freq(struct cpudata *cpudata) cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; refresh_frequency_limits(policy); +} + +static bool intel_pstate_update_max_freq(struct cpudata *cpudata) +{ + struct cpufreq_policy *policy __free(put_cpufreq_policy); + + policy = cpufreq_cpu_get(cpudata->cpu); + if (!policy) + return false; + + __intel_pstate_update_max_freq(policy, cpudata); return true; } -static void intel_pstate_update_limits(unsigned int cpu) +static void intel_pstate_update_limits(struct cpufreq_policy *policy) { - struct cpudata *cpudata = all_cpu_data[cpu]; + struct cpudata *cpudata = all_cpu_data[policy->cpu]; - if (intel_pstate_update_max_freq(cpudata)) - hybrid_update_capacity(cpudata); + __intel_pstate_update_max_freq(policy, cpudata); + + hybrid_update_capacity(cpudata); } static void intel_pstate_update_limits_for_all(void) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index a33a094ef755..f3cf2adea18f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -399,7 +399,7 @@ struct cpufreq_driver { unsigned int (*get)(unsigned int cpu); /* Called to update policy limits on firmware notifications. */ - void (*update_limits)(unsigned int cpu); + void (*update_limits)(struct cpufreq_policy *policy); /* optional */ int (*bios_limit)(int cpu, unsigned int *limit); -- cgit v1.2.3 From 589a7c406a721f5d3a818ad0003799180f027dfa Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 10 Apr 2025 12:20:43 +0200 Subject: cpufreq: Drop unused cpufreq_get_policy() A recent change has introduced a bug into cpufreq_get_policy(), but this function is not used, so it's better to drop it altogether. Reported-by: Dan Carpenter Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Acked-by: Sudeep Holla Link: https://patch.msgid.link/2802770.mvXUDI8C0e@rjwysocki.net --- drivers/cpufreq/cpufreq.c | 25 ------------------------- include/linux/cpufreq.h | 1 - 2 files changed, 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2b91ba503b32..2e7c730f0f7a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2549,31 +2549,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); * POLICY INTERFACE * *********************************************************************/ -/** - * cpufreq_get_policy - get the current cpufreq_policy - * @policy: struct cpufreq_policy into which the current cpufreq_policy - * is written - * @cpu: CPU to find the policy for - * - * Reads the current cpufreq policy. 
- */ -int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) -{ - struct cpufreq_policy *cpu_policy __free(put_cpufreq_policy); - - if (!policy) - return -EINVAL; - - cpu_policy = cpufreq_cpu_get(cpu); - if (!cpu_policy) - return -EINVAL; - - memcpy(policy, cpu_policy, sizeof(*policy)); - - return 0; -} -EXPORT_SYMBOL(cpufreq_get_policy); - DEFINE_PER_CPU(unsigned long, cpufreq_pressure); /** diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f3cf2adea18f..850fe7371cb1 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -241,7 +241,6 @@ void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); -int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); void refresh_frequency_limits(struct cpufreq_policy *policy); void cpufreq_update_policy(unsigned int cpu); void cpufreq_update_limits(unsigned int cpu); -- cgit v1.2.3 From 7a3be00771aa9786c7bb4cdb0ee36fee45f67d69 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 23 Apr 2025 15:48:40 +0530 Subject: OPP: Return opp from dev_pm_opp_get() For convenience of users, return back the pointer to the opp from dev_pm_opp_get(), so they can do: opp = dev_pm_opp_get(tmp_opp); No intentional functional impact. Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 13 ++++++------- include/linux/pm_opp.h | 7 +++++-- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e63a9b009df1..150439a18b87 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1188,8 +1188,8 @@ static void _find_current_opp(struct device *dev, struct opp_table *opp_table) */ if (IS_ERR(opp)) { mutex_lock(&opp_table->lock); - opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); - dev_pm_opp_get(opp); + opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list, + struct dev_pm_opp, node)); mutex_unlock(&opp_table->lock); } @@ -1329,8 +1329,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, dev_pm_opp_put(old_opp); /* Make sure current_opp doesn't get freed */ - dev_pm_opp_get(opp); - opp_table->current_opp = opp; + opp_table->current_opp = dev_pm_opp_get(opp); return ret; } @@ -1724,9 +1723,10 @@ static void _opp_kref_release(struct kref *kref) kfree(opp); } -void dev_pm_opp_get(struct dev_pm_opp *opp) +struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp) { kref_get(&opp->kref); + return opp; } EXPORT_SYMBOL_GPL(dev_pm_opp_get); @@ -2706,8 +2706,7 @@ struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, list_for_each_entry(opp, &src_table->opp_list, node) { if (opp == src_opp) { - dest_opp = opp->required_opps[i]; - dev_pm_opp_get(dest_opp); + dest_opp = dev_pm_opp_get(opp->required_opps[i]); break; } } diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index c247317aae38..5e4c3428b139 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -161,7 +161,7 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, unsigned int *bw, int index); -void dev_pm_opp_get(struct dev_pm_opp *opp); +struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp); void dev_pm_opp_put(struct dev_pm_opp *opp); int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp); @@ -345,7 +345,10 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } -static inline void 
dev_pm_opp_get(struct dev_pm_opp *opp) {} +static inline struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp) +{ + return opp; +} static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} -- cgit v1.2.3 From ead694941686345bfd3f95100d889191cb9e3cda Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 23 Apr 2025 15:48:40 +0530 Subject: OPP: Return opp_table from dev_pm_opp_get_opp_table_ref() For convenience of users, return back the pointer to the opp_table from dev_pm_opp_get_opp_table_ref(), so they can do: opp_table = dev_pm_opp_get_opp_table_ref(tmp_table); No intentional functional impact. Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 9 ++++----- drivers/opp/of.c | 9 +++------ include/linux/pm_opp.h | 7 +++++-- 3 files changed, 12 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 150439a18b87..14fb0f43cc77 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -58,10 +58,8 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev) struct opp_table *opp_table; list_for_each_entry(opp_table, &opp_tables, node) { - if (_find_opp_dev(dev, opp_table)) { - dev_pm_opp_get_opp_table_ref(opp_table); - return opp_table; - } + if (_find_opp_dev(dev, opp_table)) + return dev_pm_opp_get_opp_table_ref(opp_table); } return ERR_PTR(-ENODEV); @@ -1687,9 +1685,10 @@ static void _opp_table_kref_release(struct kref *kref) kfree(opp_table); } -void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) +struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) { kref_get(&opp_table->kref); + return opp_table; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 87cb6aeb49ed..c240acc81a8d 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -60,10 +60,8 @@ struct opp_table *_managed_opp(struct device *dev, int index) * But the OPPs will be considered as shared only if the * OPP table contains a "opp-shared" property. 
*/ - if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - dev_pm_opp_get_opp_table_ref(opp_table); - managed_table = opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) + managed_table = dev_pm_opp_get_opp_table_ref(opp_table); break; } @@ -405,8 +403,7 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) continue; } - required_opp_tables[i] = new_table; - dev_pm_opp_get_opp_table_ref(new_table); + required_opp_tables[i] = dev_pm_opp_get_opp_table_ref(new_table); /* Link OPPs now */ ret = lazy_link_required_opps(opp_table, new_table, i); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 5e4c3428b139..0deddfa91aca 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -100,7 +100,7 @@ struct dev_pm_opp_data { #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); -void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table); void dev_pm_opp_put_opp_table(struct opp_table *opp_table); unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index); @@ -207,7 +207,10 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device * return ERR_PTR(-EOPNOTSUPP); } -static inline void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) {} +static inline struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) +{ + return opp_table; +} static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} -- cgit v1.2.3 From 8c5d8c0b9e8184a86baede72ca392f90d867d22e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 23 Apr 2025 14:21:28 +0530 Subject: OPP: Define and use scope-based cleanup helpers Define and use scope-based cleanup helpers for `struct opp` and `struct opp_table`. No intentional functional impact. 
Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 234 +++++++++++++++++-------------------------------- drivers/opp/cpu.c | 27 ++---- drivers/opp/of.c | 24 ++--- include/linux/pm_opp.h | 7 ++ 4 files changed, 102 insertions(+), 190 deletions(-) (limited to 'include/linux') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 14fb0f43cc77..31d5b749afc7 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -317,18 +317,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); */ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) { - struct opp_table *opp_table; - unsigned long clock_latency_ns; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0; - clock_latency_ns = opp_table->clock_latency_ns_max; - - dev_pm_opp_put_opp_table(opp_table); - - return clock_latency_ns; + return opp_table->clock_latency_ns_max; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); @@ -340,7 +335,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); */ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp; struct regulator *reg; unsigned long latency_ns = 0; @@ -356,13 +351,13 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) /* Regulator may not be required for the device */ if (!opp_table->regulators) - goto put_opp_table; + return 0; count = opp_table->regulator_count; uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) - goto put_opp_table; + return 0; mutex_lock(&opp_table->lock); @@ -395,8 +390,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) } kfree(uV); -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); return latency_ns; } @@ -426,7 +419,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); */ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); unsigned long freq = 0; opp_table = _find_opp_table(dev); @@ -436,8 +429,6 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) if (opp_table->suspend_opp && opp_table->suspend_opp->available) freq = dev_pm_opp_get_freq(opp_table->suspend_opp); - dev_pm_opp_put_opp_table(opp_table); - return freq; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); @@ -468,21 +459,16 @@ int _get_opp_count(struct opp_table *opp_table) */ int dev_pm_opp_get_opp_count(struct device *dev) { - struct opp_table *opp_table; - int count; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - count = PTR_ERR(opp_table); - dev_dbg(dev, "%s: OPP table not found (%d)\n", - __func__, count); - return count; + dev_dbg(dev, "%s: OPP table not found (%ld)\n", + __func__, PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - count = _get_opp_count(opp_table); - dev_pm_opp_put_opp_table(opp_table); - - return count; + return _get_opp_count(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); @@ -576,8 +562,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, unsigned long opp_key, unsigned long key), bool (*assert)(struct opp_table *opp_table, unsigned int index)) { - struct opp_table *opp_table; - struct dev_pm_opp *opp; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -586,12 +571,8 @@ _find_key(struct device *dev, unsigned long *key, int index, bool 
available, return ERR_CAST(opp_table); } - opp = _opp_table_find_key(opp_table, key, index, available, read, - compare, assert); - - dev_pm_opp_put_opp_table(opp_table); - - return opp; + return _opp_table_find_key(opp_table, key, index, available, read, + compare, assert); } static struct dev_pm_opp *_find_key_exact(struct device *dev, @@ -1345,11 +1326,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, */ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); + struct dev_pm_opp *opp __free(put_opp) = NULL; unsigned long freq = 0, temp_freq; - struct dev_pm_opp *opp = NULL; bool forced = false; - int ret; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -1366,9 +1346,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * equivalent to a clk_set_rate() */ if (!_get_opp_count(opp_table)) { - ret = opp_table->config_clks(dev, opp_table, NULL, - &target_freq, false); - goto put_opp_table; + return opp_table->config_clks(dev, opp_table, NULL, + &target_freq, false); } freq = clk_round_rate(opp_table->clk, target_freq); @@ -1383,10 +1362,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) temp_freq = freq; opp = _find_freq_ceil(opp_table, &temp_freq); if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", - __func__, freq, ret); - goto put_opp_table; + dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n", + __func__, freq, PTR_ERR(opp)); + return PTR_ERR(opp); } /* @@ -1399,14 +1377,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) forced = opp_table->current_rate_single_clk != freq; } - ret = _set_opp(dev, opp_table, opp, &freq, forced); - - if (freq) - dev_pm_opp_put(opp); - -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - return ret; + return _set_opp(dev, opp_table, opp, &freq, forced); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); @@ -1422,8 +1393,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); */ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -1431,10 +1401,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) return PTR_ERR(opp_table); } - ret = _set_opp(dev, opp_table, opp, NULL, false); - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return _set_opp(dev, opp_table, opp, NULL, false); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); @@ -1744,15 +1711,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put); */ void dev_pm_opp_remove(struct device *dev, unsigned long freq) { + struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp = NULL, *iter; - struct opp_table *opp_table; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; if (!assert_single_clk(opp_table, 0)) - goto put_table; + return; mutex_lock(&opp_table->lock); @@ -1774,10 +1741,6 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", __func__, freq); } - -put_table: - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); @@ -1849,16 +1812,13 @@ bool _opp_remove_all_static(struct opp_table *opp_table) */ void dev_pm_opp_remove_all_dynamic(struct device *dev) { - struct opp_table *opp_table; + struct opp_table 
*opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; _opp_remove_all(opp_table, true); - - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); @@ -2846,47 +2806,43 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); static int _opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + struct opp_table *opp_table __free(put_opp_table); /* Find the opp_table */ opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - r = PTR_ERR(opp_table); - dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); - return r; + dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__, + PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - if (!assert_single_clk(opp_table, 0)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; mutex_lock(&opp_table->lock); /* Do we have the frequency? */ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; + opp = dev_pm_opp_get(tmp_opp); + + /* Is update really needed? */ + if (opp->available == availability_req) { + mutex_unlock(&opp_table->lock); + return 0; + } + + opp->available = availability_req; break; } } - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto unlock; - } - - /* Is update really needed? */ - if (opp->available == availability_req) - goto unlock; - - opp->available = availability_req; - - dev_pm_opp_get(opp); mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); + /* Notify the change of the OPP availability */ if (availability_req) blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, @@ -2895,14 +2851,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); - dev_pm_opp_put(opp); - goto put_table; - -unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } /** @@ -2922,9 +2871,9 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, unsigned long u_volt_max) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + struct opp_table *opp_table __free(put_opp_table); + int r; /* Find the opp_table */ opp_table = _find_opp_table(dev); @@ -2934,49 +2883,40 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, return r; } - if (!assert_single_clk(opp_table, 0)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; mutex_lock(&opp_table->lock); /* Do we have the frequency? */ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + opp = dev_pm_opp_get(tmp_opp); - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto adjust_unlock; - } + /* Is update really needed? */ + if (opp->supplies->u_volt == u_volt) { + mutex_unlock(&opp_table->lock); + return 0; + } - /* Is update really needed? 
*/ - if (opp->supplies->u_volt == u_volt) - goto adjust_unlock; + opp->supplies->u_volt = u_volt; + opp->supplies->u_volt_min = u_volt_min; + opp->supplies->u_volt_max = u_volt_max; - opp->supplies->u_volt = u_volt; - opp->supplies->u_volt_min = u_volt_min; - opp->supplies->u_volt_max = u_volt_max; + break; + } + } - dev_pm_opp_get(opp); mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); + /* Notify the voltage change of the OPP */ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE, opp); - dev_pm_opp_put(opp); - goto put_table; - -adjust_unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); @@ -2990,9 +2930,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); */ int dev_pm_opp_sync_regulators(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); struct regulator *reg; - int i, ret = 0; + int ret, i; /* Device may not have OPP table */ opp_table = _find_opp_table(dev); @@ -3001,23 +2941,20 @@ int dev_pm_opp_sync_regulators(struct device *dev) /* Regulator may not be required for the device */ if (unlikely(!opp_table->regulators)) - goto put_table; + return 0; /* Nothing to sync if voltage wasn't changed */ if (!opp_table->enabled) - goto put_table; + return 0; for (i = 0; i < opp_table->regulator_count; i++) { reg = opp_table->regulators[i]; ret = regulator_sync_voltage(reg); if (ret) - break; + return ret; } -put_table: - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); @@ -3069,18 +3006,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable); */ int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_register(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_register(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_register_notifier); @@ -3094,18 +3026,13 @@ EXPORT_SYMBOL(dev_pm_opp_register_notifier); int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_unregister(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_unregister(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); @@ -3118,7 +3045,7 @@ EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); */ void dev_pm_opp_remove_table(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); /* Check for existing table for 'dev' */ opp_table = _find_opp_table(dev); @@ -3139,8 +3066,5 @@ void dev_pm_opp_remove_table(struct device *dev) **/ if (_opp_remove_all_static(opp_table)) dev_pm_opp_put_opp_table(opp_table); - - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 12c429b407ca..330a1753fb22 100644 --- a/drivers/opp/cpu.c +++ b/drivers/opp/cpu.c @@ 
-43,7 +43,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **opp_table) { - struct dev_pm_opp *opp; struct cpufreq_frequency_table *freq_table = NULL; int i, max_opps, ret = 0; unsigned long rate; @@ -57,6 +56,8 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, return -ENOMEM; for (i = 0, rate = 0; i < max_opps; i++, rate++) { + struct dev_pm_opp *opp __free(put_opp); + /* find next rate */ opp = dev_pm_opp_find_freq_ceil(dev, &rate); if (IS_ERR(opp)) { @@ -69,8 +70,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, /* Is Boost/turbo opp ? */ if (dev_pm_opp_is_turbo(opp)) freq_table[i].flags = CPUFREQ_BOOST_FREQ; - - dev_pm_opp_put(opp); } freq_table[i].driver_data = i; @@ -155,10 +154,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { + struct opp_table *opp_table __free(put_opp_table); struct opp_device *opp_dev; - struct opp_table *opp_table; struct device *dev; - int cpu, ret = 0; + int cpu; opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) @@ -186,9 +185,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; } - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); @@ -204,18 +201,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); */ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { + struct opp_table *opp_table __free(put_opp_table); struct opp_device *opp_dev; - struct opp_table *opp_table; - int ret = 0; opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { - ret = -EINVAL; - goto put_opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) + return -EINVAL; cpumask_clear(cpumask); @@ -228,9 +222,6 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) cpumask_set_cpu(cpu_dev->id, cpumask); } -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index aa43fbfa3e50..54109e813d4f 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1344,8 +1344,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); int of_get_required_opp_performance_state(struct device_node *np, int index) { struct device_node *required_np __free(device_node); - struct opp_table *opp_table; - struct dev_pm_opp *opp; + struct opp_table *opp_table __free(put_opp_table) = NULL; + struct dev_pm_opp *opp __free(put_opp) = NULL; int pstate = -EINVAL; required_np = of_parse_required_opp(np, index); @@ -1373,11 +1373,8 @@ int of_get_required_opp_performance_state(struct device_node *np, int index) } else { pstate = opp->level; } - dev_pm_opp_put(opp); - } - dev_pm_opp_put_opp_table(opp_table); return pstate; } EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state); @@ -1443,7 +1440,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); static int __maybe_unused _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) { - struct dev_pm_opp *opp; + struct dev_pm_opp *opp __free(put_opp); unsigned long opp_freq, opp_power; /* Find the right frequency and related OPP */ @@ -1453,7 +1450,6 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) return -EINVAL; opp_power = dev_pm_opp_get_power(opp); - dev_pm_opp_put(opp); if 
(!opp_power) return -EINVAL; @@ -1484,8 +1480,8 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, unsigned long *kHz) { + struct dev_pm_opp *opp __free(put_opp) = NULL; struct device_node *np __free(device_node); - struct dev_pm_opp *opp; unsigned long mV, Hz; u32 cap; u64 tmp; @@ -1505,7 +1501,6 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, return -EINVAL; mV = dev_pm_opp_get_voltage(opp) / 1000; - dev_pm_opp_put(opp); if (!mV) return -EINVAL; @@ -1522,20 +1517,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power); static bool _of_has_opp_microwatt_property(struct device *dev) { - unsigned long power, freq = 0; - struct dev_pm_opp *opp; + struct dev_pm_opp *opp __free(put_opp); + unsigned long freq = 0; /* Check if at least one OPP has needed property */ opp = dev_pm_opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) return false; - power = dev_pm_opp_get_power(opp); - dev_pm_opp_put(opp); - if (!power) - return false; - - return true; + return !!dev_pm_opp_get_power(opp); } /** diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0deddfa91aca..8313ed981535 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -11,6 +11,7 @@ #ifndef __LINUX_OPP_H__ #define __LINUX_OPP_H__ +#include #include #include #include @@ -579,6 +580,12 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta } #endif +/* Scope based cleanup macro for OPP reference counting */ +DEFINE_FREE(put_opp, struct dev_pm_opp *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put(_T)) + +/* Scope based cleanup macro for OPP table reference counting */ +DEFINE_FREE(put_opp_table, struct opp_table *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put_opp_table(_T)) + /* OPP Configuration helpers */ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, -- cgit v1.2.3 From ee3de3cf7035aaf289085111fd0cfaddc93f0409 Mon Sep 17 00:00:00 2001 From: Praveen Talari Date: Fri, 2 May 2025 10:58:22 +0530 Subject: OPP: Add dev_pm_opp_set_level() To configure a device to a specific performance level, consumer drivers currently need to determine the OPP based on the exact level and then set it, resulting in code duplication across drivers. The new helper API, dev_pm_opp_set_level(), addresses this issue by providing a streamlined method for consumer drivers to find and set the OPP based on the desired performance level, thereby eliminating redundancy. Signed-off-by: Praveen Talari [ Viresh: Lot of fixes in the code, and rebased over latest changes. Fixed commit log too. 
] Signed-off-by: Viresh Kumar --- include/linux/pm_opp.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 8313ed981535..cf477beae4bb 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -197,6 +197,7 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) void dev_pm_opp_remove_table(struct device *dev); void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_sync_regulators(struct device *dev); + #else static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) { @@ -717,4 +718,14 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) return dev_pm_opp_get_freq_indexed(opp, 0); } +static inline int dev_pm_opp_set_level(struct device *dev, unsigned int level) +{ + struct dev_pm_opp *opp __free(put_opp) = dev_pm_opp_find_level_exact(dev, level); + + if (IS_ERR(opp)) + return PTR_ERR(opp); + + return dev_pm_opp_set_opp(dev, opp); +} + #endif /* __LINUX_OPP_H__ */ -- cgit v1.2.3 From f42c8556a0690802246dc588ae9c18184f71d8f5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 6 May 2025 22:34:31 +0200 Subject: cpufreq/sched: schedutil: Add helper for governor checks Add a helper for checking if schedutil is the current governor for a given cpufreq policy and use it in sched_is_eas_possible() to avoid accessing cpufreq policy internals directly from there. No intentional functional impact. Signed-off-by: Rafael J. Wysocki Reviewed-by: Christian Loehle Tested-by: Christian Loehle Reviewed-by: Dietmar Eggemann Link: https://patch.msgid.link/3365956.44csPzL39Z@rjwysocki.net --- include/linux/cpufreq.h | 9 +++++++++ kernel/sched/cpufreq_schedutil.c | 9 +++++++-- kernel/sched/sched.h | 2 -- kernel/sched/topology.c | 6 +++--- 4 files changed, 19 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 7bb760031dd7..1d2c6c6d8952 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -650,6 +650,15 @@ module_exit(__governor##_exit) struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); +#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL +bool sugov_is_governor(struct cpufreq_policy *policy); +#else +static inline bool sugov_is_governor(struct cpufreq_policy *policy) +{ + return false; +} +#endif + static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) { if (policy->max < policy->cur) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 816f07f9d30f..461242ec958a 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -630,7 +630,7 @@ static const struct kobj_type sugov_tunables_ktype = { /********************** cpufreq governor interface *********************/ -struct cpufreq_governor schedutil_gov; +static struct cpufreq_governor schedutil_gov; static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) { @@ -909,7 +909,7 @@ static void sugov_limits(struct cpufreq_policy *policy) WRITE_ONCE(sg_policy->limits_changed, true); } -struct cpufreq_governor schedutil_gov = { +static struct cpufreq_governor schedutil_gov = { .name = "schedutil", .owner = THIS_MODULE, .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, @@ -927,4 +927,9 @@ struct cpufreq_governor *cpufreq_default_governor(void) } #endif +bool sugov_is_governor(struct cpufreq_policy *policy) +{ + return 
policy->governor == &schedutil_gov; +} + cpufreq_governor_init(schedutil_gov); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 47972f34ea70..5cbe3fd93fda 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3509,8 +3509,6 @@ static inline bool sched_energy_enabled(void) return static_branch_unlikely(&sched_energy_present); } -extern struct cpufreq_governor schedutil_gov; - #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ #define perf_domain_span(pd) NULL diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index f1ebc60d967f..902d37f922b4 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -213,7 +213,7 @@ static bool sched_is_eas_possible(const struct cpumask *cpu_mask) { bool any_asym_capacity = false; struct cpufreq_policy *policy; - struct cpufreq_governor *gov; + bool policy_is_ready; int i; /* EAS is enabled for asymmetric CPU capacity topologies. */ @@ -258,9 +258,9 @@ static bool sched_is_eas_possible(const struct cpumask *cpu_mask) } return false; } - gov = policy->governor; + policy_is_ready = sugov_is_governor(policy); cpufreq_cpu_put(policy); - if (gov != &schedutil_gov) { + if (!policy_is_ready) { if (sched_debug()) { pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n", cpumask_pr_args(cpu_mask)); -- cgit v1.2.3 From 4854649b1fb43968615e0374d9d59580093ac67f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 6 May 2025 22:37:15 +0200 Subject: cpufreq/sched: Move cpufreq-specific EAS checks to cpufreq Doing cpufreq-specific EAS checks that require accessing policy internals directly from sched_is_eas_possible() is a bit unfortunate, so introduce cpufreq_ready_for_eas() in cpufreq, move those checks into that new function and make sched_is_eas_possible() call it. While at it, address a possible race between the EAS governor check and governor change by doing the former under the policy rwsem. Signed-off-by: Rafael J. Wysocki Reviewed-by: Christian Loehle Tested-by: Christian Loehle Reviewed-by: Dietmar Eggemann Link: https://patch.msgid.link/2317800.iZASKD2KPV@rjwysocki.net --- drivers/cpufreq/cpufreq.c | 32 ++++++++++++++++++++++++++++++++ include/linux/cpufreq.h | 2 ++ kernel/sched/topology.c | 25 +++++-------------------- 3 files changed, 39 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 21fa733a2fe8..731ecfc178d8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -3056,6 +3056,38 @@ static int __init cpufreq_core_init(void) return 0; } + +static bool cpufreq_policy_is_good_for_eas(unsigned int cpu) +{ + struct cpufreq_policy *policy __free(put_cpufreq_policy); + + policy = cpufreq_cpu_get(cpu); + if (!policy) { + pr_debug("cpufreq policy not set for CPU: %d\n", cpu); + return false; + } + + guard(cpufreq_policy_read)(policy); + + return sugov_is_governor(policy); +} + +bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask) +{ + unsigned int cpu; + + /* Do not attempt EAS if schedutil is not being used. 
*/ + for_each_cpu(cpu, cpu_mask) { + if (!cpufreq_policy_is_good_for_eas(cpu)) { + pr_debug("rd %*pbl: schedutil is mandatory for EAS\n", + cpumask_pr_args(cpu_mask)); + return false; + } + } + + return true; +} + module_param(off, int, 0444); module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444); core_initcall(cpufreq_core_init); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1d2c6c6d8952..95f3807c8c55 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -1237,6 +1237,8 @@ void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency); +bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask); + static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy) { dev_pm_opp_of_register_em(get_cpu_device(policy->cpu), diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 902d37f922b4..580c28fed539 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -212,8 +212,6 @@ static bool sched_energy_update; static bool sched_is_eas_possible(const struct cpumask *cpu_mask) { bool any_asym_capacity = false; - struct cpufreq_policy *policy; - bool policy_is_ready; int i; /* EAS is enabled for asymmetric CPU capacity topologies. */ @@ -248,25 +246,12 @@ static bool sched_is_eas_possible(const struct cpumask *cpu_mask) return false; } - /* Do not attempt EAS if schedutil is not being used. */ - for_each_cpu(i, cpu_mask) { - policy = cpufreq_cpu_get(i); - if (!policy) { - if (sched_debug()) { - pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d", - cpumask_pr_args(cpu_mask), i); - } - return false; - } - policy_is_ready = sugov_is_governor(policy); - cpufreq_cpu_put(policy); - if (!policy_is_ready) { - if (sched_debug()) { - pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n", - cpumask_pr_args(cpu_mask)); - } - return false; + if (!cpufreq_ready_for_eas(cpu_mask)) { + if (sched_debug()) { + pr_info("rd %*pbl: Checking EAS: cpufreq is not ready\n", + cpumask_pr_args(cpu_mask)); } + return false; } return true; -- cgit v1.2.3 From 6bceea7a1e076ef9d71b20d8dda2f7dc52bd34d2 Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Fri, 18 Apr 2025 19:55:03 -0700 Subject: arch_topology: Relocate cpu_scale to topology.[h|c] arch_topology.c provides functionality to parse and scale CPU capacity. It also provides a corresponding sysfs interface. Some architectures parse and scale CPU capacity differently as per their own needs. On Intel processors, for instance, it is responsibility of the Intel P-state driver. Relocate the implementation of that interface to a common location in topology.c. Architectures can use the interface and populate it using their own mechanisms. An alternative approach would be to compile arch_topology.c even if not needed only to get this interface. This approach would create duplicated and conflicting functionality and data structures. Signed-off-by: Ricardo Neri Tested-by: Christian Loehle Link: https://patch.msgid.link/20250419025504.9760-2-ricardo.neri-calderon@linux.intel.com Signed-off-by: Rafael J. 
Wysocki --- drivers/base/arch_topology.c | 52 ------------------------------------------- drivers/base/topology.c | 52 +++++++++++++++++++++++++++++++++++++++++++ include/linux/arch_topology.h | 8 ------- include/linux/topology.h | 9 ++++++++ 4 files changed, 61 insertions(+), 60 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index af0029d30dbe..1037169abb45 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, per_cpu(arch_freq_scale, i) = scale; } -DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; -EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale); - -void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) -{ - per_cpu(cpu_scale, cpu) = capacity; -} - DEFINE_PER_CPU(unsigned long, hw_pressure); /** @@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus, } EXPORT_SYMBOL_GPL(topology_update_hw_pressure); -static ssize_t cpu_capacity_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cpu *cpu = container_of(dev, struct cpu, dev); - - return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); -} - static void update_topology_flags_workfn(struct work_struct *work); static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn); -static DEVICE_ATTR_RO(cpu_capacity); - -static int cpu_capacity_sysctl_add(unsigned int cpu) -{ - struct device *cpu_dev = get_cpu_device(cpu); - - if (!cpu_dev) - return -ENOENT; - - device_create_file(cpu_dev, &dev_attr_cpu_capacity); - - return 0; -} - -static int cpu_capacity_sysctl_remove(unsigned int cpu) -{ - struct device *cpu_dev = get_cpu_device(cpu); - - if (!cpu_dev) - return -ENOENT; - - device_remove_file(cpu_dev, &dev_attr_cpu_capacity); - - return 0; -} - -static int register_cpu_capacity_sysctl(void) -{ - cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity", - cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove); - - return 0; -} -subsys_initcall(register_cpu_capacity_sysctl); - static int update_topology; int topology_update_cpu_topology(void) diff --git a/drivers/base/topology.c b/drivers/base/topology.c index b962da263eee..8b42df05feff 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void) } device_initcall(topology_sysfs_init); + +DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; +EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale); + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) +{ + per_cpu(cpu_scale, cpu) = capacity; +} + +static ssize_t cpu_capacity_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + + return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); +} + +static DEVICE_ATTR_RO(cpu_capacity); + +static int cpu_capacity_sysctl_add(unsigned int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) + return -ENOENT; + + device_create_file(cpu_dev, &dev_attr_cpu_capacity); + + return 0; +} + +static int cpu_capacity_sysctl_remove(unsigned int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) + return -ENOENT; + + device_remove_file(cpu_dev, &dev_attr_cpu_capacity); + + return 0; +} + +static int register_cpu_capacity_sysctl(void) +{ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity", + 
cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove); + + return 0; +} +subsys_initcall(register_cpu_capacity_sysctl); diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 2222e8b03ff4..d72d6e5aa200 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -14,14 +14,6 @@ int topology_update_cpu_topology(void); struct device_node; bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); -DECLARE_PER_CPU(unsigned long, cpu_scale); - -static inline unsigned long topology_get_cpu_scale(int cpu) -{ - return per_cpu(cpu_scale, cpu); -} - -void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); DECLARE_PER_CPU(unsigned long, capacity_freq_ref); diff --git a/include/linux/topology.h b/include/linux/topology.h index 24e715f0f6d2..cd6b4bdc9cfd 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -332,4 +332,13 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops) !IS_ERR_OR_NULL(mask); \ __hops++) +DECLARE_PER_CPU(unsigned long, cpu_scale); + +static inline unsigned long topology_get_cpu_scale(int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); + #endif /* _LINUX_TOPOLOGY_H */ -- cgit v1.2.3 From 34a364ff04e960a4d47f558acf7fbafcc3085c1f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 9 May 2025 15:02:27 +0200 Subject: PM: sleep: Introduce pm_suspend_in_progress() Introduce pm_suspend_in_progress() to be used for checking if a system- wide suspend or resume transition is in progress, instead of comparing pm_suspend_target_state directly to PM_SUSPEND_ON, and use it where applicable. No intentional functional impact. Signed-off-by: Rafael J. Wysocki Reviewed-by: Rodrigo Vivi Acked-by: Rodrigo Vivi Reviewed-by: Raag Jadav Reviewed-by: Mario Limonciello Link: https://patch.msgid.link/2020901.PYKUYFuaPT@rjwysocki.net --- arch/x86/pci/fixup.c | 4 ++-- drivers/base/power/wakeup.c | 2 +- drivers/gpu/drm/xe/xe_pm.c | 2 +- include/linux/suspend.h | 5 +++++ kernel/power/main.c | 4 ++-- 5 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index efefeb82ab61..e8ee1afa1992 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -970,13 +970,13 @@ static void amd_rp_pme_suspend(struct pci_dev *dev) struct pci_dev *rp; /* - * PM_SUSPEND_ON means we're doing runtime suspend, which means + * If system suspend is not in progress, we're doing runtime suspend, so * amd-pmc will not be involved so PMEs during D3 work as advertised. * * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware * sleep state, but we assume amd-pmc is always present. 
*/ - if (pm_suspend_target_state == PM_SUSPEND_ON) + if (!pm_suspend_in_progress()) return; rp = pcie_find_root_port(dev); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 63bf914a4d44..6f6f309817f4 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -337,7 +337,7 @@ int device_wakeup_enable(struct device *dev) if (!dev || !dev->power.can_wakeup) return -EINVAL; - if (pm_suspend_target_state != PM_SUSPEND_ON) + if (pm_suspend_in_progress()) dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); ws = wakeup_source_register(dev, dev_name(dev)); diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 7b6b754ad6eb..cb7fbf74138e 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -641,7 +641,7 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe) return dev->power.runtime_status == RPM_SUSPENDING || dev->power.runtime_status == RPM_RESUMING || - pm_suspend_target_state != PM_SUSPEND_ON; + pm_suspend_in_progress(); #else return false; #endif diff --git a/include/linux/suspend.h b/include/linux/suspend.h index da6ebca3ff77..52ea108f9451 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -298,6 +298,11 @@ static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {} static inline void s2idle_wake(void) {} #endif /* !CONFIG_SUSPEND */ +static inline bool pm_suspend_in_progress(void) +{ + return pm_suspend_target_state != PM_SUSPEND_ON; +} + /* struct pbe is used for creating lists of pages that should be restored * atomically during the resume from disk, because the page frames they have * occupied before the suspend are in use. diff --git a/kernel/power/main.c b/kernel/power/main.c index fb20a6e26fb6..8c26241c6724 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -613,8 +613,8 @@ bool pm_debug_messages_on __read_mostly; bool pm_debug_messages_should_print(void) { - return pm_debug_messages_on && (hibernation_in_progress() || - pm_suspend_target_state != PM_SUSPEND_ON); + return pm_debug_messages_on && (pm_suspend_in_progress() || + hibernation_in_progress()); } EXPORT_SYMBOL_GPL(pm_debug_messages_should_print); -- cgit v1.2.3 From 4a6b1cf0d4c02d6da2976c6314c264d20672937e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 6 May 2025 22:41:21 +0200 Subject: PM: EM: Introduce em_adjust_cpu_capacity() Add a function for updating the Energy Model for a CPU after its capacity has changed, which subsequently will be used by the intel_pstate driver. An EM_PERF_DOMAIN_ARTIFICIAL check is added to em_recalc_and_update() to prevent it from calling em_compute_costs() for an "artificial" perf domain with a NULL cb parameter which would cause it to crash. Signed-off-by: Rafael J. 
Wysocki Reviewed-by: Lukasz Luba Tested-by: Christian Loehle Reviewed-by: Dietmar Eggemann Link: https://patch.msgid.link/3637203.iIbC2pHGDl@rjwysocki.net --- include/linux/energy_model.h | 2 ++ kernel/power/energy_model.c | 28 ++++++++++++++++++++++++---- 2 files changed, 26 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index d8eabbf86a5b..7fa1eb3cc823 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, int em_dev_update_chip_binning(struct device *dev); int em_update_performance_limits(struct em_perf_domain *pd, unsigned long freq_min_khz, unsigned long freq_max_khz); +void em_adjust_cpu_capacity(unsigned int cpu); void em_rebuild_sched_domains(void); /** @@ -403,6 +404,7 @@ int em_update_performance_limits(struct em_perf_domain *pd, { return -EINVAL; } +static inline void em_adjust_cpu_capacity(unsigned int cpu) {} static inline void em_rebuild_sched_domains(void) {} #endif diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 88449d4390cb..ea7995a25780 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -702,10 +702,12 @@ static int em_recalc_and_update(struct device *dev, struct em_perf_domain *pd, { int ret; - ret = em_compute_costs(dev, em_table->state, NULL, pd->nr_perf_states, - pd->flags); - if (ret) - goto free_em_table; + if (!em_is_artificial(pd)) { + ret = em_compute_costs(dev, em_table->state, NULL, + pd->nr_perf_states, pd->flags); + if (ret) + goto free_em_table; + } ret = em_dev_update_perf_domain(dev, em_table); if (ret) @@ -755,6 +757,24 @@ static void em_adjust_new_capacity(unsigned int cpu, struct device *dev, em_recalc_and_update(dev, pd, em_table); } +/** + * em_adjust_cpu_capacity() - Adjust the EM for a CPU after a capacity update. + * @cpu: Target CPU. + * + * Adjust the existing EM for @cpu after a capacity update under the assumption + * that the capacity has been updated in the same way for all of the CPUs in + * the same perf domain. + */ +void em_adjust_cpu_capacity(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + struct em_perf_domain *pd; + + pd = em_pd_get(dev); + if (pd) + em_adjust_new_capacity(cpu, dev, pd); +} + static void em_check_capacity_update(void) { cpumask_var_t cpu_done_mask; -- cgit v1.2.3 From f5c0ecf196aaf78777f1606f1e0392c5e57c4530 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 9 May 2025 15:03:35 +0200 Subject: PM: sleep: Introduce pm_sleep_transition_in_progress() The "suspend in progress" check in device_wakeup_enable() does not cover hibernation, but arguably it should do that, so introduce pm_sleep_transition_in_progress() covering transitions during both system suspend and hibernation to use in there and use it also in pm_debug_messages_should_print(). Signed-off-by: Rafael J. Wysocki Reviewed-by: Mario Limonciello Link: https://patch.msgid.link/7820474.EvYhyI6sBW@rjwysocki.net [ rjw: Move the new function definition under CONFIG_PM_SLEEP ] Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/wakeup.c | 2 +- include/linux/suspend.h | 4 ++++ kernel/power/main.c | 7 +++++-- 3 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 6f6f309817f4..7e612977be1b 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -337,7 +337,7 @@ int device_wakeup_enable(struct device *dev) if (!dev || !dev->power.can_wakeup) return -EINVAL; - if (pm_suspend_in_progress()) + if (pm_sleep_transition_in_progress()) dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); ws = wakeup_source_register(dev, dev_name(dev)); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 52ea108f9451..b1c76c8f2c82 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -475,6 +475,8 @@ extern void pm_print_active_wakeup_sources(void); extern unsigned int lock_system_sleep(void); extern void unlock_system_sleep(unsigned int); +extern bool pm_sleep_transition_in_progress(void); + #else /* !CONFIG_PM_SLEEP */ static inline int register_pm_notifier(struct notifier_block *nb) @@ -503,6 +505,8 @@ static inline void pm_system_irq_wakeup(unsigned int irq_number) {} static inline unsigned int lock_system_sleep(void) { return 0; } static inline void unlock_system_sleep(unsigned int flags) {} +static inline bool pm_sleep_transition_in_progress(void) { return false; } + #endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_SLEEP_DEBUG diff --git a/kernel/power/main.c b/kernel/power/main.c index 8c26241c6724..8d17de9c8416 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -557,6 +557,10 @@ static int __init pm_debugfs_init(void) late_initcall(pm_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +bool pm_sleep_transition_in_progress(void) +{ + return pm_suspend_in_progress() || hibernation_in_progress(); +} #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_SLEEP_DEBUG @@ -613,8 +617,7 @@ bool pm_debug_messages_on __read_mostly; bool pm_debug_messages_should_print(void) { - return pm_debug_messages_on && (pm_suspend_in_progress() || - hibernation_in_progress()); + return pm_debug_messages_on && pm_sleep_transition_in_progress(); } EXPORT_SYMBOL_GPL(pm_debug_messages_should_print); -- cgit v1.2.3
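
Editor's note: the last two patches above introduce pm_suspend_in_progress() and pm_sleep_transition_in_progress() as the preferred way for drivers to detect a system-wide sleep transition, instead of comparing pm_suspend_target_state against PM_SUSPEND_ON directly. A minimal usage sketch follows; the foo_* names are hypothetical and only illustrate the pattern used by the device_wakeup_enable() conversion in the final patch, they are not part of the series.

/*
 * Hypothetical example (not from the series above): defer non-critical
 * work while a system-wide suspend or hibernation transition is in
 * progress.  Only pm_sleep_transition_in_progress() comes from the
 * patches above; the foo_* driver is made up for illustration.
 */
#include <linux/device.h>
#include <linux/suspend.h>

static void foo_schedule_rescan(struct device *dev)
{
	if (pm_sleep_transition_in_progress()) {
		dev_dbg(dev, "system transition in progress, deferring rescan\n");
		return;
	}

	/* ... queue the rescan work as usual ... */
}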