| author | Tony Lindgren <tony@atomide.com> | 2017-11-28 07:06:34 -0800 |
|---|---|---|
| committer | Tony Lindgren <tony@atomide.com> | 2017-11-28 07:06:34 -0800 |
| commit | 2db57789e6612ce0cf2fcbb577a1c8307b708566 (patch) | |
| tree | dd9f9e3dffabbf5cb932fcf5055ab329ca940fa0 /drivers/base/arch_topology.c | |
| parent | f0c96c6d40312b1a76cd36709dc3eb5948c1b97f (diff) | |
| parent | e9a9bb4e4779ca74cb52a6e2f8acbc0881d3bb18 (diff) | |
Merge branch 'soc-fixes' into omap-for-v4.15/fixes
Diffstat (limited to 'drivers/base/arch_topology.c')
| -rw-r--r-- | drivers/base/arch_topology.c | 43 |
1 file changed, 30 insertions, 13 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 41be9ff7d70a..4de87b0b53c8 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,14 +22,23 @@
 #include <linux/string.h>
 #include <linux/sched/topology.h>
 
-static DEFINE_MUTEX(cpu_scale_mutex);
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+                         unsigned long max_freq)
 {
-        return per_cpu(cpu_scale, cpu);
+        unsigned long scale;
+        int i;
+
+        scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+        for_each_cpu(i, cpus)
+                per_cpu(freq_scale, i) = scale;
 }
 
+static DEFINE_MUTEX(cpu_scale_mutex);
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
 {
         per_cpu(cpu_scale, cpu) = capacity;
@@ -96,7 +105,7 @@ subsys_initcall(register_cpu_capacity_sysctl);
 static u32 capacity_scale;
 static u32 *raw_capacity;
 
-static int __init free_raw_capacity(void)
+static int free_raw_capacity(void)
 {
         kfree(raw_capacity);
         raw_capacity = NULL;
@@ -166,11 +175,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 }
 
 #ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit __initdata;
+static void __init parsing_done_workfn(struct work_struct *work);
+static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
 
-static int
+static int __init
 init_cpu_capacity_callback(struct notifier_block *nb,
                            unsigned long val,
                            void *data)
@@ -206,12 +215,14 @@ init_cpu_capacity_callback(struct notifier_block *nb,
         return 0;
 }
 
-static struct notifier_block init_cpu_capacity_notifier = {
+static struct notifier_block init_cpu_capacity_notifier __initdata = {
         .notifier_call = init_cpu_capacity_callback,
 };
 
 static int __init register_cpufreq_notifier(void)
 {
+        int ret;
+
         /*
          * on ACPI-based systems we need to use the default cpu capacity
          * until we have the necessary code to parse the cpu capacity, so
@@ -227,15 +238,21 @@ static int __init register_cpufreq_notifier(void)
 
         cpumask_copy(cpus_to_visit, cpu_possible_mask);
 
-        return cpufreq_register_notifier(&init_cpu_capacity_notifier,
-                                         CPUFREQ_POLICY_NOTIFIER);
+        ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
+                                        CPUFREQ_POLICY_NOTIFIER);
+
+        if (ret)
+                free_cpumask_var(cpus_to_visit);
+
+        return ret;
 }
 core_initcall(register_cpufreq_notifier);
 
-static void parsing_done_workfn(struct work_struct *work)
+static void __init parsing_done_workfn(struct work_struct *work)
 {
         cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                     CPUFREQ_POLICY_NOTIFIER);
+        free_cpumask_var(cpus_to_visit);
 }
 
 #else
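Note on the first hunk: the new arch_set_freq_scale() hook stores, per CPU, the ratio of the current to the maximum frequency expressed against SCHED_CAPACITY_SCALE (1 << SCHED_CAPACITY_SHIFT = 1024). The snippet below is a minimal standalone sketch of just that arithmetic, not part of the commit: the constants are redefined locally with their usual kernel values, and the frequencies are hypothetical example numbers.

```c
/*
 * Illustrative userspace sketch of the arch_set_freq_scale() arithmetic
 * shown in the diff above. Not kernel code: SCHED_CAPACITY_SHIFT and
 * SCHED_CAPACITY_SCALE are redefined here with their usual kernel values,
 * and the example frequencies are made up.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

/* scale = cur_freq / max_freq, expressed in units of SCHED_CAPACITY_SCALE */
static unsigned long freq_scale_of(unsigned long cur_freq, unsigned long max_freq)
{
        return (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
}

int main(void)
{
        /* Hypothetical CPU running at 1.2 GHz out of a 2.0 GHz maximum. */
        unsigned long cur_freq = 1200000;       /* kHz */
        unsigned long max_freq = 2000000;       /* kHz */

        /* Prints: freq_scale = 614 / 1024 (roughly 60% of full capacity). */
        printf("freq_scale = %lu / %lu\n",
               freq_scale_of(cur_freq, max_freq), SCHED_CAPACITY_SCALE);
        return 0;
}
```

Because the division is integer and rounds down, a CPU running at its maximum frequency reports exactly 1024, and lower frequencies map to proportionally smaller per-CPU freq_scale values.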
