Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/acpi-cpufreq.c          2
-rw-r--r--   drivers/cpufreq/amd-pstate.c           41
-rw-r--r--   drivers/cpufreq/cppc_cpufreq.c         17
-rw-r--r--   drivers/cpufreq/cpufreq-dt-platdev.c    1
-rw-r--r--   drivers/cpufreq/cpufreq-nforce2.c       3
-rw-r--r--   drivers/cpufreq/cpufreq.c              11
-rw-r--r--   drivers/cpufreq/intel_pstate.c        228
-rw-r--r--   drivers/cpufreq/qcom-cpufreq-nvmem.c   35
-rw-r--r--   drivers/cpufreq/s5pv210-cpufreq.c       6
-rw-r--r--   drivers/cpufreq/tegra186-cpufreq.c    150
-rw-r--r--   drivers/cpufreq/tegra194-cpufreq.c      3
11 files changed, 320 insertions(+), 177 deletions(-)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 083d8369a591..e73a66785d69 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -395,7 +395,7 @@ static unsigned int check_freqs(struct cpufreq_policy *policy,
cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
- udelay(10);
+ usleep_range(10, 15);
}
return 0;
}
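The udelay() -> usleep_range() change above follows the guidance in Documentation/timers/timers-howto.rst: in non-atomic context, delays in the roughly 10 us to 20 ms range should sleep rather than busy-wait, and the min/max range lets the scheduler coalesce wakeups. A minimal sketch of the same polling pattern; poll_ready() and the ready-bit layout are illustrative, not part of the driver:

    /* Sketch only: assumes <linux/io.h>, <linux/delay.h>, <linux/bits.h>. */
    static int poll_ready(void __iomem *status_reg, unsigned int tries)
    {
    	unsigned int i;

    	for (i = 0; i < tries; i++) {
    		if (readl(status_reg) & BIT(0))	/* illustrative ready bit */
    			return 0;
    		usleep_range(10, 15);	/* may sleep: process context only */
    	}
    	return -ETIMEDOUT;
    }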
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 298e92d8cc03..c45bc98721d2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -65,13 +65,13 @@ static const char * const amd_pstate_mode_string[] = {
[AMD_PSTATE_PASSIVE] = "passive",
[AMD_PSTATE_ACTIVE] = "active",
[AMD_PSTATE_GUIDED] = "guided",
- NULL,
};
+static_assert(ARRAY_SIZE(amd_pstate_mode_string) == AMD_PSTATE_MAX);
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
{
- if (mode < 0 || mode >= AMD_PSTATE_MAX)
- return NULL;
+ if (mode < AMD_PSTATE_UNDEFINED || mode >= AMD_PSTATE_MAX)
+ mode = AMD_PSTATE_UNDEFINED;
return amd_pstate_mode_string[mode];
}
EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
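Replacing the NULL sentinel with a static_assert() against the enum's _MAX value moves the consistency check to compile time: adding a mode without a string (or vice versa) now breaks the build instead of risking a NULL dereference at runtime. A minimal sketch of the pattern with illustrative names:

    /* Sketch only: enum example_mode and example_mode_strings are
     * illustrative names, not from the driver above.
     */
    enum example_mode {
    	EXAMPLE_MODE_OFF,
    	EXAMPLE_MODE_ON,
    	EXAMPLE_MODE_MAX,
    };

    static const char * const example_mode_strings[] = {
    	[EXAMPLE_MODE_OFF] = "off",
    	[EXAMPLE_MODE_ON]  = "on",
    };
    static_assert(ARRAY_SIZE(example_mode_strings) == EXAMPLE_MODE_MAX);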
@@ -110,6 +110,7 @@ enum energy_perf_value_index {
EPP_INDEX_BALANCE_PERFORMANCE,
EPP_INDEX_BALANCE_POWERSAVE,
EPP_INDEX_POWERSAVE,
+ EPP_INDEX_MAX,
};
static const char * const energy_perf_strings[] = {
@@ -118,8 +119,8 @@ static const char * const energy_perf_strings[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
[EPP_INDEX_POWERSAVE] = "power",
- NULL
};
+static_assert(ARRAY_SIZE(energy_perf_strings) == EPP_INDEX_MAX);
static unsigned int epp_values[] = {
[EPP_INDEX_DEFAULT] = 0,
@@ -127,7 +128,8 @@ static unsigned int epp_values[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
- };
+};
+static_assert(ARRAY_SIZE(epp_values) == EPP_INDEX_MAX);
typedef int (*cppc_mode_transition_fn)(int);
@@ -183,7 +185,7 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
- for (i=0; i < AMD_PSTATE_MAX; i++) {
+ for (i = 0; i < AMD_PSTATE_MAX; i++) {
if (!strncmp(str, amd_pstate_mode_string[i], size))
return i;
}
@@ -1137,16 +1139,15 @@ static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
- int i = 0;
- int offset = 0;
+ int offset = 0, i;
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
return sysfs_emit_at(buf, offset, "%s\n",
energy_perf_strings[EPP_INDEX_PERFORMANCE]);
- while (energy_perf_strings[i] != NULL)
- offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+ for (i = 0; i < ARRAY_SIZE(energy_perf_strings); i++)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i]);
offset += sysfs_emit_at(buf, offset, "\n");
@@ -1157,15 +1158,10 @@ static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
struct amd_cpudata *cpudata = policy->driver_data;
- char str_preference[21];
ssize_t ret;
u8 epp;
- ret = sscanf(buf, "%20s", str_preference);
- if (ret != 1)
- return -EINVAL;
-
- ret = match_string(energy_perf_strings, -1, str_preference);
+ ret = sysfs_match_string(energy_perf_strings, buf);
if (ret < 0)
return -EINVAL;
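sysfs_match_string() collapses the sscanf()-into-a-stack-buffer plus match_string() sequence into one call: it is a macro that passes ARRAY_SIZE() of the table to __sysfs_match_string(), and its matching tolerates the trailing newline carried by sysfs writes, so no manual tokenizing is needed. A sketch of a store callback built on it, reusing the illustrative example_mode_strings table from the sketch above:

    static ssize_t store_example_mode(struct kobject *kobj,
    				  struct kobj_attribute *attr,
    				  const char *buf, size_t count)
    {
    	int idx;

    	/* Matches "on" and "on\n" alike; returns index or -EINVAL. */
    	idx = sysfs_match_string(example_mode_strings, buf);
    	if (idx < 0)
    		return idx;

    	/* ... apply example_mode_strings[idx] here ... */
    	return count;
    }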
@@ -1282,7 +1278,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
}
@@ -1353,9 +1349,8 @@ int amd_pstate_update_status(const char *buf, size_t size)
return -EINVAL;
mode_idx = get_mode_idx_from_str(buf, size);
-
- if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
- return -EINVAL;
+ if (mode_idx < 0)
+ return mode_idx;
if (mode_state_machine[cppc_state][mode_idx]) {
guard(mutex)(&amd_pstate_driver_lock);
@@ -1614,7 +1609,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)
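The offline path above now re-submits the cached desired-perf, max-perf and EPP fields instead of forcing them to zero, extracting each with FIELD_GET() from <linux/bitfield.h>. FIELD_GET() shifts and masks a value out of a register image; FIELD_PREP() is its inverse. A sketch with an invented register layout (the mask below is illustrative, not AMD's):

    #include <linux/bitfield.h>

    #define EX_EPP_MASK	GENMASK_ULL(31, 24)	/* illustrative field */

    static u8 example_get_epp(u64 req_cached)
    {
    	return FIELD_GET(EX_EPP_MASK, req_cached);	/* extract field */
    }

    static u64 example_set_epp(u64 req_cached, u8 epp)
    {
    	req_cached &= ~EX_EPP_MASK;
    	return req_cached | FIELD_PREP(EX_EPP_MASK, epp);	/* pack field */
    }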
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e23d9abea135..9eac77c4f294 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -142,16 +142,15 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
- __func__, cpu, ret);
- /*
- * Don't abort if the CPU was offline while the driver
- * was getting registered.
- */
- if (cpu_online(cpu))
- return;
+ /*
+	 * Don't abort if the CPU was offline while the driver was
+ * getting registered.
+ */
+ if (ret && cpu_online(cpu)) {
+ pr_debug("%s: failed to read perf counters for cpu:%d: %d\n",
+ __func__, cpu, ret);
+ return;
}
}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index cd1816a12bb9..dc11b62399ad 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -87,6 +87,7 @@ static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "starfive,jh7110", },
+ { .compatible = "starfive,jh7110s", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap4", },
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index fedad1081973..fbbbe501cf2d 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -145,6 +145,8 @@ static unsigned int nforce2_fsb_read(int bootfsb)
pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
fsb /= 1000000;
+ pci_dev_put(nforce2_sub5);
+
/* Check if PLL register is already set */
pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
@@ -426,6 +428,7 @@ static int __init nforce2_init(void)
static void __exit nforce2_exit(void)
{
cpufreq_unregister_driver(&nforce2_driver);
+ pci_dev_put(nforce2_dev);
}
module_init(nforce2_init);
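Both hunks above plug reference leaks: pci_get_subsys() and friends return a pci_dev with its refcount raised, and each successful lookup must be balanced by pci_dev_put() once the device is no longer needed (including at module exit for devices cached in globals). A sketch of the balanced get/put pattern; the device ID and config offset are illustrative only:

    static int example_read_reg(u32 *val)
    {
    	struct pci_dev *dev;

    	/* Takes a reference on the returned device (or returns NULL). */
    	dev = pci_get_device(PCI_VENDOR_ID_NVIDIA, 0x01e0, NULL);
    	if (!dev)
    		return -ENODEV;

    	pci_read_config_dword(dev, 0x48, val);

    	pci_dev_put(dev);	/* balance the reference */
    	return 0;
    }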
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 852e024facc3..4472bb1ec83c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1421,9 +1421,12 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
* If there is a problem with its frequency table, take it
* offline and drop it.
*/
- ret = cpufreq_table_validate_and_sort(policy);
- if (ret)
- goto out_offline_policy;
+ if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
+ policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_offline_policy;
+ }
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@@ -2550,7 +2553,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
for_each_inactive_policy(policy) {
if (!strcmp(policy->last_governor, governor->name)) {
policy->governor = NULL;
- strcpy(policy->last_governor, "\0");
+ policy->last_governor[0] = '\0';
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 38897bb14a2c..ec4abe374573 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -575,13 +575,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
int scaling = cpu->pstate.scaling;
int freq;
- pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
- pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+ pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+ pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ if (scaling == perf_ctl_scaling)
+ return;
+
+ hwp_is_hybrid = true;
+
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
perf_ctl_scaling);
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
@@ -603,9 +608,6 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- if (!cpu_feature_enabled(X86_FEATURE_IDA))
- return true;
-
rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -912,6 +914,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
[HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
+static u8 hybrid_get_cpu_type(unsigned int cpu)
+{
+ return cpu_data(cpu).topo.intel_type;
+}
+
static bool no_cas __ro_after_init;
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
@@ -928,11 +935,8 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
unsigned long *freq)
{
/*
- * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
- * of the maximum capacity such that two CPUs of the same type will be
- * regarded as equally attractive if the utilization of each of them
- * falls into the same bin, which should prevent tasks from being
- * migrated between them too often.
+ * Create four "states" corresponding to 40%, 60%, 80%, and 100% of the
+ * full capacity.
*
* For this purpose, return the "frequency" of 2 for the first
* performance level and otherwise leave the value set by the caller.
@@ -946,38 +950,40 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
return 0;
}
+static bool hybrid_has_l3(unsigned int cpu)
+{
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+ unsigned int i;
+
+ if (!cacheinfo)
+ return false;
+
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3)
+ return true;
+ }
+
+ return false;
+}
+
static int hybrid_get_cost(struct device *dev, unsigned long freq,
unsigned long *cost)
{
- struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
- struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
-
+ /* Facilitate load balancing between CPUs of the same type. */
+ *cost = freq;
/*
- * The smaller the perf-to-frequency scaling factor, the larger the IPC
- * ratio between the given CPU and the least capable CPU in the system.
- * Regard that IPC ratio as the primary cost component and assume that
- * the scaling factors for different CPU types will differ by at least
- * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ * Adjust the cost depending on CPU type.
*
- * Add the freq value to the cost, so that the cost of running on CPUs
- * of the same type in different "utilization bins" is different.
+ * The idea is to start loading up LPE-cores before E-cores and start
+ * to populate E-cores when LPE-cores are utilized above 60% of the
+ * capacity. Similarly, P-cores start to be populated when E-cores are
+ * utilized above 60% of the capacity.
*/
- *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
- /*
- * Increase the cost slightly for CPUs able to access L3 to avoid
- * touching it in case some other CPUs of the same type can do the work
- * without it.
- */
- if (cacheinfo) {
- unsigned int i;
-
- /* Check if L3 cache is there. */
- for (i = 0; i < cacheinfo->num_leaves; i++) {
- if (cacheinfo->info_list[i].level == 3) {
- *cost += 2;
- break;
- }
- }
+ if (hybrid_get_cpu_type(dev->id) == INTEL_CPU_TYPE_ATOM) {
+ if (hybrid_has_l3(dev->id)) /* E-core */
+ *cost += 1;
+ } else { /* P-core */
+ *cost += 2;
}
return 0;
@@ -1040,9 +1046,9 @@ static void hybrid_set_cpu_capacity(struct cpudata *cpu)
topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
- pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
- cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
- cpu->pstate.max_pstate_physical);
+ pr_debug("CPU%d: capacity perf = %u, base perf = %u, sys max perf = %u\n",
+ cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
+ hybrid_max_perf_cpu->capacity_perf);
}
static void hybrid_clear_cpu_capacity(unsigned int cpunum)
@@ -1387,7 +1393,8 @@ static void set_power_ctl_ee_state(bool input)
{
u64 power_ctl;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
@@ -1397,7 +1404,6 @@ static void set_power_ctl_ee_state(bool input)
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
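The conversions in this file rely on guard(mutex) from <linux/cleanup.h>: it acquires the mutex and ties the unlock to the scope of the variable it declares, so every return path, including early error returns, unlocks automatically. A minimal sketch under assumed names:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);
    static int example_state;

    static int example_set(int val)
    {
    	guard(mutex)(&example_lock);	/* unlocks when scope exits */

    	if (val < 0)
    		return -EINVAL;		/* no explicit unlock needed */

    	example_state = val;
    	return 0;
    }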
@@ -1519,13 +1525,9 @@ static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
-
- mutex_lock(&intel_pstate_driver_lock);
- ret = intel_pstate_show_status(buf);
- mutex_unlock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- return ret;
+ return intel_pstate_show_status(buf);
}
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
@@ -1534,11 +1536,13 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
ret = intel_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&intel_pstate_driver_lock);
+ if (ret < 0)
+ return ret;
- return ret < 0 ? ret : count;
+ return count;
}
static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -1548,12 +1552,10 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
@@ -1562,8 +1564,6 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -1573,38 +1573,26 @@ static ssize_t show_num_pstates(struct kobject *kobj,
struct cpudata *cpu;
int total;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
-
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
- ret = sprintf(buf, "%u\n", global.no_turbo);
-
- mutex_unlock(&intel_pstate_driver_lock);
-
- return ret;
+ return sprintf(buf, "%u\n", global.no_turbo);
}
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
@@ -1616,29 +1604,25 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- count = -EAGAIN;
- goto unlock_driver;
- }
+ if (!intel_pstate_driver)
+ return -EAGAIN;
no_turbo = !!clamp_t(int, input, 0, 1);
WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
if (global.turbo_disabled && !no_turbo) {
pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
- count = -EPERM;
if (global.no_turbo)
- goto unlock_driver;
- else
- no_turbo = 1;
- }
+ return -EPERM;
- if (no_turbo == global.no_turbo) {
- goto unlock_driver;
+ no_turbo = 1;
}
+ if (no_turbo == global.no_turbo)
+ return count;
+
WRITE_ONCE(global.no_turbo, no_turbo);
mutex_lock(&intel_pstate_limits_lock);
@@ -1657,9 +1641,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
intel_pstate_update_limits_for_all();
arch_set_max_freq_ratio(no_turbo);
-unlock_driver:
- mutex_unlock(&intel_pstate_driver_lock);
-
return count;
}
@@ -1709,12 +1690,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1727,8 +1706,6 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
else
update_qos_requests(FREQ_QOS_MAX);
- mutex_unlock(&intel_pstate_driver_lock);
-
return count;
}
@@ -1742,12 +1719,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1761,8 +1736,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
else
update_qos_requests(FREQ_QOS_MIN);
- mutex_unlock(&intel_pstate_driver_lock);
-
return count;
}
@@ -1783,10 +1756,10 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
if (ret)
return ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
hwp_boost = !!input;
intel_pstate_update_policies();
- mutex_unlock(&intel_pstate_driver_lock);
return count;
}
@@ -2075,6 +2048,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
intel_pstate_update_epp_defaults(cpudata);
}
+static u64 get_perf_ctl_val(int pstate)
+{
+ u64 val;
+
+ val = (u64)pstate << 8;
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
+ val |= (u64)1 << 32;
+
+ return val;
+}
+
static int atom_get_min_pstate(int not_used)
{
u64 value;
@@ -2101,14 +2086,10 @@ static int atom_get_turbo_pstate(int not_used)
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
+ u64 val = get_perf_ctl_val(pstate);
int32_t vid_fp;
u32 vid;
- val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
- val |= (u64)1 << 32;
-
vid_fp = cpudata->vid.min + mul_fp(
int_tofp(pstate - cpudata->pstate.min_pstate),
cpudata->vid.ratio);
@@ -2268,13 +2249,7 @@ static int core_get_turbo_pstate(int cpu)
static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
-
- val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
- val |= (u64)1 << 32;
-
- return val;
+ return get_perf_ctl_val(pstate);
}
static int knl_get_aperf_mperf_shift(void)
@@ -2298,18 +2273,14 @@ static int knl_get_turbo_pstate(int cpu)
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- u8 cpu_type = c->topo.intel_type;
-
/*
* Return the hybrid scaling factor for P-cores and use the
* default core scaling for E-cores.
*/
- if (cpu_type == INTEL_CPU_TYPE_CORE)
+ if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
return hybrid_scaling_factor;
- if (cpu_type == INTEL_CPU_TYPE_ATOM)
- return core_get_scaling();
+ return core_get_scaling();
}
/* Use core scaling on non-hybrid systems. */
@@ -2344,11 +2315,10 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
int perf_ctl_scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
- cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
if (hwp_active && !hwp_mode_bdw) {
@@ -2356,10 +2326,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
- if (cpu->pstate.scaling != perf_ctl_scaling) {
- intel_pstate_hybrid_hwp_adjust(cpu);
- hwp_is_hybrid = true;
- }
+ intel_pstate_hybrid_hwp_adjust(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
@@ -2761,6 +2728,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
+ X86_MATCH(INTEL_DIAMONDRAPIDS_X, core_funcs),
{}
};
#endif
@@ -3913,9 +3881,9 @@ hwp_cpu_matched:
}
- mutex_lock(&intel_pstate_driver_lock);
- rc = intel_pstate_register_driver(default_driver);
- mutex_unlock(&intel_pstate_driver_lock);
+ scoped_guard(mutex, &intel_pstate_driver_lock) {
+ rc = intel_pstate_register_driver(default_driver);
+ }
if (rc) {
intel_pstate_sysfs_remove();
return rc;
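scoped_guard() is the block-scoped variant used above: the lock covers only the braced region, which matters here because the sysfs removal on failure must run after intel_pstate_driver_lock has been dropped. A sketch, with do_locked_work() as a hypothetical stand-in for the registration call:

    static DEFINE_MUTEX(example_lock);

    static int example_register(void)
    {
    	int rc;

    	scoped_guard(mutex, &example_lock) {
    		rc = do_locked_work();	/* hypothetical helper */
    	}
    	/* example_lock is already released at this point. */
    	if (rc)
    		pr_err("registration failed: %d\n", rc);
    	return rc;
    }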
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 765a5bb81829..81e16b5a0245 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -256,13 +256,22 @@ len_error:
return ret;
}
+static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unused = {
+ { .compatible = "qcom,ipq8062", .data = (const void *)QCOM_ID_IPQ8062 },
+ { .compatible = "qcom,ipq8064", .data = (const void *)QCOM_ID_IPQ8064 },
+ { .compatible = "qcom,ipq8065", .data = (const void *)QCOM_ID_IPQ8065 },
+ { .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
+ { .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
+ { .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
+	{ /* sentinel */ }
+};
+
static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
+ int msm_id = -1, ret = 0;
int speed = 0, pvs = 0;
- int msm_id, ret = 0;
u8 *speedbin;
size_t len;
@@ -279,8 +288,30 @@ static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
ret = qcom_smem_get_soc_id(&msm_id);
- if (ret)
+ if (ret == -ENODEV) {
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* Fallback to compatible match with no SMEM initialized */
+ match = of_match_node(qcom_cpufreq_ipq806x_match_list, root);
+ of_node_put(root);
+ if (!match) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* We found a matching device, get the msm_id from the data entry */
+ msm_id = (int)(uintptr_t)match->data;
+ ret = 0;
+ } else if (ret) {
goto exit;
+ }
switch (msm_id) {
case QCOM_ID_IPQ8062:
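The fallback works because of_match_node() compares the table against the root node's compatible string and hands back the matching entry, whose .data pointer was loaded with the SoC ID when the table was defined. A condensed sketch of that lookup, using the qcom_cpufreq_ipq806x_match_list table added above:

    static int example_soc_id_from_root(void)
    {
    	const struct of_device_id *match;
    	struct device_node *root;

    	root = of_find_node_by_path("/");	/* takes a node reference */
    	if (!root)
    		return -ENODEV;

    	match = of_match_node(qcom_cpufreq_ipq806x_match_list, root);
    	of_node_put(root);			/* drop the reference */
    	if (!match)
    		return -ENODEV;

    	return (int)(uintptr_t)match->data;	/* QCOM_ID_* constant */
    }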
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 4215621deb3f..ba8a1c96427a 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -518,7 +518,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0) {
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/*
@@ -530,7 +530,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/* Find current refresh counter and frequency each DMC */
@@ -544,6 +544,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
return 0;
+out:
+ clk_put(dmc1_clk);
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
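The fix works because the labels form a mirrored unwind ladder: each label releases exactly what was acquired before the corresponding failure, so a failure after the second clk_get() must enter the ladder one rung higher than a failure before it. A generic sketch of the idiom (names and the late-setup helper are illustrative):

    static int example_init(struct device *dev)
    {
    	struct clk *clk_a, *clk_b;
    	int ret;

    	clk_a = clk_get(dev, "a");
    	if (IS_ERR(clk_a))
    		return PTR_ERR(clk_a);

    	clk_b = clk_get(dev, "b");
    	if (IS_ERR(clk_b)) {
    		ret = PTR_ERR(clk_b);
    		goto put_a;		/* only clk_a is held */
    	}

    	ret = example_late_setup(dev);	/* hypothetical */
    	if (ret)
    		goto put_b;		/* both clocks are held */

    	return 0;

    put_b:
    	clk_put(clk_b);
    put_a:
    	clk_put(clk_a);
    	return ret;
    }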
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 136ab102f636..34ed943c5f34 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
@@ -58,7 +59,7 @@ static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
};
struct tegra186_cpufreq_cluster {
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 ref_clk_khz;
u32 div;
};
@@ -66,16 +67,119 @@ struct tegra186_cpufreq_cluster {
struct tegra186_cpufreq_data {
void __iomem *regs;
const struct tegra186_cpufreq_cpu *cpus;
+ bool icc_dram_bw_scaling;
struct tegra186_cpufreq_cluster clusters[];
};
+static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct device *dev;
+ int ret;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(dev, freq_khz * HZ_PER_KHZ, true);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret)
+ data->icc_dram_bw_scaling = false;
+
+ return ret;
+}
+
+static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *bpmp_lut,
+ struct cpufreq_frequency_table **opp_table)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *freq_table = NULL;
+ struct cpufreq_frequency_table *pos;
+ struct device *cpu_dev;
+ unsigned long rate;
+ int ret, max_opps;
+ int j = 0;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
+ return -ENODEV;
+ }
+
+ /* Initialize OPP table mentioned in operating-points-v2 property in DT */
+ ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
+ if (ret) {
+ dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
+ data->icc_dram_bw_scaling = false;
+ return ret;
+ }
+
+ max_opps = dev_pm_opp_get_opp_count(cpu_dev);
+ if (max_opps <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ return max_opps;
+ }
+
+ /* Disable all opps and cross-validate against LUT later */
+ for (rate = 0; ; rate++) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_disable(cpu_dev, rate);
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
+ if (!freq_table)
+ return -ENOMEM;
+
+ /*
+ * Cross check the frequencies from BPMP-FW LUT against the OPP's present in DT.
+ * Enable only those DT OPP's which are present in LUT also.
+ */
+ cpufreq_for_each_valid_entry(pos, bpmp_lut) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * HZ_PER_KHZ, false);
+ if (IS_ERR(opp))
+ continue;
+
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * HZ_PER_KHZ);
+ if (ret < 0)
+ return ret;
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = pos->frequency;
+ j++;
+ }
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = CPUFREQ_TABLE_END;
+
+ *opp_table = &freq_table[0];
+
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+ /* Prime interconnect data */
+ tegra_cpufreq_set_bw(policy, freq_table[j - 1].frequency);
+
+ return ret;
+}
+
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 cpu;
+ int ret;
- policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
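The OPP lookups above use the scope-based cleanup attribute from <linux/cleanup.h>: declaring the pointer with __free(put_opp) drops the OPP reference automatically when the variable goes out of scope, so break, continue, and early-return paths need no explicit dev_pm_opp_put(). The put_opp cleanup defined in <linux/pm_opp.h> skips error pointers, which is what makes the early PTR_ERR() return safe. A minimal sketch:

    static int example_apply_opp(struct device *dev, unsigned long hz)
    {
    	/* Reference dropped automatically when opp leaves scope. */
    	struct dev_pm_opp *opp __free(put_opp) =
    		dev_pm_opp_find_freq_exact(dev, hz, true);

    	if (IS_ERR(opp))
    		return PTR_ERR(opp);	/* cleanup skips ERR_PTR values */

    	return dev_pm_opp_set_opp(dev, opp);
    }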
@@ -85,6 +189,20 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
cpumask_set_cpu(cpu, policy->cpus);
}
+ bpmp_lut = data->clusters[cluster].bpmp_lut;
+
+ if (data->icc_dram_bw_scaling) {
+ ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
+ if (!ret) {
+ policy->freq_table = freq_table;
+ return 0;
+ }
+ }
+
+ data->icc_dram_bw_scaling = false;
+ policy->freq_table = bpmp_lut;
+ pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
+
return 0;
}
@@ -102,6 +220,10 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
writel(edvd_val, data->regs + edvd_offset);
}
+ if (data->icc_dram_bw_scaling)
+ tegra_cpufreq_set_bw(policy, tbl->frequency);
+
return 0;
}
@@ -134,7 +256,7 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.init = tegra186_cpufreq_init,
};
-static struct cpufreq_frequency_table *init_vhint_table(
+static struct cpufreq_frequency_table *tegra_cpufreq_bpmp_read_lut(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
int *num_rates)
@@ -229,6 +351,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
+ struct device *cpu_dev;
unsigned int i = 0, err, edvd_offset;
int num_rates = 0;
u32 edvd_val, cpu;
@@ -254,9 +377,9 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
- cluster->table = init_vhint_table(pdev, bpmp, cluster, i, &num_rates);
- if (IS_ERR(cluster->table)) {
- err = PTR_ERR(cluster->table);
+ cluster->bpmp_lut = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, cluster, i, &num_rates);
+ if (IS_ERR(cluster->bpmp_lut)) {
+ err = PTR_ERR(cluster->bpmp_lut);
goto put_bpmp;
} else if (!num_rates) {
err = -EINVAL;
@@ -265,7 +388,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
if (data->cpus[cpu].bpmp_cluster_id == i) {
- edvd_val = cluster->table[num_rates - 1].driver_data;
+ edvd_val = cluster->bpmp_lut[num_rates - 1].driver_data;
edvd_offset = data->cpus[cpu].edvd_offset;
writel(edvd_val, data->regs + edvd_offset);
}
@@ -274,6 +397,19 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
tegra186_cpufreq_driver.driver_data = data;
+ /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ err = -EPROBE_DEFER;
+ goto put_bpmp;
+ }
+
+ if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
+ err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (!err)
+ data->icc_dram_bw_scaling = true;
+ }
+
err = cpufreq_register_driver(&tegra186_cpufreq_driver);
put_bpmp:
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9b4f516f313e..695599e1001f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -750,7 +750,8 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
+ read_counters_wq = alloc_workqueue("read_counters_wq",
+ __WQ_LEGACY | WQ_PERCPU, 1);
if (!read_counters_wq) {
dev_err(&pdev->dev, "fail to create_workqueue\n");
err = -EINVAL;
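The added WQ_PERCPU flag makes the queue's per-CPU placement explicit; together with __WQ_LEGACY and a max_active of 1 it preserves the semantics of the old create_workqueue() interface while the workqueue core moves toward unbound-by-default. A sketch of the allocation/teardown pair, assuming a kernel recent enough to define WQ_PERCPU:

    static struct workqueue_struct *example_wq;

    static int example_setup(void)
    {
    	/* One in-flight item, queued on the submitting CPU. */
    	example_wq = alloc_workqueue("example_wq",
    				     __WQ_LEGACY | WQ_PERCPU, 1);
    	return example_wq ? 0 : -ENOMEM;
    }

    static void example_teardown(void)
    {
    	destroy_workqueue(example_wq);
    }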