Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 227
1 file changed, 98 insertions(+), 129 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 492a10f1bdbf..ec4abe374573 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -575,13 +575,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	int scaling = cpu->pstate.scaling;
 	int freq;
 
-	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
-	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
-	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+	pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+	pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+	pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
 	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
 	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
 	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
 
+	if (scaling == perf_ctl_scaling)
+		return;
+
+	hwp_is_hybrid = true;
+
 	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
 					   perf_ctl_scaling);
 	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
@@ -909,6 +914,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 	[HWP_CPUFREQ_ATTR_COUNT] = NULL,
 };
 
+static u8 hybrid_get_cpu_type(unsigned int cpu)
+{
+	return cpu_data(cpu).topo.intel_type;
+}
+
 static bool no_cas __ro_after_init;
 
 static struct cpudata *hybrid_max_perf_cpu __read_mostly;
@@ -925,11 +935,8 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
 				unsigned long *freq)
 {
 	/*
-	 * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
-	 * of the maximum capacity such that two CPUs of the same type will be
-	 * regarded as equally attractive if the utilization of each of them
-	 * falls into the same bin, which should prevent tasks from being
-	 * migrated between them too often.
+	 * Create four "states" corresponding to 40%, 60%, 80%, and 100% of the
+	 * full capacity.
 	 *
 	 * For this purpose, return the "frequency" of 2 for the first
 	 * performance level and otherwise leave the value set by the caller.
@@ -943,38 +950,40 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
 	return 0;
 }
 
+static bool hybrid_has_l3(unsigned int cpu)
+{
+	struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+	unsigned int i;
+
+	if (!cacheinfo)
+		return false;
+
+	for (i = 0; i < cacheinfo->num_leaves; i++) {
+		if (cacheinfo->info_list[i].level == 3)
+			return true;
+	}
+
+	return false;
+}
+
 static int hybrid_get_cost(struct device *dev, unsigned long freq,
 			   unsigned long *cost)
 {
-	struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
-	struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
-
+	/* Facilitate load balancing between CPUs of the same type. */
+	*cost = freq;
 	/*
-	 * The smaller the perf-to-frequency scaling factor, the larger the IPC
-	 * ratio between the given CPU and the least capable CPU in the system.
-	 * Regard that IPC ratio as the primary cost component and assume that
-	 * the scaling factors for different CPU types will differ by at least
-	 * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+	 * Adjust the cost depending on CPU type.
 	 *
-	 * Add the freq value to the cost, so that the cost of running on CPUs
-	 * of the same type in different "utilization bins" is different.
-	 */
-	*cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
-	/*
-	 * Increase the cost slightly for CPUs able to access L3 to avoid
-	 * touching it in case some other CPUs of the same type can do the work
-	 * without it.
+	 * The idea is to start loading up LPE-cores before E-cores and start
+	 * to populate E-cores when LPE-cores are utilized above 60% of the
+	 * capacity. Similarly, P-cores start to be populated when E-cores are
+	 * utilized above 60% of the capacity.
 	 */
-	if (cacheinfo) {
-		unsigned int i;
-
-		/* Check if L3 cache is there. */
-		for (i = 0; i < cacheinfo->num_leaves; i++) {
-			if (cacheinfo->info_list[i].level == 3) {
-				*cost += 2;
-				break;
-			}
-		}
+	if (hybrid_get_cpu_type(dev->id) == INTEL_CPU_TYPE_ATOM) {
+		if (hybrid_has_l3(dev->id)) /* E-core */
+			*cost += 1;
+	} else { /* P-core */
+		*cost += 2;
 	}
 
 	return 0;
@@ -1037,9 +1046,9 @@ static void hybrid_set_cpu_capacity(struct cpudata *cpu)
 
 	topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
 
-	pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
-		 cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
-		 cpu->pstate.max_pstate_physical);
+	pr_debug("CPU%d: capacity perf = %u, base perf = %u, sys max perf = %u\n",
+		 cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
+		 hybrid_max_perf_cpu->capacity_perf);
 }
 
 static void hybrid_clear_cpu_capacity(unsigned int cpunum)
@@ -1384,7 +1393,8 @@ static void set_power_ctl_ee_state(bool input)
 {
 	u64 power_ctl;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
+
 	rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
 	if (input) {
 		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
@@ -1394,7 +1404,6 @@ static void set_power_ctl_ee_state(bool input)
 		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
 	}
 	wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
-	mutex_unlock(&intel_pstate_driver_lock);
 }
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
@@ -1516,13 +1525,9 @@ static int intel_pstate_update_status(const char *buf, size_t size);
 static ssize_t show_status(struct kobject *kobj,
 			   struct kobj_attribute *attr, char *buf)
 {
-	ssize_t ret;
-
-	mutex_lock(&intel_pstate_driver_lock);
-	ret = intel_pstate_show_status(buf);
-	mutex_unlock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	return ret;
+	return intel_pstate_show_status(buf);
 }
 
 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
@@ -1531,11 +1536,13 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 	char *p = memchr(buf, '\n', count);
 	int ret;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
+
 	ret = intel_pstate_update_status(buf, p ? p - buf : count);
-	mutex_unlock(&intel_pstate_driver_lock);
+	if (ret < 0)
+		return ret;
 
-	return ret < 0 ? ret : count;
+	return count;
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -1545,12 +1552,10 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 	int total, no_turbo, turbo_pct;
 	uint32_t turbo_fp;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
 		return -EAGAIN;
-	}
 
 	cpu = all_cpu_data[0];
 
@@ -1559,8 +1564,6 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 	turbo_fp = div_fp(no_turbo, total);
 	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
 	return sprintf(buf, "%u\n", turbo_pct);
 }
 
@@ -1570,38 +1573,26 @@ static ssize_t show_num_pstates(struct kobject *kobj,
 	struct cpudata *cpu;
 	int total;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
 		return -EAGAIN;
-	}
 
 	cpu = all_cpu_data[0];
 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
 	return sprintf(buf, "%u\n", total);
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
 			     struct kobj_attribute *attr, char *buf)
 {
-	ssize_t ret;
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	mutex_lock(&intel_pstate_driver_lock);
-
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
 		return -EAGAIN;
-	}
-
-	ret = sprintf(buf, "%u\n", global.no_turbo);
-
-	mutex_unlock(&intel_pstate_driver_lock);
 
-	return ret;
+	return sprintf(buf, "%u\n", global.no_turbo);
 }
 
 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
@@ -1613,29 +1604,25 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 	if (sscanf(buf, "%u", &input) != 1)
 		return -EINVAL;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		count = -EAGAIN;
-		goto unlock_driver;
-	}
+	if (!intel_pstate_driver)
+		return -EAGAIN;
 
 	no_turbo = !!clamp_t(int, input, 0, 1);
 
 	WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
 	if (global.turbo_disabled && !no_turbo) {
 		pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
-		count = -EPERM;
 		if (global.no_turbo)
-			goto unlock_driver;
-		else
-			no_turbo = 1;
-	}
+			return -EPERM;
 
-	if (no_turbo == global.no_turbo) {
-		goto unlock_driver;
+		no_turbo = 1;
 	}
 
+	if (no_turbo == global.no_turbo)
+		return count;
+
 	WRITE_ONCE(global.no_turbo, no_turbo);
 
 	mutex_lock(&intel_pstate_limits_lock);
@@ -1654,9 +1641,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 	intel_pstate_update_limits_for_all();
 	arch_set_max_freq_ratio(no_turbo);
 
-unlock_driver:
-	mutex_unlock(&intel_pstate_driver_lock);
-
 	return count;
 }
 
@@ -1706,12 +1690,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
 		return -EAGAIN;
-	}
 
 	mutex_lock(&intel_pstate_limits_lock);
 
@@ -1724,8 +1706,6 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	else
 		update_qos_requests(FREQ_QOS_MAX);
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
 	return count;
 }
 
@@ -1739,12 +1719,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
 		return -EAGAIN;
-	}
 
 	mutex_lock(&intel_pstate_limits_lock);
 
@@ -1758,8 +1736,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	else
 		update_qos_requests(FREQ_QOS_MIN);
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
 	return count;
 }
 
@@ -1780,10 +1756,10 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
 	if (ret)
 		return ret;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
+
 	hwp_boost = !!input;
 	intel_pstate_update_policies();
-	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
 }
@@ -2072,6 +2048,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	intel_pstate_update_epp_defaults(cpudata);
 }
 
+static u64 get_perf_ctl_val(int pstate)
+{
+	u64 val;
+
+	val = (u64)pstate << 8;
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+	    cpu_feature_enabled(X86_FEATURE_IDA))
+		val |= (u64)1 << 32;
+
+	return val;
+}
+
 static int atom_get_min_pstate(int not_used)
 {
 	u64 value;
@@ -2098,15 +2086,10 @@ static int atom_get_turbo_pstate(int not_used)
 
 static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 {
-	u64 val;
+	u64 val = get_perf_ctl_val(pstate);
 	int32_t vid_fp;
 	u32 vid;
 
-	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
-	    cpu_feature_enabled(X86_FEATURE_IDA))
-		val |= (u64)1 << 32;
-
 	vid_fp = cpudata->vid.min + mul_fp(
 		int_tofp(pstate - cpudata->pstate.min_pstate),
 		cpudata->vid.ratio);
@@ -2266,14 +2249,7 @@ static int core_get_turbo_pstate(int cpu)
 
 static u64 core_get_val(struct cpudata *cpudata, int pstate)
 {
-	u64 val;
-
-	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
-	    cpu_feature_enabled(X86_FEATURE_IDA))
-		val |= (u64)1 << 32;
-
-	return val;
+	return get_perf_ctl_val(pstate);
 }
 
 static int knl_get_aperf_mperf_shift(void)
@@ -2297,18 +2273,14 @@ static int knl_get_turbo_pstate(int cpu)
 static int hwp_get_cpu_scaling(int cpu)
 {
 	if (hybrid_scaling_factor) {
-		struct cpuinfo_x86 *c = &cpu_data(cpu);
-		u8 cpu_type = c->topo.intel_type;
-
 		/*
 		 * Return the hybrid scaling factor for P-cores and use the
 		 * default core scaling for E-cores.
 		 */
-		if (cpu_type == INTEL_CPU_TYPE_CORE)
+		if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
 			return hybrid_scaling_factor;
 
-		if (cpu_type == INTEL_CPU_TYPE_ATOM)
-			return core_get_scaling();
+		return core_get_scaling();
 	}
 
 	/* Use core scaling on non-hybrid systems. */
@@ -2343,11 +2315,10 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
-	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
 	int perf_ctl_scaling = pstate_funcs.get_scaling();
 
+	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
 	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
-	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
 	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
 
 	if (hwp_active && !hwp_mode_bdw) {
@@ -2355,10 +2326,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
 		if (pstate_funcs.get_cpu_scaling) {
 			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
-			if (cpu->pstate.scaling != perf_ctl_scaling) {
-				intel_pstate_hybrid_hwp_adjust(cpu);
-				hwp_is_hybrid = true;
-			}
+			intel_pstate_hybrid_hwp_adjust(cpu);
 		} else {
 			cpu->pstate.scaling = perf_ctl_scaling;
 		}
@@ -2760,6 +2728,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
 	X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
 	X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
+	X86_MATCH(INTEL_DIAMONDRAPIDS_X, core_funcs),
 	{}
 };
 #endif
@@ -3912,9 +3881,9 @@ hwp_cpu_matched:
 
 	}
 
-	mutex_lock(&intel_pstate_driver_lock);
-	rc = intel_pstate_register_driver(default_driver);
-	mutex_unlock(&intel_pstate_driver_lock);
+	scoped_guard(mutex, &intel_pstate_driver_lock) {
+		rc = intel_pstate_register_driver(default_driver);
+	}
 	if (rc) {
 		intel_pstate_sysfs_remove();
 		return rc;
