authorLinus Torvalds <torvalds@ppc970.osdl.org>2004-04-12 18:52:00 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-04-12 18:52:00 -0700
commit9515fdf192fee0ff2fc1bd4a277ee952fb9b13d5 (patch)
tree03d3b7462f868cda0226e1bb42af1f177fc83cab
parentf48c54a39e3d709f0e6072c4d9c7eb43ae01f1da (diff)
parent2aebc5b195bc00aa5e09c10dec3a4a5fdb9ba21b (diff)
Merge bk://linux-dj.bkbits.net/cpufreq
into ppc970.osdl.org:/home/torvalds/v2.6/linux
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Kconfig25
-rw-r--r--arch/i386/kernel/cpu/cpufreq/elanfreq.c3
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longrun.c16
-rw-r--r--arch/i386/kernel/cpu/cpufreq/p4-clockmod.c17
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k6.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k7.c12
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c931
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.h111
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c272
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-ich.c7
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.c19
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-smi.c7
-rw-r--r--drivers/cpufreq/cpufreq.c37
-rw-r--r--include/linux/cpufreq.h6
15 files changed, 986 insertions, 483 deletions
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 089b553e344d..d285dc1bafce 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -120,6 +120,21 @@ config X86_SPEEDSTEP_CENTRINO
If in doubt, say N.
+config X86_SPEEDSTEP_CENTRINO_TABLE
+ bool
+ depends on X86_SPEEDSTEP_CENTRINO
+ default y
+
+config X86_SPEEDSTEP_CENTRINO_ACPI
+ bool "Use ACPI tables to decode valid frequency/voltage pairs (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on ((X86_SPEEDSTEP_CENTRINO = "m" && ACPI_PROCESSOR) || (X86_SPEEDSTEP_CENTRINO = "y" && ACPI_PROCESSOR = "y"))
+ help
+ Use primarily the information provided in the BIOS ACPI tables
+ to determine valid CPU frequency and voltage pairings.
+
+ If in doubt, say Y.
+
config X86_SPEEDSTEP_ICH
tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
depends on CPU_FREQ_TABLE
@@ -161,6 +176,16 @@ config X86_SPEEDSTEP_LIB
depends on (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
+config X86_SPEEDSTEP_RELAXED_CAP_CHECK
+ bool "Relaxed speedstep capability checks"
+ depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
+ help
+ Don't perform all checks for a speedstep capable system which would
+ normally be done. Some ancient or strange systems, though speedstep
+ capable, don't always indicate that they are speedstep capable. This
+ option lets the probing code bypass some of those checks if the
+ parameter "relaxed_check=1" is passed to the module.
+
config X86_LONGRUN
tristate "Transmeta LongRun"
depends on CPU_FREQ
diff --git a/arch/i386/kernel/cpu/cpufreq/elanfreq.c b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
index b6e8482a0d06..e45ff82dcb4e 100644
--- a/arch/i386/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
@@ -165,7 +165,8 @@ static void elanfreq_set_cpu_state (unsigned int state) {
/**
- * elanfreq_validatespeed: test if frequency range is valid
+ * elanfreq_validatespeed: test if frequency range is valid
+ * @policy: the policy to validate
*
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 3c1a30c1d918..d8ddf3931530 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -458,7 +458,7 @@ static int __init longhaul_cpu_init (struct cpufreq_policy *policy)
return 0;
}
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
+static int __exit longhaul_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
diff --git a/arch/i386/kernel/cpu/cpufreq/longrun.c b/arch/i386/kernel/cpu/cpufreq/longrun.c
index 73a6b7eac052..e51bb0304243 100644
--- a/arch/i386/kernel/cpu/cpufreq/longrun.c
+++ b/arch/i386/kernel/cpu/cpufreq/longrun.c
@@ -33,7 +33,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
* Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
* and MSR_TMTA_LONGRUN_CTRL
*/
-static void longrun_get_policy(struct cpufreq_policy *policy)
+static void __init longrun_get_policy(struct cpufreq_policy *policy)
{
u32 msr_lo, msr_hi;
@@ -57,7 +57,7 @@ static void longrun_get_policy(struct cpufreq_policy *policy)
/**
* longrun_set_policy - sets a new CPUFreq policy
- * @policy - new policy
+ * @policy: new policy
*
* Sets a new CPUFreq policy on LongRun-capable processors. This function
* has to be called with cpufreq_driver locked.
@@ -106,6 +106,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
/**
* longrun_verify_poliy - verifies a new CPUFreq policy
+ * @policy: the policy to verify
*
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
@@ -130,6 +131,8 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
/**
* longrun_determine_freqs - determines the lowest and highest possible core frequency
+ * @low_freq: an int to put the lowest frequency into
+ * @high_freq: an int to put the highest frequency into
*
* Determines the lowest and highest possible core frequencies on this CPU.
* This is necessary to calculate the performance percentage according to
@@ -142,6 +145,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
u32 msr_lo, msr_hi;
u32 save_lo, save_hi;
u32 eax, ebx, ecx, edx;
+ u32 try_hi;
struct cpuinfo_x86 *c = cpu_data;
if (!low_freq || !high_freq)
@@ -184,12 +188,14 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
* upper limit to make the calculation more accurate.
*/
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
- if (ecx > 90) {
- /* set to 0 to 80 perf_pctg */
+ /* try decreasing in 10% steps; some processors react only
+ * at certain barrier values */
+ for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -=10) {
+ /* set to 0 to try_hi perf_pctg */
msr_lo &= 0xFFFFFF80;
msr_hi &= 0xFFFFFF80;
msr_lo |= 0;
- msr_hi |= 80;
+ msr_hi |= try_hi;
wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
/* read out current core MHz and current perf_pctg */
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index c6ca18703454..28d5e2243017 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -181,21 +181,24 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if ((c->x86 == 0x06) && (c->x86_model == 0x09)) {
/* Pentium M */
- printk(KERN_DEBUG PFX "Warning: Pentium M detected. The speedstep_centrino module\n");
- printk(KERN_DEBUG PFX "offers voltage scaling in addition of frequency scaling. You\n");
- printk(KERN_DEBUG PFX "should use that instead of p4-clockmod, if possible.\n");
+ printk(KERN_WARNING PFX "Warning: Pentium M detected. "
+ "The speedstep_centrino module offers voltage scaling"
+ " in addition of frequency scaling. You should use "
+ "that instead of p4-clockmod, if possible.\n");
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
}
if (c->x86 != 0xF) {
- printk(KERN_DEBUG PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n");
+ printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n");
return 0;
}
if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) {
- printk(KERN_DEBUG PFX "Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq \n");
- printk(KERN_DEBUG PFX "modules offers voltage scaling in addition of frequency scaling. You\n");
- printk(KERN_DEBUG PFX "should use either one instead of p4-clockmod, if possible.\n");
+ printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
+ "The speedstep-ich or acpi cpufreq modules offer "
+ "voltage scaling in addition of frequency scaling. "
+ "You should use either one instead of p4-clockmod, "
+ "if possible.\n");
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M);
}
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
index 83b0d2e527fd..80153fb05dbb 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
@@ -118,7 +118,9 @@ static int powernow_k6_verify(struct cpufreq_policy *policy)
/**
* powernow_k6_setpolicy - sets a new CPUFreq policy
- * @policy - new policy
+ * @policy: new policy
+ * @target_freq: the target frequency
+ * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* sets a new CPUFreq policy
*/
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index c77c9637592d..8b95e88c95d3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -98,18 +98,20 @@ static int check_powernow(void)
return 0;
}
- if ((c->x86_model == 6) && (c->x86_mask == 0)) {
- printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n");
- have_a0 = 1;
- }
-
/* Get maximum capabilities */
maxei = cpuid_eax (0x80000000);
if (maxei < 0x80000007) { /* Any powernow info ? */
+#ifdef MODULE
printk (KERN_INFO PFX "No powernow capabilities detected\n");
+#endif
return 0;
}
+ if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+ printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n");
+ have_a0 = 1;
+ }
+
cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e92270482a70..aa387c4b46eb 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,22 +1,23 @@
/*
- * (c) 2003 Advanced Micro Devices, Inc.
+ * (c) 2003, 2004 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
- * GNU general public license version 2. See "../../../COPYING" or
+ * GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
* Support : paul.devriendt@amd.com
*
* Based on the powernow-k7.c module written by Dave Jones.
- * (C) 2003 Dave Jones <davej@codemonkey.ork.uk> on behalf of SuSE Labs
+ * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
* (C) 2004 Dominik Brodowski <linux@brodo.de>
* (C) 2004 Pavel Machek <pavel@suse.cz>
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by AMD.
*
+ * Valuable input gratefully received from Dave Jones, Pavel Machek,
+ * Dominik Brodowski, and others.
* Processor information obtained from Chapter 9 (Power and Thermal Management)
* of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- * Opteron Processors", revision 3.03, available for download from www.amd.com
- *
+ * Opteron Processors" available for download from www.amd.com
*/
#include <linux/kernel.h>
@@ -31,55 +32,41 @@
#include <asm/io.h>
#include <asm/delay.h>
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#endif
+
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
-#define VERSION "version 1.00.08a"
+#define VERSION "version 1.00.08b"
#include "powernow-k8.h"
-static u32 vstable; /* voltage stabalization time, from PSB, units 20 us */
-static u32 plllock; /* pll lock time, from PSB, units 1 us */
-static u32 numps; /* number of p-states, from PSB */
-static u32 rvo; /* ramp voltage offset, from PSB */
-static u32 irt; /* isochronous relief time, from PSB */
-static u32 vidmvs; /* usable value calculated from mvs, from PSB */
-static u32 currvid; /* keep track of the current fid / vid */
-static u32 currfid;
+/* serialize freq changes */
+static DECLARE_MUTEX(fidvid_sem);
-static struct cpufreq_frequency_table *powernow_table;
+static struct powernow_k8_data *powernow_data[NR_CPUS];
-/*
-The PSB table supplied by BIOS allows for the definition of the number of
-p-states that can be used when running on a/c, and the number of p-states
-that can be used when running on battery. This allows laptop manufacturers
-to force the system to save power when running from battery. The relationship
-is :
- 1 <= number_of_battery_p_states <= maximum_number_of_p_states
-
-This driver does NOT have the support in it to detect transitions from
-a/c power to battery power, and thus trigger the transition to a lower
-p-state if required. This is because I need ACPI and the 2.6 kernel to do
-this, and this is a 2.4 kernel driver. Check back for a new improved driver
-for the 2.6 kernel soon.
-
-This code therefore assumes it is on battery at all times, and thus
-restricts performance to number_of_battery_p_states. For desktops,
- number_of_battery_p_states == maximum_number_of_pstates,
-so this is not actually a restriction.
-*/
-
-static u32 batps; /* limit on the number of p states when on battery */
- /* - set by BIOS in the PSB/PST */
-
- /* Return a frequency in MHz, given an input fid */
+/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
- return 800 + (fid * 100);
+ return 800 + (fid * 100);
}
+/* Return a frequency in KHz, given an input fid */
+static u32 find_khz_freq_from_fid(u32 fid)
+{
+ return 1000 * find_freq_from_fid(fid);
+}
+
+/* Return a voltage in milliVolts, given an input vid */
+static u32 find_millivolts_from_vid(struct powernow_k8_data *data, u32 vid)
+{
+ return 1550-vid*25;
+}
/* Return the vco fid for an input fid */
-static u32
-convert_fid_to_vco_fid(u32 fid)
+static u32 convert_fid_to_vco_fid(u32 fid)
{
if (fid < HI_FID_TABLE_BOTTOM) {
return 8 + (2 * fid);
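[ Aside, not part of the patch: the helpers added above are plain linear conversions, MHz = 800 + fid * 100, kHz = 1000 * MHz, mV = 1550 - vid * 25. A minimal stand-alone sketch, with arbitrary sample fid/vid values: ]

#include <stdio.h>
#include <stdint.h>

/* stand-alone restatement of the conversion helpers added in this hunk */
static uint32_t find_freq_from_fid(uint32_t fid)       { return 800 + (fid * 100); }            /* MHz */
static uint32_t find_khz_freq_from_fid(uint32_t fid)   { return 1000 * find_freq_from_fid(fid); }
static uint32_t find_millivolts_from_vid(uint32_t vid) { return 1550 - (vid * 25); }            /* mV */

int main(void)
{
	uint32_t fid = 0x0a, vid = 0x06;	/* sample values, not from any real PST/ACPI table */

	printf("fid 0x%02x -> %u MHz (%u kHz)\n",
	       fid, find_freq_from_fid(fid), find_khz_freq_from_fid(fid));
	printf("vid 0x%02x -> %u mV\n", vid, find_millivolts_from_vid(vid));
	return 0;	/* prints 1800 MHz / 1800000 kHz and 1400 mV for these samples */
}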
@@ -89,11 +76,10 @@ convert_fid_to_vco_fid(u32 fid)
}
/*
- * Return 1 if the pending bit is set. Unless we are actually just told the
- * processor to transition a state, seeing this bit set is really bad news.
+ * Return 1 if the pending bit is set. Unless we just instructed the processor
+ * to transition to a new state, seeing this bit set is really bad news.
*/
-static inline int
-pending_bit_stuck(void)
+static int pending_bit_stuck(void)
{
u32 lo, hi;
@@ -102,11 +88,10 @@ pending_bit_stuck(void)
}
/*
- * Update the global current fid / vid values from the status msr. Returns 1
- * on error.
+ * Update the global current fid / vid values from the status msr.
+ * Returns 1 on error.
*/
-static int
-query_current_values_with_pending_wait(void)
+static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
{
u32 lo, hi;
u32 i = 0;
@@ -120,63 +105,74 @@ query_current_values_with_pending_wait(void)
rdmsr(MSR_FIDVID_STATUS, lo, hi);
}
- currvid = hi & MSR_S_HI_CURRENT_VID;
- currfid = lo & MSR_S_LO_CURRENT_FID;
+ data->currvid = hi & MSR_S_HI_CURRENT_VID;
+ data->currfid = lo & MSR_S_LO_CURRENT_FID;
return 0;
}
/* the isochronous relief time */
-static inline void
-count_off_irt(void)
+static void count_off_irt(struct powernow_k8_data *data)
{
- udelay((1 << irt) * 10);
+ udelay((1 << data->irt) * 10);
return;
}
/* the voltage stabalization time */
-static inline void
-count_off_vst(void)
+static void count_off_vst(struct powernow_k8_data *data)
{
- udelay(vstable * VST_UNITS_20US);
+ udelay(data->vstable * VST_UNITS_20US);
return;
}
+/* need to init the control msr to a safe value (for each cpu) */
+static void fidvid_msr_init(void)
+{
+ u32 lo, hi;
+ u8 fid, vid;
+
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ vid = hi & MSR_S_HI_CURRENT_VID;
+ fid = lo & MSR_S_LO_CURRENT_FID;
+ lo = fid | (vid << MSR_C_LO_VID_SHIFT);
+ hi = MSR_C_HI_STP_GNT_BENIGN;
+ dprintk(PFX "cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
+ wrmsr(MSR_FIDVID_CTL, lo, hi);
+}
+
+
/* write the new fid value along with the other control fields to the msr */
-static int
-write_new_fid(u32 fid)
+static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
u32 lo;
- u32 savevid = currvid;
+ u32 savevid = data->currvid;
- if ((fid & INVALID_FID_MASK) || (currvid & INVALID_VID_MASK)) {
+ if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
printk(KERN_ERR PFX "internal error - overflow on fid write\n");
return 1;
}
- lo = fid | (currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
+ lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
- dprintk(KERN_DEBUG PFX "writing fid %x, lo %x, hi %x\n",
- fid, lo, plllock * PLL_LOCK_CONVERSION);
+ dprintk(KERN_DEBUG PFX "writing fid 0x%x, lo 0x%x, hi 0x%x\n",
+ fid, lo, data->plllock * PLL_LOCK_CONVERSION);
- wrmsr(MSR_FIDVID_CTL, lo, plllock * PLL_LOCK_CONVERSION);
+ wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
- if (query_current_values_with_pending_wait())
+ if (query_current_values_with_pending_wait(data))
return 1;
- count_off_irt();
+ count_off_irt(data);
- if (savevid != currvid) {
- printk(KERN_ERR PFX
- "vid changed on fid transition, save %x, currvid %x\n",
- savevid, currvid);
+ if (savevid != data->currvid) {
+ printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n",
+ savevid, data->currvid);
return 1;
}
- if (fid != currfid) {
- printk(KERN_ERR PFX
- "fid transition failed, fid %x, currfid %x\n",
- fid, currfid);
+ if (fid != data->currfid) {
+ printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid,
+ data->currfid);
return 1;
}
@@ -184,39 +180,35 @@ write_new_fid(u32 fid)
}
/* Write a new vid to the hardware */
-static int
-write_new_vid(u32 vid)
+static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
u32 lo;
- u32 savefid = currfid;
+ u32 savefid = data->currfid;
- if ((currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
+ if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
printk(KERN_ERR PFX "internal error - overflow on vid write\n");
return 1;
}
- lo = currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
+ lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
- dprintk(KERN_DEBUG PFX "writing vid %x, lo %x, hi %x\n",
+ dprintk(KERN_DEBUG PFX "writing vid 0x%x, lo 0x%x, hi 0x%x\n",
vid, lo, STOP_GRANT_5NS);
wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
- if (query_current_values_with_pending_wait()) {
+ if (query_current_values_with_pending_wait(data))
return 1;
- }
- if (savefid != currfid) {
- printk(KERN_ERR PFX
- "fid changed on vid transition, save %x currfid %x\n",
- savefid, currfid);
+ if (savefid != data->currfid) {
+ printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",
+ savefid, data->currfid);
return 1;
}
- if (vid != currvid) {
- printk(KERN_ERR PFX
- "vid transition failed, vid %x, currvid %x\n",
- vid, currvid);
+ if (vid != data->currvid) {
+ printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid,
+ data->currvid);
return 1;
}
@@ -228,293 +220,284 @@ write_new_vid(u32 vid)
* Decreasing vid codes represent increasing voltages:
* vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of 0x1f is off.
*/
-static int
-decrease_vid_code_by_step(u32 reqvid, u32 step)
+static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step)
{
- if ((currvid - reqvid) > step)
- reqvid = currvid - step;
+ if ((data->currvid - reqvid) > step)
+ reqvid = data->currvid - step;
- if (write_new_vid(reqvid))
+ if (write_new_vid(data, reqvid))
return 1;
- count_off_vst();
+ count_off_vst(data);
return 0;
}
/* Change the fid and vid, by the 3 phases. */
-static inline int
-transition_fid_vid(u32 reqfid, u32 reqvid)
+static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid)
{
- if (core_voltage_pre_transition(reqvid))
+ if (core_voltage_pre_transition(data, reqvid))
return 1;
- if (core_frequency_transition(reqfid))
+ if (core_frequency_transition(data, reqfid))
return 1;
- if (core_voltage_post_transition(reqvid))
+ if (core_voltage_post_transition(data, reqvid))
return 1;
- if (query_current_values_with_pending_wait())
+ if (query_current_values_with_pending_wait(data))
return 1;
- if ((reqfid != currfid) || (reqvid != currvid)) {
- printk(KERN_ERR PFX "failed: req 0x%x 0x%x, curr 0x%x 0x%x\n",
- reqfid, reqvid, currfid, currvid);
+ if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
+ printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
+ smp_processor_id(),
+ reqfid, reqvid, data->currfid, data->currvid);
return 1;
}
- dprintk(KERN_INFO PFX
- "transitioned: new fid 0x%x, vid 0x%x\n", currfid, currvid);
+ dprintk(KERN_INFO PFX "transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
+ smp_processor_id(), data->currfid, data->currvid);
return 0;
}
-/*
- * Phase 1 - core voltage transition ... setup appropriate voltage for the
- * fid transition.
- */
-static inline int
-core_voltage_pre_transition(u32 reqvid)
+/* Phase 1 - core voltage transition ... setup voltage */
+static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid)
{
- u32 rvosteps = rvo;
- u32 savefid = currfid;
+ u32 rvosteps = data->rvo;
+ u32 savefid = data->currfid;
dprintk(KERN_DEBUG PFX
- "ph1: start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo %x\n",
- currfid, currvid, reqvid, rvo);
-
- while (currvid > reqvid) {
- dprintk(KERN_DEBUG PFX "ph1: curr 0x%x, requesting vid 0x%x\n",
- currvid, reqvid);
- if (decrease_vid_code_by_step(reqvid, vidmvs))
+ "ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid, reqvid, data->rvo);
+
+ while (data->currvid > reqvid) {
+ dprintk(KERN_DEBUG PFX "ph1: curr 0x%x, req vid 0x%x\n",
+ data->currvid, reqvid);
+ if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
return 1;
}
while (rvosteps > 0) {
- if (currvid == 0) {
+ if (data->currvid == 0) {
rvosteps = 0;
} else {
dprintk(KERN_DEBUG PFX
- "ph1: changing vid for rvo, requesting 0x%x\n",
- currvid - 1);
- if (decrease_vid_code_by_step(currvid - 1, 1))
+ "ph1: changing vid for rvo, req 0x%x\n",
+ data->currvid - 1);
+ if (decrease_vid_code_by_step(data, data->currvid - 1, 1))
return 1;
rvosteps--;
}
}
- if (query_current_values_with_pending_wait())
+ if (query_current_values_with_pending_wait(data))
return 1;
- if (savefid != currfid) {
- printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", currfid);
+ if (savefid != data->currfid) {
+ printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid);
return 1;
}
dprintk(KERN_DEBUG PFX "ph1 complete, currfid 0x%x, currvid 0x%x\n",
- currfid, currvid);
+ data->currfid, data->currvid);
return 0;
}
/* Phase 2 - core frequency transition */
-static inline int
-core_frequency_transition(u32 reqfid)
+static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
{
u32 vcoreqfid;
u32 vcocurrfid;
u32 vcofiddiff;
- u32 savevid = currvid;
+ u32 savevid = data->currvid;
- if ((reqfid < HI_FID_TABLE_BOTTOM) && (currfid < HI_FID_TABLE_BOTTOM)) {
- printk(KERN_ERR PFX "ph2 illegal lo-lo transition 0x%x 0x%x\n",
- reqfid, currfid);
+ if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
+ printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n",
+ reqfid, data->currfid);
return 1;
}
- if (currfid == reqfid) {
- printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", currfid);
+ if (data->currfid == reqfid) {
+ printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid);
return 0;
}
dprintk(KERN_DEBUG PFX
- "ph2 starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
- currfid, currvid, reqfid);
+ "ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid, reqfid);
vcoreqfid = convert_fid_to_vco_fid(reqfid);
- vcocurrfid = convert_fid_to_vco_fid(currfid);
+ vcocurrfid = convert_fid_to_vco_fid(data->currfid);
vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
: vcoreqfid - vcocurrfid;
while (vcofiddiff > 2) {
- if (reqfid > currfid) {
- if (currfid > LO_FID_TABLE_TOP) {
- if (write_new_fid(currfid + 2)) {
+ if (reqfid > data->currfid) {
+ if (data->currfid > LO_FID_TABLE_TOP) {
+ if (write_new_fid(data, data->currfid + 2)) {
return 1;
}
} else {
if (write_new_fid
- (2 + convert_fid_to_vco_fid(currfid))) {
+ (data, 2 + convert_fid_to_vco_fid(data->currfid))) {
return 1;
}
}
} else {
- if (write_new_fid(currfid - 2))
+ if (write_new_fid(data, data->currfid - 2))
return 1;
}
- vcocurrfid = convert_fid_to_vco_fid(currfid);
+ vcocurrfid = convert_fid_to_vco_fid(data->currfid);
vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
: vcoreqfid - vcocurrfid;
}
- if (write_new_fid(reqfid))
+ if (write_new_fid(data, reqfid))
return 1;
- if (query_current_values_with_pending_wait())
+ if (query_current_values_with_pending_wait(data))
return 1;
- if (currfid != reqfid) {
+ if (data->currfid != reqfid) {
printk(KERN_ERR PFX
- "ph2 mismatch, failed fid transition, curr %x, req %x\n",
- currfid, reqfid);
+ "ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
+ data->currfid, reqfid);
return 1;
}
- if (savevid != currvid) {
- printk(KERN_ERR PFX
- "ph2 vid changed, save %x, curr %x\n", savevid,
- currvid);
+ if (savevid != data->currvid) {
+ printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
+ savevid, data->currvid);
return 1;
}
dprintk(KERN_DEBUG PFX "ph2 complete, currfid 0x%x, currvid 0x%x\n",
- currfid, currvid);
+ data->currfid, data->currvid);
return 0;
}
/* Phase 3 - core voltage transition flow ... jump to the final vid. */
-static inline int
-core_voltage_post_transition(u32 reqvid)
+static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid)
{
- u32 savefid = currfid;
+ u32 savefid = data->currfid;
u32 savereqvid = reqvid;
- dprintk(KERN_DEBUG PFX "ph3 starting, currfid 0x%x, currvid 0x%x\n",
- currfid, currvid);
+ dprintk(KERN_DEBUG PFX "ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid);
- if (reqvid != currvid) {
- if (write_new_vid(reqvid))
+ if (reqvid != data->currvid) {
+ if (write_new_vid(data, reqvid))
return 1;
- if (savefid != currfid) {
+ if (savefid != data->currfid) {
printk(KERN_ERR PFX
- "ph3: bad fid change, save %x, curr %x\n",
- savefid, currfid);
+ "ph3: bad fid change, save 0x%x, curr 0x%x\n",
+ savefid, data->currfid);
return 1;
}
- if (currvid != reqvid) {
+ if (data->currvid != reqvid) {
printk(KERN_ERR PFX
- "ph3: failed vid transition\n, req %x, curr %x",
- reqvid, currvid);
+ "ph3: failed vid transition\n, req 0x%x, curr 0x%x",
+ reqvid, data->currvid);
return 1;
}
}
- if (query_current_values_with_pending_wait())
+ if (query_current_values_with_pending_wait(data))
return 1;
- if (savereqvid != currvid) {
- dprintk(KERN_ERR PFX "ph3 failed, currvid 0x%x\n", currvid);
+ if (savereqvid != data->currvid) {
+ dprintk(KERN_ERR PFX "ph3 failed, currvid 0x%x\n", data->currvid);
return 1;
}
- if (savefid != currfid) {
+ if (savefid != data->currfid) {
dprintk(KERN_ERR PFX "ph3 failed, currfid changed 0x%x\n",
- currfid);
+ data->currfid);
return 1;
}
dprintk(KERN_DEBUG PFX "ph3 complete, currfid 0x%x, currvid 0x%x\n",
- currfid, currvid);
+ data->currfid, data->currvid);
return 0;
}
-static inline int
-check_supported_cpu(void)
+static int check_supported_cpu(unsigned int cpu)
{
- struct cpuinfo_x86 *c = cpu_data;
+ cpumask_t oldmask = CPU_MASK_ALL;
u32 eax, ebx, ecx, edx;
+ unsigned int rc = 0;
- if (num_online_cpus() != 1) {
- printk(KERN_INFO PFX "multiprocessor systems not supported\n");
- return 0;
- }
+ oldmask = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ schedule();
- if (c->x86_vendor != X86_VENDOR_AMD) {
-#ifdef MODULE
- printk(KERN_INFO PFX "Not an AMD processor\n");
-#endif
- return 0;
+ if (smp_processor_id() != cpu) {
+ printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
+ goto out;
}
+ if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ goto out;
+
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
if ((eax & CPUID_XFAM_MOD) == ATHLON64_XFAM_MOD) {
dprintk(KERN_DEBUG PFX "AMD Althon 64 Processor found\n");
- if ((eax & CPUID_F1_STEP) < ATHLON64_REV_C0) {
- printk(KERN_INFO PFX "Revision C0 or better "
- "AMD Athlon 64 processor required\n");
- return 0;
- }
} else if ((eax & CPUID_XFAM_MOD) == OPTERON_XFAM_MOD) {
dprintk(KERN_DEBUG PFX "AMD Opteron Processor found\n");
} else {
printk(KERN_INFO PFX
"AMD Athlon 64 or AMD Opteron processor required\n");
- return 0;
+ goto out;
}
eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
printk(KERN_INFO PFX
"No frequency change capabilities detected\n");
- return 0;
+ goto out;
}
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) {
printk(KERN_INFO PFX "Power state transitions not supported\n");
- return 0;
+ goto out;
}
- printk(KERN_INFO PFX "Found AMD64 processor supporting PowerNow (" VERSION ")\n");
- return 1;
+ rc = 1;
+
+out:
+ set_cpus_allowed(current, oldmask);
+ schedule();
+ return rc;
+
}
-static int check_pst_table(struct pst_s *pst, u8 maxvid)
+static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
{
unsigned int j;
- u8 lastfid = 0xFF;
+ u8 lastfid = 0xff;
- for (j = 0; j < numps; j++) {
+ for (j = 0; j < data->numps; j++) {
if (pst[j].vid > LEAST_VID) {
printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid);
return -EINVAL;
}
- if (pst[j].vid < rvo) { /* vid + rvo >= 0 */
- printk(KERN_ERR PFX
- "BIOS error - 0 vid exceeded with pstate %d\n",
- j);
+ if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */
+ printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j);
return -ENODEV;
}
- if (pst[j].vid < maxvid + rvo) { /* vid + rvo >= maxvid */
- printk(KERN_ERR PFX
- "BIOS error - maxvid exceeded with pstate %d\n",
- j);
+ if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */
+ printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j);
return -ENODEV;
}
if ((pst[j].fid > MAX_FID)
@@ -531,20 +514,88 @@ static int check_pst_table(struct pst_s *pst, u8 maxvid)
printk(KERN_ERR PFX "lastfid invalid\n");
return -EINVAL;
}
- if (lastfid > LO_FID_TABLE_TOP) {
+ if (lastfid > LO_FID_TABLE_TOP)
printk(KERN_INFO PFX "first fid not from lo freq table\n");
+
+ return 0;
+}
+
+static void print_basics(struct powernow_k8_data *data)
+{
+ int j;
+ for (j = 0; j < data->numps; j++) {
+ printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x (%d mV)\n", j,
+ data->powernow_table[j].index & 0xff,
+ data->powernow_table[j].frequency/1000,
+ data->powernow_table[j].index >> 8,
+ find_millivolts_from_vid(data, data->powernow_table[j].index >> 8));
+ }
+ if (data->batps)
+ printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps);
+}
+
+static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
+{
+ struct cpufreq_frequency_table *powernow_table;
+ unsigned int j;
+
+ if (data->batps) { /* use ACPI support to get full speed on mains power */
+ printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range\n", data->batps);
+ data->numps = data->batps;
+ }
+
+ for ( j=1; j<data->numps; j++ ) {
+ if (pst[j-1].fid >= pst[j].fid) {
+ printk(KERN_ERR PFX "PST out of sequence\n");
+ return -EINVAL;
+ }
+ }
+
+ if (data->numps < 2) {
+ printk(KERN_ERR PFX "no p states to transition\n");
+ return -ENODEV;
+ }
+
+ if (check_pst_table(data, pst, maxvid))
+ return -EINVAL;
+
+ powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+ * (data->numps + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < data->numps; j++) {
+ powernow_table[j].index = pst[j].fid; /* lower 8 bits */
+ powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
+ powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid);
+ }
+ powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
+ powernow_table[data->numps].index = 0;
+
+ if (query_current_values_with_pending_wait(data)) {
+ kfree(powernow_table);
+ return -EIO;
}
+ dprintk(KERN_INFO PFX "cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
+ data->powernow_table = powernow_table;
+ print_basics(data);
+
+ for (j = 0; j < data->numps; j++)
+ if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
+ return 0;
+
+ dprintk(KERN_ERR PFX "currfid/vid do not match PST, ignoring\n");
return 0;
}
/* Find and validate the PSB/PST table in BIOS. */
-static inline int
-find_psb_table(void)
+static int find_psb_table(struct powernow_k8_data *data)
{
struct psb_s *psb;
- struct pst_s *pst;
- unsigned int i, j;
+ unsigned int i;
u32 mvs;
u8 maxvid;
@@ -570,20 +621,19 @@ find_psb_table(void)
return -ENODEV;
}
- vstable = psb->voltagestabilizationtime;
+ data->vstable = psb->voltagestabilizationtime;
+ dprintk(KERN_INFO PFX "voltage stabilization time: %d(*20us)\n", data->vstable);
+
dprintk(KERN_DEBUG PFX "flags2: 0x%x\n", psb->flags2);
- rvo = psb->flags2 & 3;
- irt = ((psb->flags2) >> 2) & 3;
+ data->rvo = psb->flags2 & 3;
+ data->irt = ((psb->flags2) >> 2) & 3;
mvs = ((psb->flags2) >> 4) & 3;
- vidmvs = 1 << mvs;
- batps = ((psb->flags2) >> 6) & 3;
+ data->vidmvs = 1 << mvs;
+ data->batps = ((psb->flags2) >> 6) & 3;
- printk(KERN_INFO PFX "voltage stable in %d usec", vstable * 20);
- if (batps)
- printk(", only %d lowest states on battery", batps);
- printk(", ramp voltage offset: %d", rvo);
- printk(", isochronous relief time: %d", irt);
- printk(", maximum voltage step: %d\n", mvs);
+ dprintk(KERN_INFO PFX "ramp voltage offset: %d\n", data->rvo);
+ dprintk(KERN_INFO PFX "isochronous relief time: %d\n", data->irt);
+ dprintk(KERN_INFO PFX "maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
dprintk(KERN_DEBUG PFX "numpst: 0x%x\n", psb->numpst);
if (psb->numpst != 1) {
@@ -591,241 +641,386 @@ find_psb_table(void)
return -ENODEV;
}
- dprintk(KERN_DEBUG PFX "cpuid: 0x%x\n", psb->cpuid);
-
- plllock = psb->plllocktime;
- printk(KERN_INFO PFX "pll lock time: 0x%x, ", plllock);
-
+ data->plllock = psb->plllocktime;
+ dprintk(KERN_INFO PFX "plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
+ dprintk(KERN_INFO PFX "maxfid: 0x%x\n", psb->maxfid);
+ dprintk(KERN_INFO PFX "maxvid: 0x%x\n", psb->maxvid);
maxvid = psb->maxvid;
- printk("maxfid 0x%x (%d MHz), maxvid 0x%x\n",
- psb->maxfid, find_freq_from_fid(psb->maxfid), maxvid);
- numps = psb->numpstates;
- if (numps < 2) {
- printk(KERN_ERR BFX "no p states to transition\n");
- return -ENODEV;
- }
+ data->numps = psb->numpstates;
+ dprintk(KERN_INFO PFX "numpstates: 0x%x\n", data->numps);
+ return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
+ }
+ /*
+ * If you see this message, complain to BIOS manufacturer. If
+ * he tells you "we do not support Linux" or some similar
+ * nonsense, remember that Windows 2000 uses the same legacy
+ * mechanism that the old Linux PSB driver uses. Tell them it
+ * is broken with Windows 2000.
+ *
+ * The reference to the AMD documentation is chapter 9 in the
+ * BIOS and Kernel Developer's Guide, which is available on
+ * www.amd.com
+ */
+ printk(KERN_ERR PFX "BIOS error - no PSB\n");
+ return -ENODEV;
+}
- if (batps == 0) {
- batps = numps;
- } else if (batps > numps) {
- printk(KERN_ERR BFX "batterypstates > numpstates\n");
- batps = numps;
- } else {
- printk(KERN_ERR PFX
- "Restricting operation to %d p-states\n", batps);
- printk(KERN_ERR PFX
- "Check for an updated driver to access all "
- "%d p-states\n", numps);
- }
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
+{
+ if (!data->acpi_data.state_count)
+ return;
+
+ data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
+ data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+ data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+ data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
+ data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
+}
- if (numps <= 1) {
- printk(KERN_ERR PFX "only 1 p-state to transition\n");
- return -ENODEV;
- }
+static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
+{
+ int i;
+ int cntlofreq = 0;
+ struct cpufreq_frequency_table *powernow_table;
- pst = (struct pst_s *) (psb + 1);
- if (check_pst_table(pst, maxvid))
- return -EINVAL;
+ if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
+ dprintk(KERN_DEBUG PFX "register performance failed\n");
+ return -EIO;
+ }
- powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (numps + 1)), GFP_KERNEL);
- if (!powernow_table) {
- printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
- return -ENOMEM;
- }
+ /* verify the data contained in the ACPI structures */
+ if (data->acpi_data.state_count <= 1) {
+ dprintk(KERN_DEBUG PFX "No ACPI P-States\n");
+ goto err_out;
+ }
- for (j = 0; j < psb->numpstates; j++) {
- powernow_table[j].index = pst[j].fid; /* lower 8 bits */
- powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
- }
+ if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ dprintk(KERN_DEBUG PFX "Invalid control/status registers\n");
+ goto err_out;
+ }
- /* If you want to override your frequency tables, this
- is right place. */
+ /* fill in data->powernow_table */
+ powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+ * (data->acpi_data.state_count + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ dprintk(KERN_ERR PFX "powernow_table memory alloc failure\n");
+ goto err_out;
+ }
- for (j = 0; j < numps; j++) {
- powernow_table[j].frequency = find_freq_from_fid(powernow_table[j].index & 0xff)*1000;
- printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n", j,
- powernow_table[j].index & 0xff,
- powernow_table[j].frequency/1000,
- powernow_table[j].index >> 8);
- }
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ u32 fid = data->acpi_data.states[i].control & FID_MASK;
+ u32 vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
+
+ dprintk(KERN_INFO PFX " %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
- powernow_table[numps].frequency = CPUFREQ_TABLE_END;
- powernow_table[numps].index = 0;
+ powernow_table[i].index = fid; /* lower 8 bits */
+ powernow_table[i].index |= (vid << 8); /* upper 8 bits */
+ powernow_table[i].frequency = find_khz_freq_from_fid(fid);
- if (query_current_values_with_pending_wait()) {
- kfree(powernow_table);
- return -EIO;
+ /* verify frequency is OK */
+ if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) ||
+ (powernow_table[i].frequency < (MIN_FREQ * 1000))) {
+ dprintk(KERN_INFO PFX "invalid freq %u kHz\n", powernow_table[i].frequency);
+ powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
}
- printk(KERN_INFO PFX "currfid 0x%x (%d MHz), currvid 0x%x\n",
- currfid, find_freq_from_fid(currfid), currvid);
+ /* verify only 1 entry from the lo frequency table */
+ if ((fid < HI_FID_TABLE_BOTTOM) && (cntlofreq++)) {
+ printk(KERN_ERR PFX "Too many lo freq table entries\n");
+ goto err_out;
+ }
+
+ if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+ printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+ powernow_table[i].frequency,
+ (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+ powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
+ }
+ }
- for (j = 0; j < numps; j++)
- if ((pst[j].fid==currfid) && (pst[j].vid==currvid))
- return 0;
+ powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
+ powernow_table[data->acpi_data.state_count].index = 0;
+ data->powernow_table = powernow_table;
- printk(KERN_ERR BFX "currfid/vid do not match PST, ignoring\n");
- return 0;
- }
+ /* fill in data */
+ data->numps = data->acpi_data.state_count;
+ print_basics(data);
+ powernow_k8_acpi_pst_values(data, 0);
+ return 0;
+err_out:
+ acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
- printk(KERN_ERR BFX "no PSB\n");
+ /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
+ data->acpi_data.state_count = 0;
+
return -ENODEV;
}
+static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
+{
+ if (data->acpi_data.state_count)
+ acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+}
+
+#else
+static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
+static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+#endif /* CONFIG_X86_POWERNOW_K8_ACPI */
+
/* Take a frequency, and issue the fid/vid transition command */
-static inline int
-transition_frequency(unsigned int index)
+static int transition_frequency(struct powernow_k8_data *data, unsigned int index)
{
u32 fid;
u32 vid;
int res;
struct cpufreq_freqs freqs;
+ dprintk(KERN_DEBUG PFX "cpu %d transition to index %u\n",
+ smp_processor_id(), index );
+
/* fid are the lower 8 bits of the index we stored into
* the cpufreq frequency table in find_psb_table, vid are
* the upper 8 bits.
*/
- fid = powernow_table[index].index & 0xFF;
- vid = (powernow_table[index].index & 0xFF00) >> 8;
+ fid = data->powernow_table[index].index & 0xFF;
+ vid = (data->powernow_table[index].index & 0xFF00) >> 8;
dprintk(KERN_DEBUG PFX "table matched fid 0x%x, giving vid 0x%x\n",
fid, vid);
- if (query_current_values_with_pending_wait())
+ if (query_current_values_with_pending_wait(data))
return 1;
- if ((currvid == vid) && (currfid == fid)) {
+ if ((data->currvid == vid) && (data->currfid == fid)) {
dprintk(KERN_DEBUG PFX
"target matches current values (fid 0x%x, vid 0x%x)\n",
fid, vid);
return 0;
}
- if ((fid < HI_FID_TABLE_BOTTOM) && (currfid < HI_FID_TABLE_BOTTOM)) {
+ if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
printk(KERN_ERR PFX
- "ignoring illegal change in lo freq table-%x to %x\n",
- currfid, fid);
+ "ignoring illegal change in lo freq table-%x to 0x%x\n",
+ data->currfid, fid);
return 1;
}
- dprintk(KERN_DEBUG PFX "changing to fid 0x%x, vid 0x%x\n", fid, vid);
+ dprintk(KERN_DEBUG PFX "cpu %d, changing to fid 0x%x, vid 0x%x\n",
+ smp_processor_id(), fid, vid);
- freqs.cpu = 0; /* only true because SMP not supported */
+ freqs.cpu = data->cpu;
- freqs.old = find_freq_from_fid(currfid);
- freqs.new = find_freq_from_fid(fid);
+ freqs.old = find_khz_freq_from_fid(data->currfid);
+ freqs.new = find_khz_freq_from_fid(fid);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- res = transition_fid_vid(fid, vid);
+ down(&fidvid_sem);
+ res = transition_fid_vid(data, fid, vid);
+ up(&fidvid_sem);
- freqs.new = find_freq_from_fid(currfid);
+ freqs.new = find_khz_freq_from_fid(data->currfid);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return res;
}
/* Driver entry point to switch to the target frequency */
-static int
-powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
{
- u32 checkfid = currfid;
- u32 checkvid = currvid;
+ cpumask_t oldmask = CPU_MASK_ALL;
+ struct powernow_k8_data *data = powernow_data[pol->cpu];
+ u32 checkfid = data->currfid;
+ u32 checkvid = data->currvid;
unsigned int newstate;
+ int ret = -EIO;
+
+ /* only run on specific CPU from here on */
+ oldmask = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+ schedule();
+
+ if (smp_processor_id() != pol->cpu) {
+ printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
+ goto sched_out;
+ }
+
+ /* from this point, do not exit without restoring preempt and cpu */
+ preempt_disable();
if (pending_bit_stuck()) {
- printk(KERN_ERR PFX "drv targ fail: change pending bit set\n");
- return -EIO;
+ printk(KERN_ERR PFX "failing targ, change pending bit set\n");
+ goto err_out;
}
- dprintk(KERN_DEBUG PFX "targ: %d kHz, min %d, max %d, relation %d\n",
- targfreq, pol->min, pol->max, relation);
+ dprintk(KERN_DEBUG PFX "targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
+ pol->cpu, targfreq, pol->min, pol->max, relation);
- if (query_current_values_with_pending_wait())
- return -EIO;
+ if (query_current_values_with_pending_wait(data)) {
+ ret = -EIO;
+ goto err_out;
+ }
dprintk(KERN_DEBUG PFX "targ: curr fid 0x%x, vid 0x%x\n",
- currfid, currvid);
+ data->currfid, data->currvid);
- if ((checkvid != currvid) || (checkfid != currfid)) {
+ if ((checkvid != data->currvid) || (checkfid != data->currfid)) {
printk(KERN_ERR PFX
"error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n",
- checkfid, currfid, checkvid, currvid);
+ checkfid, data->currfid, checkvid, data->currvid);
}
- if (cpufreq_frequency_table_target(pol, powernow_table, targfreq, relation, &newstate))
- return -EINVAL;
-
- if (transition_frequency(newstate))
- {
+ if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
+ goto err_out;
+
+ powernow_k8_acpi_pst_values(data, newstate);
+
+ if (transition_frequency(data, newstate)) {
printk(KERN_ERR PFX "transition frequency failed\n");
- return 1;
+ ret = 1;
+ goto err_out;
}
- pol->cur = 1000 * find_freq_from_fid(currfid);
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+ ret = 0;
- return 0;
+err_out:
+ preempt_enable_no_resched();
+sched_out:
+ set_cpus_allowed(current, oldmask);
+ schedule();
+
+ return ret;
}
/* Driver entry point to verify the policy and range of frequencies */
-static int
-powernowk8_verify(struct cpufreq_policy *pol)
+static int powernowk8_verify(struct cpufreq_policy *pol)
{
- if (pending_bit_stuck()) {
- printk(KERN_ERR PFX "failing verify, change pending bit set\n");
- return -EIO;
- }
+ struct powernow_k8_data *data = powernow_data[pol->cpu];
- return cpufreq_frequency_table_verify(pol, powernow_table);
+ return cpufreq_frequency_table_verify(pol, data->powernow_table);
}
/* per CPU init entry point to the driver */
-static int __init
-powernowk8_cpu_init(struct cpufreq_policy *pol)
+static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
{
- if (pol->cpu != 0) {
- printk(KERN_ERR PFX "init not cpu 0\n");
+ struct powernow_k8_data *data;
+ cpumask_t oldmask = CPU_MASK_ALL;
+ int rc;
+
+ if (!check_supported_cpu(pol->cpu))
return -ENODEV;
+
+ data = kmalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
+ if (!data) {
+ printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
+ return -ENOMEM;
}
+ memset(data,0,sizeof(struct powernow_k8_data));
+
+ data->cpu = pol->cpu;
+
+ if (powernow_k8_cpu_init_acpi(data)) {
+ /*
+ * Use the PSB BIOS structure. This is only available on
+ * an UP version, and is deprecated by AMD.
+ */
+
+ if (pol->cpu != 0) {
+ printk(KERN_ERR PFX "init not cpu 0\n");
+ kfree(data);
+ return -ENODEV;
+ }
+ if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
+ printk(KERN_INFO PFX "MP systems not supported by PSB BIOS structure\n");
+ kfree(data);
+ return 0;
+ }
+ rc = find_psb_table(data);
+ if (rc) {
+ kfree(data);
+ return -ENODEV;
+ }
+ }
+
+ /* only run on specific CPU from here on */
+ oldmask = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+ schedule();
+
+ if (smp_processor_id() != pol->cpu) {
+ printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
+ goto err_out;
+ }
+
+ if (pending_bit_stuck()) {
+ printk(KERN_ERR PFX "failing init, change pending bit set\n");
+ goto err_out;
+ }
+
+ if (query_current_values_with_pending_wait(data))
+ goto err_out;
+
+ fidvid_msr_init();
+
+ /* run on any CPU again */
+ set_cpus_allowed(current, oldmask);
+ schedule();
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* Take a crude guess here.
- * That guess was in microseconds, so multply with 1000 */
- pol->cpuinfo.transition_latency = (((rvo + 8) * vstable * VST_UNITS_20US)
- + (3 * (1 << irt) * 10)) * 1000;
-
- if (query_current_values_with_pending_wait())
- return -EIO;
+ * That guess was in microseconds, so multiply with 1000 */
+ pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
+ + (3 * (1 << data->irt) * 10)) * 1000;
- pol->cur = 1000 * find_freq_from_fid(currfid);
+ pol->cur = find_khz_freq_from_fid(data->currfid);
dprintk(KERN_DEBUG PFX "policy current frequency %d kHz\n", pol->cur);
/* min/max the cpu is capable of */
- if (cpufreq_frequency_table_cpuinfo(pol, powernow_table)) {
+ if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
printk(KERN_ERR PFX "invalid powernow_table\n");
- kfree(powernow_table);
+ kfree(data->powernow_table);
+ kfree(data);
return -EINVAL;
}
- cpufreq_frequency_table_get_attr(powernow_table, pol->cpu);
+ cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
printk(KERN_INFO PFX "cpu_init done, current fid 0x%x, vid 0x%x\n",
- currfid, currvid);
+ data->currfid, data->currvid);
+
+ powernow_data[pol->cpu] = data;
return 0;
+
+err_out:
+ set_cpus_allowed(current, oldmask);
+ schedule();
+
+ kfree(data);
+ return -ENODEV;
}
static int __exit powernowk8_cpu_exit (struct cpufreq_policy *pol)
{
- if (pol->cpu != 0)
+ struct powernow_k8_data *data = powernow_data[pol->cpu];
+
+ if (!data)
return -EINVAL;
+ powernow_k8_cpu_exit_acpi(data);
+
cpufreq_frequency_table_put_attr(pol->cpu);
- if (powernow_table)
- kfree(powernow_table);
+ kfree(data->powernow_table);
+ kfree(data);
return 0;
}
@@ -845,33 +1040,31 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
-
/* driver entry point for init */
-static int __init
-powernowk8_init(void)
+static int __init powernowk8_init(void)
{
- int rc;
-
- if (check_supported_cpu() == 0)
- return -ENODEV;
+ unsigned int i, supported_cpus = 0;
- rc = find_psb_table();
- if (rc)
- return rc;
+ for (i=0; i<NR_CPUS; i++) {
+ if (!cpu_online(i))
+ continue;
+ if (check_supported_cpu(i))
+ supported_cpus++;
+ }
- if (pending_bit_stuck()) {
- printk(KERN_ERR PFX "powernowk8_init fail, change pending bit set\n");
- return -EIO;
+ if (supported_cpus == num_online_cpus()) {
+ printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron processors (" VERSION ")\n",
+ supported_cpus);
+ return cpufreq_register_driver(&cpufreq_amd64_driver);
}
- return cpufreq_register_driver(&cpufreq_amd64_driver);
+ return -ENODEV;
}
/* driver entry point for term */
-static void __exit
-powernowk8_exit(void)
+static void __exit powernowk8_exit(void)
{
- dprintk(KERN_INFO PFX "powernowk8_exit\n");
+ dprintk(KERN_INFO PFX "exit\n");
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
@@ -880,5 +1073,5 @@ MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");
-module_init(powernowk8_init);
+late_initcall(powernowk8_init);
module_exit(powernowk8_exit);
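[ Aside, not part of the patch: check_supported_cpu(), powernowk8_target() and powernowk8_cpu_init() above all follow the same shape: save current->cpus_allowed, pin the task to the target CPU with set_cpus_allowed() plus schedule(), touch that CPU's MSRs, then restore the saved mask. The sketch below shows the same save/pin/restore shape from user space, purely as an analogy, assuming Linux and glibc's sched_setaffinity(2); it is not the kernel API the driver uses. ]

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t oldmask, newmask;
	int cpu = 0;				/* target CPU, arbitrary for the example */

	/* save the current affinity mask (the driver saves current->cpus_allowed) */
	if (sched_getaffinity(0, sizeof(oldmask), &oldmask))
		return 1;

	/* pin to the target CPU (the driver uses set_cpus_allowed() + schedule()) */
	CPU_ZERO(&newmask);
	CPU_SET(cpu, &newmask);
	if (sched_setaffinity(0, sizeof(newmask), &newmask))
		return 1;

	printf("restricted to cpu %d, currently on cpu %d\n", cpu, sched_getcpu());
	/* ... the per-CPU work (MSR reads/writes in the driver) would happen here ... */

	/* restore the saved mask so the task may run anywhere again */
	sched_setaffinity(0, sizeof(oldmask), &oldmask);
	return 0;
}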
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 7f0e6866f2ee..921d032c3764 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -1,20 +1,47 @@
/*
- * (c) 2003 Advanced Micro Devices, Inc.
+ * (c) 2003, 2004 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
- * GNU general public license version 2. See "../../../COPYING" or
+ * GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*/
+struct powernow_k8_data {
+ unsigned int cpu;
+
+ u32 numps; /* number of p-states */
+ u32 batps; /* number of p-states supported on battery */
+
+ /* these values are constant when the PSB is used to determine
+ * vid/fid pairings, but are modified during the ->target() call
+ * when ACPI is used */
+ u32 rvo; /* ramp voltage offset */
+ u32 irt; /* isochronous relief time */
+ u32 vidmvs; /* usable value calculated from mvs */
+ u32 vstable; /* voltage stabilization time, units 20 us */
+ u32 plllock; /* pll lock time, units 1 us */
+
+ /* keep track of the current fid / vid */
+ u32 currvid;
+ u32 currfid;
+
+ /* the powernow_table includes all frequency and vid/fid pairings:
+ * fid are the lower 8 bits of the index, vid are the upper 8 bits.
+ * frequency is in kHz */
+ struct cpufreq_frequency_table *powernow_table;
+
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ /* the acpi table needs to be kept. it's only available if ACPI was
+ * used to determine valid frequency/vid/fid states */
+ struct acpi_processor_performance acpi_data;
+#endif
+};
+
+
/* processor's cpuid instruction support */
-#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
-#define CPUID_F1_FAM 0x00000f00 /* family mask */
-#define CPUID_F1_XFAM 0x0ff00000 /* extended family mask */
-#define CPUID_F1_MOD 0x000000f0 /* model mask */
-#define CPUID_F1_STEP 0x0000000f /* stepping level mask */
-#define CPUID_XFAM_MOD 0x0ff00ff0 /* xtended fam, fam + model */
-#define ATHLON64_XFAM_MOD 0x00000f40 /* xtended fam, fam + model */
-#define OPTERON_XFAM_MOD 0x00000f50 /* xtended fam, fam + model */
-#define ATHLON64_REV_C0 8
+#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
+#define CPUID_XFAM_MOD 0x0ff00ff0 /* extended fam, fam + model */
+#define ATHLON64_XFAM_MOD 0x00000f40 /* extended fam, fam + model */
+#define OPTERON_XFAM_MOD 0x00000f50 /* extended fam, fam + model */
#define CPUID_GET_MAX_CAPABILITIES 0x80000000
#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
#define P_STATE_TRANSITION_CAPABLE 6
@@ -47,10 +74,23 @@
#define MSR_S_HI_MAX_WORKING_VID 0x001f0000
#define MSR_S_HI_START_VID 0x00001f00
#define MSR_S_HI_CURRENT_VID 0x0000001f
+#define MSR_C_HI_STP_GNT_BENIGN 0x00000001
+
+/*
+ * There are restrictions frequencies have to follow:
+ * - only 1 entry in the low fid table ( <=1.4GHz )
+ * - lowest entry in the high fid table must be >= 2 * the entry in the
+ * low fid table
+ * - lowest entry in the high fid table must be <= 200MHz + 2 * the entry
+ * in the low fid table
+ * - the parts can only step at 200 MHz intervals, so 1.9 GHz is never valid
+ * - lowest frequency must be >= interprocessor hypertransport link speed
+ * (only applies to MP systems obviously)
+ */
/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
-#define LO_FID_TABLE_TOP 6
-#define HI_FID_TABLE_BOTTOM 8
+#define LO_FID_TABLE_TOP 6 /* fid values marking the boundary */
+#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */
#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */
#define HI_VCOFREQ_TABLE_BOTTOM 1600
@@ -58,14 +98,12 @@
#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */
#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */
-
#define LEAST_VID 0x1e /* Lowest (numerically highest) useful vid value */
#define MIN_FREQ 800 /* Min and max freqs, per spec */
#define MAX_FREQ 5000
#define INVALID_FID_MASK 0xffffffc1 /* not a valid fid if these bits are set */
-
#define INVALID_VID_MASK 0xffffffe0 /* not a valid vid if these bits are set */
#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
@@ -73,18 +111,35 @@
#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
-
#define VST_UNITS_20US 20 /* Voltage Stabalization Time is in units of 20us */
/*
-Version 1.4 of the PSB table. This table is constructed by BIOS and is
-to tell the OS's power management driver which VIDs and FIDs are
-supported by this particular processor. This information is obtained from
-the data sheets for each processor model by the system vendor and
-incorporated into the BIOS.
-If the data in the PSB / PST is wrong, then this driver will program the
-wrong values into hardware, which is very likely to lead to a crash.
-*/
+ * Most values of interest are encoded in a single field of the _PSS
+ * entries: the "control" value.
+ */
+
+#define IRT_SHIFT 30
+#define RVO_SHIFT 28
+#define PLL_L_SHIFT 20
+#define MVS_SHIFT 18
+#define VST_SHIFT 11
+#define VID_SHIFT 6
+#define IRT_MASK 3
+#define RVO_MASK 3
+#define PLL_L_MASK 0x7f
+#define MVS_MASK 3
+#define VST_MASK 0x7f
+#define VID_MASK 0x1f
+#define FID_MASK 0x3f
+
+
+/*
+ * Version 1.4 of the PSB table. This table is constructed by BIOS and is
+ * to tell the OS's power management driver which VIDs and FIDs are
+ * supported by this particular processor.
+ * If the data in the PSB / PST is wrong, then this driver will program the
+ * wrong values into hardware, which is very likely to lead to a crash.
+ */
#define PSB_ID_STRING "AMDK7PNOW!"
#define PSB_ID_STRING_LEN 10
@@ -117,6 +172,8 @@ struct pst_s {
#define dprintk(msg...) do { } while(0)
#endif
-static inline int core_voltage_pre_transition(u32 reqvid);
-static inline int core_voltage_post_transition(u32 reqvid);
-static inline int core_frequency_transition(u32 reqfid);
+static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
+
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 7f8844cffcf9..2adb46c0ac78 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/config.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -46,7 +47,9 @@ struct cpu_model
};
/* Operating points for current CPU */
-static const struct cpu_model *centrino_model;
+static struct cpu_model *centrino_model;
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
/* Computes the correct form for IA32_PERF_CTL MSR for a particular
frequency/voltage operating point; frequency in MHz, volts in mV.
@@ -172,7 +175,7 @@ static struct cpufreq_frequency_table op_1700[] =
/* CPU models, their operating frequency range, and freq/voltage
operating points */
-static const struct cpu_model models[] =
+static struct cpu_model models[] =
{
_CPU( 900, " 900"),
CPU(1000),
@@ -187,6 +190,48 @@ static const struct cpu_model models[] =
};
#undef CPU
+static int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+ struct cpu_model *model;
+
+ if (!cpu_has(cpu, X86_FEATURE_EST))
+ return -ENODEV;
+
+ /* Only Intel Pentium M stepping 5 for now - add new CPUs as
+ they appear after making sure they use PERF_CTL in the same
+ way. */
+ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+ cpu->x86 != 6 ||
+ cpu->x86_model != 9 ||
+ cpu->x86_mask != 5) {
+ printk(KERN_INFO PFX "found unsupported CPU with Enhanced SpeedStep: "
+ "send /proc/cpuinfo to " MAINTAINER "\n");
+ return -ENODEV;
+ }
+
+ for(model = models; model->model_name != NULL; model++)
+ if (strcmp(cpu->x86_model_id, model->model_name) == 0)
+ break;
+ if (model->model_name == NULL) {
+ printk(KERN_INFO PFX "no support for CPU model \"%s\": "
+ "send /proc/cpuinfo to " MAINTAINER "\n",
+ cpu->x86_model_id);
+ return -ENOENT;
+ }
+
+ centrino_model = model;
+
+ printk(KERN_INFO PFX "found \"%s\": max frequency: %dkHz\n",
+ model->model_name, model->max_freq);
+
+ return 0;
+}
+
+#else
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
+#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
+
/* Extract clock in kHz from PERF_CTL value */
static unsigned extract_clock(unsigned msr)
{
@@ -203,13 +248,148 @@ static unsigned get_cur_freq(void)
return extract_clock(l);
}
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+static struct acpi_processor_performance p;
+
+#define ACPI_PDC_CAPABILITY_ENHANCED_SPEEDSTEP 0x1
+
+/*
+ * centrino_cpu_init_acpi - register with ACPI P-States library
+ *
+ * Register with the ACPI P-States library (part of drivers/acpi/processor.c)
+ * in order to determine correct frequency and voltage pairings by reading
+ * the _PSS of the ACPI DSDT or SSDT tables.
+ */
+static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
+{
+ union acpi_object arg0 = {ACPI_TYPE_BUFFER};
+ u32 arg0_buf[3];
+ struct acpi_object_list arg_list = {1, &arg0};
+ unsigned long cur_freq;
+ int result = 0, i;
+
+ /* _PDC settings */
+ arg0.buffer.length = 12;
+ arg0.buffer.pointer = (u8 *) arg0_buf;
+ arg0_buf[0] = ACPI_PDC_REVISION_ID;
+ arg0_buf[1] = 1;
+ arg0_buf[2] = ACPI_PDC_CAPABILITY_ENHANCED_SPEEDSTEP;
+
+ p.pdc = &arg_list;
+
+ /* register with ACPI core */
+ if (acpi_processor_register_performance(&p, 0))
+ return -EIO;
+
+ /* verify the acpi_data */
+ if (p.state_count <= 1) {
+ printk(KERN_DEBUG "No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ printk(KERN_DEBUG "Invalid control/status registers\n");
+ result = -EIO;
+ goto err_unreg;
+ }
+
+ for (i=0; i<p.state_count; i++) {
+ if (p.states[i].control != p.states[i].status) {
+ printk(KERN_DEBUG "Different control and status values\n");
+ result = -EINVAL;
+ goto err_unreg;
+ }
+
+ if (!p.states[i].core_frequency) {
+ printk(KERN_DEBUG "Zero core frequency\n");
+ result = -EINVAL;
+ goto err_unreg;
+ }
+
+ if (extract_clock(p.states[i].control) !=
+ (p.states[i].core_frequency * 1000)) {
+ printk(KERN_DEBUG "Invalid encoded frequency\n");
+ result = -EINVAL;
+ goto err_unreg;
+ }
+ }
+
+ centrino_model = kmalloc(sizeof(struct cpu_model), GFP_KERNEL);
+ if (!centrino_model) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+ memset(centrino_model, 0, sizeof(struct cpu_model));
+
+ centrino_model->model_name=NULL;
+ centrino_model->max_freq = p.states[0].core_frequency * 1000;
+ centrino_model->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
+ (p.state_count + 1), GFP_KERNEL);
+ if (!centrino_model->op_points) {
+ result = -ENOMEM;
+ goto err_kfree;
+ }
+
+ cur_freq = get_cur_freq();
+
+ for (i=0; i<p.state_count; i++) {
+ centrino_model->op_points[i].index = p.states[i].control;
+ centrino_model->op_points[i].frequency = p.states[i].core_frequency * 1000;
+ if (cur_freq == centrino_model->op_points[i].frequency)
+ p.state = i;
+ }
+ centrino_model->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
+
+ return 0;
+
+ err_kfree:
+ kfree(centrino_model);
+ err_unreg:
+ acpi_processor_unregister_performance(&p, 0);
+ return (result);
+}
+#else
+static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
+#endif
+
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
unsigned freq;
+ unsigned l, h;
+ int ret;
- if (policy->cpu != 0 || centrino_model == NULL)
+ if (policy->cpu != 0)
return -ENODEV;
+ if (centrino_cpu_init_acpi(policy)) {
+ if (centrino_cpu_init_table(policy)) {
+ return -ENODEV;
+ }
+ }
+
+ /* Check to see if Enhanced SpeedStep is enabled, and try to
+ enable it if not. */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ if (!(l & (1<<16))) {
+ l |= (1<<16);
+ wrmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ /* check to see if it stuck */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ if (!(l & (1<<16))) {
+ printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
+ return -ENODEV;
+ }
+ }
+
freq = get_cur_freq();
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@@ -219,12 +399,38 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
dprintk(KERN_INFO PFX "centrino_cpu_init: policy=%d cur=%dkHz\n",
policy->policy, policy->cur);
- return cpufreq_frequency_table_cpuinfo(policy, centrino_model->op_points);
+ ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model->op_points);
+ if (ret)
+ return (ret);
+
+ cpufreq_frequency_table_get_attr(centrino_model->op_points, policy->cpu);
+
+ return 0;
+}
+
+static int centrino_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (!centrino_model)
+ return -ENODEV;
+
+ cpufreq_frequency_table_put_attr(policy->cpu);
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ if (!centrino_model->model_name) {
+ acpi_processor_unregister_performance(&p, 0);
+ kfree(centrino_model->op_points);
+ kfree(centrino_model);
+ }
+#endif
+
+ centrino_model = NULL;
+
+ return 0;
}
/**
* centrino_verify - verifies a new CPUFreq policy
- * @freq: new policy
+ * @policy: new policy
*
 * Limit must be within this model's frequency range, with at least one
 * border included.
@@ -237,6 +443,8 @@ static int centrino_verify (struct cpufreq_policy *policy)
/**
 * centrino_target - set a new CPUFreq policy
* @policy: new policy
+ * @target_freq: the target frequency
+ * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* Sets a new CPUFreq policy.
*/
@@ -295,12 +503,19 @@ static int centrino_target (struct cpufreq_policy *policy,
return 0;
}
+static struct freq_attr* centrino_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
static struct cpufreq_driver centrino_driver = {
.name = "centrino", /* should be speedstep-centrino,
but there's a 16 char limit */
.init = centrino_cpu_init,
+ .exit = centrino_cpu_exit,
.verify = centrino_verify,
.target = centrino_target,
+ .attr = centrino_attr,
.owner = THIS_MODULE,
};
@@ -322,55 +537,10 @@ static struct cpufreq_driver centrino_driver = {
static int __init centrino_init(void)
{
struct cpuinfo_x86 *cpu = cpu_data;
- const struct cpu_model *model;
- unsigned l, h;
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
- /* Only Intel Pentium M stepping 5 for now - add new CPUs as
- they appear after making sure they use PERF_CTL in the same
- way. */
- if (cpu->x86_vendor != X86_VENDOR_INTEL ||
- cpu->x86 != 6 ||
- cpu->x86_model != 9 ||
- cpu->x86_mask != 5) {
- printk(KERN_INFO PFX "found unsupported CPU with Enhanced SpeedStep: "
- "send /proc/cpuinfo to " MAINTAINER "\n");
- return -ENODEV;
- }
-
- /* Check to see if Enhanced SpeedStep is enabled, and try to
- enable it if not. */
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-
- if (!(l & (1<<16))) {
- l |= (1<<16);
- wrmsr(MSR_IA32_MISC_ENABLE, l, h);
-
- /* check to see if it stuck */
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
- if (!(l & (1<<16))) {
- printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
- return -ENODEV;
- }
- }
-
- for(model = models; model->model_name != NULL; model++)
- if (strcmp(cpu->x86_model_id, model->model_name) == 0)
- break;
- if (model->model_name == NULL) {
- printk(KERN_INFO PFX "no support for CPU model \"%s\": "
- "send /proc/cpuinfo to " MAINTAINER "\n",
- cpu->x86_model_id);
- return -ENOENT;
- }
-
- centrino_model = model;
-
- printk(KERN_INFO PFX "found \"%s\": max frequency: %dkHz\n",
- model->model_name, model->max_freq);
-
return cpufreq_register_driver(&centrino_driver);
}
@@ -383,5 +553,5 @@ MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
MODULE_LICENSE ("GPL");
-module_init(centrino_init);
+late_initcall(centrino_init);
module_exit(centrino_exit);
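Switching from module_init() to late_initcall() mainly affects the built-in case: registration is presumably delayed so that earlier initcalls, in particular the ACPI processor driver whose _PSS data centrino_cpu_init_acpi() consumes, have already run; when built as a module, both macros behave the same at load time.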
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
index 4e5a253176d0..5ba993af5546 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
@@ -67,6 +67,7 @@ static struct cpufreq_frequency_table speedstep_freqs[] = {
/**
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ * @notify: whether to call cpufreq_notify_transition for CPU speed changes
*
* Tries to change the SpeedStep state.
*/
@@ -239,8 +240,10 @@ static unsigned int speedstep_detect_chipset (void)
/**
- * speedstep_setpolicy - set a new CPUFreq policy
+ * speedstep_target - set a new CPUFreq policy
* @policy: new policy
+ * @target_freq: the target frequency
+ * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* Sets a new CPUFreq policy.
*/
@@ -261,7 +264,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
/**
* speedstep_verify - verifies a new CPUFreq policy
- * @freq: new policy
+ * @policy: new policy
*
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 588106bbb110..3305a84b6f7c 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
@@ -30,6 +31,12 @@
#define dprintk(msg...) do { } while(0)
#endif
+#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
+static int relaxed_check = 0;
+#else
+#define relaxed_check 0
+#endif
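Note that when CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set, relaxed_check is a preprocessor constant 0, so the extra test added to the Coppermine capability check further down folds away at compile time and the module parameter is not exported.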
+
/*********************************************************************
* GET PROCESSOR CORE SPEED IN KHZ *
*********************************************************************/
@@ -120,7 +127,7 @@ static unsigned int pentiumM_get_frequency(void)
msr_tmp = (msr_lo >> 22) & 0x1f;
dprintk(KERN_DEBUG "speedstep-lib: bits 22-26 are 0x%x\n", msr_tmp);
- return (msr_tmp * 100 * 10000);
+ return (msr_tmp * 100 * 1000);
}
@@ -210,7 +217,7 @@ unsigned int speedstep_detect_processor (void)
ebx = cpuid_ebx(0x00000001);
ebx &= 0x000000FF;
- dprintk(KERN_INFO "ebx value is %x, x86_mask is %x\n", ebx, c->86_mask);
+ dprintk(KERN_INFO "ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
switch (c->x86_mask) {
case 4:
@@ -265,6 +272,7 @@ unsigned int speedstep_detect_processor (void)
ebx = cpuid_ebx(0x00000001);
ebx &= 0x000000FF;
+
if (ebx != 0x06)
return 0;
@@ -292,7 +300,7 @@ unsigned int speedstep_detect_processor (void)
*/
rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi);
- if ((msr_hi & (1<<18)) && (msr_hi & (3<<24))) {
+ if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
if (c->x86_mask == 0x01)
return SPEEDSTEP_PROCESSOR_PIII_C_EARLY;
else
@@ -362,6 +370,11 @@ unsigned int speedstep_get_freqs(unsigned int processor,
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs);
+#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
+module_param(relaxed_check, int, 0444);
+MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability.");
+#endif
+
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
MODULE_LICENSE ("GPL");
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index 50e41112b3e6..a84273864ff3 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -90,10 +90,12 @@ static int speedstep_smi_ownership (void)
/**
* speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
+ * @low: the low frequency value is placed here
+ * @high: the high frequency value is placed here
+ *
* Only available on later SpeedStep-enabled systems, returns false results or
* even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
* shows that the latter occurs if !(ist_info.event & 0xFFFF).
- *
*/
static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
{
@@ -141,6 +143,7 @@ static int speedstep_get_state (void)
/**
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ * @notify: whether to call cpufreq_notify_transition
*
*/
static void speedstep_set_state (unsigned int state, unsigned int notify)
@@ -224,7 +227,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
/**
* speedstep_verify - verifies a new CPUFreq policy
- * @freq: new policy
+ * @policy: new policy
*
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62af584a1d15..d5a18851ae73 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -504,19 +504,16 @@ static int cpufreq_resume(struct sys_device * sysdev)
if (cpufreq_driver->setpolicy)
ret = cpufreq_driver->setpolicy(cpu_policy);
else
- /* CPUFREQ_RELATION_H or CPUFREQ_RELATION_L have the same effect here, as cpu_policy->cur is known
- * to be a valid and exact target frequency
- */
+ /* CPUFREQ_RELATION_H or CPUFREQ_RELATION_L have the same effect here, as cpu_policy->cur is known
+ * to be a valid and exact target frequency
+ */
ret = cpufreq_driver->target(cpu_policy, cpu_policy->cur, CPUFREQ_RELATION_H);
- if (ret) {
+ if (ret)
printk(KERN_ERR "cpufreq: resume failed in ->setpolicy/target step on CPU %u\n", cpu_policy->cpu);
- goto out;
- }
- out:
+out:
cpufreq_cpu_put(cpu_policy);
-
return ret;
}
@@ -963,6 +960,7 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
unsigned long flags;
+ int ret;
if (!driver_data || !driver_data->verify || !driver_data->init ||
((!driver_data->setpolicy) && (!driver_data->target)))
@@ -976,7 +974,28 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
+ ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
+
+ if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ int i;
+ ret = -ENODEV;
+
+ /* check for at least one working CPU */
+ for (i=0; i<NR_CPUS; i++)
+ if (cpufreq_cpu_data[i])
+ ret = 0;
+
+ /* if all ->init() calls failed, unregister */
+ if (ret) {
+ sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ }
+ }
+
+ return (ret);
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 3a8e279897ed..f48a064221b4 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -175,6 +175,7 @@ struct freq_attr;
struct cpufreq_driver {
struct module *owner;
char name[CPUFREQ_NAME_LEN];
+ u8 flags;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
@@ -192,6 +193,11 @@ struct cpufreq_driver {
struct freq_attr **attr;
};
+/* flags */
+
+#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
+ all ->init() calls failed */
+
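A driver opts out of the new auto-unregister behaviour by setting this flag in its cpufreq_driver. A minimal sketch (illustrative only; the example_* callbacks are hypothetical placeholders):

static int example_cpu_init(struct cpufreq_policy *policy);
static int example_verify(struct cpufreq_policy *policy);
static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation);

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.flags	= CPUFREQ_STICKY,	/* stay registered even if every
					   ->init() call fails */
	.init	= example_cpu_init,
	.verify	= example_verify,
	.target	= example_target,
	.owner	= THIS_MODULE,
};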
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);