author     Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 17:50:30 -0800
committer  Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 17:50:30 -0800
commit     43e9282d088b07f03fc16a5325ff74cf49fb2990 (patch)
tree       bf54a000466ab5e020f4cfd7b6836ab64e0d23a3 /include
parent     7a2deb32924142696b8174cdf9b38cd72a11fc96 (diff)
v2.4.0 -> v2.4.0.1
- Don't drop a megabyte off the old-style memory size detection
- remember to UnlockPage() in ramfs_writepage()
- 3c59x driver update from Andrew Morton
- egcs-1.1.2 miscompiles depca: workaround by Andrew Morton
- dmfe.c module init fix: Andrew Morton
- dynamic XMM support. Andrea Arcangeli.
- Locked SHM segment deadlock fix
- fork() page table copy race fix
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/bugs.h       | 42
-rw-r--r--  include/asm-i386/i387.h       |  6
-rw-r--r--  include/asm-i386/pgtable.h    |  6
-rw-r--r--  include/asm-i386/processor.h  |  2
-rw-r--r--  include/asm-i386/system.h     |  7
-rw-r--r--  include/asm-i386/xor.h        |  4
-rw-r--r--  include/linux/sched.h         |  2
7 files changed, 31 insertions, 38 deletions
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 4e77e5d8a02f..0a12e306a91c 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -66,6 +66,8 @@ static double __initdata y = 3145727.0;
*/
static void __init check_fpu(void)
{
+ extern int disable_x86_fxsr;
+
if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -76,26 +78,26 @@ static void __init check_fpu(void)
}
/* Enable FXSR and company _before_ testing for FP problems. */
-#if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
/*
* Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
*/
- if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
- panic("Kernel compiled for PII/PIII+ with FXSR, data not 16-byte aligned!");
-
- if (cpu_has_fxsr) {
- printk(KERN_INFO "Enabling fast FPU save and restore... ");
- set_in_cr4(X86_CR4_OSFXSR);
- printk("done.\n");
+ if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
+ extern void __buggy_fxsr_alignment(void);
+ __buggy_fxsr_alignment();
}
-#endif
-#ifdef CONFIG_X86_XMM
- if (cpu_has_xmm) {
- printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
- set_in_cr4(X86_CR4_OSXMMEXCPT);
- printk("done.\n");
- }
-#endif
+ if (!disable_x86_fxsr) {
+ if (cpu_has_fxsr) {
+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
+ set_in_cr4(X86_CR4_OSFXSR);
+ printk("done.\n");
+ }
+ if (cpu_has_xmm) {
+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
+ set_in_cr4(X86_CR4_OSXMMEXCPT);
+ printk("done.\n");
+ }
+ } else
+ printk(KERN_INFO "Disabling fast FPU save and restore.\n");
/* Test for the divl bug.. */
__asm__("fninit\n\t"
@@ -203,14 +205,6 @@ static void __init check_config(void)
&& (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
#endif
-
-/*
- * If we configured ourselves for FXSR, we'd better have it.
- */
-#ifdef CONFIG_X86_FXSR
- if (!cpu_has_fxsr)
- panic("Kernel compiled for PII/PIII+, requires FXSR feature!");
-#endif
}
static void __init check_bugs(void)
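
The __buggy_fxsr_alignment() call replacing the old panic() is the classic link-time assertion: the function is declared but never defined, and because offsetof() yields a compile-time constant, the compiler folds the branch away entirely when the layout is correct. An undefined reference only survives into the final link when task_struct is actually misaligned. A minimal standalone sketch of the same idiom (the struct names here are illustrative, not the kernel's):

/* Link-time assertion sketch: the checker function is declared but
 * deliberately never defined, so a call to it survives into the final
 * link (and fails it) only if the compiler cannot prove the condition
 * false at compile time. */
#include <stddef.h>

struct fxsave_area { char data[512]; };
struct task_demo {                   /* illustrative layout only */
	char pad[16];                /* offset 16: aligned, links fine */
	struct fxsave_area fxsave;
};

extern void __buggy_fxsr_alignment(void);   /* no definition anywhere */

void check_alignment(void)
{
	/* offsetof() is an integer constant expression, so this branch
	 * is decided at compile time; change pad[] to 20 bytes and the
	 * program stops linking. */
	if (offsetof(struct task_demo, fxsave) & 15)
		__buggy_fxsr_alignment();
}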
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 04ba635e500d..95bf1608c89a 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -50,10 +50,8 @@ extern void set_fpu_twd( struct task_struct *tsk, unsigned short twd );
extern void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr );
#define load_mxcsr( val ) do { \
- if ( cpu_has_xmm ) { \
- unsigned long __mxcsr = ((unsigned long)(val) & 0xffff); \
- asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
- } \
+ unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
+ asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
} while (0)
/*
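
The new mask is the interesting part of this hunk: 0xffbf clears bit 6 of MXCSR, the DAZ (denormals-are-zero) control, which is a reserved bit on PIII-class SSE processors, and ldmxcsr raises a general-protection fault if any reserved bit is set. The cpu_has_xmm guard goes away, presumably because callers are now gated on the new runtime tests instead. A small sketch of the arithmetic (the MXCSR_DAZ name is mine, not the kernel's):

/* Sketch of the 0xffbf mask: keep the low 16 bits but force DAZ
 * (bit 6) off, since ldmxcsr #GPs on reserved bits and DAZ is
 * reserved on early SSE parts. */
#include <stdio.h>

#define MXCSR_DAZ (1u << 6)   /* denormals-are-zero; hypothetical name */

static unsigned long sanitize_mxcsr(unsigned long val)
{
	return (val & 0xffffu) & ~(unsigned long)MXCSR_DAZ;  /* == val & 0xffbf */
}

int main(void)
{
	/* 0x1fc0 is the power-on default 0x1f80 with DAZ set on top */
	printf("%#lx\n", sanitize_mxcsr(0x1fc0));   /* prints 0x1f80 */
	return 0;
}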
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index bf32a74498ec..a25f3bcfdf70 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -140,7 +140,11 @@ extern unsigned long empty_zero_page[1024];
#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END (FIXADDR_START)
+#if CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
/*
* The 4MB page is guessing.. Detailed in the infamous "Chapter H"
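
The new VMALLOC_END leaves a two-page unmapped gap below whatever sits above the vmalloc area (the persistent-kmap window with CONFIG_HIGHMEM, the fixmap otherwise), so a vmalloc overrun faults in the guard hole instead of silently scribbling on those mappings. A hedged sketch of the layout arithmetic; the PKMAP_BASE and FIXADDR_START values below are assumed typical i386 values, not taken from this patch:

/* Layout sketch for the two VMALLOC_END cases.  Both base addresses
 * are assumptions for illustration, not values from this commit. */
#include <stdio.h>

#define PAGE_SIZE     0x1000UL
#define PKMAP_BASE    0xfe000000UL    /* assumed kmap window base */
#define FIXADDR_START 0xffffe000UL    /* assumed fixmap start */

int main(void)
{
	unsigned long end_hi = PKMAP_BASE - 2 * PAGE_SIZE;
	unsigned long end_lo = FIXADDR_START - 2 * PAGE_SIZE;

	/* the two unmapped guard pages make an off-the-end vmalloc
	 * access fault instead of hitting kmap/fixmap entries */
	printf("CONFIG_HIGHMEM: VMALLOC_END = %#lx\n", end_hi);
	printf("otherwise:      VMALLOC_END = %#lx\n", end_lo);
	return 0;
}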
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 21296d96140a..4db1c7f63bd9 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -88,6 +88,8 @@ extern struct cpuinfo_x86 cpu_data[];
#define cpu_has_fxsr (test_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability))
#define cpu_has_xmm (test_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability))
#define cpu_has_fpu (test_bit(X86_FEATURE_FPU, boot_cpu_data.x86_capability))
+#define HAVE_FXSR (mmu_cr4_features & X86_CR4_OSFXSR)
+#define HAVE_XMM (mmu_cr4_features & X86_CR4_OSXMMEXCPT)
extern char ignore_irq13;
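
The distinction being introduced here: cpu_has_fxsr/cpu_has_xmm report raw CPUID feature bits, while HAVE_FXSR/HAVE_XMM test the CR4 bits that check_fpu() actually set, so they come out false when the new disable_x86_fxsr switch kept the features off. A small sketch of why the CR4 test is the right one for consumers like xor.h (the globals are stand-ins; the CR4 bit values are the architectural ones):

/* CPUID says what the CPU supports; CR4 records what the kernel
 * enabled.  SSE code must check the latter, or it runs with FPU
 * state the context switcher is not saving. */
#include <stdbool.h>

#define X86_CR4_OSFXSR     0x0200   /* bit 9: fxsave/fxrstor in use */
#define X86_CR4_OSXMMEXCPT 0x0400   /* bit 10: unmasked SSE exceptions */

unsigned long mmu_cr4_features;     /* stand-in, set at boot */
bool cpu_has_xmm_bit;               /* stand-in for the CPUID flag */

bool can_run_sse(void)
{
	/* with disable_x86_fxsr the CPUID bit may be set while the
	 * CR4 bit is not: SSE state would not survive a task switch */
	return cpu_has_xmm_bit && (mmu_cr4_features & X86_CR4_OSXMMEXCPT);
}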
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index d3b01ab8bb8e..52e24682e154 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -267,15 +267,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* I expect future Intel CPU's to have a weaker ordering,
* but I'd also expect them to finally get their act together
* and add some real memory barriers if so.
- *
- * The Pentium III does add a real memory barrier with the
- * sfence instruction, so we use that where appropriate.
*/
-#ifndef CONFIG_X86_XMM
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
-#define mb() __asm__ __volatile__ ("sfence": : :"memory")
-#endif
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
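
Reverting mb() from sfence back to the locked add is a correctness fix, not a cleanup: sfence orders stores against stores only, while mb() must be a full barrier that also orders loads, and a locked read-modify-write on the stack is such a barrier on x86. A sketch of the difference (the macro and variable names are mine):

/* A locked RMW is a full barrier on x86; sfence serializes only
 * stores, so it cannot implement mb(). */
#define full_mb()  __asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")
#define store_mb() __asm__ __volatile__("sfence" ::: "memory")

volatile int flag;
int data;

void producer(void)
{
	data = 42;
	full_mb();      /* store->store: store_mb() would also suffice */
	flag = 1;
}

int consumer(void)
{
	while (!flag)
		;       /* wait for producer */
	full_mb();      /* load->load ordering is needed here, and
			   sfence gives none: only a full barrier works */
	return data;
}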
diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h
index 6a2230b8fb30..7e38a6582130 100644
--- a/include/asm-i386/xor.h
+++ b/include/asm-i386/xor.h
@@ -843,7 +843,7 @@ static struct xor_block_template xor_block_pIII_sse = {
do { \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_32regs); \
- if (cpu_has_xmm) \
+ if (HAVE_XMM) \
xor_speed(&xor_block_pIII_sse); \
if (md_cpu_has_mmx()) { \
xor_speed(&xor_block_pII_mmx); \
@@ -855,4 +855,4 @@ static struct xor_block_template xor_block_pIII_sse = {
We may also be able to load into the L1 only depending on how the cpu
deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) \
- (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+ (HAVE_XMM ? &xor_block_pIII_sse : FASTEST)
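
With the macros from processor.h in place, the RAID xor code now benchmarks the SSE routine only when the kernel actually enabled SSE state handling, and still prefers it outright over the timing winner. A sketch of that selection pattern; the struct and field names are simplified stand-ins for the kernel's struct xor_block_template:

/* Benchmark the safe candidates, keep the fastest, then let the SSE
 * template override the result when it is usable, mirroring
 * XOR_TRY_TEMPLATES / XOR_SELECT_TEMPLATE above. */
struct xor_tmpl {
	const char *name;
	int speed;              /* filled in by the xor_speed() trials */
	struct xor_tmpl *next;
};

struct xor_tmpl *select_template(struct xor_tmpl *tried,
				 struct xor_tmpl *sse, int have_xmm)
{
	struct xor_tmpl *best = tried;

	for (struct xor_tmpl *t = tried; t; t = t->next)
		if (t->speed > best->speed)
			best = t;

	/* the SSE routine wins even when raw loop timing says
	 * otherwise, per the cache-behaviour comment above */
	return (have_xmm && sse) ? sse : best;
}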
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a119996699d3..f946b153276e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -226,6 +226,8 @@ struct mm_struct {
mm_context_t context;
};
+extern int mmlist_nr;
+
#define INIT_MM(name) \
{ \
mmap: &init_mmap, \