author    Andi Kleen <ak@muc.de>                          2002-12-27 19:54:15 -0800
committer Linus Torvalds <torvalds@home.transmeta.com>   2002-12-27 19:54:15 -0800
commit    1e1144fdf0e2d624e641e8017737f230480289c1 (patch)
tree      4f12ec4832ec20ef174ac7a1d2f63c0583eea20d /include
parent    f4d6e3083e56dfd8eb8bfbe81597e92b5969e020 (diff)
[PATCH] x86-64 update
- Optimize __copy*user a bit.
- Merge with 2.5.53/i386.
- Fix broken 32bit signal handling.
- Implement AT_SYSINFO and a vsyscall page for 32bit programs (a user-space lookup sketch follows below).
- Fix 32bit SYSCALL entry point to handle 6 arguments and restart correctly.
- Add oprofile support (Vojtech Pavlik, with changes by me). This is shared code with i386.
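For reference, a 32bit program can discover the new vsyscall entry point through the ELF auxiliary vector, which is where AT_SYSINFO is delivered. A minimal sketch (compile with -m32; it predates getauxval(), so it walks past the environment block by hand):

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv, char **envp)
{
	/* The auxiliary vector sits just past the NULL that ends envp. */
	while (*envp)
		envp++;
	for (Elf32_auxv_t *av = (Elf32_auxv_t *)(envp + 1);
	     av->a_type != AT_NULL; av++) {
		if (av->a_type == AT_SYSINFO) {
			printf("vsyscall entry at %#lx\n",
			       (unsigned long)av->a_un.a_val);
			break;
		}
	}
	return 0;
}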
Diffstat (limited to 'include')

 -rw-r--r--  include/asm-x86_64/bitops.h      |   2
 -rw-r--r--  include/asm-x86_64/desc.h        |   2
 -rw-r--r--  include/asm-x86_64/dma-mapping.h |   1
 -rw-r--r--  include/asm-x86_64/hw_irq.h      |  15
 -rw-r--r--  include/asm-x86_64/ia32.h        |   2
 -rw-r--r--  include/asm-x86_64/io_apic.h     |   1
 -rw-r--r--  include/asm-x86_64/processor.h   |   2
 -rw-r--r--  include/asm-x86_64/proto.h       |   2
 -rw-r--r--  include/asm-x86_64/spinlock.h    |   2
 -rw-r--r--  include/asm-x86_64/system.h      |   7
 -rw-r--r--  include/asm-x86_64/thread_info.h |   2
 -rw-r--r--  include/asm-x86_64/uaccess.h     | 106

 12 files changed, 112 insertions(+), 32 deletions(-)
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index 72e575a68a33..c98d3c64bba0 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -244,7 +244,7 @@ static __inline__ int constant_test_bit(int nr, const volatile void * addr)
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
-static __inline__ int variable_test_bit(int nr, volatile void * addr)
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
int oldbit;
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index f5775b7ddde5..724777a946f6 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -131,7 +131,7 @@ static inline void set_tss_desc(unsigned cpu, void *addr)
static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
{
- set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS], (unsigned long)addr,
+ set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (unsigned long)addr,
DESC_LDT, size);
}
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
deleted file mode 100644
index e7e16901f686..000000000000
--- a/include/asm-x86_64/dma-mapping.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/dma-mapping.h>
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 9d7d039d5222..bb096fb7ab06 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -168,9 +168,24 @@ static inline void x86_do_profile (struct pt_regs *regs)
struct notifier_block;
+#ifdef CONFIG_PROFILING
+
int register_profile_notifier(struct notifier_block * nb);
int unregister_profile_notifier(struct notifier_block * nb);
+#else
+
+static inline int register_profile_notifier(struct notifier_block * nb)
+{
+ return -ENOSYS;
+}
+
+static inline int unregister_profile_notifier(struct notifier_block * nb)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_PROFILING */
#ifdef CONFIG_SMP /*more of this file should probably be ifdefed SMP */
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
if (IO_APIC_IRQ(i))
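The stubs above keep oprofile-style users building when CONFIG_PROFILING is off. A hedged sketch of a caller (the hook and init names are invented; it assumes, as the x86_do_profile context suggests, that the notifier is invoked with the interrupted pt_regs as its data argument):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/hw_irq.h>

static int my_profile_hook(struct notifier_block *self,
			   unsigned long val, void *data)
{
	struct pt_regs *regs = data;	/* interrupted register state */

	(void)regs;	/* a real hook would log regs->rip into a sample buffer */
	return 0;
}

static struct notifier_block my_profile_nb = {
	.notifier_call = my_profile_hook,
};

static int __init my_profiler_init(void)
{
	/* Returns -ENOSYS when CONFIG_PROFILING is off, via the stub above. */
	return register_profile_notifier(&my_profile_nb);
}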
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index f257195d1740..23d4d607a398 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -202,7 +202,7 @@ struct iovec32 {
int iov_len;
};
-#define IA32_PAGE_OFFSET 0xffff0000
+#define IA32_PAGE_OFFSET 0xffffe000
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#endif /* !CONFIG_IA32_SUPPORT */
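The new value does not look arbitrary: lowering the 32bit stack top to 0xffffe000 frees the last two 4 KB pages below 4 GB, which is consistent with making room for the vsyscall page that map_syscall32() (declared in proto.h below) installs. Illustrative arithmetic only:

/* 0x100000000 - 0xffffe000 == 0x2000: two 4 KB pages above the stack top */
_Static_assert(0x100000000ULL - 0xffffe000UL == 2 * 4096,
	       "top 8 KB below 4 GB reserved above the 32bit stack");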
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index 2975afd3f3e5..d5e3a3a81282 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -3,6 +3,7 @@
#include <linux/config.h>
#include <asm/types.h>
+#include <asm/mpspec.h>
/*
* Intel IO-APIC support for SMP and UP systems.
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 075a49ff9ba4..9c84b14139f2 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -256,7 +256,7 @@ static inline void clear_in_cr4 (unsigned long mask)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_32 0x40000000
+#define TASK_UNMAPPED_32 0xa0000000
#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
#define TASK_UNMAPPED_BASE \
(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index a9f8d7e16ee6..9ca683849c42 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -42,6 +42,8 @@ extern void exception_table_check(void);
extern int acpi_boot_init(char *);
+extern int map_syscall32(struct mm_struct *mm, unsigned long address);
+
#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
#define round_down(x,y) ((x) & ~((y)-1))
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 52047a7e928b..b502884f490a 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -48,8 +48,8 @@ typedef struct {
"js 2f\n" \
LOCK_SECTION_START("") \
"2:\t" \
- "cmpb $0,%0\n\t" \
"rep;nop\n\t" \
+ "cmpb $0,%0\n\t" \
"jle 2b\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END
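The swap puts 'rep; nop' (the PAUSE hint) ahead of the re-read of the lock byte, matching the recommended spin-wait idiom: pause first, then poll. The waiting half of the loop is shaped roughly like this user-space sketch (not the kernel macro; the lock convention, 1 = free and <= 0 = held, follows the surrounding code, and the slow path is only entered once the lock has been observed held):

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");	/* PAUSE on P4, plain NOP elsewhere */
}

static void spin_until_free(volatile signed char *lock)
{
	do
		cpu_relax();		/* pause, then poll, as in the reordered asm */
	while (*lock <= 0);		/* 'cmpb $0,%0; jle 2b' above */
}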
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index f1281d48dc93..268865b34785 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -254,7 +254,12 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
*/
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence":::"memory")
+
+/* could use SFENCE here, but it would be only needed for unordered SSE
+ store instructions and we always do an explicit sfence with them currently.
+ the ordering of normal stores is serialized enough. Just make it a compile
+ barrier. */
+#define wmb() asm volatile("" ::: "memory")
#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
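A sketch of what the weaker wmb() still guarantees: because x86-64 does not reorder normal stores against other stores, a compiler barrier alone keeps a publish store behind its payload store. Self-contained illustration, with the ring structure invented for the example:

#define wmb() asm volatile("" ::: "memory")

struct ring { int data[64]; volatile unsigned head; };

static void produce(struct ring *r, int v)
{
	r->data[r->head & 63] = v;	/* payload store */
	wmb();				/* stop the compiler from reordering;
					   the CPU already keeps stores ordered */
	r->head++;			/* publish: consumer sees payload first */
}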
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 69cac4da715d..9f034cf938d1 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -100,6 +100,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_IA32 18 /* 32bit process */
@@ -107,6 +108,7 @@ static inline struct thread_info *stack_thread_info(void)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 3cb62ac5fb08..065eb4c8dafb 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -103,7 +103,8 @@ extern void __get_user_8(void);
/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
-({ long __ret_gu,__val_gu; \
+({ long __val_gu; \
+ int __ret_gu; \
switch(sizeof (*(ptr))) { \
case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
@@ -138,7 +139,7 @@ extern void __put_user_bad(void);
#define __put_user_nocheck(x,ptr,size) \
({ \
- long __pu_err; \
+ int __pu_err; \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
@@ -146,7 +147,7 @@ extern void __put_user_bad(void);
#define __put_user_check(x,ptr,size) \
({ \
- long __pu_err = -EFAULT; \
+ int __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
@@ -157,10 +158,10 @@ extern void __put_user_bad(void);
do { \
retval = 0; \
switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
- case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
- case 4: __put_user_asm(x,ptr,retval,"l","k","ir"); break; \
- case 8: __put_user_asm(x,ptr,retval,"q","","ir"); break; \
+ case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
+ case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
+ case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
+ case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\
default: __put_user_bad(); \
} \
} while (0)
@@ -174,12 +175,12 @@ struct __large_struct { unsigned long buf[100]; };
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
-#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
__asm__ __volatile__( \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
- "3: movq %3,%0\n" \
+ "3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
@@ -187,32 +188,33 @@ struct __large_struct { unsigned long buf[100]; };
" .quad 1b,3b\n" \
".previous" \
: "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
+ : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
#define __get_user_nocheck(x,ptr,size) \
({ \
- long __gu_err, __gu_val; \
+ int __gu_err; \
+ long __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
-extern long __get_user_bad(void);
+extern int __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
- case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
- case 4: __get_user_asm(x,ptr,retval,"l","k","=r"); break; \
- case 8: __get_user_asm(x,ptr,retval,"q","","=r"); break; \
+ case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
+ case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
+ case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
+ case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
default: (x) = __get_user_bad(); \
} \
} while (0)
-#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
__asm__ __volatile__( \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
@@ -226,23 +228,77 @@ do { \
" .quad 1b,3b\n" \
".previous" \
: "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(-EFAULT), "0"(err))
+ : "m"(__m(addr)), "i"(errno), "0"(err))
/*
* Copy To/From Userspace
- *
- * This relies on an optimized common worker function.
- *
- * Could do special inline versions for small constant copies, but avoid this
- * for now. It's not clear it is worth it.
*/
+/* Handles exceptions in both to and from, but doesn't do access_ok */
extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);
extern unsigned long copy_to_user(void *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void *from, unsigned len);
-#define __copy_to_user copy_user_generic
-#define __copy_from_user copy_user_generic
+
+static inline int __copy_from_user(void *dst, void *src, unsigned size)
+{
+ if (!__builtin_constant_p(size))
+ return copy_user_generic(dst,src,size);
+ int ret = 0;
+ switch (size) {
+ case 1:__get_user_asm(*(u8*)dst,(u8 *)src,ret,"b","b","=q",1);
+ return ret;
+ case 2:__get_user_asm(*(u16*)dst,(u16*)src,ret,"w","w","=r",2);
+ return ret;
+ case 4:__get_user_asm(*(u32*)dst,(u32*)src,ret,"l","k","=r",4);
+ return ret;
+ case 8:__get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",8);
+ return ret;
+ case 10:
+ __get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",16);
+ if (ret) return ret;
+ __get_user_asm(*(u16*)(8+dst),(u16*)(8+src),ret,"w","w","=r",2);
+ return ret;
+ case 16:
+ __get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",16);
+ if (ret) return ret;
+ __get_user_asm(*(u64*)(8+dst),(u64*)(8+src),ret,"q","","=r",8);
+ return ret;
+ default:
+ return copy_user_generic(dst,src,size);
+ }
+}
+
+static inline int __copy_to_user(void *dst, void *src, unsigned size)
+{
+ if (!__builtin_constant_p(size))
+ return copy_user_generic(dst,src,size);
+ int ret = 0;
+ switch (size) {
+ case 1:__put_user_asm(*(u8*)src,(u8 *)dst,ret,"b","b","iq",1);
+ return ret;
+ case 2:__put_user_asm(*(u16*)src,(u16*)dst,ret,"w","w","ir",2);
+ return ret;
+ case 4:__put_user_asm(*(u32*)src,(u32*)dst,ret,"l","k","ir",4);
+ return ret;
+ case 8:__put_user_asm(*(u64*)src,(u64*)dst,ret,"q","","ir",8);
+ return ret;
+ case 10:
+ __put_user_asm(*(u64*)src,(u64*)dst,ret,"q","","ir",10);
+ if (ret) return ret;
+ asm("":::"memory");
+ __put_user_asm(4[(u16*)src],4+(u16*)dst,ret,"w","w","ir",2);
+ return ret;
+ case 16:
+ __put_user_asm(*(u64*)src,(u64*)dst,ret,"q","","ir",16);
+ if (ret) return ret;
+ asm("":::"memory");
+ __put_user_asm(1[(u64*)src],1+(u64*)dst,ret,"q","","ir",8);
+ return ret;
+ default:
+ return copy_user_generic(dst,src,size);
+ }
+}
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
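The payoff of the new inlines is at call sites with compile-time-constant sizes, which now reduce to a single mov plus an exception-table entry instead of a call to copy_user_generic(). A hedged usage sketch (fetch_u64 is an invented helper; a nonzero return from __copy_from_user is the number of bytes left uncopied):

static inline int fetch_u64(u64 *kdst, const void *usrc)
{
	if (!access_ok(VERIFY_READ, usrc, sizeof(u64)))
		return -EFAULT;
	/* constant size 8: inlines to one movq with a fixup entry */
	if (__copy_from_user(kdst, (void *)usrc, sizeof(u64)))
		return -EFAULT;
	return 0;
}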