-rw-r--r--   arch/alpha/kernel/asm-offsets.c    |  1
-rw-r--r--   arch/alpha/kernel/head.S           |  3
-rw-r--r--   arch/alpha/kernel/systbls.S        |  1
-rw-r--r--   arch/alpha/kernel/traps.c          | 80
-rw-r--r--   arch/alpha/lib/csum_partial_copy.c |  4
-rw-r--r--   arch/alpha/lib/memmove.S           |  9
-rw-r--r--   include/asm-alpha/ptrace.h         | 11
-rw-r--r--   include/asm-alpha/string.h         |  1
-rw-r--r--   include/asm-alpha/uaccess.h        | 69
-rw-r--r--   include/asm-alpha/unaligned.h      |  2
-rw-r--r--   include/asm-alpha/unistd.h         |  3
11 files changed, 87 insertions, 97 deletions
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
index 79b9b27166a3..8f2e5c718b50 100644
--- a/arch/alpha/kernel/asm-offsets.c
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -31,6 +31,7 @@ void foo(void)
 	DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
 	BLANK();
 
+	DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
 	DEFINE(PT_PTRACED, PT_PTRACED);
 	DEFINE(CLONE_VM, CLONE_VM);
 	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S
index fe03ed241fa3..4ca2e404708a 100644
--- a/arch/alpha/kernel/head.S
+++ b/arch/alpha/kernel/head.S
@@ -9,6 +9,7 @@
 
 #include <linux/config.h>
 #include <asm/system.h>
+#include <asm/asm_offsets.h>
 
 .globl swapper_pg_dir
 .globl _stext
@@ -25,7 +26,7 @@ __start:
 	/* We need to get current_task_info loaded up... */
 	lda	$8,init_thread_union
 	/* ... and find our stack ... */
-	lda	$30,0x4000($8)
+	lda	$30,0x4000 - SIZEOF_PT_REGS($8)
 	/* ... and then we can start the kernel. */
 	jsr	$26,start_kernel
 	call_pal PAL_halt
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index bc482d9388a3..3d9d1ac4f830 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -442,6 +442,7 @@ sys_call_table:
 	.quad sys_clock_gettime			/* 420 */
 	.quad sys_clock_getres
 	.quad sys_clock_nanosleep
+	.quad sys_semtimedop
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
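
The first three hunks are related plumbing: asm-offsets.c exports SIZEOF_PT_REGS to assembly, and head.S uses it to place the boot CPU's initial stack pointer sizeof(struct pt_regs) below the top of the 16 KB (two 8 KB pages) init_thread_union. That reserves a dummy pt_regs frame at the top of the stack even for the initial kernel thread, which is what lets the ptrace.h hunk further down drop its old TODO about kernel-only threads. The user-space sketch below is illustration only, not part of the patch; its struct pt_regs is a size placeholder, not the real Alpha register layout. It shows the layout head.S and alpha_task_regs() now both assume:

#include <stdio.h>

#define ALPHA_PAGE_SIZE	8192UL			/* Alpha uses 8 KB pages */
#define STACK_SIZE	(2 * ALPHA_PAGE_SIZE)	/* 0x4000, the constant in head.S */

struct pt_regs { unsigned long regs[34]; };	/* placeholder size only */

int main(void)
{
	unsigned long base = 0x200000;	/* stands in for init_thread_union */

	/* head.S: lda $30, 0x4000 - SIZEOF_PT_REGS(base) */
	unsigned long initial_sp = base + STACK_SIZE - sizeof(struct pt_regs);

	/* ptrace.h: alpha_task_regs() locates the same frame from the top */
	struct pt_regs *regs = (struct pt_regs *)(base + STACK_SIZE) - 1;

	printf("initial sp = %#lx, pt_regs at %#lx\n",
	       initial_sp, (unsigned long)regs);
	return 0;
}

The systbls.S hunk is independent of this: it appends sys_semtimedop to the syscall table; see the note after the unistd.h hunk at the end.
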
.gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -845,9 +845,9 @@ do_entUnaUser(void * va, unsigned long opcode, " extwh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -865,9 +865,9 @@ do_entUnaUser(void * va, unsigned long opcode, " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -885,9 +885,9 @@ do_entUnaUser(void * va, unsigned long opcode, " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -905,9 +905,9 @@ do_entUnaUser(void * va, unsigned long opcode, " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -925,9 +925,9 @@ do_entUnaUser(void * va, unsigned long opcode, " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %1,3b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) @@ -954,13 +954,13 @@ do_entUnaUser(void * va, unsigned long opcode, "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -988,13 +988,13 @@ do_entUnaUser(void * va, unsigned long opcode, "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), @@ -1022,13 +1022,13 @@ do_entUnaUser(void * va, unsigned long opcode, "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n\t" - " .gprel32 1b\n" + " .long 1b - .\n" " lda %2,5b-1b(%0)\n" - " .gprel32 2b\n" + " .long 2b - .\n" " lda %1,5b-2b(%0)\n" - " .gprel32 3b\n" + " .long 3b - .\n" " lda $31,5b-3b(%0)\n" - " .gprel32 4b\n" + " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index a24a4e1abdbc..702efb639d3e 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -46,7 +46,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) "1: ldq_u %0,%2\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ - " .gprel32 1b\n" \ + " .long 1b - .\n" \ " lda %0,2b-1b(%1)\n" \ ".previous" \ : "=r"(x), "=r"(__guu_err) \ @@ -61,7 +61,7 @@ 
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) "1: stq_u %2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ - " .gprel32 1b" \ + " .long 1b - ." \ " lda $31,2b-1b(%0)\n" \ ".previous" \ : "=r"(__puu_err) \ diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S index 73aed92537d0..a09e1d13bc6c 100644 --- a/arch/alpha/lib/memmove.S +++ b/arch/alpha/lib/memmove.S @@ -12,6 +12,15 @@ .text .align 4 + .globl bcopy + .ent bcopy +bcopy: + mov $16,$0 + mov $17,$16 + mov $0,$17 + .end bcopy + + .align 4 .globl memmove .ent memmove memmove: diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h index be24be276069..e77230f32b45 100644 --- a/include/asm-alpha/ptrace.h +++ b/include/asm-alpha/ptrace.h @@ -71,15 +71,8 @@ struct switch_stack { #define instruction_pointer(regs) ((regs)->pc) extern void show_regs(struct pt_regs *); -/* - * TODO: if kernel-only threads do not have a dummy pt_regs structure at the - * top of the stack, this would cause kernel stack corruption. Either check - * first that we're not dealing with a kernel thread or change the kernel - * stacks to allocate a dummy pt_regs structure. - */ - -#define alpha_task_regs(task) ((struct pt_regs *) \ - ((long) task->thread_info + PAGE_SIZE) - 1) +#define alpha_task_regs(task) \ + ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1) #define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0) diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h index 9e44fea669bf..4266af221932 100644 --- a/include/asm-alpha/string.h +++ b/include/asm-alpha/string.h @@ -13,6 +13,7 @@ #define __HAVE_ARCH_MEMCPY extern void * memcpy(void *, const void *, size_t); #define __HAVE_ARCH_MEMMOVE +#define __HAVE_ARCH_BCOPY extern void * memmove(void *, const void *, size_t); /* For backward compatibility with modules. Unused otherwise. */ diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h index e204cd592e90..3d69803066e8 100644 --- a/include/asm-alpha/uaccess.h +++ b/include/asm-alpha/uaccess.h @@ -340,25 +340,31 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ * Complex access routines */ +/* This little bit of silliness is to get the GP loaded for a function + that ordinarily wouldn't. Otherwise we could have it done by the macro + directly, which can be optimized the linker. */ +#ifdef MODULE +#define __module_address(sym) "r"(sym), +#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym +#else +#define __module_address(sym) +#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" +#endif + extern void __copy_user(void); extern inline long __copy_tofrom_user_nocheck(void *to, const void *from, long len) { - /* This little bit of silliness is to get the GP loaded for - a function that ordinarily wouldn't. Otherwise we could - have it done by the macro directly, which can be optimized - the linker. 
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
index e204cd592e90..3d69803066e8 100644
--- a/include/asm-alpha/uaccess.h
+++ b/include/asm-alpha/uaccess.h
@@ -340,25 +340,31 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
  * Complex access routines
  */
 
+/* This little bit of silliness is to get the GP loaded for a function
+   that ordinarily wouldn't.  Otherwise we could have it done by the macro
+   directly, which can be optimized the linker.  */
+#ifdef MODULE
+#define __module_address(sym)		"r"(sym),
+#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
+#else
+#define __module_address(sym)
+#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
+#endif
+
 extern void __copy_user(void);
 
 extern inline long
 __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 {
-	/* This little bit of silliness is to get the GP loaded for
-	   a function that ordinarily wouldn't.  Otherwise we could
-	   have it done by the macro directly, which can be optimized
-	   the linker.  */
-	register void * pv __asm__("$27") = __copy_user;
-
 	register void * __cu_to __asm__("$6") = to;
 	register const void * __cu_from __asm__("$7") = from;
 	register long __cu_len __asm__("$0") = len;
 
 	__asm__ __volatile__(
-		"jsr $28,(%3),__copy_user"
-		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv)
-		: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv)
+		__module_call(28, 3, __copy_user)
+		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
+		: __module_address(__copy_user)
+		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
 		: "$1","$2","$3","$4","$5","$28","memory");
 
 	return __cu_len;
@@ -367,20 +373,8 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 extern inline long
 __copy_tofrom_user(void *to, const void *from, long len, const void *validate)
 {
-	if (__access_ok((long)validate, len, get_fs())) {
-		register void * pv __asm__("$27") = __copy_user;
-		register void * __cu_to __asm__("$6") = to;
-		register const void * __cu_from __asm__("$7") = from;
-		register long __cu_len __asm__("$0") = len;
-		__asm__ __volatile__(
-			"jsr $28,(%3),__copy_user"
-			: "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to),
-			  "=r" (pv)
-			: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to),
-			  "3" (pv)
-			: "$1","$2","$3","$4","$5","$28","memory");
-		len = __cu_len;
-	}
+	if (__access_ok((long)validate, len, get_fs()))
+		len = __copy_tofrom_user_nocheck(to, from, len);
 	return len;
 }
 
@@ -404,18 +398,13 @@ extern void __do_clear_user(void);
 extern inline long
 __clear_user(void *to, long len)
 {
-	/* This little bit of silliness is to get the GP loaded for
-	   a function that ordinarily wouldn't.  Otherwise we could
-	   have it done by the macro directly, which can be optimized
-	   the linker.  */
-	register void * pv __asm__("$27") = __do_clear_user;
-
 	register void * __cl_to __asm__("$6") = to;
 	register long __cl_len __asm__("$0") = len;
 	__asm__ __volatile__(
-		"jsr $28,(%2),__do_clear_user"
-		: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
-		: "0"(__cl_len), "1"(__cl_to), "2"(pv)
+		__module_call(28, 2, __do_clear_user)
+		: "=r"(__cl_len), "=r"(__cl_to)
+		: __module_address(__do_clear_user)
+		  "0"(__cl_len), "1"(__cl_to)
 		: "$1","$2","$3","$4","$5","$28","memory");
 	return __cl_len;
 }
@@ -423,20 +412,14 @@ __clear_user(void *to, long len)
 extern inline long
 clear_user(void *to, long len)
 {
-	if (__access_ok((long)to, len, get_fs())) {
-		register void * pv __asm__("$27") = __do_clear_user;
-		register void * __cl_to __asm__("$6") = to;
-		register long __cl_len __asm__("$0") = len;
-		__asm__ __volatile__(
-			"jsr $28,(%2),__do_clear_user"
-			: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
-			: "0"(__cl_len), "1"(__cl_to), "2"(pv)
-			: "$1","$2","$3","$4","$5","$28","memory");
-		len = __cl_len;
-	}
+	if (__access_ok((long)to, len, get_fs()))
+		len = __clear_user(to, len);
 	return len;
 }
 
+#undef __module_address
+#undef __module_call
+
 /* Returns: -EFAULT if exception before terminator, N if the entire
    buffer filled, else strlen.  */
 
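
The uaccess.h rework drops the old trick of loading the helper's address into $27 by hand. When this header is built into the kernel, caller and callee share one GP, so a direct `bsr` tagged `!samegp` is enough and the register shuffling disappears; when it is built into a module, the helper still has to be reached through $27 with `jsr` so it can load the kernel's GP, and __module_address() supplies the symbol as an ordinary "r" input operand. The harness below (illustration only; the _mod/_kern suffixes are added here so both variants can coexist outside the #ifdef) prints the instruction template each configuration generates:

#include <stdio.h>

/* Copied from the patch, with _mod/_kern suffixes added so both
   expansions can be shown side by side. */
#define __module_address_mod(sym)	 "r"(sym),
#define __module_call_mod(ra, arg, sym)	 "jsr $" #ra ",(%" #arg ")," #sym
#define __module_call_kern(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"

int main(void)
{
	/* Built into the kernel: direct branch, the caller's GP is reused. */
	puts(__module_call_kern(28, 3, __copy_user));

	/* Built as a module: indirect call through $27 so __copy_user can
	   load its own GP; __module_address() adds the extra operand. */
	puts(__module_call_mod(28, 3, __copy_user));
	return 0;
}

With the asm centralized this way, __copy_tofrom_user() and clear_user() no longer duplicate it and simply call the __copy_tofrom_user_nocheck()/__clear_user() versions after the access_ok() check.
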
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
index 1ec3e969ece0..2090db872bcc 100644
--- a/include/asm-alpha/unaligned.h
+++ b/include/asm-alpha/unaligned.h
@@ -14,7 +14,7 @@
  * the get/put functions are indeed always optimized,
  * and that we use the correct sizes.
  */
-extern void bad_unaligned_access_length(void);
+extern void bad_unaligned_access_length(void) __attribute__((noreturn));
 
 /*
  * EGCS 1.1 knows about arbitrary unaligned loads.  Define some
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 494765bb1c92..7b500d3cfda2 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -358,7 +358,8 @@
 #define __NR_clock_gettime		420
 #define __NR_clock_getres		421
 #define __NR_clock_nanosleep		422
-#define NR_SYSCALLS			423
+#define __NR_semtimedop			423
+#define NR_SYSCALLS			424
 
 #if defined(__GNUC__)
 
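
Marking bad_unaligned_access_length() noreturn only gives the compiler better flow information for that deliberately-unresolvable error path. The substantive change in the last two files is wiring up semtimedop: the table slot added in systbls.S and the __NR_semtimedop value must stay in step, and NR_SYSCALLS moves to 424. A hedged user-space sketch of invoking the new entry point directly by number, for a libc that does not yet provide a semtimedop() wrapper (the semaphore id below is a placeholder; real code would obtain one from semget()):

#include <stdio.h>
#include <sys/sem.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef __NR_semtimedop
#define __NR_semtimedop 423		/* Alpha value added by this patch */
#endif

int main(void)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int semid = 0;			/* placeholder for a real semget() id */

	long ret = syscall(__NR_semtimedop, semid, &op, 1UL, &timeout);
	if (ret < 0)
		perror("semtimedop");
	return 0;
}
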
