author    Linus Torvalds <torvalds@home.transmeta.com>    2003-06-08 03:23:08 -0700
committer Linus Torvalds <torvalds@home.transmeta.com>    2003-06-08 03:23:08 -0700
commit    ab7c828bf0dcddf9e4aeb2cc77eec7614c7cf580 (patch)
tree      adaaf06e9b30e504e65a497ba8d3c7bc334d0d97 /include
parent    6c94e37b1ea3014150d22076793cccc7be072d84 (diff)
parent    4964368c1b5a04aeb65dbae43141de2ecc9c9916 (diff)
Merge bk://are.twiddle.net/axp-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/ptrace.h     11
-rw-r--r--  include/asm-alpha/string.h      1
-rw-r--r--  include/asm-alpha/uaccess.h    69
-rw-r--r--  include/asm-alpha/unaligned.h   2
-rw-r--r--  include/asm-alpha/unistd.h      3
5 files changed, 32 insertions, 54 deletions
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
index be24be276069..e77230f32b45 100644
--- a/include/asm-alpha/ptrace.h
+++ b/include/asm-alpha/ptrace.h
@@ -71,15 +71,8 @@ struct switch_stack {
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
-/*
- * TODO: if kernel-only threads do not have a dummy pt_regs structure at the
- * top of the stack, this would cause kernel stack corruption. Either check
- * first that we're not dealing with a kernel thread or change the kernel
- * stacks to allocate a dummy pt_regs structure.
- */
-
-#define alpha_task_regs(task) ((struct pt_regs *) \
- ((long) task->thread_info + PAGE_SIZE) - 1)
+#define alpha_task_regs(task) \
+ ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
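[Note: the switch from PAGE_SIZE to 2*PAGE_SIZE matches a two-page kernel stack (16 KB with alpha's 8 KB pages): the saved user registers sit at the very top of the thread_info/stack region, one struct pt_regs below its end. A minimal sketch of that pointer arithmetic follows; the EX_-prefixed names and sizes are illustrative assumptions, not taken from the tree.]

    #include <stdio.h>

    #define EX_PAGE_SIZE  8192UL               /* alpha uses 8 KB pages */
    #define EX_STACK_SPAN (2 * EX_PAGE_SIZE)   /* thread_info + kernel stack */

    struct ex_pt_regs { unsigned long r0, pc; /* ...trimmed... */ };

    /* Step to the end of the thread_info/stack region, then back up by
       one pt_regs: that is where the saved user registers live. */
    static struct ex_pt_regs *ex_task_regs(void *thread_info)
    {
            return (struct ex_pt_regs *)((char *)thread_info + EX_STACK_SPAN) - 1;
    }

    int main(void)
    {
            static unsigned char region[EX_STACK_SPAN];
            printf("pt_regs starts at byte %td of %lu\n",
                   (unsigned char *)ex_task_regs(region) - region, EX_STACK_SPAN);
            return 0;
    }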
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h
index 9e44fea669bf..4266af221932 100644
--- a/include/asm-alpha/string.h
+++ b/include/asm-alpha/string.h
@@ -13,6 +13,7 @@
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, size_t);
#define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_BCOPY
extern void * memmove(void *, const void *, size_t);
/* For backward compatibility with modules. Unused otherwise. */
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
index e204cd592e90..3d69803066e8 100644
--- a/include/asm-alpha/uaccess.h
+++ b/include/asm-alpha/uaccess.h
@@ -340,25 +340,31 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
* Complex access routines
*/
+/* This little bit of silliness is to get the GP loaded for a function
+ that ordinarily wouldn't. Otherwise we could have it done by the macro
+ directly, which can be optimized by the linker. */
+#ifdef MODULE
+#define __module_address(sym) "r"(sym),
+#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
+#else
+#define __module_address(sym)
+#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
+#endif
+
extern void __copy_user(void);
extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
- /* This little bit of silliness is to get the GP loaded for
- a function that ordinarily wouldn't. Otherwise we could
- have it done by the macro directly, which can be optimized
- the linker. */
- register void * pv __asm__("$27") = __copy_user;
-
register void * __cu_to __asm__("$6") = to;
register const void * __cu_from __asm__("$7") = from;
register long __cu_len __asm__("$0") = len;
__asm__ __volatile__(
- "jsr $28,(%3),__copy_user"
- : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv)
- : "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv)
+ __module_call(28, 3, __copy_user)
+ : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
+ : __module_address(__copy_user)
+ "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
: "$1","$2","$3","$4","$5","$28","memory");
return __cu_len;
@@ -367,20 +373,8 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void *validate)
{
- if (__access_ok((long)validate, len, get_fs())) {
- register void * pv __asm__("$27") = __copy_user;
- register void * __cu_to __asm__("$6") = to;
- register const void * __cu_from __asm__("$7") = from;
- register long __cu_len __asm__("$0") = len;
- __asm__ __volatile__(
- "jsr $28,(%3),__copy_user"
- : "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to),
- "=r" (pv)
- : "0" (__cu_len), "1" (__cu_from), "2" (__cu_to),
- "3" (pv)
- : "$1","$2","$3","$4","$5","$28","memory");
- len = __cu_len;
- }
+ if (__access_ok((long)validate, len, get_fs()))
+ len = __copy_tofrom_user_nocheck(to, from, len);
return len;
}
@@ -404,18 +398,13 @@ extern void __do_clear_user(void);
extern inline long
__clear_user(void *to, long len)
{
- /* This little bit of silliness is to get the GP loaded for
- a function that ordinarily wouldn't. Otherwise we could
- have it done by the macro directly, which can be optimized
- the linker. */
- register void * pv __asm__("$27") = __do_clear_user;
-
register void * __cl_to __asm__("$6") = to;
register long __cl_len __asm__("$0") = len;
__asm__ __volatile__(
- "jsr $28,(%2),__do_clear_user"
- : "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
- : "0"(__cl_len), "1"(__cl_to), "2"(pv)
+ __module_call(28, 2, __do_clear_user)
+ : "=r"(__cl_len), "=r"(__cl_to)
+ : __module_address(__do_clear_user)
+ "0"(__cl_len), "1"(__cl_to)
: "$1","$2","$3","$4","$5","$28","memory");
return __cl_len;
}
@@ -423,20 +412,14 @@ __clear_user(void *to, long len)
extern inline long
clear_user(void *to, long len)
{
- if (__access_ok((long)to, len, get_fs())) {
- register void * pv __asm__("$27") = __do_clear_user;
- register void * __cl_to __asm__("$6") = to;
- register long __cl_len __asm__("$0") = len;
- __asm__ __volatile__(
- "jsr $28,(%2),__do_clear_user"
- : "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
- : "0"(__cl_len), "1"(__cl_to), "2"(pv)
- : "$1","$2","$3","$4","$5","$28","memory");
- len = __cl_len;
- }
+ if (__access_ok((long)to, len, get_fs()))
+ len = __clear_user(to, len);
return len;
}
+#undef __module_address
+#undef __module_call
+
/* Returns: -EFAULT if exception before terminator, N if the entire
buffer filled, else strlen. */
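[Note: as a reading aid, here is roughly what the asm in __copy_tofrom_user_nocheck becomes once __module_call/__module_address expand, in both configurations. The operand layout is copied from the patch; the wrapper below is a simplified, alpha-only illustration, not the real header.]

    extern void __copy_user(void);

    static inline long ex_copy_nocheck(void *to, const void *from, long len)
    {
            register void *__cu_to __asm__("$6") = to;
            register const void *__cu_from __asm__("$7") = from;
            register long __cu_len __asm__("$0") = len;
    #ifdef MODULE
            /* Module build: the callee's address is passed in a register and
               reached with an indirect jsr, since a short direct branch into
               the core kernel is not an option for module code. */
            __asm__ __volatile__(
                    "jsr $28,(%3),__copy_user"
                    : "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to)
                    : "r"(__copy_user), "0"(__cu_len), "1"(__cu_from), "2"(__cu_to)
                    : "$1","$2","$3","$4","$5","$28","memory");
    #else
            /* Built-in: a direct bsr; "!samegp" tells the assembler the callee
               shares our GP, so no GP reload sequence is emitted. */
            __asm__ __volatile__(
                    "bsr $28,__copy_user !samegp"
                    : "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to)
                    : "0"(__cu_len), "1"(__cu_from), "2"(__cu_to)
                    : "$1","$2","$3","$4","$5","$28","memory");
    #endif
            return __cu_len;
    }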
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
index 1ec3e969ece0..2090db872bcc 100644
--- a/include/asm-alpha/unaligned.h
+++ b/include/asm-alpha/unaligned.h
@@ -14,7 +14,7 @@
* the get/put functions are indeed always optimized,
* and that we use the correct sizes.
*/
-extern void bad_unaligned_access_length(void);
+extern void bad_unaligned_access_length(void) __attribute__((noreturn));
/*
* EGCS 1.1 knows about arbitrary unaligned loads. Define some
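[Note: the noreturn attribute complements the check described in the comment above: any call to bad_unaligned_access_length() that the optimizer cannot remove is an error, and marking it noreturn additionally tells the compiler that the bad-size path never falls through. A small sketch of the pattern, with assumed ex_-prefixed names rather than the real header contents.]

    /* Declared but deliberately never defined: a call that survives
       optimization (i.e. a bad size) shows up as a link failure. */
    extern void ex_bad_len(void) __attribute__((noreturn));

    static inline unsigned long ex_get_unaligned(const void *p, int size)
    {
            unsigned long val;
            unsigned short tmp16;

            switch (size) {
            case 1:
                    val = *(const unsigned char *)p;
                    break;
            case 2:
                    __builtin_memcpy(&tmp16, p, 2);  /* expands to alignment-safe loads */
                    val = tmp16;
                    break;
            default:
                    /* noreturn: the compiler knows this path never reaches the
                       return below, so 'val' is not "maybe uninitialized". */
                    ex_bad_len();
            }
            return val;
    }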
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 494765bb1c92..7b500d3cfda2 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -358,7 +358,8 @@
#define __NR_clock_gettime 420
#define __NR_clock_getres 421
#define __NR_clock_nanosleep 422
-#define NR_SYSCALLS 423
+#define __NR_semtimedop 423
+#define NR_SYSCALLS 424
#if defined(__GNUC__)