author    Richard Henderson <rth@fidel.sfbay.redhat.com>  2002-02-10 13:42:53 -0800
committer Richard Henderson <rth@fidel.sfbay.redhat.com>  2002-02-10 13:42:53 -0800
commit    374eeee8a8a50e12278dfa37021df7b6efe506c3 (patch)
tree      2388fbed3bb98ad9f8f342c1353c2bbfb40e07c8 /include
parent    74c0102446bb3160f186555bd133062003acf194 (diff)
Update Alpha UP for thread_info and scheduler changes.
Diffstat (limited to 'include')

-rw-r--r--  include/asm-alpha/bitops.h        114
-rw-r--r--  include/asm-alpha/current.h         7
-rw-r--r--  include/asm-alpha/fpu.h             2
-rw-r--r--  include/asm-alpha/io.h              6
-rw-r--r--  include/asm-alpha/mmu_context.h    36
-rw-r--r--  include/asm-alpha/page.h           10
-rw-r--r--  include/asm-alpha/processor.h     123
-rw-r--r--  include/asm-alpha/smp.h             3
-rw-r--r--  include/asm-alpha/sysinfo.h         4
-rw-r--r--  include/asm-alpha/system.h          3
-rw-r--r--  include/asm-alpha/thread_info.h    89
-rw-r--r--  include/asm-alpha/uaccess.h         4
-rw-r--r--  include/asm-alpha/unistd.h          3

13 files changed, 274 insertions, 130 deletions
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 06f25fbc8328..0fb2d49f1108 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -74,11 +74,11 @@ clear_bit(unsigned long nr, volatile void * addr)
* WARNING: non atomic version.
*/
static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+__clear_bit(unsigned long nr, volatile void * addr)
{
int *m = ((int *) addr) + (nr >> 5);
- *m ^= 1 << (nr & 31);
+ *m &= ~(1 << (nr & 31));
}
static inline void
@@ -99,6 +99,17 @@ change_bit(unsigned long nr, volatile void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ void
+__change_bit(unsigned long nr, volatile void * addr)
+{
+ int *m = ((int *) addr) + (nr >> 5);
+
+ *m ^= 1 << (nr & 31);
+}
+
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
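These two hunks fix a cut-and-paste bug: the helper previously named __change_bit lived where __clear_bit belonged and toggled (XOR) instead of clearing; the patch renames it with the correct AND-NOT body and re-adds a real __change_bit after change_bit. A host-side sketch of the two semantics (the demo_* names are hypothetical, not part of the patch):

#include <assert.h>

/* Hypothetical stand-ins for the two non-atomic helpers above,
   using the same 32-bit word indexing as the Alpha code. */
static void demo_clear_bit(unsigned long nr, void *addr)
{
	int *m = ((int *) addr) + (nr >> 5);
	*m &= ~(1 << (nr & 31));	/* clear: AND with inverted mask */
}

static void demo_change_bit(unsigned long nr, void *addr)
{
	int *m = ((int *) addr) + (nr >> 5);
	*m ^= 1 << (nr & 31);		/* change: XOR toggles the bit */
}

int main(void)
{
	int word = 0x5;			/* bits 0 and 2 set */

	demo_clear_bit(2, &word);	/* 0x5 -> 0x1 */
	assert(word == 0x1);
	demo_change_bit(2, &word);	/* toggle sets it again: 0x5 */
	assert(word == 0x5);
	demo_clear_bit(2, &word);	/* clearing twice is idempotent */
	demo_clear_bit(2, &word);
	assert(word == 0x1);
	return 0;
}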
@@ -181,20 +192,6 @@ __test_and_clear_bit(unsigned long nr, volatile void * addr)
return (old & mask) != 0;
}
-/*
- * WARNING: non atomic version.
- */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- int *m = ((int *) addr) + (nr >> 5);
- int old = *m;
-
- *m = old ^ mask;
- return (old & mask) != 0;
-}
-
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
@@ -220,6 +217,20 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
return oldbit != 0;
}
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ int
+__test_and_change_bit(unsigned long nr, volatile void * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ int *m = ((int *) addr) + (nr >> 5);
+ int old = *m;
+
+ *m = old ^ mask;
+ return (old & mask) != 0;
+}
+
static inline int
test_bit(int nr, volatile void * addr)
{
@@ -264,17 +275,39 @@ static inline unsigned long ffz(unsigned long word)
#endif
}
+/*
+ * __ffs = Find First set bit in word. Undefined if no set bit exists.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+ /* Whee. EV67 can calculate it directly. */
+ unsigned long result;
+ __asm__("cttz %1,%0" : "=r"(result) : "r"(word));
+ return result;
+#else
+ unsigned long bits, qofs, bofs;
+
+ __asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
+ qofs = ffz_b(bits);
+ __asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+ bofs = ffz_b(~bits);
+
+ return qofs*8 + bofs;
+#endif
+}
+
#ifdef __KERNEL__
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from the above __ffs.
*/
static inline int ffs(int word)
{
- int result = ffz(~word);
+ int result = __ffs(word);
return word ? result+1 : 0;
}
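__ffs maps directly to EV67's cttz; the pre-EV67 path uses cmpbge against the zero register to find the first byte containing a set bit, extbl to pull that byte out, and ffz_b on its complement to locate the bit within the byte. ffs() then becomes __ffs(word) + 1 guarded by the `word ?` test; on Alpha cttz of zero is still well defined, but a portable version must guard before the call, since __builtin_ctzl(0) is undefined. A sketch along those lines (demo names hypothetical):

#include <assert.h>

/* Portable sketch of __ffs: index of the lowest set bit, undefined
   when word == 0, exactly the kernel contract.  __builtin_ctzl is
   the generic equivalent of the EV67 cttz instruction. */
static inline unsigned long demo_ffs_low_bit(unsigned long word)
{
	return (unsigned long) __builtin_ctzl(word);
}

/* The 1-based libc-style ffs() built on top of it, as above. */
static inline int demo_ffs(int word)
{
	return word
		? (int) demo_ffs_low_bit((unsigned long) (unsigned) word) + 1
		: 0;
}

int main(void)
{
	assert(demo_ffs_low_bit(0x8) == 3);
	assert(demo_ffs_low_bit(1UL << 63) == 63);
	assert(demo_ffs(0) == 0);
	assert(demo_ffs(0x8) == 4);
	return 0;
}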
@@ -365,10 +398,53 @@ found_middle:
}
/*
- * The optimizer actually does good code for this case..
+ * Find next one bit in a bitmap reasonably efficiently.
+ */
+static inline unsigned long
+find_next_bit(void * addr, unsigned long size, unsigned long offset)
+{
+ unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
+ unsigned long result = offset & ~63UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 63UL;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= ~0UL << offset;
+ if (size < 64)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= 64;
+ result += 64;
+ }
+ while (size & ~63UL) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += 64;
+ size -= 64;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+found_first:
+ tmp &= ~0UL >> (64 - size);
+ if (!tmp)
+ return result + size;
+found_middle:
+ return result + __ffs(tmp);
+}
+
+/*
+ * The optimizer actually does good code for this case.
*/
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
#ifdef __KERNEL__
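find_next_bit works a word at a time: mask off bits below `offset` in the first partial word, scan whole 64-bit words, then mask the tail with ~0UL >> (64 - size); returning `size` means "not found". A naive but behavior-equivalent sketch, plus the iteration idiom it enables (hypothetical demo name; assumes 64-bit unsigned long, as on Alpha):

#include <stdio.h>

/* Bit-at-a-time stand-in for find_next_bit: returns the index of the
   next set bit at or after `offset`, or `size` if there is none. */
static unsigned long
demo_find_next_bit(const unsigned long *addr, unsigned long size,
		   unsigned long offset)
{
	for (; offset < size; offset++)
		if (addr[offset / 64] & (1UL << (offset % 64)))
			return offset;
	return size;
}

int main(void)
{
	unsigned long map[2] = { (1UL << 3) | (1UL << 40), 1UL << 1 };
	unsigned long size = 128, bit;

	/* The usual loop built on find_first_bit/find_next_bit. */
	for (bit = demo_find_next_bit(map, size, 0);
	     bit < size;
	     bit = demo_find_next_bit(map, size, bit + 1))
		printf("bit %lu is set\n", bit);	/* 3, 40, 65 */
	return 0;
}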
diff --git a/include/asm-alpha/current.h b/include/asm-alpha/current.h
index 8db6dd06e236..dccab45e2c9e 100644
--- a/include/asm-alpha/current.h
+++ b/include/asm-alpha/current.h
@@ -1,6 +1,9 @@
#ifndef _ALPHA_CURRENT_H
#define _ALPHA_CURRENT_H
-register struct task_struct *current __asm__("$8");
+#include <asm/thread_info.h>
-#endif /* !(_ALPHA_CURRENT_H) */
+#define get_current() (current_thread_info()->task + 0)
+#define current get_current()
+
+#endif /* _ALPHA_CURRENT_H */
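`current` is no longer the task pointer itself: $8 now carries the thread_info pointer, and the task is one load away through its task field. The `+ 0` apparently serves to make the expansion a non-lvalue, so stray assignments like the `current = (next)` removed from switch_to below no longer compile. A compilable sketch with simplified types (a plain global stands in for the register variable):

#include <stdio.h>

struct task_struct { int pid; };

struct thread_info {
	struct task_struct *task;	/* main task structure */
};

/* The real header declares a global register variable:
       register struct thread_info *__current_thread_info __asm__("$8"); */
static struct thread_info *__current_thread_info;

#define current_thread_info()	__current_thread_info
#define get_current()		(current_thread_info()->task + 0)
#define current			get_current()

int main(void)
{
	struct task_struct init_task = { 1 };
	struct thread_info init_ti = { &init_task };

	__current_thread_info = &init_ti;
	printf("current pid = %d\n", current->pid);
	return 0;
}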
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index acd1b9a03bdf..3cd9b4dd511f 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
@@ -31,7 +31,7 @@
/*
* IEEE trap enables are implemented in software. These per-thread
- * bits are stored in the "flags" field of "struct thread_struct".
+ * bits are stored in the "ieee_state" field of "struct thread_info".
* Thus, the bits are defined so as not to conflict with the
* floating-point enable bit (which is architected). On top of that,
* we want to make these bits compatible with OSF/1 so
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 956c46a60f60..594292133373 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -18,6 +18,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
#include <asm/machvec.h>
/*
@@ -60,7 +61,10 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
-#define page_to_phys(page) (((page) - (page)->zone->zone_mem_map) << PAGE_SHIFT)
+#define page_to_phys(page) PAGE_TO_PA(page)
+
+/* This depends on working iommu. */
+#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
/*
* Change addresses as seen by the kernel (virtual) to addresses as
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 492d29b7c9d5..94d26e7b60f3 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -21,8 +21,34 @@
#include <asm/io.h>
#endif
+/* ??? This does not belong here. */
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 168
+ * bits is set.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO > 192
+# error update this function.
+#endif
+
+static inline int
+sched_find_first_bit(unsigned long *b)
+{
+ unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
+ unsigned long offset = 128;
+
+ if (unlikely(b0 | b1)) {
+ b2 = (b0 ? b0 : b1);
+ offset = (b0 ? 0 : 64);
+ }
+
+ return __ffs(b2) + offset;
+}
+
+
extern inline unsigned long
-__reload_thread(struct thread_struct *pcb)
+__reload_thread(struct pcb_struct *pcb)
{
register unsigned long a0 __asm__("$16");
register unsigned long v0 __asm__("$0");
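The sched_find_first_bit added above narrows the 192-bit search to a single word before doing one __ffs: if either of the first two (usually empty) words has a bit set, that word and its base offset are chosen; otherwise the answer lies in the third word at offset 128. A host-side check with __ffs swapped for __builtin_ctzl and unlikely() dropped (assumes a 64-bit unsigned long):

#include <assert.h>

static inline int demo_sched_find_first_bit(unsigned long *b)
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long offset = 128;

	if (b0 | b1) {
		b2 = (b0 ? b0 : b1);
		offset = (b0 ? 0 : 64);
	}
	return __builtin_ctzl(b2) + (int) offset;
}

int main(void)
{
	unsigned long b[3] = { 0, 0, 1UL << 7 };

	assert(demo_sched_find_first_bit(b) == 128 + 7);
	b[1] = 1UL << 2;	/* a bit in the second word wins */
	assert(demo_sched_find_first_bit(b) == 64 + 2);
	b[0] = 1UL << 60;	/* and the first word wins over both */
	assert(demo_sched_find_first_bit(b) == 60);
	return 0;
}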
@@ -153,7 +179,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
/* Always update the PCB ASN. Another thread may have allocated
a new mm->context (via flush_tlb_mm) without the ASN serial
number wrapping. We have no way to detect when this is needed. */
- next->thread.asn = mmc & HARDWARE_ASN_MASK;
+ next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
__EXTERN_INLINE void
@@ -228,7 +254,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
for (i = 0; i < smp_num_cpus; i++)
mm->context[cpu_logical_map(i)] = 0;
- tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+ tsk->thread_info->pcb.ptbr
+ = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
return 0;
}
@@ -241,7 +268,8 @@ destroy_context(struct mm_struct *mm)
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
- tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+ tsk->thread_info->pcb.ptbr
+ = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
#ifdef __MMU_EXTERN_INLINE
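Both ptbr assignments above store a page frame number, not an address: the pgd sits in the identity-mapped kernel region, so subtracting IDENT_ADDR gives its physical address, and shifting by PAGE_SHIFT gives the PFN that PALcode expects in the PCB. A sketch with the Alpha constants (demo names hypothetical; assumes a 64-bit host):

#include <assert.h>

/* Alpha's kernel lives in an identity-mapped region based at
   IDENT_ADDR, with 8 KB pages (PAGE_SHIFT = 13). */
#define DEMO_IDENT_ADDR		0xfffffc0000000000UL
#define DEMO_PAGE_SHIFT		13

static unsigned long demo_virt_to_pfn(unsigned long vaddr)
{
	return (vaddr - DEMO_IDENT_ADDR) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	/* A page-aligned kernel virtual address in frame 42... */
	unsigned long pgd = DEMO_IDENT_ADDR + (42UL << DEMO_PAGE_SHIFT);

	/* ...yields PFN 42, the form PALcode wants in pcb.ptbr. */
	assert(demo_virt_to_pfn(pgd) == 42);
	return 0;
}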
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index f8bc9ac9cfd0..be8931a2e774 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -59,11 +59,11 @@ typedef unsigned long pgprot_t;
#endif /* STRICT_MM_TYPECHECKS */
-#define BUG() \
-do { \
- printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
- __asm__ __volatile__("call_pal %0 # bugchk" : : "i" (PAL_bugchk)); \
-} while (0)
+/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
+ function loaded the GP, so this could fail in modules. */
+#define BUG() \
+ __asm__ __volatile__("call_pal %0 # bugchk\n\t"".long %1\n\t.8byte %2" \
+ : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
#define PAGE_BUG(page) BUG()
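The new BUG() plants __LINE__ (as a .long) and the address of __FILE__ (as an .8byte) immediately after the bugchk PALcall, so the trap handler can recover the location from the faulting PC; this sidesteps the printk of the old version (and the GP load that printk would require, per the ??? comment). A portable sketch of the same record-the-location idea, using a static table entry rather than the instruction stream (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct bug_entry {
	const char *file;
	int line;
};

static void demo_bug_trap(const struct bug_entry *e)
{
	fprintf(stderr, "kernel BUG at %s:%d!\n", e->file, e->line);
	abort();			/* stand-in for call_pal bugchk */
}

#define DEMO_BUG()							\
do {									\
	static const struct bug_entry __entry = { __FILE__, __LINE__ };\
	demo_bug_trap(&__entry);					\
} while (0)

int main(void)
{
	DEMO_BUG();			/* never returns */
}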
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 1c41ecccfaa8..cd77be47b2c3 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -38,83 +38,17 @@ typedef struct {
unsigned long seg;
} mm_segment_t;
-struct thread_struct {
- /* the fields below are used by PALcode and must match struct pcb: */
- unsigned long ksp;
- unsigned long usp;
- unsigned long ptbr;
- unsigned int pcc;
- unsigned int asn;
- unsigned long unique;
- /*
- * bit 0: floating point enable
- * bit 62: performance monitor enable
- */
- unsigned long pal_flags;
- unsigned long res1, res2;
-
- /*
- * The fields below are Linux-specific:
- *
- * bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h)
- * bit 6..8: UAC bits (see sysinfo.h)
- * bit 17..21: IEEE_STATUS_MASK bits (see fpu.h)
- * bit 63: die_if_kernel recursion lock
- */
- unsigned long flags;
-
- /* Perform syscall argument validation (get/set_fs). */
- mm_segment_t fs;
-
- /* Breakpoint handling for ptrace. */
- unsigned long bpt_addr[2];
- unsigned int bpt_insn[2];
- int bpt_nsaved;
-};
-
-#define INIT_THREAD { \
- 0, 0, 0, \
- 0, 0, 0, \
- 0, 0, 0, \
- 0, \
- KERNEL_DS \
-}
-
-#define THREAD_SIZE (2*PAGE_SIZE)
+/* This is dead. Everything has been moved to thread_info. */
+struct thread_struct { };
+#define INIT_THREAD { }
-#include <asm/ptrace.h>
-
-/*
- * Return saved PC of a blocked thread. This assumes the frame
- * pointer is the 6th saved long on the kernel stack and that the
- * saved return address is the first long in the frame. This all
- * holds provided the thread blocked through a call to schedule() ($15
- * is the frame pointer in schedule() and $15 is saved at offset 48 by
- * entry.S:do_switch_stack).
- *
- * Under heavy swap load I've seen this lose in an ugly way. So do
- * some extra sanity checking on the ranges we expect these pointers
- * to be in so that we can fail gracefully. This is just for ps after
- * all. -- r~
- */
-extern inline unsigned long thread_saved_pc(struct thread_struct *t)
-{
- unsigned long fp, sp = t->ksp, base = (unsigned long)t;
-
- if (sp > base && sp+6*8 < base + 16*1024) {
- fp = ((unsigned long*)sp)[6];
- if (fp > sp && fp < base + 16*1024)
- return *(unsigned long *)fp;
- }
-
- return 0;
-}
+/* Return saved PC of a blocked thread. */
+struct task_struct;
+extern unsigned long thread_saved_pc(struct task_struct *);
/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
-struct task_struct;
-
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -127,26 +61,18 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
unsigned long get_wchan(struct task_struct *p);
/* See arch/alpha/kernel/ptrace.c for details. */
-#define PT_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
- + (long)&((struct pt_regs *)0)->reg)
+#define PT_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
-#define SW_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
- - sizeof(struct switch_stack) \
- + (long)&((struct switch_stack *)0)->reg)
+#define SW_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+ + offsetof(struct switch_stack, reg))
#define KSTK_EIP(tsk) \
- (*(unsigned long *)(PT_REG(pc) + (unsigned long)(tsk)))
-
-#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-/* NOTE: The task struct and the stack go together! */
-#define alloc_task_struct() \
- ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
-#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
+ (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
-#define init_task (init_task_union.task)
-#define init_stack (init_task_union.stack)
+#define KSTK_ESP(tsk) \
+ ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
#define cpu_relax() do { } while (0)
@@ -154,21 +80,36 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
extern inline void prefetch(const void *ptr)
{
- __asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
+ __builtin_prefetch(ptr, 0, 3);
}
extern inline void prefetchw(const void *ptr)
{
- __asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
+ __builtin_prefetch(ptr, 1, 3);
}
extern inline void spin_lock_prefetch(const void *ptr)
{
+ __builtin_prefetch(ptr, 1, 3);
+}
+#else
+extern inline void prefetch(const void *ptr)
+{
__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
}
-
+extern inline void prefetchw(const void *ptr)
+{
+ __asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
+}
+
+extern inline void spin_lock_prefetch(const void *ptr)
+{
+ __asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
+}
+#endif /* GCC 3.1 */
#endif /* __ASM_ALPHA_PROCESSOR_H */
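With GCC 3.1 or newer, the prefetch trio defers to __builtin_prefetch(addr, rw, locality): rw is 0 for read and 1 for write intent, and locality 3 requests maximum cache residency. The asm fallback also changes prefetchw and spin_lock_prefetch from ldl $31 to ldq $31, which the 21264 treats as a prefetch with modify intent. Typical use of the builtin in a streaming loop:

#include <stddef.h>

/* Sum that prefetches eight elements ahead; rw = 0 (read),
   locality = 3, matching the prefetch() definition above. */
static long sum_with_prefetch(const long *a, size_t n)
{
	long sum = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (i + 8 < n)
			__builtin_prefetch(&a[i + 8], 0, 3);
		sum += a[i];
	}
	return sum;
}

int main(void)
{
	long a[64];
	size_t i;

	for (i = 0; i < 64; i++)
		a[i] = (long) i;
	return sum_with_prefetch(a, 64) == 2016 ? 0 : 1;	/* 0+...+63 */
}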
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index a4abd0e7609a..f4cb8926b5a2 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -2,6 +2,7 @@
#define __ASM_SMP_H
#include <linux/config.h>
+#include <linux/threads.h>
#include <asm/pal.h>
/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */
@@ -55,7 +56,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
#define hard_smp_processor_id() __hard_smp_processor_id()
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current_thread_info()->cpu)
extern unsigned long cpu_present_mask;
#define cpu_online_map cpu_present_mask
diff --git a/include/asm-alpha/sysinfo.h b/include/asm-alpha/sysinfo.h
index cdd66a643ef6..1c65a021ac3b 100644
--- a/include/asm-alpha/sysinfo.h
+++ b/include/asm-alpha/sysinfo.h
@@ -29,8 +29,8 @@
#ifdef __KERNEL__
-/* This is the shift that is applied to the UAC bits as stored in the
- per-thread flags. */
+/* This is the shift that is applied to the UAC bits as stored in the
+ per-thread flags. See thread_info.h. */
#define UAC_SHIFT 6
#endif
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 665c15e6ab58..bc06fe188985 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -134,8 +134,7 @@ extern void halt(void) __attribute__((noreturn));
#define switch_to(prev,next,last) \
do { \
unsigned long pcbb; \
- current = (next); \
- pcbb = virt_to_phys(&current->thread); \
+ pcbb = virt_to_phys(&(next)->thread_info->pcb); \
(last) = alpha_switch_to(pcbb, (prev)); \
check_mmu_context(); \
} while (0)
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
new file mode 100644
index 000000000000..a1f831d47245
--- /dev/null
+++ b/include/asm-alpha/thread_info.h
@@ -0,0 +1,89 @@
+#ifndef _ALPHA_THREAD_INFO_H
+#define _ALPHA_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/hwrpb.h>
+#endif
+
+#ifndef __ASSEMBLY__
+struct thread_info {
+ struct pcb_struct pcb; /* palcode state */
+
+ struct task_struct *task; /* main task structure */
+ unsigned int flags; /* low level flags */
+ unsigned int ieee_state; /* see fpu.h */
+
+ struct exec_domain *exec_domain; /* execution domain */
+ mm_segment_t addr_limit; /* thread address space */
+ int cpu; /* current CPU */
+
+ int bpt_nsaved;
+ unsigned long bpt_addr[2]; /* breakpoint handling */
+ unsigned int bpt_insn[2];
+};
+
+/*
+ * Macros/functions for gaining access to the thread information structure.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ task: &tsk, \
+ exec_domain: &default_exec_domain, \
+ addr_limit: KERNEL_DS, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* How to get the thread information struct from C. */
+register struct thread_info *__current_thread_info __asm__("$8");
+#define current_thread_info() __current_thread_info
+
+/* Thread information allocation. */
+#define THREAD_SIZE (2*PAGE_SIZE)
+#define alloc_thread_info() \
+ ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Thread information flags:
+ * - these are process state flags and are used from assembly
+ * - pending work-to-be-done flags come first to fit in an immediate operand.
+ *
+ * TIF_SYSCALL_TRACE is known to be 0 via blbs.
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 4 /* poll_idle is polling NEED_RESCHED */
+#define TIF_DIE_IF_KERNEL 5 /* dik recursion lock */
+#define TIF_UAC_NOPRINT 6 /* see sysinfo.h */
+#define TIF_UAC_NOFIX 7
+#define TIF_UAC_SIGBUS 8
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+
+/* Work to do on interrupt/exception return. */
+#define _TIF_WORK_MASK (_TIF_NOTIFY_RESUME \
+ | _TIF_SIGPENDING \
+ | _TIF_NEED_RESCHED)
+
+/* Work to do on any return to userspace. */
+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
+ | _TIF_SYSCALL_TRACE)
+
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_THREAD_INFO_H */
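The flag layout keeps the work-to-be-done bits lowest so the interesting masks fit an Alpha 8-bit immediate, and pins TIF_SYSCALL_TRACE at bit 0 so entry.S can test it with a single blbs. A quick host-side check of the mask arithmetic:

#include <assert.h>

int main(void)
{
	enum {
		TIF_SYSCALL_TRACE,	/* bit 0, testable via blbs */
		TIF_NOTIFY_RESUME,
		TIF_SIGPENDING,
		TIF_NEED_RESCHED,
	};
	unsigned int work_mask = (1 << TIF_NOTIFY_RESUME)
				 | (1 << TIF_SIGPENDING)
				 | (1 << TIF_NEED_RESCHED);
	unsigned int allwork_mask = work_mask | (1 << TIF_SYSCALL_TRACE);
	unsigned int flags = 1 << TIF_SIGPENDING;

	assert(work_mask == 0x0e);	/* fits an 8-bit immediate */
	assert(allwork_mask == 0x0f);
	assert(flags & work_mask);	/* interrupt return: work to do */
	assert(!(flags & 1));		/* blbs falls through: no trace */
	return 0;
}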
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
index eeb5b85406dd..f215aa90809b 100644
--- a/include/asm-alpha/uaccess.h
+++ b/include/asm-alpha/uaccess.h
@@ -23,9 +23,9 @@
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-#define get_fs() (current->thread.fs)
+#define get_fs() (current_thread_info()->addr_limit)
#define get_ds() (KERNEL_DS)
-#define set_fs(x) (current->thread.fs = (x))
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define segment_eq(a,b) ((a).seg == (b).seg)
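get_fs()/set_fs() now live in thread_info->addr_limit, but the calling idiom is unchanged: save the old limit, widen to KERNEL_DS so kernel pointers pass the user-access checks, do the work, and always restore. A sketch with simplified stand-in types and segment values (Alpha's real USER_DS is a large negative offset, not 1):

typedef struct { unsigned long seg; } mm_segment_t;

static struct { mm_segment_t addr_limit; } demo_thread_info;

#define demo_current_thread_info()	(&demo_thread_info)
#define demo_get_fs()	(demo_current_thread_info()->addr_limit)
#define demo_set_fs(x)	(demo_current_thread_info()->addr_limit = (x))
#define demo_segment_eq(a, b)	((a).seg == (b).seg)

static const mm_segment_t DEMO_KERNEL_DS = { 0UL };
static const mm_segment_t DEMO_USER_DS = { 1UL };

int main(void)
{
	mm_segment_t old_fs;

	demo_set_fs(DEMO_USER_DS);	/* normal process state */
	old_fs = demo_get_fs();
	demo_set_fs(DEMO_KERNEL_DS);	/* widen: kernel pointers pass */
	/* ... perform the user-style accesses here ... */
	demo_set_fs(old_fs);		/* always restore */
	return demo_segment_eq(demo_get_fs(), DEMO_USER_DS) ? 0 : 1;
}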
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index b94be13a7254..28ced9ae8f48 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -506,6 +506,7 @@ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
#include <linux/string.h>
#include <linux/signal.h>
+#include <asm/ptrace.h>
extern void sys_idle(void);
static inline void idle(void)
@@ -576,6 +577,8 @@ static inline long sync(void)
return sys_sync();
}
+struct rusage;
+extern asmlinkage long sys_wait4(pid_t, unsigned int *, int, struct rusage *);
static inline pid_t waitpid(int pid, int * wait_stat, int flags)
{
return sys_wait4(pid, wait_stat, flags, NULL);