From a061909b6a4f0fe92fab736619fef99817681fec Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 9 Feb 2002 06:45:28 -0800 Subject: Sparc64 thread_info implementation. --- include/asm-i386/processor.h | 4 +- include/asm-sparc/checksum.h | 3 +- include/asm-sparc/siginfo.h | 2 +- include/asm-sparc/unistd.h | 5 +- include/asm-sparc64/a.out.h | 4 +- include/asm-sparc64/checksum.h | 7 +- include/asm-sparc64/current.h | 5 +- include/asm-sparc64/delay.h | 10 +- include/asm-sparc64/elf.h | 15 +-- include/asm-sparc64/fpumacro.h | 2 +- include/asm-sparc64/mmu_context.h | 4 +- include/asm-sparc64/page.h | 4 +- include/asm-sparc64/pgtable.h | 8 +- include/asm-sparc64/processor.h | 165 ++++++------------------------ include/asm-sparc64/ptrace.h | 4 +- include/asm-sparc64/sfp-machine.h | 4 +- include/asm-sparc64/smp.h | 2 +- include/asm-sparc64/system.h | 31 +++--- include/asm-sparc64/thread_info.h | 210 ++++++++++++++++++++++++++++++++++++++ include/asm-sparc64/ttable.h | 20 ++-- include/asm-sparc64/uaccess.h | 6 +- include/asm-sparc64/unistd.h | 5 +- 22 files changed, 315 insertions(+), 205 deletions(-) create mode 100644 include/asm-sparc64/thread_info.h (limited to 'include') diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index d830237efb4d..ec48a38e5e0f 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -439,9 +439,9 @@ extern void release_segments(struct mm_struct * mm); /* * Return saved PC of a blocked thread. */ -static inline unsigned long thread_saved_pc(struct thread_struct *t) +static inline unsigned long thread_saved_pc(struct task_struct *tsk) { - return ((unsigned long *)t->esp)[3]; + return ((unsigned long *)tsk->thread->esp)[3]; } unsigned long get_wchan(struct task_struct *p); diff --git a/include/asm-sparc/checksum.h b/include/asm-sparc/checksum.h index 2f92efb4440a..cfe871c624ca 100644 --- a/include/asm-sparc/checksum.h +++ b/include/asm-sparc/checksum.h @@ -1,4 +1,4 @@ -/* $Id: checksum.h,v 1.32 2001/10/30 04:32:24 davem Exp $ */ +/* $Id: checksum.h,v 1.33 2002/02/01 22:01:05 davem Exp $ */ #ifndef __SPARC_CHECKSUM_H #define __SPARC_CHECKSUM_H @@ -16,6 +16,7 @@ * RFC1071 Computing the Internet Checksum */ +#include #include #include diff --git a/include/asm-sparc/siginfo.h b/include/asm-sparc/siginfo.h index 816a08a67275..629641b64750 100644 --- a/include/asm-sparc/siginfo.h +++ b/include/asm-sparc/siginfo.h @@ -1,4 +1,4 @@ -/* $Id: siginfo.h,v 1.8 2000/05/27 00:49:37 davem Exp $ +/* $Id: siginfo.h,v 1.9 2002/02/08 03:57:18 davem Exp $ * siginfo.c: */ diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h index cbed9be281d4..5fc0f36c21be 100644 --- a/include/asm-sparc/unistd.h +++ b/include/asm-sparc/unistd.h @@ -1,4 +1,4 @@ -/* $Id: unistd.h,v 1.73 2002/01/31 03:30:13 davem Exp $ */ +/* $Id: unistd.h,v 1.74 2002/02/08 03:57:18 davem Exp $ */ #ifndef _SPARC_UNISTD_H #define _SPARC_UNISTD_H @@ -202,7 +202,7 @@ #define __NR_query_module 184 /* Linux Specific */ #define __NR_setpgid 185 /* Common */ #define __NR_fremovexattr 186 /* SunOS: pathconf */ -/* #define __NR_fpathconf 187 SunOS Specific */ +#define __NR_tkill 187 /* SunOS: fpathconf */ /* #define __NR_sysconf 188 SunOS Specific */ #define __NR_uname 189 /* Linux Specific */ #define __NR_init_module 190 /* Linux Specific */ @@ -271,7 +271,6 @@ #define __NR_fdatasync 253 #define __NR_nfsservctl 254 #define __NR_aplib 255 -#define __NR_tkill 257 #define _syscall0(type,name) \ type name(void) \ diff --git a/include/asm-sparc64/a.out.h 
b/include/asm-sparc64/a.out.h index 2b9ee5cddfac..02af289e3f46 100644 --- a/include/asm-sparc64/a.out.h +++ b/include/asm-sparc64/a.out.h @@ -1,4 +1,4 @@ -/* $Id: a.out.h,v 1.7 2001/04/24 01:09:12 davem Exp $ */ +/* $Id: a.out.h,v 1.8 2002/02/09 19:49:31 davem Exp $ */ #ifndef __SPARC64_A_OUT_H__ #define __SPARC64_A_OUT_H__ @@ -95,7 +95,7 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ #ifdef __KERNEL__ -#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L) +#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L) #endif diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h index 5fbda2574179..6128f20a8f1e 100644 --- a/include/asm-sparc64/checksum.h +++ b/include/asm-sparc64/checksum.h @@ -1,4 +1,4 @@ -/* $Id: checksum.h,v 1.17 2001/04/24 01:09:12 davem Exp $ */ +/* $Id: checksum.h,v 1.19 2002/02/09 19:49:31 davem Exp $ */ #ifndef __SPARC64_CHECKSUM_H #define __SPARC64_CHECKSUM_H @@ -16,7 +16,8 @@ * RFC1071 Computing the Internet Checksum */ -#include +#include +#include /* computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) @@ -44,7 +45,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum) { int ret; - unsigned char cur_ds = current->thread.current_ds.seg; + unsigned char cur_ds = get_thread_current_ds(); __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "i" (ASI_P)); ret = csum_partial_copy_sparc64(src, dst, len, sum); __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (cur_ds)); diff --git a/include/asm-sparc64/current.h b/include/asm-sparc64/current.h index 80652fb3582b..7683c6bdcd10 100644 --- a/include/asm-sparc64/current.h +++ b/include/asm-sparc64/current.h @@ -1,7 +1,8 @@ #ifndef _SPARC64_CURRENT_H #define _SPARC64_CURRENT_H -/* Sparc rules... */ -register struct task_struct *current asm("g6"); +#include + +#define current (current_thread_info()->task) #endif /* !(_SPARC64_CURRENT_H) */ diff --git a/include/asm-sparc64/delay.h b/include/asm-sparc64/delay.h index 61a833af213f..400c5eb3d8a3 100644 --- a/include/asm-sparc64/delay.h +++ b/include/asm-sparc64/delay.h @@ -1,4 +1,4 @@ -/* $Id: delay.h,v 1.12 2001/04/24 01:09:12 davem Exp $ +/* $Id: delay.h,v 1.13 2002/02/02 03:33:48 kanoj Exp $ * delay.h: Linux delay routines on the V9. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu). 
@@ -9,9 +9,13 @@ #include #include + +#ifndef __ASSEMBLY__ + #ifdef CONFIG_SMP -#include #include +#else +extern unsigned long loops_per_jiffy; #endif extern __inline__ void __delay(unsigned long loops) @@ -49,4 +53,6 @@ extern __inline__ void __udelay(unsigned long usecs, unsigned long lps) #define udelay(usecs) __udelay((usecs),__udelay_val) +#endif /* !__ASSEMBLY__ */ + #endif /* defined(__SPARC64_DELAY_H) */ diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h index dd584acbb34f..e6aca413a847 100644 --- a/include/asm-sparc64/elf.h +++ b/include/asm-sparc64/elf.h @@ -1,4 +1,4 @@ -/* $Id: elf.h,v 1.31 2002/01/08 16:00:20 davem Exp $ */ +/* $Id: elf.h,v 1.32 2002/02/09 19:49:31 davem Exp $ */ #ifndef __ASM_SPARC64_ELF_H #define __ASM_SPARC64_ELF_H @@ -69,16 +69,11 @@ typedef struct { #ifdef __KERNEL__ #define SET_PERSONALITY(ex, ibcs2) \ -do { unsigned char flags = current->thread.flags; \ - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ - flags |= SPARC_FLAG_32BIT; \ +do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + set_thread_flag(TIF_32BIT); \ else \ - flags &= ~SPARC_FLAG_32BIT; \ - if (flags != current->thread.flags) { \ - /* flush_thread will update pgd cache */\ - current->thread.flags = flags; \ - } \ - \ + clear_thread_flag(TIF_32BIT); \ + /* flush_thread will update pgd cache */ \ if (ibcs2) \ set_personality(PER_SVR4); \ else if (current->personality != PER_LINUX32) \ diff --git a/include/asm-sparc64/fpumacro.h b/include/asm-sparc64/fpumacro.h index 3fd5cab0cb5b..21d2740a810b 100644 --- a/include/asm-sparc64/fpumacro.h +++ b/include/asm-sparc64/fpumacro.h @@ -14,7 +14,7 @@ struct fpustate { u32 regs[64]; }; -#define FPUSTATE (struct fpustate *)(((unsigned long)current) + AOFF_task_fpregs) +#define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs) extern __inline__ unsigned long fprs_read(void) { diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 41e9ddf09b27..ec8de3308f19 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -1,4 +1,4 @@ -/* $Id: mmu_context.h,v 1.53 2002/01/30 01:40:00 davem Exp $ */ +/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */ #ifndef __SPARC64_MMU_CONTEXT_H #define __SPARC64_MMU_CONTEXT_H @@ -101,7 +101,7 @@ do { \ register unsigned long pgd_cache asm("o4"); \ paddr = __pa((__mm)->pgd); \ pgd_cache = 0UL; \ - if ((__tsk)->thread.flags & SPARC_FLAG_32BIT) \ + if ((__tsk)->thread_info->flags & _TIF_32BIT) \ pgd_cache = pgd_val((__mm)->pgd[0]) << 11UL; \ __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \ "mov %3, %%g4\n\t" \ diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index c5de1e232dcf..c2cdb4c399e9 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -1,4 +1,4 @@ -/* $Id: page.h,v 1.38 2001/11/30 01:04:10 davem Exp $ */ +/* $Id: page.h,v 1.39 2002/02/09 19:49:31 davem Exp $ */ #ifndef _SPARC64_PAGE_H #define _SPARC64_PAGE_H @@ -95,7 +95,7 @@ typedef unsigned long iopgprot_t; #endif /* (STRICT_MM_TYPECHECKS) */ -#define TASK_UNMAPPED_BASE ((current->thread.flags & SPARC_FLAG_32BIT) ? \ +#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? 
\ (0x0000000070000000UL) : (PAGE_OFFSET)) #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 95060ec47b63..0df00266817b 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -1,4 +1,4 @@ -/* $Id: pgtable.h,v 1.155 2001/12/21 04:56:17 davem Exp $ +/* $Id: pgtable.h,v 1.156 2002/02/09 19:49:31 davem Exp $ * pgtable.h: SpitFire page table operations. * * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu) @@ -76,7 +76,7 @@ * is different so we can optimize correctly for 32-bit tasks. */ #define REAL_PTRS_PER_PMD (1UL << PMD_BITS) -#define PTRS_PER_PMD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \ +#define PTRS_PER_PMD ((const int)(test_thread_flag(TIF_32BIT) ? \ (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : (REAL_PTRS_PER_PMD))) /* @@ -90,8 +90,8 @@ (PAGE_SHIFT-3) + PMD_BITS))) /* Kernel has a separate 44bit address space. */ -#define USER_PTRS_PER_PGD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \ - (1) : (PTRS_PER_PGD))) +#define USER_PTRS_PER_PGD ((const int)(test_thread_flag(TIF_32BIT)) ? \ + (1) : (PTRS_PER_PGD)) #define FIRST_USER_PGD_NR 0 #define pte_ERROR(e) __builtin_trap() diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index dc0e88282f50..1815502bf700 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -1,4 +1,4 @@ -/* $Id: processor.h,v 1.80 2001/11/17 00:10:48 davem Exp $ +/* $Id: processor.h,v 1.83 2002/02/10 06:04:33 davem Exp $ * include/asm-sparc64/processor.h * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) @@ -21,6 +21,7 @@ #include #include #include +#include /* Bus types */ #define EISA_bus 0 @@ -43,29 +44,13 @@ #ifndef __ASSEMBLY__ -#define NSWINS 7 - typedef struct { unsigned char seg; } mm_segment_t; /* The Sparc processor specific thread struct. */ +/* XXX This should die, everything can go into thread_info now. */ struct thread_struct { - /* D$ line 1 */ - unsigned long ksp __attribute__ ((aligned(16))); - unsigned char wstate, cwp, flags; - mm_segment_t current_ds; - unsigned char w_saved, fpdepth, fault_code, use_blkcommit; - unsigned long fault_address; - unsigned char fpsaved[7]; - unsigned char __pad2; - - /* D$ line 2, 3, 4 */ - struct pt_regs *kregs; - unsigned long *utraps; - unsigned long gsr[7]; - unsigned long xfsr[7]; - #ifdef CONFIG_DEBUG_SPINLOCK /* How many spinlocks held by this thread. * Used with spin lock debugging to catch tasks @@ -73,96 +58,29 @@ struct thread_struct { */ int smp_lock_count; unsigned int smp_lock_pc; +#else + int dummy; /* f'in gcc bug... 
*/ #endif - - struct reg_window reg_window[NSWINS]; - unsigned long rwbuf_stkptrs[NSWINS]; - - /* Performance counter state */ - u64 *user_cntd0, *user_cntd1; - u64 kernel_cntd0, kernel_cntd1; - u64 pcr_reg; }; #endif /* !(__ASSEMBLY__) */ -#define SPARC_FLAG_UNALIGNED 0x01 /* is allowed to do unaligned accesses */ -#define SPARC_FLAG_NEWSIGNALS 0x02 /* task wants new-style signals */ -#define SPARC_FLAG_32BIT 0x04 /* task is older 32-bit binary */ -#define SPARC_FLAG_NEWCHILD 0x08 /* task is just-spawned child process */ -#define SPARC_FLAG_PERFCTR 0x10 /* task has performance counters active */ - -#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */ -#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */ -#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */ -#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */ - #ifndef CONFIG_DEBUG_SPINLOCK -#define INIT_THREAD { \ -/* ksp, wstate, cwp, flags, current_ds, */ \ - 0, 0, 0, 0, KERNEL_DS, \ -/* w_saved, fpdepth, fault_code, use_blkcommit, */ \ - 0, 0, 0, 0, \ -/* fault_address, fpsaved, __pad2, kregs, */ \ - 0, { 0 }, 0, 0, \ -/* utraps, gsr, xfsr, */ \ - 0, { 0 }, { 0 }, \ -/* reg_window */ \ - { { { 0, }, { 0, } }, }, \ -/* rwbuf_stkptrs */ \ - { 0, 0, 0, 0, 0, 0, 0, }, \ -/* user_cntd0, user_cndd1, kernel_cntd0, kernel_cntd0, pcr_reg */ \ - 0, 0, 0, 0, 0, \ +#define INIT_THREAD { \ + 0, \ } #else /* CONFIG_DEBUG_SPINLOCK */ #define INIT_THREAD { \ -/* ksp, wstate, cwp, flags, current_ds, */ \ - 0, 0, 0, 0, KERNEL_DS, \ -/* w_saved, fpdepth, fault_code, use_blkcommit, */ \ - 0, 0, 0, 0, \ -/* fault_address, fpsaved, __pad2, kregs, */ \ - 0, { 0 }, 0, 0, \ -/* utraps, gsr, xfsr, smp_lock_count, smp_lock_pc, */\ - 0, { 0 }, { 0 }, 0, 0, \ -/* reg_window */ \ - { { { 0, }, { 0, } }, }, \ -/* rwbuf_stkptrs */ \ - { 0, 0, 0, 0, 0, 0, 0, }, \ -/* user_cntd0, user_cndd1, kernel_cntd0, kernel_cntd0, pcr_reg */ \ - 0, 0, 0, 0, 0, \ +/* smp_lock_count, smp_lock_pc, */ \ + 0, 0, \ } #endif /* !(CONFIG_DEBUG_SPINLOCK) */ -#ifdef __KERNEL__ -#if PAGE_SHIFT == 13 -#define THREAD_SIZE (2*PAGE_SIZE) -#define THREAD_SHIFT (PAGE_SHIFT + 1) -#else /* PAGE_SHIFT == 13 */ -#define THREAD_SIZE PAGE_SIZE -#define THREAD_SHIFT PAGE_SHIFT -#endif /* PAGE_SHIFT == 13 */ -#endif /* __KERNEL__ */ - #ifndef __ASSEMBLY__ /* Return saved PC of a blocked thread. 
*/ -extern __inline__ unsigned long thread_saved_pc(struct thread_struct *t) -{ - unsigned long ret = 0xdeadbeefUL; - - if (t->ksp) { - unsigned long *sp; - sp = (unsigned long *)(t->ksp + STACK_BIAS); - if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL && - sp[14]) { - unsigned long *fp; - fp = (unsigned long *)(sp[14] + STACK_BIAS); - if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL) - ret = fp[15]; - } - } - return ret; -} +struct task_struct; +extern unsigned long thread_saved_pc(struct task_struct *); /* On Uniprocessor, even in RMO processes see TSO semantics */ #ifdef CONFIG_SMP @@ -178,13 +96,13 @@ do { \ regs->tpc = ((pc & (~3)) - 4); \ regs->tnpc = regs->tpc + 4; \ regs->y = 0; \ - current->thread.wstate = (1 << 3); \ - if (current->thread.utraps) { \ - if (*(current->thread.utraps) < 2) \ - kfree (current->thread.utraps); \ + set_thread_wstate(1 << 3); \ + if (current_thread_info()->utraps) { \ + if (*(current_thread_info()->utraps) < 2) \ + kfree(current_thread_info()->utraps); \ else \ - (*(current->thread.utraps))--; \ - current->thread.utraps = NULL; \ + (*(current_thread_info()->utraps))--; \ + current_thread_info()->utraps = NULL; \ } \ __asm__ __volatile__( \ "stx %%g0, [%0 + %2 + 0x00]\n\t" \ @@ -207,7 +125,7 @@ do { \ : \ : "r" (regs), "r" (sp - REGWIN_SZ - STACK_BIAS), \ "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ -} while(0) +} while (0) #define start_thread32(regs, pc, sp) \ do { \ @@ -218,13 +136,13 @@ do { \ regs->tpc = ((pc & (~3)) - 4); \ regs->tnpc = regs->tpc + 4; \ regs->y = 0; \ - current->thread.wstate = (2 << 3); \ - if (current->thread.utraps) { \ - if (*(current->thread.utraps) < 2) \ - kfree (current->thread.utraps); \ + set_thread_wstate(2 << 3); \ + if (current_thread_info()->utraps) { \ + if (*(current_thread_info()->utraps) < 2) \ + kfree(current_thread_info()->utraps); \ else \ - (*(current->thread.utraps))--; \ - current->thread.utraps = NULL; \ + (*(current_thread_info()->utraps))--; \ + current_thread_info()->utraps = NULL; \ } \ __asm__ __volatile__( \ "stx %%g0, [%0 + %2 + 0x00]\n\t" \ @@ -247,10 +165,10 @@ do { \ : \ : "r" (regs), "r" (sp - REGWIN32_SZ), \ "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ -} while(0) +} while (0) /* Free all resources held by a thread. */ -#define release_thread(tsk) do { } while(0) +#define release_thread(tsk) do { } while (0) extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); @@ -261,19 +179,20 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); ({ extern void scheduling_functions_start_here(void); \ extern void scheduling_functions_end_here(void); \ unsigned long pc, fp, bias = 0; \ - unsigned long task_base = (unsigned long) (__TSK); \ + unsigned long thread_info_base; \ struct reg_window *rw; \ unsigned long __ret = 0; \ int count = 0; \ if (!(__TSK) || (__TSK) == current || \ (__TSK)->state == TASK_RUNNING) \ goto __out; \ + thread_info_base = (unsigned long) ((__TSK)->thread_info); \ bias = STACK_BIAS; \ - fp = (__TSK)->thread.ksp + bias; \ + fp = (__TSK)->thread_info->ksp + bias; \ do { \ /* Bogus frame pointer? 
*/ \ - if (fp < (task_base + sizeof(struct task_struct)) || \ - fp >= (task_base + THREAD_SIZE)) \ + if (fp < (thread_info_base + sizeof(struct thread_info)) || \ + fp >= (thread_info_base + THREAD_SIZE)) \ break; \ rw = (struct reg_window *) fp; \ pc = rw->ins[7]; \ @@ -287,26 +206,10 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); __out: __ret; \ }) -#define KSTK_EIP(tsk) ((tsk)->thread.kregs->tpc) -#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP]) - -#ifdef __KERNEL__ -/* Allocation and freeing of task_struct and kernel stack. */ -#if PAGE_SHIFT == 13 -#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1)) -#define free_task_struct(tsk) free_pages((unsigned long)(tsk),1) -#else /* PAGE_SHIFT == 13 */ -#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 0)) -#define free_task_struct(tsk) free_pages((unsigned long)(tsk),0) -#endif /* PAGE_SHIFT == 13 */ -#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count) - -#define init_task (init_task_union.task) -#define init_stack (init_task_union.stack) - -#define cpu_relax() do { } while (0) +#define KSTK_EIP(tsk) ((tsk)->thread_info->kregs->tpc) +#define KSTK_ESP(tsk) ((tsk)->thread_info->kregs->u_regs[UREG_FP]) -#endif /* __KERNEL__ */ +#define cpu_relax() udelay(1 + smp_processor_id()) #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h index 39bb8203ab7f..e464335fcd44 100644 --- a/include/asm-sparc64/ptrace.h +++ b/include/asm-sparc64/ptrace.h @@ -1,4 +1,4 @@ -/* $Id: ptrace.h,v 1.13 1997/09/17 17:27:51 davem Exp $ */ +/* $Id: ptrace.h,v 1.14 2002/02/09 19:49:32 davem Exp $ */ #ifndef _SPARC64_PTRACE_H #define _SPARC64_PTRACE_H @@ -110,8 +110,6 @@ extern void show_regs(struct pt_regs *); #define TRACEREG32_SZ 0x50 #define STACKFRAME32_SZ 0x60 #define REGWIN32_SZ 0x40 - -#include #endif #ifdef __KERNEL__ diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h index 49c77bd81b36..5015bb8d6c32 100644 --- a/include/asm-sparc64/sfp-machine.h +++ b/include/asm-sparc64/sfp-machine.h @@ -74,7 +74,7 @@ /* Obtain the current rounding mode. */ #ifndef FP_ROUNDMODE -#define FP_ROUNDMODE ((current->thread.xfsr[0] >> 30) & 0x3) +#define FP_ROUNDMODE ((current_thread_info()->xfsr[0] >> 30) & 0x3) #endif /* Exception flags. 
*/ @@ -86,6 +86,6 @@ #define FP_HANDLE_EXCEPTIONS return _fex -#define FP_INHIBIT_RESULTS ((current->thread.xfsr[0] >> 23) & _fex) +#define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex) #endif diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index d73d3e657cf5..ae5307391893 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -103,7 +103,7 @@ extern __inline__ int hard_smp_processor_id(void) } } -#define smp_processor_id() (current->cpu) +#define smp_processor_id() (current_thread_info()->cpu) /* This needn't do anything as we do not sleep the cpu * inside of the idler task, so an interrupt is not needed diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index d5b497ddf5e0..58969561c107 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -1,12 +1,12 @@ -/* $Id: system.h,v 1.68 2001/11/18 00:12:56 davem Exp $ */ +/* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */ #ifndef __SPARC64_SYSTEM_H #define __SPARC64_SYSTEM_H #include #include #include -#include #include +#include #ifndef __ASSEMBLY__ /* @@ -174,19 +174,19 @@ if ((PREV)->thread.smp_lock_count) { \ */ #define switch_to(prev, next, last) \ do { CHECK_LOCKS(prev); \ - if (current->thread.flags & SPARC_FLAG_PERFCTR) { \ + if (test_thread_flag(TIF_PERFCTR)) { \ unsigned long __tmp; \ read_pcr(__tmp); \ - current->thread.pcr_reg = __tmp; \ + current_thread_info()->pcr_reg = __tmp; \ read_pic(__tmp); \ - current->thread.kernel_cntd0 += (unsigned int)(__tmp); \ - current->thread.kernel_cntd1 += ((__tmp) >> 32); \ + current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp); \ + current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \ } \ save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ - : : "r" (next->thread.current_ds.seg)); \ + : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS])); \ __asm__ __volatile__( \ "mov %%g6, %%g5\n\t" \ "wrpr %%g0, 0x95, %%pstate\n\t" \ @@ -202,7 +202,7 @@ do { CHECK_LOCKS(prev); \ "wrpr %%g1, %%cwp\n\t" \ "ldx [%%g6 + %3], %%o6\n\t" \ "ldub [%%g6 + %2], %%o5\n\t" \ - "ldub [%%g6 + %4], %%o7\n\t" \ + "ldx [%%g6 + %4], %%o7\n\t" \ "mov %%g6, %%l2\n\t" \ "wrpr %%o5, 0x0, %%wstate\n\t" \ "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ @@ -212,21 +212,18 @@ do { CHECK_LOCKS(prev); \ "wrpr %%g0, 0x96, %%pstate\n\t" \ "andcc %%o7, %6, %%g0\n\t" \ "bne,pn %%icc, ret_from_syscall\n\t" \ - " mov %%g5, %0\n\t" \ + " ldx [%%g5 + %7], %0\n\t" \ : "=&r" (last) \ - : "r" (next), \ - "i" ((const unsigned long)(&((struct task_struct *)0)->thread.wstate)),\ - "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)), \ - "i" ((const unsigned long)(&((struct task_struct *)0)->thread.flags)),\ - "i" ((const unsigned long)(&((struct task_struct *)0)->thread.cwp)), \ - "i" (SPARC_FLAG_NEWCHILD) \ + : "r" (next->thread_info), \ + "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \ + "i" (_TIF_NEWCHILD), "i" (TI_TASK) \ : "cc", "g1", "g2", "g3", "g5", "g7", \ "l2", "l3", "l4", "l5", "l6", "l7", \ "i0", "i1", "i2", "i3", "i4", "i5", \ "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ /* If you fuck with this, update ret_from_syscall code too. 
*/ \ - if (current->thread.flags & SPARC_FLAG_PERFCTR) { \ - write_pcr(current->thread.pcr_reg); \ + if (test_thread_flag(TIF_PERFCTR)) { \ + write_pcr(current_thread_info()->pcr_reg); \ reset_pic(); \ } \ } while(0) diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h new file mode 100644 index 000000000000..6fea1ba05405 --- /dev/null +++ b/include/asm-sparc64/thread_info.h @@ -0,0 +1,210 @@ +/* $Id: thread_info.h,v 1.1 2002/02/10 00:00:58 davem Exp $ + * thread_info.h: sparc64 low-level thread information + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#define NSWINS 7 + +#define TI_FLAG_BYTE_FAULT_CODE 0 +#define TI_FLAG_FAULT_CODE_SHIFT 56 +#define TI_FLAG_BYTE_WSTATE 1 +#define TI_FLAG_WSTATE_SHIFT 48 +#define TI_FLAG_BYTE_CWP 2 +#define TI_FLAG_CWP_SHIFT 40 +#define TI_FLAG_BYTE_CURRENT_DS 3 +#define TI_FLAG_CURRENT_DS_SHIFT 32 +#define TI_FLAG_BYTE_FPDEPTH 4 +#define TI_FLAG_FPDEPTH_SHIFT 24 +#define TI_FLAG_BYTE_WSAVED 5 +#define TI_FLAG_WSAVED_SHIFT 16 + +#ifndef __ASSEMBLY__ + +#include + +struct task_struct; +struct exec_domain; + +struct thread_info { + /* D$ line 1 */ + struct task_struct *task; + unsigned long flags; + __u8 cpu; + __u8 fpsaved[7]; + unsigned long ksp; + + /* D$ line 2 */ + unsigned long fault_address; + struct pt_regs *kregs; + unsigned long *utraps; + struct exec_domain *exec_domain; + + struct reg_window reg_window[NSWINS]; + unsigned long rwbuf_stkptrs[NSWINS]; + + unsigned long gsr[7]; + unsigned long xfsr[7]; + + __u64 *user_cntd0, *user_cntd1; + __u64 kernel_cntd0, kernel_cntd1; + __u64 pcr_reg; + + __u64 cee_stuff; + + unsigned long fpregs[0] __attribute__ ((aligned(64))); +}; + +#endif /* !(__ASSEMBLY__) */ + +/* offsets into the thread_info struct for assembly code access */ +#define TI_TASK 0x00000000 +#define TI_FLAGS 0x00000008 +#define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE) +#define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE) +#define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP) +#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS) +#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH) +#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED) +#define TI_CPU 0x00000010 +#define TI_FPSAVED 0x00000011 +#define TI_KSP 0x00000018 +#define TI_FAULT_ADDR 0x00000020 +#define TI_KREGS 0x00000028 +#define TI_UTRAPS 0x00000030 +#define TI_EXEC_DOMAIN 0x00000038 +#define TI_REG_WINDOW 0x00000040 +#define TI_RWIN_SPTRS 0x000003c0 +#define TI_GSR 0x000003f8 +#define TI_XFSR 0x00000430 +#define TI_USER_CNTD0 0x00000468 +#define TI_USER_CNTD1 0x00000470 +#define TI_KERN_CNTD0 0x00000478 +#define TI_KERN_CNTD1 0x00000480 +#define TI_PCR 0x00000488 +#define TI_CEE_STUFF 0x00000490 +#define TI_FPREGS 0x000004c0 + +/* We embed this in the uppermost byte of thread_info->flags */ +#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */ +#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */ +#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */ +#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */ + +#if PAGE_SHIFT == 13 +#define THREAD_SIZE (2*PAGE_SIZE) +#define THREAD_SHIFT (PAGE_SHIFT + 1) +#else /* PAGE_SHIFT == 13 */ +#define THREAD_SIZE PAGE_SIZE +#define THREAD_SHIFT PAGE_SHIFT +#endif /* PAGE_SHIFT == 13 */ + +/* + * macros/functions for gaining access to the thread information structure + */ +#ifndef __ASSEMBLY__ + +#define INIT_THREAD_INFO(tsk) \ +{ \ + task: &tsk, \ + flags: ((unsigned 
long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \ + exec_domain: &default_exec_domain, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* how to get the thread information struct from C */ +register struct thread_info *current_thread_info_reg asm("g6"); +#define current_thread_info() (current_thread_info_reg) + +/* thread information allocation */ +#if PAGE_SHIFT == 13 +#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL, 1)) +#define free_thread_info(ti) free_pages((unsigned long)(ti),1) +#else /* PAGE_SHIFT == 13 */ +#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL, 0)) +#define free_thread_info(ti) free_pages((unsigned long)(ti),0) +#endif /* PAGE_SHIFT == 13 */ + +#define __thread_flag_byte_ptr(ti) \ + ((unsigned char *)(&((ti)->flags))) +#define __cur_thread_flag_byte_ptr __thread_flag_byte_ptr(current_thread_info()) + +#define get_thread_fault_code() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE]) +#define set_thread_fault_code(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE] = (val)) +#define get_thread_wstate() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE]) +#define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val)) +#define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP]) +#define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val)) +#define get_thread_current_ds() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS]) +#define set_thread_current_ds(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val)) +#define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH]) +#define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val)) +#define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED]) +#define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val)) + +#endif /* !(__ASSEMBLY__) */ + +/* + * Thread information flags, only 16 bits are available as we encode + * other values into the upper 6 bytes. + * + * On trap return we need to test several values: + * + * user: need_resched, notify_resume, sigpending, wsaved, perfctr + * kernel: fpdepth + * + * So to check for work in the kernel case we simply load the fpdepth + * byte out of the flags and test it. 
For the user case we encode the + * lower 3 bytes of flags as follows: + * ---------------------------------------- + * | wsaved | flags byte 1 | flags byte 2 | + * ---------------------------------------- + * This optimizes the user test into: + * ldx [%g6 + TI_FLAGS], REG1 + * sethi %hi(_TIF_USER_WORK_MASK), REG2 + * or REG2, %lo(_TIF_USER_WORK_MASK), REG2 + * andcc REG1, REG2, %g0 + * be,pt no_work_to_do + * nop + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_PERFCTR 4 /* performance counters active */ +#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ +#define TIF_NEWSIGNALS 6 /* wants new-style signals */ +#define TIF_32BIT 7 /* 32-bit binary */ +#define TIF_NEWCHILD 8 /* just-spawned child process */ + +/* XXX Make this one FAULT_CODE_BLKCOMMIT XXX */ +#define TIF_BLKCOMMIT 9 /* use ASI_BLK_COMMIT_* in copy_user_page */ + +#define TIF_POLLING_NRFLAG 10 + +#define _TIF_SYSCALL_TRACE (1< -#include +#include #include #define BOOT_KERNEL b sparc64_boot; nop; nop; nop; nop; nop; nop; nop; @@ -104,14 +104,14 @@ mov num, %g1; \ nop;nop;nop; -#define TRAP_UTRAP(handler,lvl) \ - ldx [%g6 + AOFF_task_thread + AOFF_thread_utraps], %g1; \ - sethi %hi(109f), %g7; \ - brz,pn %g1, utrap; \ - or %g7, %lo(109f), %g7; \ - ba,pt %xcc, utrap; \ -109: ldx [%g1 + handler*8], %g1; \ - ba,pt %xcc, utrap_ill; \ +#define TRAP_UTRAP(handler,lvl) \ + ldx [%g6 + TI_UTRAPS], %g1; \ + sethi %hi(109f), %g7; \ + brz,pn %g1, utrap; \ + or %g7, %lo(109f), %g7; \ + ba,pt %xcc, utrap; \ +109: ldx [%g1 + handler*8], %g1; \ + ba,pt %xcc, utrap_ill; \ mov lvl, %o1; #ifdef CONFIG_SUNOS_EMUL diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index a5590b897c8c..a52de7d2a22a 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h @@ -1,4 +1,4 @@ -/* $Id: uaccess.h,v 1.34 2001/09/27 04:36:24 kanoj Exp $ */ +/* $Id: uaccess.h,v 1.35 2002/02/09 19:49:31 davem Exp $ */ #ifndef _ASM_UACCESS_H #define _ASM_UACCESS_H @@ -36,14 +36,14 @@ #define VERIFY_READ 0 #define VERIFY_WRITE 1 -#define get_fs() (current->thread.current_ds) +#define get_fs() ((mm_segment_t) { get_thread_current_ds() }) #define get_ds() (KERNEL_DS) #define segment_eq(a,b) ((a).seg == (b).seg) #define set_fs(val) \ do { \ - current->thread.current_ds = (val); \ + set_thread_current_ds((val).seg); \ __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ } while(0) diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index 1b325812c8c7..c5a635cce3d1 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -1,4 +1,4 @@ -/* $Id: unistd.h,v 1.49 2001/10/18 08:27:05 davem Exp $ */ +/* $Id: unistd.h,v 1.50 2002/02/08 03:57:18 davem Exp $ */ #ifndef _SPARC64_UNISTD_H #define _SPARC64_UNISTD_H @@ -202,7 +202,7 @@ #define __NR_query_module 184 /* Linux Specific */ #define __NR_setpgid 185 /* Common */ /* #define __NR_pathconf 186 SunOS Specific */ -/* #define __NR_fpathconf 187 SunOS Specific */ +#define __NR_tkill 187 /* SunOS: fpathconf */ /* #define __NR_sysconf 188 SunOS Specific */ #define __NR_uname 189 /* Linux Specific */ #define __NR_init_module 190 /* Linux Specific */ @@ -273,7 +273,6 @@ #define __NR_fdatasync 253 #define __NR_nfsservctl 254 #define __NR_aplib 255 -#define __NR_tkill 256 #define _syscall0(type,name) \ type name(void) \ -- cgit v1.2.3 From 
3acfe790195079e659ace46983ae0095398fd0ef Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 9 Feb 2002 08:03:53 -0800 Subject: Sparc64 preemption support. --- arch/sparc64/kernel/entry.S | 12 ++++++------ arch/sparc64/kernel/etrap.S | 14 ++++++++++++-- arch/sparc64/kernel/process.c | 12 ++++++++++++ arch/sparc64/kernel/rtrap.S | 23 +++++++++++++++++++++-- arch/sparc64/kernel/smp.c | 4 ++-- arch/sparc64/kernel/traps.c | 1 + arch/sparc64/lib/VISsave.S | 4 ++++ arch/sparc64/mm/ultra.S | 12 ++++++------ include/asm-sparc64/pgalloc.h | 28 +++++++++++++++++++++++++++- include/asm-sparc64/smplock.h | 8 ++++++++ include/asm-sparc64/softirq.h | 5 +++-- include/asm-sparc64/spinlock.h | 28 ++++++++++++++-------------- include/asm-sparc64/thread_info.h | 29 ++++++++++++++++------------- include/asm-sparc64/ttable.h | 4 ++-- 14 files changed, 134 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 2a73c391bde2..d687ae20ffcb 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -713,8 +713,8 @@ floppy_dosoftint: call sparc_floppy_irq add %sp, STACK_BIAS + REGWIN_SZ, %o2 - b,pt %xcc, rtrap - clr %l6 + b,pt %xcc, rtrap_irq + nop #endif /* CONFIG_BLK_DEV_FD */ @@ -883,7 +883,7 @@ cee_trap: mov %l5, %o1 call cee_log add %sp, STACK_BIAS + REGWIN_SZ, %o2 - ba,a,pt %xcc, rtrap_clr_l6 + ba,a,pt %xcc, rtrap_irq /* Capture I/D/E-cache state into per-cpu error scoreboard. * @@ -1109,7 +1109,7 @@ cheetah_fast_ecc: mov %l5, %o2 call cheetah_fecc_handler add %sp, STACK_BIAS + REGWIN_SZ, %o0 - ba,a,pt %xcc, rtrap_clr_l6 + ba,a,pt %xcc, rtrap_irq /* Our caller has disabled I-cache and performed membar Sync. */ .globl cheetah_cee @@ -1135,7 +1135,7 @@ cheetah_cee: mov %l5, %o2 call cheetah_cee_handler add %sp, STACK_BIAS + REGWIN_SZ, %o0 - ba,a,pt %xcc, rtrap_clr_l6 + ba,a,pt %xcc, rtrap_irq /* Our caller has disabled I-cache+D-cache and performed membar Sync. */ .globl cheetah_deferred_trap @@ -1161,7 +1161,7 @@ cheetah_deferred_trap: mov %l5, %o2 call cheetah_deferred_handler add %sp, STACK_BIAS + REGWIN_SZ, %o0 - ba,a,pt %xcc, rtrap_clr_l6 + ba,a,pt %xcc, rtrap_irq .globl __do_privact __do_privact: diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index eabe4aa117c9..b576ff91b86c 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -5,6 +5,8 @@ * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz) */ +#include + #include #include #include @@ -25,9 +27,17 @@ .text .align 64 .globl etrap, etrap_irq, etraptl1 - +#ifdef CONFIG_PREEMPT +etrap_irq: ldsw [%g6 + TI_PRE_COUNT], %g1 + add %g1, 1, %g1 + ba,pt etrap_irq2 + stw %g1, [%g6 + TI_PRE_COUNT] +#endif etrap: rdpr %pil, %g2 ! Single Group -etrap_irq: rdpr %tstate, %g1 ! Single Group +#ifndef CONFIG_PREEMPT +etrap_irq +#endif +etrap_irq2: rdpr %tstate, %g1 ! Single Group sllx %g2, 20, %g3 ! IEU0 Group andcc %g1, TSTATE_PRIV, %g0 ! IEU1 or %g1, %g3, %g1 ! 
IEU0 Group diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 58a36fa35b8c..143bbcff8765 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -100,6 +100,18 @@ int cpu_idle(void) #endif +#ifdef CONFIG_PREEMPT +void kpreempt_maybe(void) +{ + int cpu = smp_processor_id(); + + if (local_irq_count(cpu) == 0 && + local_bh_count(cpu) == 0) + preempt_schedule(); + current_thread_info()->preempt_count--; +} +#endif + extern char reboot_command []; #ifdef CONFIG_SUN_CONSOLE diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index e387311ae789..8a68dd08b8cf 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -5,6 +5,8 @@ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ +#include + #include #include #include @@ -148,7 +150,13 @@ __handle_signal: andn %l1, %l4, %l1 .align 64 - .globl rtrap_clr_l6, rtrap, irqsz_patchme + .globl rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme +rtrap_irq: +#ifdef CONFIG_PREEMPT + ldsw [%g6 + TI_PRE_COUNT], %l0 + sub %l0, 1, %l0 + stw %l0, [%g6 + TI_PRE_COUNT] +#endif rtrap_clr_l6: clr %l6 rtrap: ldub [%g6 + TI_CPU], %l0 sethi %hi(irq_stat), %l2 ! &softirq_active @@ -261,7 +269,18 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 kern_rtt: restore retry -to_kernel: ldub [%g6 + TI_FPDEPTH], %l5 +to_kernel: +#ifdef CONFIG_PREEMPT + ldsw [%g6 + TI_PRE_COUNT], %l5 + brnz %l5, kern_fpucheck + add %l5, 1, %l6 + stw %l6, [%g6 + TI_PRE_COUNT] + call kpreempt_maybe + wrpr %g0, RTRAP_PSTATE, %pstate + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate + stw %l5, [%g6 + TI_PRE_COUNT] +#endif +kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 brz,pt %l5, rt_continue srl %l5, 1, %o0 add %g6, TI_FPSAVED, %l6 diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 218b9d31cdbb..e0b6632b0403 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -902,7 +902,7 @@ void smp_migrate_task(int cpu, task_t *p) if (smp_processors_ready && (cpu_present_map & mask) != 0) { u64 data0 = (((u64)&xcall_migrate_task) & 0xffffffff); - spin_lock(&migration_lock); + _raw_spin_lock(&migration_lock); new_task = p; if (tlb_type == spitfire) @@ -923,7 +923,7 @@ asmlinkage void smp_task_migration_interrupt(int irq, struct pt_regs *regs) clear_softint(1 << irq); p = new_task; - spin_unlock(&migration_lock); + _raw_spin_unlock(&migration_lock); sched_task_migrated(p); } diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 8bed3934b035..2268e15394e0 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -1700,6 +1700,7 @@ void trap_init(void) TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || TI_PCR != offsetof(struct thread_info, pcr_reg) || TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) || + TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_FPREGS != offsetof(struct thread_info, fpregs) || (TI_FPREGS & (64 - 1))) thread_info_offsets_are_bolixed_dave(); diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S index 11d041bb5f44..65e328d600a8 100644 --- a/arch/sparc64/lib/VISsave.S +++ b/arch/sparc64/lib/VISsave.S @@ -18,6 +18,10 @@ /* On entry: %o5=current FPRS value, %g7 is callers address */ /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ + /* Nothing special need be done here to handle pre-emption, this + * FPU save/restore mechanism is already preemption safe. 
+ */ + .align 32 VISenter: ldub [%g6 + TI_FPDEPTH], %g1 diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 5cc7a6237780..514326d9099d 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -493,8 +493,8 @@ xcall_report_regs: 109: or %g7, %lo(109b), %g7 call __show_regs add %sp, STACK_BIAS + REGWIN_SZ, %o0 - b,pt %xcc, rtrap - clr %l6 + b,pt %xcc, rtrap_irq + nop .align 32 .globl xcall_flush_dcache_page_cheetah @@ -554,8 +554,8 @@ xcall_capture: 109: or %g7, %lo(109b), %g7 call smp_penguin_jailcell nop - b,pt %xcc, rtrap - clr %l6 + b,pt %xcc, rtrap_irq + nop .globl xcall_promstop xcall_promstop: @@ -681,8 +681,8 @@ xcall_call_function: 109: or %g7, %lo(109b), %g7 call smp_call_function_client nop - b,pt %xcc, rtrap - clr %l6 + b,pt %xcc, rtrap_irq + nop .globl xcall_migrate_task xcall_migrate_task: diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index ed5d98ec9bbf..e244d899d64e 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h @@ -158,6 +158,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd) { struct page *page = virt_to_page(pgd); + preempt_disable(); if (!page->pprev_hash) { (unsigned long *)page->next_hash = pgd_quicklist; pgd_quicklist = (unsigned long *)page; @@ -165,12 +166,14 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd) (unsigned long)page->pprev_hash |= (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1); pgd_cache_size++; + preempt_enable(); } extern __inline__ pgd_t *get_pgd_fast(void) { struct page *ret; + preempt_disable(); if ((ret = (struct page *)pgd_quicklist) != NULL) { unsigned long mask = (unsigned long)ret->pprev_hash; unsigned long off = 0; @@ -186,16 +189,22 @@ extern __inline__ pgd_t *get_pgd_fast(void) pgd_quicklist = (unsigned long *)ret->next_hash; ret = (struct page *)(__page_address(ret) + off); pgd_cache_size--; + preempt_enable(); } else { - struct page *page = alloc_page(GFP_KERNEL); + struct page *page; + preempt_enable(); + page = alloc_page(GFP_KERNEL); if (page) { ret = (struct page *)page_address(page); clear_page(ret); (unsigned long)page->pprev_hash = 2; + + preempt_disable(); (unsigned long *)page->next_hash = pgd_quicklist; pgd_quicklist = (unsigned long *)page; pgd_cache_size++; + preempt_enable(); } } return (pgd_t *)ret; @@ -205,20 +214,25 @@ extern __inline__ pgd_t *get_pgd_fast(void) extern __inline__ void free_pgd_fast(pgd_t *pgd) { + preempt_disable(); *(unsigned long *)pgd = (unsigned long) pgd_quicklist; pgd_quicklist = (unsigned long *) pgd; pgtable_cache_size++; + preempt_enable(); } extern __inline__ pgd_t *get_pgd_fast(void) { unsigned long *ret; + preempt_disable(); if((ret = pgd_quicklist) != NULL) { pgd_quicklist = (unsigned long *)(*ret); ret[0] = 0; pgtable_cache_size--; + preempt_enable(); } else { + preempt_enable(); ret = (unsigned long *) __get_free_page(GFP_KERNEL); if(ret) memset(ret, 0, PAGE_SIZE); @@ -258,20 +272,27 @@ extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long if (pte_quicklist[color] == NULL) color = 1; + + preempt_disable(); if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { pte_quicklist[color] = (unsigned long *)(*ret); ret[0] = 0; pgtable_cache_size--; } + preempt_enable(); + return (pmd_t *)ret; } extern __inline__ void free_pmd_fast(pmd_t *pmd) { unsigned long color = DCACHE_COLOR((unsigned long)pmd); + + preempt_disable(); *(unsigned long *)pmd = (unsigned long) pte_quicklist[color]; pte_quicklist[color] = (unsigned long *) pmd; pgtable_cache_size++; + preempt_enable(); } 
extern __inline__ void free_pmd_slow(pmd_t *pmd) @@ -288,20 +309,25 @@ extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long unsigned long color = VPTE_COLOR(address); unsigned long *ret; + preempt_disable(); if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { pte_quicklist[color] = (unsigned long *)(*ret); ret[0] = 0; pgtable_cache_size--; } + preempt_enable(); return (pte_t *)ret; } extern __inline__ void free_pte_fast(pte_t *pte) { unsigned long color = DCACHE_COLOR((unsigned long)pte); + + preempt_disable(); *(unsigned long *)pte = (unsigned long) pte_quicklist[color]; pte_quicklist[color] = (unsigned long *) pte; pgtable_cache_size++; + preempt_enable(); } extern __inline__ void free_pte_slow(pte_t *pte) diff --git a/include/asm-sparc64/smplock.h b/include/asm-sparc64/smplock.h index dd2cc2b54267..d9c87542f5c6 100644 --- a/include/asm-sparc64/smplock.h +++ b/include/asm-sparc64/smplock.h @@ -9,9 +9,17 @@ extern spinlock_t kernel_flag; +#ifdef CONFIG_SMP #define kernel_locked() \ (spin_is_locked(&kernel_flag) &&\ (current->lock_depth >= 0)) +#else +#ifdef CONFIG_PREEMPT +#define kernel_locked() preempt_get_count() +#else +#define kernel_locked() 1 +#endif +#endif /* * Release global kernel lock and global interrupt lock diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h index e97b25e0ccbc..0239b7275cd5 100644 --- a/include/asm-sparc64/softirq.h +++ b/include/asm-sparc64/softirq.h @@ -10,14 +10,15 @@ #include #include /* for membar() */ -#define local_bh_disable() (local_bh_count(smp_processor_id())++) -#define __local_bh_enable() (local_bh_count(smp_processor_id())--) +#define local_bh_disable() do { barrier(); preempt_disable(); local_bh_count(smp_processor_id())++; } while (0) +#define __local_bh_enable() do { local_bh_count(smp_processor_id())--; preempt_enable(); barrier(); } while (0) #define local_bh_enable() \ do { if (!--local_bh_count(smp_processor_id()) && \ softirq_pending(smp_processor_id())) { \ do_softirq(); \ __sti(); \ } \ + preempt_enable(); \ } while (0) #define in_softirq() (local_bh_count(smp_processor_id()) != 0) diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index ce905b46162c..55fef65608c3 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h @@ -40,7 +40,7 @@ typedef unsigned char spinlock_t; do { membar("#LoadLoad"); \ } while(*((volatile unsigned char *)lock)) -extern __inline__ void spin_lock(spinlock_t *lock) +extern __inline__ void _raw_spin_lock(spinlock_t *lock) { __asm__ __volatile__( "1: ldstub [%0], %%g7\n" @@ -57,7 +57,7 @@ extern __inline__ void spin_lock(spinlock_t *lock) : "g7", "memory"); } -extern __inline__ int spin_trylock(spinlock_t *lock) +extern __inline__ int _raw_spin_trylock(spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0\n\t" @@ -68,7 +68,7 @@ extern __inline__ int spin_trylock(spinlock_t *lock) return (result == 0); } -extern __inline__ void spin_unlock(spinlock_t *lock) +extern __inline__ void _raw_spin_unlock(spinlock_t *lock) { __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t" "stb %%g0, [%0]" @@ -99,9 +99,9 @@ extern void _do_spin_lock (spinlock_t *lock, char *str); extern void _do_spin_unlock (spinlock_t *lock); extern int _spin_trylock (spinlock_t *lock); -#define spin_trylock(lp) _spin_trylock(lp) -#define spin_lock(lock) _do_spin_lock(lock, "spin_lock") -#define spin_unlock(lock) _do_spin_unlock(lock) +#define _raw_spin_trylock(lp) _spin_trylock(lp) +#define 
_raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") +#define _raw_spin_unlock(lock) _do_spin_unlock(lock) #endif /* CONFIG_DEBUG_SPINLOCK */ @@ -118,10 +118,10 @@ extern void __read_unlock(rwlock_t *); extern void __write_lock(rwlock_t *); extern void __write_unlock(rwlock_t *); -#define read_lock(p) __read_lock(p) -#define read_unlock(p) __read_unlock(p) -#define write_lock(p) __write_lock(p) -#define write_unlock(p) __write_unlock(p) +#define _raw_read_lock(p) __read_lock(p) +#define _raw_read_unlock(p) __read_unlock(p) +#define _raw_write_lock(p) __write_lock(p) +#define _raw_write_unlock(p) __write_unlock(p) #else /* !(CONFIG_DEBUG_SPINLOCK) */ @@ -138,28 +138,28 @@ extern void _do_read_unlock(rwlock_t *rw, char *str); extern void _do_write_lock(rwlock_t *rw, char *str); extern void _do_write_unlock(rwlock_t *rw); -#define read_lock(lock) \ +#define _raw_read_lock(lock) \ do { unsigned long flags; \ __save_and_cli(flags); \ _do_read_lock(lock, "read_lock"); \ __restore_flags(flags); \ } while(0) -#define read_unlock(lock) \ +#define _raw_read_unlock(lock) \ do { unsigned long flags; \ __save_and_cli(flags); \ _do_read_unlock(lock, "read_unlock"); \ __restore_flags(flags); \ } while(0) -#define write_lock(lock) \ +#define _raw_write_lock(lock) \ do { unsigned long flags; \ __save_and_cli(flags); \ _do_write_lock(lock, "write_lock"); \ __restore_flags(flags); \ } while(0) -#define write_unlock(lock) \ +#define _raw_write_unlock(lock) \ do { unsigned long flags; \ __save_and_cli(flags); \ _do_write_unlock(lock); \ diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index 6fea1ba05405..a1e23aab871e 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -42,8 +42,10 @@ struct thread_info { /* D$ line 2 */ unsigned long fault_address; struct pt_regs *kregs; - unsigned long *utraps; struct exec_domain *exec_domain; + int preempt_count; + + unsigned long *utraps; struct reg_window reg_window[NSWINS]; unsigned long rwbuf_stkptrs[NSWINS]; @@ -76,18 +78,19 @@ struct thread_info { #define TI_KSP 0x00000018 #define TI_FAULT_ADDR 0x00000020 #define TI_KREGS 0x00000028 -#define TI_UTRAPS 0x00000030 -#define TI_EXEC_DOMAIN 0x00000038 -#define TI_REG_WINDOW 0x00000040 -#define TI_RWIN_SPTRS 0x000003c0 -#define TI_GSR 0x000003f8 -#define TI_XFSR 0x00000430 -#define TI_USER_CNTD0 0x00000468 -#define TI_USER_CNTD1 0x00000470 -#define TI_KERN_CNTD0 0x00000478 -#define TI_KERN_CNTD1 0x00000480 -#define TI_PCR 0x00000488 -#define TI_CEE_STUFF 0x00000490 +#define TI_EXEC_DOMAIN 0x00000030 +#define TI_PRE_COUNT 0x00000038 +#define TI_UTRAPS 0x00000040 +#define TI_REG_WINDOW 0x00000048 +#define TI_RWIN_SPTRS 0x000003c8 +#define TI_GSR 0x00000400 +#define TI_XFSR 0x00000438 +#define TI_USER_CNTD0 0x00000470 +#define TI_USER_CNTD1 0x00000478 +#define TI_KERN_CNTD0 0x00000480 +#define TI_KERN_CNTD1 0x00000488 +#define TI_PCR 0x00000490 +#define TI_CEE_STUFF 0x00000498 #define TI_FPREGS 0x000004c0 /* We embed this in the uppermost byte of thread_info->flags */ diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 6ae64b80f557..0e08a2571c3b 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -140,7 +140,7 @@ mov level, %o0; \ call routine; \ add %sp, STACK_BIAS + REGWIN_SZ, %o1; \ - ba,a,pt %xcc, rtrap_clr_l6; + ba,a,pt %xcc, rtrap_irq; #define TICK_SMP_IRQ \ rdpr %pil, %g2; \ @@ -150,7 +150,7 @@ 109: or %g7, %lo(109b), %g7; \ call smp_percpu_timer_interrupt; \ add %sp, STACK_BIAS + REGWIN_SZ, 
%o0; \ - ba,a,pt %xcc, rtrap_clr_l6; + ba,a,pt %xcc, rtrap_irq; #define TRAP_IVEC TRAP_NOSAVE(do_ivec) -- cgit v1.2.3 From 6421563d92679a65b629714d19c818ec4258668d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 9 Feb 2002 12:58:07 -0800 Subject: Clean up sparc64 build --- arch/sparc64/kernel/etrap.S | 4 ++-- arch/sparc64/kernel/sys_sparc32.c | 1 + arch/sparc64/lib/dec_and_lock.S | 11 +++++++++++ include/asm-sparc64/hardirq.h | 2 ++ include/asm-sparc64/thread_info.h | 1 + 5 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index b576ff91b86c..60a94bd4247d 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -30,12 +30,12 @@ #ifdef CONFIG_PREEMPT etrap_irq: ldsw [%g6 + TI_PRE_COUNT], %g1 add %g1, 1, %g1 - ba,pt etrap_irq2 + ba,pt %xcc, etrap_irq2 stw %g1, [%g6 + TI_PRE_COUNT] #endif etrap: rdpr %pil, %g2 ! Single Group #ifndef CONFIG_PREEMPT -etrap_irq +etrap_irq: #endif etrap_irq2: rdpr %tstate, %g1 ! Single Group sllx %g2, 20, %g3 ! IEU0 Group diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 000d61b22720..d472d4dc31eb 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S index b7c2631dd111..575f25e57883 100644 --- a/arch/sparc64/lib/dec_and_lock.S +++ b/arch/sparc64/lib/dec_and_lock.S @@ -5,6 +5,7 @@ * Copyright (C) 2000 David S. Miller (davem@redhat.com) */ #include +#include #ifndef CONFIG_DEBUG_SPINLOCK .text @@ -40,6 +41,11 @@ out: membar #StoreLoad | #StoreStore retl mov %g1, %o0 +#ifdef CONFIG_PREEMPT + ldsw [%g6 + TI_PRE_COUNT], %g3 + add %g3, 1, %g3 + stw %g3, [%g6 + TI_PRE_COUNT] +#endif to_zero: ldstub [%o1], %g3 brnz,pn %g3, spin_on_lock @@ -55,6 +61,11 @@ loop2: cas [%o0], %g5, %g7 /* ASSERT(g7 == 0) */ nop membar #StoreStore | #LoadStore stb %g0, [%o1] +#ifdef CONFIG_PREEMPT + ldsw [%g6 + TI_PRE_COUNT], %g3 + sub %g3, 1, %g3 + stw %g3, [%g6 + TI_PRE_COUNT] +#endif b,pt %xcc, nzero nop diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h index 62bfae557ad0..d55bd01d7363 100644 --- a/include/asm-sparc64/hardirq.h +++ b/include/asm-sparc64/hardirq.h @@ -56,6 +56,8 @@ typedef struct { #define synchronize_irq() barrier() +#define release_irqlock(cpu) do { } while (0) + #else /* (CONFIG_SMP) */ static __inline__ int irqs_running(void) diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index a1e23aab871e..30224a298d91 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -27,6 +27,7 @@ #ifndef __ASSEMBLY__ #include +#include struct task_struct; struct exec_domain; -- cgit v1.2.3
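
The least obvious mechanism in the first patch above is the packing of small per-thread fields (fault_code, wstate, cwp, current_ds, fpdepth, wsaved) into the upper six bytes of thread_info->flags, with the ordinary TIF_* bits kept in the low 16 bits. The stand-alone user-space sketch below is an editor's illustration, not kernel code: the TI_FLAG_*_SHIFT values and TIF_32BIT are copied from the new asm-sparc64/thread_info.h, while set_byte()/get_byte() are hypothetical helpers. The kernel instead reaches these bytes through __thread_flag_byte_ptr(), which relies on sparc64 being big-endian; plain shifts are used here so the sketch runs on any host.

#include <stdio.h>
#include <stdint.h>

/* Shift values copied from the new asm-sparc64/thread_info.h. */
#define TI_FLAG_FAULT_CODE_SHIFT	56
#define TI_FLAG_WSTATE_SHIFT		48
#define TI_FLAG_CWP_SHIFT		40
#define TI_FLAG_CURRENT_DS_SHIFT	32
#define TI_FLAG_FPDEPTH_SHIFT		24
#define TI_FLAG_WSAVED_SHIFT		16

/* One of the ordinary flag bits kept in the low 16 bits. */
#define TIF_32BIT			7

static uint64_t flags;	/* stands in for thread_info->flags */

/* Hypothetical helpers; the kernel indexes the word as bytes instead. */
static void set_byte(int shift, uint8_t val)
{
	flags = (flags & ~((uint64_t)0xff << shift)) | ((uint64_t)val << shift);
}

static uint8_t get_byte(int shift)
{
	return (flags >> shift) & 0xff;
}

int main(void)
{
	flags |= (uint64_t)1 << TIF_32BIT;	/* set_thread_flag(TIF_32BIT) */
	set_byte(TI_FLAG_WSTATE_SHIFT, 1 << 3);	/* set_thread_wstate(1 << 3)  */
	set_byte(TI_FLAG_CWP_SHIFT, 5);		/* set_thread_cwp(5)          */

	printf("flags = %016llx wstate=%u cwp=%u 32bit=%d\n",
	       (unsigned long long)flags,
	       get_byte(TI_FLAG_WSTATE_SHIFT),
	       get_byte(TI_FLAG_CWP_SHIFT),
	       (int)((flags >> TIF_32BIT) & 1));
	return 0;
}

Keeping all of these fields in a single 64-bit word is what allows the etrap/rtrap assembly to test several conditions with one ldx of TI_FLAGS, as the comment block added in thread_info.h describes.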