From f0265ce31798e01729d432e2ff4ef9962ccd6cd9 Mon Sep 17 00:00:00 2001
From: Pete Zaitcev
Date: Sat, 17 May 2003 12:43:43 -0700
Subject: [SPARC]: Keiths SMP patch #1

---
 include/asm-sparc/smp.h | 3 ---
 1 file changed, 3 deletions(-)
(limited to 'include')

diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index f32a11f8a113..b9959928eabf 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -169,9 +169,6 @@ extern __inline__ int hard_smp_processor_id(void)
 #endif
 
 #define smp_processor_id() hard_smp_processor_id()
 
-/* XXX We really need to implement this now. -DaveM */
-extern __inline__ void smp_send_reschedule(int cpu) { }
-extern __inline__ void smp_send_stop(void) { }
 
 #endif /* !(__ASSEMBLY__) */
-- cgit v1.2.3

From 841c7879c99dc1f72f619c493adc6707d4b59101 Mon Sep 17 00:00:00 2001
From: Pete Zaitcev
Date: Sat, 17 May 2003 13:12:11 -0700
Subject: [SPARC]: Sanitize BUG().

---
 arch/sparc/Kconfig              |  7 +++++++
 arch/sparc/kernel/sparc_ksyms.c |  5 +++++
 arch/sparc/kernel/traps.c       |  8 ++++++++
 include/asm-sparc/bug.h         | 26 ++++++++++----------------
 4 files changed, 30 insertions(+), 16 deletions(-)
(limited to 'include')

diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 97c736a6c668..c020ac0d729e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -1000,6 +1000,13 @@ config DEBUG_SPINLOCK_SLEEP
	  If you say Y here, various routines which may sleep will become very
	  noisy if they are called with a spinlock held.
 
+config DEBUG_BUGVERBOSE
+	bool "Verbose BUG() reporting (adds 70K)"
+	help
+	  Say Y here to make BUG() panics output the file name and line number
+	  of the BUG call as well as the EIP and oops trace. This aids
+	  debugging but costs about 70-100K of memory.
+
 endmenu
 
 source "security/Kconfig"
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index ad08eb2eddf9..58a369d72529 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -53,6 +53,7 @@
 #endif
 #include 
 #include 
+#include 
 
 extern spinlock_t rtc_lock;
 
@@ -312,5 +313,9 @@ EXPORT_SYMBOL_DOT(umul);
 EXPORT_SYMBOL_DOT(div);
 EXPORT_SYMBOL_DOT(udiv);
 
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+EXPORT_SYMBOL(do_BUG);
+#endif
+
 /* Sun Power Management Idle Handler */
 EXPORT_SYMBOL(pm_idle);
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index 17add64c9af2..f2fb91343b80 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -463,6 +463,14 @@ void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc
	send_sig_info(SIGFPE, &info, current);
 }
 
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+void do_BUG(const char *file, int line)
+{
+	// bust_spinlocks(1); XXX Not in our original BUG()
+	printk("kernel BUG at %s:%d!\n", file, line);
+}
+#endif
+
 /* Since we have our mappings set up, on multiprocessors we can spin them
  * up here so that timer interrupts work during initialization.
  */
diff --git a/include/asm-sparc/bug.h b/include/asm-sparc/bug.h
index 2100cc50f97c..41dc0abaa624 100644
--- a/include/asm-sparc/bug.h
+++ b/include/asm-sparc/bug.h
@@ -2,24 +2,18 @@
 #ifndef _SPARC_BUG_H
 #define _SPARC_BUG_H
 
-/*
- * XXX I am hitting compiler bugs with __builtin_trap. This has
- * hit me before and rusty was blaming his netfilter bugs on
- * this so lets disable it. - Anton
- */
-#if 0
-/* We need the mb()'s so we don't trigger a compiler bug - Anton */
-#define BUG() do { \
-	mb(); \
-	__builtin_trap(); \
-	mb(); \
-} while(0)
-#else
-#define BUG() do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; \
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+extern void do_BUG(const char *file, int line);
+#define BUG() do { \
+	do_BUG(__FILE__, __LINE__); \
+	__builtin_trap(); \
 } while (0)
+#else
+#define BUG() __builtin_trap()
 #endif
 
-#define PAGE_BUG(page) BUG()
+#define PAGE_BUG(page) do { \
+	BUG(); \
+} while (0)
 
 #endif
-- cgit v1.2.3
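The BUG() rework above splits reporting from trapping: with CONFIG_DEBUG_BUGVERBOSE enabled, do_BUG() prints the file and line via printk(), and __builtin_trap() then emits a trapping instruction that the kernel's trap handling turns into an oops; with the option off, BUG() reduces to the bare trap. The snippet below is only a user-space sketch of that macro shape, not kernel code — MY_BUG(), my_do_bug() and MY_DEBUG_BUGVERBOSE are made-up names standing in for BUG(), do_BUG() and CONFIG_DEBUG_BUGVERBOSE, and fprintf() stands in for printk().

/* User-space sketch of the two-level BUG() pattern; illustrative names only. */
#include <stdio.h>

#define MY_DEBUG_BUGVERBOSE 1	/* stand-in for CONFIG_DEBUG_BUGVERBOSE */

static void my_do_bug(const char *file, int line)
{
	/* Same message format as do_BUG() in traps.c above. */
	fprintf(stderr, "kernel BUG at %s:%d!\n", file, line);
}

#if MY_DEBUG_BUGVERBOSE
/* Verbose flavour: report the location, then hit a trapping instruction. */
#define MY_BUG() do {			\
	my_do_bug(__FILE__, __LINE__);	\
	__builtin_trap();		\
} while (0)
#else
/* Terse flavour: no location strings in the image, just the trap. */
#define MY_BUG() __builtin_trap()
#endif

int main(void)
{
	MY_BUG();	/* prints the location, then the process dies on the trap */
	return 0;
}

The __FILE__ strings are presumably what the "adds 70K" in the Kconfig help text refers to; the non-verbose build keeps only the trap instruction.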
- Anton - */ -#if 0 -/* We need the mb()'s so we don't trigger a compiler bug - Anton */ -#define BUG() do { \ - mb(); \ - __builtin_trap(); \ - mb(); \ -} while(0) -#else -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; \ +#ifdef CONFIG_DEBUG_BUGVERBOSE +extern void do_BUG(const char *file, int line); +#define BUG() do { \ + do_BUG(__FILE__, __LINE__); \ + __builtin_trap(); \ } while (0) +#else +#define BUG() __builtin_trap() #endif -#define PAGE_BUG(page) BUG() +#define PAGE_BUG(page) do { \ + BUG(); \ +} while (0) #endif -- cgit v1.2.3 From 62cbd987dc3054d952dec1d4214ff94e8916e46c Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Mon, 19 May 2003 03:35:30 -0700 Subject: [SPARC]: Switch bitops to unsigned long. --- include/asm-sparc/bitops.h | 48 +++++++++++++++++++++++---------------------- include/asm-sparc/io-unit.h | 2 +- 2 files changed, 26 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h index 5b3226834489..4340bddccabb 100644 --- a/include/asm-sparc/bitops.h +++ b/include/asm-sparc/bitops.h @@ -20,7 +20,7 @@ * within the first byte. Sparc is BIG-Endian. Unless noted otherwise * all bit-ops return 0 if bit was previously clear and != 0 otherwise. */ -static __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr) +static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -39,7 +39,7 @@ static __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr) return mask != 0; } -static __inline__ void set_bit(unsigned long nr, volatile void *addr) +static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -56,7 +56,7 @@ static __inline__ void set_bit(unsigned long nr, volatile void *addr) : "g3", "g4", "g5", "g7", "cc"); } -static __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr) +static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -75,7 +75,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr) return mask != 0; } -static __inline__ void clear_bit(unsigned long nr, volatile void *addr) +static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -92,7 +92,7 @@ static __inline__ void clear_bit(unsigned long nr, volatile void *addr) : "g3", "g4", "g5", "g7", "cc"); } -static __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr) +static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -111,7 +111,7 @@ static __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr) return mask != 0; } -static __inline__ void change_bit(unsigned long nr, volatile void *addr) +static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -131,7 +131,7 @@ static __inline__ void change_bit(unsigned long nr, volatile void *addr) /* * non-atomic versions */ -static __inline__ void __set_bit(int nr, volatile void *addr) +static 
@@ -139,7 +139,7 @@ static __inline__ void __set_bit(int nr, volatile void *addr)
	*p |= mask;
 }
 
-static __inline__ void __clear_bit(int nr, volatile void *addr)
+static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
 {
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -147,7 +147,7 @@ static __inline__ void __clear_bit(int nr, volatile void *addr)
	*p &= ~mask;
 }
 
-static __inline__ void __change_bit(int nr, volatile void *addr)
+static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
 {
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -155,7 +155,7 @@ static __inline__ void __change_bit(int nr, volatile void *addr)
	*p ^= mask;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
+static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -165,7 +165,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
	return (old & mask) != 0;
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -175,7 +175,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
	return (old & mask) != 0;
 }
 
-static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
+static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -189,9 +189,9 @@ static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
 #define smp_mb__after_clear_bit()	do { } while(0)
 
 /* The following routine need not be atomic. */
-static __inline__ int test_bit(int nr, __const__ void *addr)
+static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
 {
-	return (1 & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31))) != 0;
+	return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
 /* The easy/cheese version for now. */
@@ -288,9 +288,10 @@ static __inline__ int ffs(int x)
  * 'size' bits, starting the search at bit 'offset'. This is largely based
  * on Linus's ALPHA routines, which are pretty portable BTW.
  */
-static __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_bit(unsigned long *addr,
+		unsigned long size, unsigned long offset)
 {
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;
 
@@ -361,7 +362,7 @@ static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
 /*
  */
 
-static __inline__ int test_le_bit(int nr, __const__ void * addr)
+static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
 {
	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
@@ -370,7 +371,7 @@ static __inline__ int test_le_bit(int nr, __const__ void * addr)
 /*
  * non-atomic versions
  */
-static __inline__ void __set_le_bit(int nr, void *addr)
+static __inline__ void __set_le_bit(int nr, unsigned long *addr)
 {
	unsigned char *ADDR = (unsigned char *)addr;
 
@@ -378,7 +379,7 @@ static __inline__ void __set_le_bit(int nr, void *addr)
	*ADDR |= 1 << (nr & 0x07);
 }
 
-static __inline__ void __clear_le_bit(int nr, void *addr)
+static __inline__ void __clear_le_bit(int nr, unsigned long *addr)
 {
	unsigned char *ADDR = (unsigned char *)addr;
 
@@ -386,7 +387,7 @@ static __inline__ void __clear_le_bit(int nr, void *addr)
	*ADDR &= ~(1 << (nr & 0x07));
 }
 
-static __inline__ int __test_and_set_le_bit(int nr, void *addr)
+static __inline__ int __test_and_set_le_bit(int nr, unsigned long *addr)
 {
	int mask, retval;
	unsigned char *ADDR = (unsigned char *)addr;
@@ -398,7 +399,7 @@ static __inline__ int __test_and_set_le_bit(int nr, void *addr)
	return retval;
 }
 
-static __inline__ int __test_and_clear_le_bit(int nr, void *addr)
+static __inline__ int __test_and_clear_le_bit(int nr, unsigned long *addr)
 {
	int mask, retval;
	unsigned char *ADDR = (unsigned char *)addr;
@@ -410,9 +411,10 @@ static __inline__ int __test_and_clear_le_bit(int nr, void *addr)
	return retval;
 }
 
-static __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr,
+		unsigned long size, unsigned long offset)
 {
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;
 
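Every operation above now takes (or scans) unsigned long * rather than void *, matching the generic kernel convention that a bitmap is an array of unsigned long, indexed here 32 bits per word (nr >> 5, nr & 0x1f). The io-unit.h hunk that follows adjusts the one bmap[] declaration in this patch to the same element type. A rough user-space sketch of that calling convention — my_set_bit(), my_test_bit() and BITMAP_LONGS() are illustrative stand-ins, non-atomic, with the word size taken from sizeof rather than hard-coded to 32:

/* Sketch of the unsigned-long bitmap convention; illustrative names only. */
#include <stdio.h>

#define BITS_PER_LONG		(8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS(bits)	(((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void my_set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int my_test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
	/* A 128-bit map declared as unsigned long[], like bmap[] below. */
	unsigned long bmap[BITMAP_LONGS(128)] = { 0 };

	my_set_bit(5, bmap);
	my_set_bit(70, bmap);
	printf("bit 5=%d, bit 6=%d, bit 70=%d\n",
	       my_test_bit(5, bmap), my_test_bit(6, bmap), my_test_bit(70, bmap));
	return 0;
}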
diff --git a/include/asm-sparc/io-unit.h b/include/asm-sparc/io-unit.h
index bd5fc063444b..96823b47fd45 100644
--- a/include/asm-sparc/io-unit.h
+++ b/include/asm-sparc/io-unit.h
@@ -41,7 +41,7 @@
 #define IOUPTE_PARITY 0x00000001 /* Parity is checked during DVMA */
 
 struct iounit_struct {
-	unsigned int bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned int)];
+	unsigned long bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
	spinlock_t lock;
	iopte_t *page_table;
	unsigned long rotor[3];
-- cgit v1.2.3
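For reference, the bmap[] expression in the io-unit.h hunk sizes the bitmap at one bit per PAGE_SIZE chunk of the DVMA window: IOUNIT_DMA_SIZE >> PAGE_SHIFT gives the number of bits, the extra +3 in the shift divides by 8 to get bytes, and dividing by sizeof(unsigned long) gives array elements, which is why the divisor's type has to track the element type. The figures in the sketch below are made up purely to show the arithmetic; the real IOUNIT_DMA_SIZE and PAGE_SHIFT values come from the sparc headers, not from here.

/* Worked example of the bmap[] sizing expression; DMA_SIZE and
 * MY_PAGE_SHIFT are invented example values, not the real
 * IOUNIT_DMA_SIZE / PAGE_SHIFT constants.
 */
#include <stdio.h>

#define DMA_SIZE	(64UL * 1024 * 1024)	/* pretend a 64 MB DVMA window */
#define MY_PAGE_SHIFT	12			/* pretend 4 KB pages */

int main(void)
{
	unsigned long bits  = DMA_SIZE >> MY_PAGE_SHIFT;	/* one bit per page */
	unsigned long bytes = DMA_SIZE >> (MY_PAGE_SHIFT + 3);	/* 8 bits per byte */
	unsigned long longs = bytes / sizeof(unsigned long);	/* bmap[] elements */

	printf("%lu bits -> %lu bytes -> %lu unsigned longs\n", bits, bytes, longs);
	/* With a 32-bit unsigned long, as on sparc32: 16384 -> 2048 -> 512. */
	return 0;
}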