diff options
Diffstat (limited to 'include')
59 files changed, 1974 insertions, 1153 deletions
diff --git a/include/asm-h8300/aki3068net/machine-depend.h b/include/asm-h8300/aki3068net/machine-depend.h new file mode 100644 index 000000000000..e2e5f6a523ac --- /dev/null +++ b/include/asm-h8300/aki3068net/machine-depend.h @@ -0,0 +1,35 @@ +/* AE-3068 board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#include <linux/config.h> +#define H8300_TIMER_COUNT_DATA 20000*10/8192 +#define H8300_TIMER_FREQ 20000*1000/8192 +#endif + +/* AE-3068 RTL8019AS Config */ +#ifdef H8300_NE_DEFINE + +#define NE2000_ADDR 0x200000 +#define NE2000_IRQ 5 +#define NE2000_IRQ_VECTOR (12 + NE2000_IRQ) +#define NE2000_BYTE volatile unsigned short + +#define IER 0xfee015 +#define ISR 0xfee016 +#define IRQ_MASK (1 << NE2000_IRQ) + +#define WCRL 0xfee023 +#define MAR0A 0xffff20 +#define ETCR0A 0xffff24 +#define DTCR0A 0xffff27 +#define MAR0B 0xffff28 +#define DTCR0B 0xffff2f + +#define H8300_INIT_NE() \ +do { \ + wordlength = 1; \ + outb_p(0x48, ioaddr + EN0_DCFG); \ +} while(0) + +#endif diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h index 3af502772b6f..e9595c099c58 100644 --- a/include/asm-h8300/atomic.h +++ b/include/asm-h8300/atomic.h @@ -71,56 +71,27 @@ static __inline__ int atomic_dec_and_test(atomic_t *v) return ret == 0; } -#if defined(__H8300H__) static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) { - __asm__ __volatile__("stc ccr,r2l\n\t" + __asm__ __volatile__("stc ccr,r1l\n\t" "orc #0x80,ccr\n\t" "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "and.l er1,er0\n\t" + "and.l %1,er0\n\t" "mov.l er0,%0\n\t" - "ldc r2l,ccr" - : "=m" (*v) : "ir" (~(mask)) :"er0","er1","er2"); + "ldc r1l,ccr" + : "=m" (*v) : "g" (~(mask)) :"er0","er1"); } static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) { - __asm__ __volatile__("stc ccr,r2l\n\t" + __asm__ __volatile__("stc ccr,r1l\n\t" "orc #0x80,ccr\n\t" "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "or.l er1,er0\n\t" + "or.l %1,er0\n\t" "mov.l er0,%0\n\t" - 
"ldc r2l,ccr" - : "=m" (*v) : "ir" (mask) :"er0","er1","er2"); + "ldc r1l,ccr" + : "=m" (*v) : "g" (mask) :"er0","er1"); } -#endif -#if defined(__H8300S__) -static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc exr,r2l\n\t" - "orc #0x07,exr\n\t" - "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "and.l er1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r2l,exr" - : "=m" (*v) : "ir" (~(mask)) :"er0","er1","er2"); -} - -static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc exr,r2l\n\t" - "orc #0x07,exr\n\t" - "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "or.l er1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r2l,exr" - : "=m" (*v) : "ir" (mask) :"er0","er1","er2"); -} -#endif /* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h index 703024599da6..87068f245d5c 100644 --- a/include/asm-h8300/bitops.h +++ b/include/asm-h8300/bitops.h @@ -39,16 +39,18 @@ static __inline__ unsigned long ffz(unsigned long word) static __inline__ void set_bit(int nr, volatile unsigned long* addr) { - unsigned char *a = (unsigned char *) addr; - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); + volatile unsigned char *b_addr; + b_addr = &(((volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]); __asm__("mov.l %1,er0\n\t" - "mov.l %0,er1\n\t" - "bset r0l,@er1" - :"=m"(a):"g"(nr & 7):"er0","er1","memory"); + "bset r0l,%0" + :"+m"(*b_addr) + :"g"(nr & 7),"m"(*b_addr) + :"er0"); } -/* Bigendian is complexed... */ -#define __set_bit(nr, addr) set_bit(nr, addr) +/* Bigendian is complexed... */ +#define __set_bit(nr, addr) set_bit((nr), (addr)) /* * clear_bit() doesn't provide any barrier for the compiler. 
@@ -58,261 +60,158 @@ static __inline__ void set_bit(int nr, volatile unsigned long* addr) static __inline__ void clear_bit(int nr, volatile unsigned long* addr) { - unsigned char *a = (unsigned char *) addr; - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); + volatile unsigned char *b_addr; + b_addr = &(((volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]); __asm__("mov.l %1,er0\n\t" - "mov.l %0,er1\n\t" - "bclr r0l,@er1" - :"=m"(a):"g"(nr & 7):"er0","er1","memory"); + "bclr r0l,%0" + :"+m"(*b_addr) + :"g"(nr & 7),"m"(*b_addr) + :"er0"); } -#define __clear_bit(nr, addr) clear_bit(nr, addr) +#define __clear_bit(nr, addr) clear_bit((nr), (addr)) static __inline__ void change_bit(int nr, volatile unsigned long* addr) { - unsigned char *a = (unsigned char *) addr; - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); + volatile unsigned char *b_addr; + b_addr = &(((volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]); __asm__("mov.l %1,er0\n\t" - "mov.l %0,er1\n\t" - "bnot r0l,@er1" - :"=m"(a):"g"(nr & 7):"er0","er1","memory"); + "bnot r0l,%0" + :"+m"(*b_addr) + :"g"(nr & 7),"m"(*b_addr) + :"er0"); } -#define __change_bit(nr, addr) change_bit(nr, addr) +#define __change_bit(nr, addr) change_bit((nr), (addr)) -#if defined(__H8300H__) -static __inline__ int test_and_set_bit(int nr, volatile unsigned long* addr) +static __inline__ int test_bit(int nr, const unsigned long* addr) { - int retval; - unsigned char *a; - a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc ccr,r0h\n\t" - "orc #0x80,ccr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bset r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,ccr\n\t" - "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); - return retval; + return ((1UL << (nr & 7)) & + (((const volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)])) 
!= 0; } -#endif -#if defined(__H8300S__) + +#define __test_bit(nr, addr) test_bit(nr, addr) + static __inline__ int test_and_set_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a; - a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc exr,r0h\n\t" - "orc #0x07,exr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bset r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,exr\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); + "stc ccr,r3h\n\t" + "orc #0x80,ccr\n\t" + "btst r3l,%1\n\t" + "bset r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + "ldc r3h,ccr" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#endif static __inline__ int __test_and_set_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "mov.b %1,r0h\n\t" - "btst r0l,r0h\n\t" - "bset r0l,r0h\n\t" - "stc ccr,r0l\n\t" - "mov.b r0h,%1\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory"); + "btst r3l,%1\n\t" + "bset r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#if defined(__H8300H__) static __inline__ int test_and_clear_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - 
__asm__("mov.l %2,er0\n\t" - "stc ccr,r0h\n\t" - "orc #0x80,ccr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bclr r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,ccr\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); - return retval; -} -#endif -#if defined(__H8300S__) -static __inline__ int test_and_clear_bit(int nr, volatile unsigned long* addr) -{ - int retval; - unsigned char *a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc exr,r0h\n\t" - "orc #0x07,exr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bclr r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,exr\n\t" - "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); + "stc ccr,r3h\n\t" + "orc #0x80,ccr\n\t" + "btst r3l,%1\n\t" + "bclr r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + "ldc r3h,ccr" + : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#endif static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "mov.b %1,r0h\n\t" - "btst r0l,r0h\n\t" - "bclr r0l,r0h\n\t" - "stc ccr,r0l\n\t" - "mov.b r0h,%1\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory"); + "btst r3l,%1\n\t" + "bclr r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#if defined(__H8300H__) static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) 
addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc ccr,r0h\n\t" - "orc #0x80,ccr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bnot r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,ccr\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); - return retval; -} -#endif -#if defined(__H8300S__) -static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr) -{ - int retval; - unsigned char *a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc exr,r0h\n\t" - "orc #0x07,exr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bnot r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,exr\n\t" - "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); + "stc ccr,r3h\n\t" + "orc #0x80,ccr\n\t" + "btst r3l,%1\n\t" + "bnot r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + "ldc r3h,ccr" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#endif static __inline__ int __test_and_change_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "mov.b %1,r0h\n\t" - "btst r0l,r0h\n\t" - "bnot r0l,r0h\n\t" - "stc ccr,r0l\n\t" - "mov.b r0h,%1\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory"); - return retval; -} - -/* - * This routine doesn't need to be atomic. 
- */ -static __inline__ int __constant_test_bit(int nr, const volatile unsigned long* addr) -{ - return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; -} - -static __inline__ int __test_bit(int nr, const unsigned long* addr) -{ - int retval; - unsigned char *a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %1,er0\n\t" - "btst r0l,@%2\n\t" + "btst r3l,%1\n\t" + "bnot r3l,%1\n\t" "beq 1f\n\t" - "sub.l %0,%0\n\t" - "inc.l #1,%0\n" - "bra 2f\n" - "1:\n\t" - "sub.l %0,%0\n" - "2:" - : "=r"(retval) :"g"(nr & 7),"r"(a):"er0"); + "inc.l #1,%0\n\t" + "1:" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#define test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? \ - __constant_test_bit((nr),(addr)) : \ - __test_bit((nr),(addr))) - - #define find_first_zero_bit(addr, size) \ find_next_zero_bit((addr), (size), 0) diff --git a/include/asm-h8300/edosk2674/machine-depend.h b/include/asm-h8300/edosk2674/machine-depend.h new file mode 100644 index 000000000000..1e98b40e5f4e --- /dev/null +++ b/include/asm-h8300/edosk2674/machine-depend.h @@ -0,0 +1,70 @@ +/* EDOSK2674 board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#define H8300_TIMER_COUNT_DATA 33000*10/8192 +#define H8300_TIMER_FREQ 33000*1000/8192 +#endif + +/* EDOSK-2674R SMSC Network Controler Target Depend impliments */ +#ifdef H8300_SMSC_DEFINE + +#define SMSC_BASE 0xf80000 +#define SMSC_IRQ 16 + +/* sorry quick hack */ +#if defined(outw) +# undef outw +#endif +#define outw(d,a) edosk2674_smsc_outw(d,(volatile unsigned short *)(a)) +#if defined(inw) +# undef inw +#endif +#define inw(a) edosk2674_smsc_inw((volatile unsigned short *)(a)) +#if defined(outsw) +# undef outsw +#endif +#define outsw(a,p,l) edosk2674_smsc_outsw((volatile unsigned short *)(a),p,l) +#if defined(insw) +# undef insw +#endif +#define insw(a,p,l) edosk2674_smsc_insw((volatile unsigned short *)(a),p,l) + +static inline void 
edosk2674_smsc_outw( + unsigned short d, + volatile unsigned short *a + ) +{ + *a = (d >> 8) | (d << 8); +} + +static inline unsigned short edosk2674_smsc_inw( + volatile unsigned short *a + ) +{ + unsigned short d; + d = *a; + return (d >> 8) | (d << 8); +} + +static inline void edosk2674_smsc_outsw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *a = *p; +} + +static inline void edosk2674_smsc_insw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *p = *a; +} + +#endif diff --git a/include/asm-h8300/generic/machine-depend.h b/include/asm-h8300/generic/machine-depend.h new file mode 100644 index 000000000000..2d78096e54c8 --- /dev/null +++ b/include/asm-h8300/generic/machine-depend.h @@ -0,0 +1,17 @@ +/* machine depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#include <linux/config.h> +#if defined(CONFIG_H83007) || defined(CONFIG_H83068) || defined(CONFIG_H8S2678) +#define H8300_TIMER_COUNT_DATA CONFIG_CPU_CLOCK*10/8192 +#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*1000/8192 +#endif + +#if defined(CONFIG_H8_3002) || defined(CONFIG_H83048) +#define H8300_TIMER_COUNT_DATA CONFIG_CPU_CLOCK*10/8 +#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*1000/8 +#endif + +#endif + diff --git a/include/asm-h8300/h8300_ne.h b/include/asm-h8300/h8300_ne.h index debef6ae7140..c0350b6ea6e3 100644 --- a/include/asm-h8300/h8300_ne.h +++ b/include/asm-h8300/h8300_ne.h @@ -11,9 +11,9 @@ #define h8300ne_h /****************************************************************************/ -/* Such a description is OK ? 
*/ -#define DEPEND_HEADER(target) <asm/target/ne.h> -#include DEPEND_HEADER(TARGET) +#define H8300_NE_DEFINE +#include <asm/machine-depend.h> +#undef H8300_NE_DEFINE /****************************************************************************/ #endif /* h8300ne_h */ diff --git a/include/asm-h8300/h8max/ide.h b/include/asm-h8300/h8max/ide.h deleted file mode 100644 index e7d75ac57974..000000000000 --- a/include/asm-h8300/h8max/ide.h +++ /dev/null @@ -1,60 +0,0 @@ -/* H8MAX IDE I/F Config */ - -#define H8300_IDE_BASE 0x200000 -#define H8300_IDE_CTRL 0x60000c -#define H8300_IDE_IRQ 5 -#define H8300_IDE_REG_OFFSET 2 - -#undef outb -#undef inb -#undef outb_p -#undef inb_p -#undef outsw -#undef insw - -#define outb(d,a) h8max_outb(d,(unsigned short *)a) -#define inb(a) h8max_inb((unsigned char *)a) -#define outb_p(d,a) h8max_outb(d,(unsigned short *)a) -#define inb_p(a) h8max_inb((unsigned char *)a) -#define outsw(addr,buf,len) h8max_outsw(addr,buf,len); -#define insw(addr,buf,len) h8max_insw(addr,buf,len); - -static inline void h8max_outb(unsigned short d,unsigned short *a) -{ - *a = d; -} - -static inline unsigned char h8max_inb(unsigned char *a) -{ - return *(a+1); -} - -static inline void h8max_outsw(void *addr, void *buf, int len) -{ - unsigned volatile short *ap = (unsigned volatile short *)addr; - unsigned short *bp = (unsigned short *)buf; - unsigned short d; - while(len--) { - d = *bp++; - *ap = (d >> 8) | (d << 8); - } -} - -static inline void h8max_insw(void *addr, void *buf, int len) -{ - unsigned volatile short *ap = (unsigned volatile short *)addr; - unsigned short *bp = (unsigned short *)buf; - unsigned short d; - while(len--) { - d = *ap; - *bp++ = (d >> 8) | (d << 8); - } -} - -static inline void target_ide_fix_driveid(struct hd_driveid *id) -{ - int c; - unsigned short *p = (unsigned short *)id; - for (c = 0; c < SECTOR_WORDS; c++, p++) - *p = (*p >> 8) | (*p << 8); -} diff --git a/include/asm-h8300/h8max/machine-depend.h 
b/include/asm-h8300/h8max/machine-depend.h new file mode 100644 index 000000000000..1a2218f9d3bf --- /dev/null +++ b/include/asm-h8300/h8max/machine-depend.h @@ -0,0 +1,167 @@ +/* H8MAX board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#define H8300_TIMER_COUNT_DATA 25000*10/8192 +#define H8300_TIMER_FREQ 25000*1000/8192 +#endif + +/* H8MAX RTL8019AS Config */ +#ifdef H8300_NE_DEFINE + +#define NE2000_ADDR 0x800600 +#define NE2000_IRQ 4 +#define NE2000_IRQ_VECTOR (12 + NE2000_IRQ) +#define NE2000_BYTE volatile unsigned short + +#define IER 0xfee015 +#define ISR 0xfee016 +#define IRQ_MASK (1 << NE2000_IRQ) +/* sorry quick hack */ +#if defined(outb) +# undef outb +#endif +#define outb(d,a) h8max_outb((d),(a) - NE2000_ADDR) +#if defined(inb) +# undef inb +#endif +#define inb(a) h8max_inb((a) - NE2000_ADDR) +#if defined(outb_p) +# undef outb_p +#endif +#define outb_p(d,a) h8max_outb((d),(a) - NE2000_ADDR) +#if defined(inb_p) +# undef inb_p +#endif +#define inb_p(a) h8max_inb((a) - NE2000_ADDR) +#if defined(outsw) +# undef outsw +#endif +#define outsw(a,p,l) h8max_outsw((a) - NE2000_ADDR,(unsigned short *)p,l) +#if defined(insw) +# undef insw +#endif +#define insw(a,p,l) h8max_insw((a) - NE2000_ADDR,(unsigned short *)p,l) +#if defined(outsb) +# undef outsb +#endif +#define outsb(a,p,l) h8max_outsb((a) - NE2000_ADDR,(unsigned char *)p,l) +#if defined(insb) +# undef insb +#endif +#define insb(a,p,l) h8max_insb((a) - NE2000_ADDR,(unsigned char *)p,l) + +#define H8300_INIT_NE() \ +do { \ + wordlength = 2; \ + h8max_outb(0x49, ioaddr + EN0_DCFG); \ + SA_prom[14] = SA_prom[15] = 0x57;\ +} while(0) + +static inline void h8max_outb(unsigned char d,unsigned char a) +{ + *(unsigned short *)(NE2000_ADDR + (a << 1)) = d; +} + +static inline unsigned char h8max_inb(unsigned char a) +{ + return *(unsigned char *)(NE2000_ADDR + (a << 1) +1); +} + +static inline void h8max_outsw(unsigned char a,unsigned short *p,unsigned long l) +{ + unsigned short d; + for 
(; l != 0; --l, p++) { + d = (((*p) >> 8) & 0xff) | ((*p) << 8); + *(unsigned short *)(NE2000_ADDR + (a << 1)) = d; + } +} + +static inline void h8max_insw(unsigned char a,unsigned short *p,unsigned long l) +{ + unsigned short d; + for (; l != 0; --l, p++) { + d = *(unsigned short *)(NE2000_ADDR + (a << 1)); + *p = (d << 8)|((d >> 8) & 0xff); + } +} + +static inline void h8max_outsb(unsigned char a,unsigned char *p,unsigned long l) +{ + for (; l != 0; --l, p++) { + *(unsigned short *)(NE2000_ADDR + (a << 1)) = *p; + } +} + +static inline void h8max_insb(unsigned char a,unsigned char *p,unsigned long l) +{ + for (; l != 0; --l, p++) { + *p = *((unsigned char *)(NE2000_ADDR + (a << 1))+1); + } +} + +#endif + +/* H8MAX IDE I/F Config */ +#ifdef H8300_IDE_DEFINE + +#define H8300_IDE_BASE 0x200000 +#define H8300_IDE_CTRL 0x60000c +#define H8300_IDE_IRQ 5 +#define H8300_IDE_REG_OFFSET 2 + +#undef outb +#undef inb +#undef outb_p +#undef inb_p +#undef outsw +#undef insw + +#define outb(d,a) h8max_outb(d,(unsigned short *)a) +#define inb(a) h8max_inb((unsigned char *)a) +#define outb_p(d,a) h8max_outb(d,(unsigned short *)a) +#define inb_p(a) h8max_inb((unsigned char *)a) +#define outsw(addr,buf,len) h8max_outsw(addr,buf,len); +#define insw(addr,buf,len) h8max_insw(addr,buf,len); + +static inline void h8max_outb(unsigned short d,unsigned short *a) +{ + *a = d; +} + +static inline unsigned char h8max_inb(unsigned char *a) +{ + return *(a+1); +} + +static inline void h8max_outsw(void *addr, void *buf, int len) +{ + unsigned volatile short *ap = (unsigned volatile short *)addr; + unsigned short *bp = (unsigned short *)buf; + unsigned short d; + while(len--) { + d = *bp++; + *ap = (d >> 8) | (d << 8); + } +} + +static inline void h8max_insw(void *addr, void *buf, int len) +{ + unsigned volatile short *ap = (unsigned volatile short *)addr; + unsigned short *bp = (unsigned short *)buf; + unsigned short d; + while(len--) { + d = *ap; + *bp++ = (d >> 8) | (d << 8); + } +} + +static 
inline void target_ide_fix_driveid(struct hd_driveid *id) +{ + int c; + unsigned short *p = (unsigned short *)id; + for (c = 0; c < SECTOR_WORDS; c++, p++) + *p = (*p >> 8) | (*p << 8); +} + +#endif diff --git a/include/asm-h8300/hardirq.h b/include/asm-h8300/hardirq.h index 20f3571cc299..ccab235b9f83 100644 --- a/include/asm-h8300/hardirq.h +++ b/include/asm-h8300/hardirq.h @@ -75,12 +75,6 @@ typedef struct { #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) #ifdef CONFIG_PREEMPT -# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) -#else -# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET -#endif - -#ifdef CONFIG_PREEMPT # define in_atomic() (preempt_count() != kernel_locked()) # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) #else diff --git a/include/asm-h8300/ide.h b/include/asm-h8300/ide.h index 3ebf8e262324..3669f106312b 100644 --- a/include/asm-h8300/ide.h +++ b/include/asm-h8300/ide.h @@ -70,9 +70,10 @@ typedef union { * Our list of ports/irq's for different boards. */ -/* Such a description is OK ? */ -#define DEPEND_HEADER(target) <asm/target/ide.h> -#include DEPEND_HEADER(TARGET) +/* machine depend header include */ +#define H8300_IDE_DEFINE +#include <asm/machine-depend.h> +#undef H8300_IDE_DEFINE /****************************************************************************/ diff --git a/include/asm-h8300/io.h b/include/asm-h8300/io.h index 69efa4f2c0de..42f91752b920 100644 --- a/include/asm-h8300/io.h +++ b/include/asm-h8300/io.h @@ -51,21 +51,12 @@ static inline unsigned int _swapl(volatile unsigned long v) #define writew(b,addr) (void)((*(volatile unsigned short *) (addr & 0x00ffffff)) = (b)) #define writel(b,addr) (void)((*(volatile unsigned int *) (addr & 0x00ffffff)) = (b)) -/* - * The following are some defines we need for MTD with our - * COBRA5272 board. - * Because I don't know if they break something I have - * #ifdef'd them. 
- * (020325 - hede) - */ -#ifdef CONFIG_senTec #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel -#endif /* CONFIG_senTec */ static inline void io_outsb(unsigned int addr, void *buf, int len) { diff --git a/include/asm-h8300/machine-depend.h b/include/asm-h8300/machine-depend.h new file mode 100644 index 000000000000..1e98b40e5f4e --- /dev/null +++ b/include/asm-h8300/machine-depend.h @@ -0,0 +1,70 @@ +/* EDOSK2674 board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#define H8300_TIMER_COUNT_DATA 33000*10/8192 +#define H8300_TIMER_FREQ 33000*1000/8192 +#endif + +/* EDOSK-2674R SMSC Network Controler Target Depend impliments */ +#ifdef H8300_SMSC_DEFINE + +#define SMSC_BASE 0xf80000 +#define SMSC_IRQ 16 + +/* sorry quick hack */ +#if defined(outw) +# undef outw +#endif +#define outw(d,a) edosk2674_smsc_outw(d,(volatile unsigned short *)(a)) +#if defined(inw) +# undef inw +#endif +#define inw(a) edosk2674_smsc_inw((volatile unsigned short *)(a)) +#if defined(outsw) +# undef outsw +#endif +#define outsw(a,p,l) edosk2674_smsc_outsw((volatile unsigned short *)(a),p,l) +#if defined(insw) +# undef insw +#endif +#define insw(a,p,l) edosk2674_smsc_insw((volatile unsigned short *)(a),p,l) + +static inline void edosk2674_smsc_outw( + unsigned short d, + volatile unsigned short *a + ) +{ + *a = (d >> 8) | (d << 8); +} + +static inline unsigned short edosk2674_smsc_inw( + volatile unsigned short *a + ) +{ + unsigned short d; + d = *a; + return (d >> 8) | (d << 8); +} + +static inline void edosk2674_smsc_outsw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *a = *p; +} + +static inline void edosk2674_smsc_insw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *p = *a; +} + +#endif diff --git a/include/asm-h8300/processor.h 
b/include/asm-h8300/processor.h index a945b8bede2f..819c9b34e152 100644 --- a/include/asm-h8300/processor.h +++ b/include/asm-h8300/processor.h @@ -70,12 +70,12 @@ struct thread_struct { * pass the data segment into user programs if it exists, * it can't hurt anything as far as I can tell */ -#if defined(__H8300S__) +#if defined(__H8300H__) #define start_thread(_regs, _pc, _usp) \ do { \ set_fs(USER_DS); /* reads from user space */ \ (_regs)->pc = (_pc); \ - (_regs)->ccr &= ~0x10; /* clear kernel flag */ \ + (_regs)->ccr &= 0x00; /* clear kernel flag */ \ } while(0) #endif #if defined(__H8300S__) @@ -83,7 +83,7 @@ do { \ do { \ set_fs(USER_DS); /* reads from user space */ \ (_regs)->pc = (_pc); \ - (_regs)->ccr &= ~0x10; /* clear kernel flag */ \ + (_regs)->ccr = 0x00; /* clear kernel flag */ \ (_regs)->exr = 0x78; /* enable all interrupts */ \ /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \ wrusp(((unsigned long)(_usp)) - 14); \ diff --git a/include/asm-h8300/regs267x.h b/include/asm-h8300/regs267x.h new file mode 100644 index 000000000000..1bff731a9f77 --- /dev/null +++ b/include/asm-h8300/regs267x.h @@ -0,0 +1,336 @@ +/* internal Peripherals Register address define */ +/* CPU: H8/306x */ + +#if !defined(__REGS_H8S267x__) +#define __REGS_H8S267x__ + +#if defined(__KERNEL__) + +#define DASTCR 0xFEE01A +#define DADR0 0xFFFFA4 +#define DADR1 0xFFFFA5 +#define DACR01 0xFFFFA6 +#define DADR2 0xFFFFA8 +#define DADR3 0xFFFFA9 +#define DACR23 0xFFFFAA + +#define ADDRA 0xFFFF90 +#define ADDRAH 0xFFFF90 +#define ADDRAL 0xFFFF91 +#define ADDRB 0xFFFF92 +#define ADDRBH 0xFFFF92 +#define ADDRBL 0xFFFF93 +#define ADDRC 0xFFFF94 +#define ADDRCH 0xFFFF94 +#define ADDRCL 0xFFFF95 +#define ADDRD 0xFFFF96 +#define ADDRDH 0xFFFF96 +#define ADDRDL 0xFFFF97 +#define ADDRE 0xFFFF98 +#define ADDREH 0xFFFF98 +#define ADDREL 0xFFFF99 +#define ADDRF 0xFFFF9A +#define ADDRFH 0xFFFF9A +#define ADDRFL 0xFFFF9B +#define ADDRG 0xFFFF9C +#define ADDRGH 0xFFFF9C +#define 
ADDRGL 0xFFFF9D +#define ADDRH 0xFFFF9E +#define ADDRHH 0xFFFF9E +#define ADDRHL 0xFFFF9F + +#define ADCSR 0xFFFFA0 +#define ADCR 0xFFFFA1 + +#define ABWCR 0xFFFEC0 +#define ASTCR 0xFFFEC1 +#define WTCRAH 0xFFFEC2 +#define WTCRAL 0xFFFEC3 +#define WTCRBH 0xFFFEC4 +#define WTCRBL 0xFFFEC5 +#define RDNCR 0xFFFEC6 +#define CSACRH 0xFFFEC8 +#define CSACRL 0xFFFEC9 +#define BROMCRH 0xFFFECA +#define BROMCRL 0xFFFECB +#define BCR 0xFFFECC +#define DRAMCR 0xFFFED0 +#define DRACCR 0xFFFED2 +#define REFCR 0xFFFED4 +#define RTCNT 0xFFFED6 +#define RTCOR 0xFFFED7 + +#define MAR0AH 0xFFFEE0 +#define MAR0AL 0xFFFEE2 +#define IOAR0A 0xFFFEE4 +#define ETCR0A 0xFFFEE6 +#define MAR0BH 0xFFFEE8 +#define MAR0BL 0xFFFEEA +#define IOAR0B 0xFFFEEC +#define ETCR0B 0xFFFEEE +#define MAR1AH 0xFFFEF0 +#define MAR1AL 0xFFFEF2 +#define IOAR1A 0xFFFEF4 +#define ETCR1A 0xFFFEF6 +#define MAR1BH 0xFFFEF8 +#define MAR1BL 0xFFFEFA +#define IOAR1B 0xFFFEFC +#define ETCR1B 0xFFFEFE +#define DMAWER 0xFFFF20 +#define DMATCR 0xFFFF21 +#define DMACR0A 0xFFFF22 +#define DMACR0B 0xFFFF23 +#define DMACR1A 0xFFFF24 +#define DMACR1B 0xFFFF25 +#define DMABCRH 0xFFFF26 +#define DMABCRL 0xFFFF27 + +#define EDSAR0 0xFFFDC0 +#define EDDAR0 0xFFFDC4 +#define EDTCR0 0xFFFDC8 +#define EDMDR0 0xFFFDCC +#define EDMDR0H 0xFFFDCC +#define EDMDR0L 0xFFFDCD +#define EDACR0 0xFFFDCE +#define EDSAR1 0xFFFDD0 +#define EDDAR1 0xFFFDD4 +#define EDTCR1 0xFFFDD8 +#define EDMDR1 0xFFFDDC +#define EDMDR1H 0xFFFDDC +#define EDMDR1L 0xFFFDDD +#define EDACR1 0xFFFDDE +#define EDSAR2 0xFFFDE0 +#define EDDAR2 0xFFFDE4 +#define EDTCR2 0xFFFDE8 +#define EDMDR2 0xFFFDEC +#define EDMDR2H 0xFFFDEC +#define EDMDR2L 0xFFFDED +#define EDACR2 0xFFFDEE +#define EDSAR3 0xFFFDF0 +#define EDDAR3 0xFFFDF4 +#define EDTCR3 0xFFFDF8 +#define EDMDR3 0xFFFDFC +#define EDMDR3H 0xFFFDFC +#define EDMDR3L 0xFFFDFD +#define EDACR3 0xFFFDFE + +#define IPRA 0xFFFE00 +#define IPRB 0xFFFE02 +#define IPRC 0xFFFE04 +#define IPRD 0xFFFE06 +#define IPRE 0xFFFE08 
+#define IPRF 0xFFFE0A +#define IPRG 0xFFFE0C +#define IPRH 0xFFFE0E +#define IPRI 0xFFFE10 +#define IPRJ 0xFFFE12 +#define IPRK 0xFFFE14 +#define ITSR 0xFFFE16 +#define SSIER 0xFFFE18 +#define ISCRH 0xFFFE1A +#define ISCRL 0xFFFE1C + +#define INTCR 0xFFFF31 +#define IER 0xFFFF32 +#define IERH 0xFFFF32 +#define IERL 0xFFFF33 +#define ISR 0xFFFF34 +#define ISRH 0xFFFF34 +#define ISRL 0xFFFF35 + +#define P1DDR 0xFFFE20 +#define P2DDR 0xFFFE21 +#define P3DDR 0xFFFE22 +#define P4DDR 0xFFFE23 +#define P5DDR 0xFFFE24 +#define P6DDR 0xFFFE25 +#define P7DDR 0xFFFE26 +#define P8DDR 0xFFFE27 +#define P9DDR 0xFFFE28 +#define PADDR 0xFFFE29 +#define PBDDR 0xFFFE2A +#define PCDDR 0xFFFE2B +#define PDDDR 0xFFFE2C +#define PEDDR 0xFFFE2D +#define PFDDR 0xFFFE2E +#define PGDDR 0xFFFE2F +#define PHDDR 0xFFFF74 + +#define PFCR0 0xFFFE32 +#define PFCR1 0xFFFE33 +#define PFCR2 0xFFFE34 + +#define PAPCR 0xFFFE36 +#define PBPCR 0xFFFE37 +#define PCPCR 0xFFFE38 +#define PDPCR 0xFFFE39 +#define PEPCR 0xFFFE3A + +#define P3ODR 0xFFFE3C +#define PAODR 0xFFFE3D + +#define P1DR 0xFFFF60 +#define P2DR 0xFFFF61 +#define P3DR 0xFFFF62 +#define P4DR 0xFFFF63 +#define P5DR 0xFFFF64 +#define P6DR 0xFFFF65 +#define P7DR 0xFFFF66 +#define P8DR 0xFFFF67 +#define P9DR 0xFFFF68 +#define PADR 0xFFFF69 +#define PBDR 0xFFFF6A +#define PCDR 0xFFFF6B +#define PDDR 0xFFFF6C +#define PEDR 0xFFFF6D +#define PFDR 0xFFFF6E +#define PGDR 0xFFFF6F +#define PHDR 0xFFFF72 + +#define PORT1 0xFFFF50 +#define PORT2 0xFFFF51 +#define PORT3 0xFFFF52 +#define PORT4 0xFFFF53 +#define PORT5 0xFFFF54 +#define PORT6 0xFFFF55 +#define PORT7 0xFFFF56 +#define PORT8 0xFFFF57 +#define PORT9 0xFFFF58 +#define PORTA 0xFFFF59 +#define PORTB 0xFFFF5A +#define PORTC 0xFFFF5B +#define PORTD 0xFFFF5C +#define PORTE 0xFFFF5D +#define PORTF 0xFFFF5E +#define PORTG 0xFFFF5F +#define PORTH 0xFFFF70 + +#define PCR 0xFFFF46 +#define PMR 0xFFFF47 +#define NDERH 0xFFFF48 +#define NDERL 0xFFFF49 +#define PODRH 0xFFFF4A +#define PODRL 0xFFFF4B 
+#define NDRH1 0xFFFF4C +#define NDRL1 0xFFFF4D +#define NDRH2 0xFFFF4E +#define NDRL2 0xFFFF4F + +#define SMR0 0xFFFF78 +#define BRR0 0xFFFF79 +#define SCR0 0xFFFF7A +#define TDR0 0xFFFF7B +#define SSR0 0xFFFF7C +#define RDR0 0xFFFF7D +#define SCMR0 0xFFFF7E +#define SMR1 0xFFFF80 +#define BRR1 0xFFFF81 +#define SCR1 0xFFFF82 +#define TDR1 0xFFFF83 +#define SSR1 0xFFFF84 +#define RDR1 0xFFFF85 +#define SCMR1 0xFFFF86 +#define SMR2 0xFFFF88 +#define BRR2 0xFFFF89 +#define SCR2 0xFFFF8A +#define TDR2 0xFFFF8B +#define SSR2 0xFFFF8C +#define RDR2 0xFFFF8D +#define SCMR2 0xFFFF8E + +#define IRCR0 0xFFFE1E +#define SEMR 0xFFFDA8 + +#define MDCR 0xFFFF3E +#define SYSCR 0xFFFF3D +#define MSTPCRH 0xFFFF40 +#define MSTPCRL 0xFFFF41 +#define FLMCR1 0xFFFFC8 +#define FLMCR2 0xFFFFC9 +#define EBR1 0xFFFFCA +#define EBR2 0xFFFFCB +#define CTGARC_RAMCR 0xFFFECE +#define SBYCR 0xFFFF3A +#define SCKCR 0xFFFF3B +#define PLLCR 0xFFFF45 + +#define TSTR 0xFFFFC0 +#define TSNC 0XFFFFC1 + +#define TCR0 0xFFFFD0 +#define TMDR0 0xFFFFD1 +#define TIORH0 0xFFFFD2 +#define TIORL0 0xFFFFD3 +#define TIER0 0xFFFFD4 +#define TSR0 0xFFFFD5 +#define TCNT0 0xFFFFD6 +#define GRA0 0xFFFFD8 +#define GRB0 0xFFFFDA +#define GRC0 0xFFFFDC +#define GRD0 0xFFFFDE +#define TCR1 0xFFFFE0 +#define TMDR1 0xFFFFE1 +#define TIORH1 0xFFFFE2 +#define TIORL1 0xFFFFE3 +#define TIER1 0xFFFFE4 +#define TSR1 0xFFFFE5 +#define TCNT1 0xFFFFE6 +#define GRA1 0xFFFFE8 +#define GRB1 0xFFFFEA +#define TCR2 0xFFFFF0 +#define TMDR2 0xFFFFF1 +#define TIORH2 0xFFFFF2 +#define TIORL2 0xFFFFF3 +#define TIER2 0xFFFFF4 +#define TSR2 0xFFFFF5 +#define TCNT2 0xFFFFF6 +#define GRA2 0xFFFFF8 +#define GRB2 0xFFFFFA +#define TCR3 0xFFFE80 +#define TMDR3 0xFFFE81 +#define TIORH3 0xFFFE82 +#define TIORL3 0xFFFE83 +#define TIER3 0xFFFE84 +#define TSR3 0xFFFE85 +#define TCNT3 0xFFFE86 +#define GRA3 0xFFFE88 +#define GRB3 0xFFFE8A +#define GRC3 0xFFFE8C +#define GRD3 0xFFFE8E +#define TCR4 0xFFFE90 +#define TMDR4 0xFFFE91 +#define TIORH4 
0xFFFE92 +#define TIORL4 0xFFFE93 +#define TIER4 0xFFFE94 +#define TSR4 0xFFFE95 +#define TCNT4 0xFFFE96 +#define GRA4 0xFFFE98 +#define GRB4 0xFFFE9A +#define TCR5 0xFFFEA0 +#define TMDR5 0xFFFEA1 +#define TIORH5 0xFFFEA2 +#define TIORL5 0xFFFEA3 +#define TIER5 0xFFFEA4 +#define TSR5 0xFFFEA5 +#define TCNT5 0xFFFEA6 +#define GRA5 0xFFFEA8 +#define GRB5 0xFFFEAA + +#define _8TCR0 0xFFFFB0 +#define _8TCR1 0xFFFFB1 +#define _8TCSR0 0xFFFFB2 +#define _8TCSR1 0xFFFFB3 +#define _8TCORA0 0xFFFFB4 +#define _8TCORA1 0xFFFFB5 +#define _8TCORB0 0xFFFFB6 +#define _8TCORB1 0xFFFFB7 +#define _8TCNT0 0xFFFFB8 +#define _8TCNT1 0xFFFFB9 + +#define TCSR 0xFFFFBC +#define TCNT 0xFFFFBD +#define RSTCSRW 0xFFFFBE +#define RSTCSRR 0xFFFFBF + +#endif /* __KERNEL__ */ +#endif /* __REGS_H8S267x__ */ diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h index 8fdd9e2e8833..962f5eb32d16 100644 --- a/include/asm-h8300/semaphore.h +++ b/include/asm-h8300/semaphore.h @@ -83,7 +83,6 @@ extern spinlock_t semaphore_wake_lock; * "down_failed" is a special asm handler that calls the C * routine that actually waits. 
See arch/m68k/lib/semaphore.S */ -#if defined(__H8300H__) static inline void down(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -96,9 +95,9 @@ static inline void down(struct semaphore * sem) __asm__ __volatile__( "stc ccr,r3l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%1, er1\n\t" + "mov.l %0, er1\n\t" "dec.l #1,er1\n\t" - "mov.l er1,@%1\n\t" + "mov.l er1,%0\n\t" "bpl 1f\n\t" "ldc r3l,ccr\n\t" "jsr @___down\n\t" @@ -106,38 +105,11 @@ static inline void down(struct semaphore * sem) "1:\n\t" "ldc r3l,ccr\n" "2:" - : "=m"(sem->count) - : "g" (count) - : "cc", "er1", "er2", "er3", "er4", "memory"); + : "+m"(*count) + : + : "cc", "er1", "er2", "er3"); } -#endif -#if defined(__H8300S__) -static inline void down(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%1, er1\n\t" - "dec.l #1,er1\n\t" - "mov.l er1,@%1\n\t" - "ldc r3l,exr\n\t" - "bpl 1f\n\t" - "jsr @___down\n" - "1:" - : "=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); -} -#endif -#if defined(__H8300H__) static inline int down_interruptible(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -148,56 +120,25 @@ static inline int down_interruptible(struct semaphore * sem) count = &(sem->count); __asm__ __volatile__( - "stc ccr,r3l\n\t" + "stc ccr,r1l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%2, er2\n\t" + "mov.l %1, er2\n\t" "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" + "mov.l er2,%1\n\t" "bpl 1f\n\t" - "ldc r3l,ccr\n\t" + "ldc r1l,ccr\n\t" "jsr @___down_interruptible\n\t" "bra 2f\n" "1:\n\t" - "ldc r3l,ccr\n\t" - "sub.l %0,%0\n" - "2:" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); - return (int)count; -} -#endif -#if defined(__H8300S__) -static inline int down_interruptible(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if 
WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%2, er2\n\t" - "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" - "ldc r3l,exr\n\t" - "bmi 1f\n\t" + "ldc r1l,ccr\n\t" "sub.l %0,%0\n\t" - "bra 2f\n" - "1:\n\t" - "jsr @___down_interruptible\n" - "2:" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); + "2:\n\t" + : "=r" (count),"+m" (*count) + : + : "cc", "er1", "er2", "er3"); return (int)count; } -#endif -#if defined(__H8300H__) static inline int down_trylock(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -210,60 +151,26 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "stc ccr,r3l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%2,er2\n\t" + "mov.l %0,er2\n\t" "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" + "mov.l er2,%0\n\t" "bpl 1f\n\t" "ldc r3l,ccr\n\t" - "jmp @3f\n" - "1:\n\t" - "ldc r3l,ccr\n\t" - "sub.l %0,%0\n" + "jmp @3f\n\t" LOCK_SECTION_START(".align 2\n\t") "3:\n\t" "jsr @___down_trylock\n\t" "jmp @2f\n\t" LOCK_SECTION_END - "2:" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er2", "er3", "memory"); - return (int)count; -} -#endif -#if defined(__H8300S__) -static inline int down_trylock(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%2,er2\n\t" - "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" - "ldc r3l,exr\n\t" - "bpl 1f\n\t" - "jmp @3f\n" "1:\n\t" - "sub.l %0,%0\n\t" - LOCK_SECTION_START(".align 2\n\t") - "3:\n\t" - "jsr @___down_trylock\n\t" - "jmp @2f\n\t" - LOCK_SECTION_END - "2:\n\t" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); + "ldc r3l,ccr\n\t" + "sub.l %1,%1\n" + "2:" + : "+m" (*count),"=r"(count) + : + : "cc", "er1","er2", "er3"); return 
(int)count; } -#endif /* * Note! This is subtle. We jump to wake people up only if @@ -271,7 +178,6 @@ static inline int down_trylock(struct semaphore * sem) * The default case (no contention) will result in NO * jumps for both down() and up(). */ -#if defined(__H8300H__) static inline void up(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -284,47 +190,19 @@ static inline void up(struct semaphore * sem) __asm__ __volatile__( "stc ccr,r3l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%1,er1\n\t" + "mov.l %0,er1\n\t" "inc.l #1,er1\n\t" - "mov.l er1,@%1\n\t" + "mov.l er1,%0\n\t" "ldc r3l,ccr\n\t" "sub.l er2,er2\n\t" "cmp.l er2,er1\n\t" "bgt 1f\n\t" "jsr @___up\n" "1:" - : "=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); + : "+m"(*count) + : + : "cc", "er1", "er2", "er3"); } -#endif -#if defined(__H8300S__) -static inline void up(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%1,er1\n\t" - "inc.l #1,er1\n\t" - "mov.l er1,@%1\n\t" - "ldc r3l,exr\n\t" - "sub.l er2,er2\n\t" - "cmp.l er2,er1\n\t" - "bgt 1f\n\t" - "jsr @___up\n" - "1:" - : "=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); -} -#endif #endif /* __ASSEMBLY__ */ diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h index 2c187ff8e348..c2d2457b138c 100644 --- a/include/asm-h8300/system.h +++ b/include/asm-h8300/system.h @@ -35,6 +35,7 @@ * * H8/300 Porting 2002/09/04 Yoshinori Sato */ + asmlinkage void resume(void); #define switch_to(prev,next,last) { \ void *_last; \ @@ -52,7 +53,6 @@ asmlinkage void resume(void); (last) = _last; \ } -#if defined(__H8300H__) #define __sti() asm volatile ("andc #0x7f,ccr") #define __cli() asm volatile ("orc #0x80,ccr") @@ -69,25 +69,6 @@ asmlinkage void resume(void); ((flags & 0x80) == 0x80); \ }) -#endif -#if 
defined(__H8300S__) -#define __sti() asm volatile ("andc #0xf8,exr") -#define __cli() asm volatile ("orc #0x07,exr") - -#define __save_flags(x) \ - asm volatile ("stc exr,r0l\n\tmov.l er0,%0":"=r" (x) : : "er0") - -#define __restore_flags(x) \ - asm volatile ("mov.l %0,er0\n\tldc r0l,exr": :"r" (x) : "er0") -#endif - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - __save_flags(flags); \ - ((flags & 0x07) == 0x07); \ -}) - #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") /* For spinlocks etc */ diff --git a/include/asm-h8300/timex.h b/include/asm-h8300/timex.h index 99a472819dc1..4ea243a11566 100644 --- a/include/asm-h8300/timex.h +++ b/include/asm-h8300/timex.h @@ -6,9 +6,9 @@ #ifndef _ASM_H8300_TIMEX_H #define _ASM_H8300_TIMEX_H -/* Such a description is OK ? */ -#define TIMEX_DEPEND_HEADER(target) <asm/target/timer_rate.h> -#include TIMEX_DEPEND_HEADER(TARGET) +#define H8300_TIMER_DEFINE +#include <asm/machine-depend.h> +#undef H8300_TIMER_DEFINE #define CLOCK_TICK_RATE H8300_TIMER_FREQ #define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */ diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h index 350048b1f39e..6b56aa3eaa39 100644 --- a/include/asm-i386/acpi.h +++ b/include/asm-i386/acpi.h @@ -106,21 +106,37 @@ :"0"(n_hi), "1"(n_lo)) -#if defined(CONFIG_ACPI_BOOT) && defined(CONFIG_X86_LOCAL_APIC) - extern int acpi_lapic; -#else - #define acpi_lapic 0 -#endif +#ifdef CONFIG_ACPI_BOOT +extern int acpi_lapic; +extern int acpi_ioapic; -#if defined(CONFIG_ACPI_BOOT) && defined(CONFIG_X86_IO_APIC) - extern int acpi_ioapic; -#else - #define acpi_ioapic 0 -#endif -#ifdef CONFIG_ACPI_BOOT /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ #define FIX_ACPI_PAGES 4 + +#ifdef CONFIG_X86_IO_APIC +extern int skip_ioapic_setup; + +static inline void disable_ioapic_setup(void) +{ + skip_ioapic_setup = 1; +} + +static inline int ioapic_setup_disabled(void) +{ + return skip_ioapic_setup; +} + 
+#else +static inline void disable_ioapic_setup(void) +{ } + +#endif + +#else /* CONFIG_ACPI_BOOT */ +# define acpi_lapic 0 +# define acpi_ioapic 0 + #endif #ifdef CONFIG_ACPI_SLEEP diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h index c21ed08175d5..dab6aa34c6fa 100644 --- a/include/asm-i386/mach-bigsmp/mach_apic.h +++ b/include/asm-i386/mach-bigsmp/mach_apic.h @@ -86,7 +86,10 @@ extern u8 bios_cpu_apicid[]; static inline int cpu_present_to_apicid(int mps_cpu) { - return (int) bios_cpu_apicid[mps_cpu]; + if (mps_cpu < NR_CPUS) + return (int)bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h index aa7fd107c1c9..b744ac27f6fc 100644 --- a/include/asm-i386/mach-es7000/mach_apic.h +++ b/include/asm-i386/mach-es7000/mach_apic.h @@ -106,8 +106,10 @@ static inline int cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) return boot_cpu_physical_apicid; - else + else if (mps_cpu < NR_CPUS) return (int) bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h index 2f9f19237460..98b4e5921aa8 100644 --- a/include/asm-i386/mach-numaq/mach_apic.h +++ b/include/asm-i386/mach-numaq/mach_apic.h @@ -65,9 +65,17 @@ static inline int cpu_to_logical_apicid(int cpu) return (int)cpu_2_logical_apicid[cpu]; } +/* + * Supporting over 60 cpus on NUMA-Q requires a locality-dependent + * cpu to APIC ID relation to properly interact with the intelligent + * mode of the cluster controller. 
+ */ static inline int cpu_present_to_apicid(int mps_cpu) { - return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + if (mps_cpu < 60) + return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + else + return BAD_APICID; } static inline int generate_logical_apicid(int quad, int phys_apicid) diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h index f79d5df55e1a..73a4a1077e85 100644 --- a/include/asm-i386/mach-summit/mach_apic.h +++ b/include/asm-i386/mach-summit/mach_apic.h @@ -87,7 +87,10 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { - return (int) bios_cpu_apicid[mps_cpu]; + if (mps_cpu < NR_CPUS) + return (int)bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h index b596438496a1..a4ee37cade68 100644 --- a/include/asm-i386/mpspec.h +++ b/include/asm-i386/mpspec.h @@ -37,8 +37,14 @@ extern void mp_register_lapic_address (u64 address); extern void mp_register_ioapic (u8 id, u32 address, u32 irq_base); extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 global_irq); extern void mp_config_acpi_legacy_irqs (void); -extern void mp_config_ioapic_for_sci(int irq); extern void mp_parse_prt (void); + +#ifdef CONFIG_X86_IO_APIC +extern void mp_config_ioapic_for_sci(int irq); +#else +static inline void mp_config_ioapic_for_sci(int irq) +{ } +#endif #endif /*CONFIG_ACPI_BOOT*/ #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h index f1114b6b5275..0d22ec30019b 100644 --- a/include/asm-i386/suspend.h +++ b/include/asm-i386/suspend.h @@ -38,7 +38,7 @@ struct saved_context { extern void save_processor_state(void); extern void restore_processor_state(void); -extern void do_magic(int resume); +extern int do_magic(int resume); #ifdef 
CONFIG_ACPI_SLEEP extern unsigned long saved_eip; diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 5b88749e54b2..f2e179d4bb76 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old + i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } @@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old + i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } @@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old - i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } @@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old - i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index af15c6694522..502f51a1a0ee 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -292,7 +292,7 @@ ffz (unsigned long x) { unsigned long result; - __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1))); + result = ia64_popcnt(x & (~x - 1)); return result; } @@ -307,7 +307,7 @@ __ffs (unsigned long x) { unsigned long result; - __asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x)); + result = ia64_popcnt((x-1) & ~x); return result; } @@ -323,7 +323,7 @@ ia64_fls (unsigned long x) long double d = x; long exp; - __asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d)); + exp = ia64_getf_exp(d); 
return exp - 0xffff; } @@ -349,7 +349,7 @@ static __inline__ unsigned long hweight64 (unsigned long x) { unsigned long result; - __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x)); + result = ia64_popcnt(x); return result; } diff --git a/include/asm-ia64/byteorder.h b/include/asm-ia64/byteorder.h index a4e3abfc3477..434686fccb95 100644 --- a/include/asm-ia64/byteorder.h +++ b/include/asm-ia64/byteorder.h @@ -7,13 +7,14 @@ */ #include <asm/types.h> +#include <asm/intrinsics.h> static __inline__ __const__ __u64 __ia64_swab64 (__u64 x) { __u64 result; - __asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x)); + result = ia64_mux1(x, ia64_mux1_rev); return result; } diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h index 73a5edf825b8..8e316f179815 100644 --- a/include/asm-ia64/current.h +++ b/include/asm-ia64/current.h @@ -6,8 +6,12 @@ * David Mosberger-Tang <davidm@hpl.hp.com> */ -/* In kernel mode, thread pointer (r13) is used to point to the - current task structure. */ -register struct task_struct *current asm ("r13"); +#include <asm/intrinsics.h> + +/* + * In kernel mode, thread pointer (r13) is used to point to the current task + * structure. + */ +#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP)) #endif /* _ASM_IA64_CURRENT_H */ diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h index da812415f634..74c542acc1e8 100644 --- a/include/asm-ia64/delay.h +++ b/include/asm-ia64/delay.h @@ -5,7 +5,7 @@ * Delay routines using a pre-computed "cycles/usec" value. 
* * Copyright (C) 1998, 1999 Hewlett-Packard Co - * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com> + * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> @@ -17,12 +17,14 @@ #include <linux/sched.h> #include <linux/compiler.h> +#include <asm/intrinsics.h> #include <asm/processor.h> static __inline__ void ia64_set_itm (unsigned long val) { - __asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_CR_ITM, val); + ia64_srlz_d(); } static __inline__ unsigned long @@ -30,20 +32,23 @@ ia64_get_itm (void) { unsigned long result; - __asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory"); + result = ia64_getreg(_IA64_REG_CR_ITM); + ia64_srlz_d(); return result; } static __inline__ void ia64_set_itv (unsigned long val) { - __asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_CR_ITV, val); + ia64_srlz_d(); } static __inline__ void ia64_set_itc (unsigned long val) { - __asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_AR_ITC, val); + ia64_srlz_d(); } static __inline__ unsigned long @@ -51,10 +56,13 @@ ia64_get_itc (void) { unsigned long result; - __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); + result = ia64_getreg(_IA64_REG_AR_ITC); + ia64_barrier(); #ifdef CONFIG_ITANIUM - while (unlikely((__s32) result == -1)) - __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); + while (unlikely((__s32) result == -1)) { + result = ia64_getreg(_IA64_REG_AR_ITC); + ia64_barrier(); + } #endif return result; } @@ -62,15 +70,11 @@ ia64_get_itc (void) static __inline__ void __delay (unsigned long loops) { - unsigned long saved_ar_lc; - if (loops < 1) return; - __asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc)); - __asm__ __volatile__("mov 
ar.lc=%0;;" :: "r"(loops - 1)); - __asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;"); - __asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc)); + while (loops--) + ia64_nop(0); } static __inline__ void diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h new file mode 100644 index 000000000000..5175f0345555 --- /dev/null +++ b/include/asm-ia64/gcc_intrin.h @@ -0,0 +1,584 @@ +#ifndef _ASM_IA64_GCC_INTRIN_H +#define _ASM_IA64_GCC_INTRIN_H +/* + * + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com> + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com> + * + */ + +/* define this macro to get some asm stmts included in 'c' files */ +#define ASM_SUPPORTED + +/* Optimization barrier */ +/* The "volatile" is due to gcc bugs */ +#define ia64_barrier() asm volatile ("":::"memory") + +#define ia64_stop() asm volatile (";;"::) + +#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum)) + +#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum)) + +extern void ia64_bad_param_for_setreg (void); +extern void ia64_bad_param_for_getreg (void); + +#define ia64_setreg(regnum, val) \ +({ \ + switch (regnum) { \ + case _IA64_REG_PSR_L: \ + asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov ar%0=%1" :: \ + "i" (regnum - _IA64_REG_AR_KR0), \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + asm volatile ("mov cr%0=%1" :: \ + "i" (regnum - _IA64_REG_CR_DCR), \ + "r"(val): "memory" ); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov r12=%0" :: \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_GP: \ + asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \ + break; \ + default: \ + ia64_bad_param_for_setreg(); \ + break; \ + } \ +}) + +#define ia64_getreg(regnum) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (regnum) { \ + case _IA64_REG_GP: \ + asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_IP: \ + asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_PSR: \ + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_TP: /* for current() */ \ + { \ + register __u64 ia64_r13 asm ("r13"); \ + ia64_intri_res = ia64_r13; \ + } \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \ + : "i"(regnum - _IA64_REG_AR_KR0)); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \ + : "i" (regnum - _IA64_REG_CR_DCR)); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \ + break; \ + default: \ + ia64_bad_param_for_getreg(); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#define ia64_hint_pause 0 + +#define ia64_hint(mode) \ +({ \ + switch (mode) { \ + case ia64_hint_pause: \ + asm volatile ("hint @pause" ::: "memory"); \ + break; \ + } \ +}) + + +/* Integer values for mux1 instruction */ +#define ia64_mux1_brcst 0 +#define ia64_mux1_mix 8 +#define ia64_mux1_shuf 9 +#define ia64_mux1_alt 10 +#define ia64_mux1_rev 11 + +#define ia64_mux1(x, mode) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (mode) { \ + case ia64_mux1_brcst: \ + asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_mix: \ + asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_shuf: \ + asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_alt: \ + asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_rev: \ + asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#define ia64_popcnt(x) \ +({ \ + __u64 ia64_intri_res; \ + asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_getf_exp(x) \ +({ \ + long ia64_intri_res; \ + \ + asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_shrp(a, b, count) \ +({ \ + __u64 ia64_intri_res; \ + asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \ + ia64_intri_res; \ +}) + +#define ia64_ldfs(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldfd(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldfd 
%0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldfe(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldf8(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldf_fill(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_stfs(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stfd(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stfe(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stf8(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stf_spill(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_fetchadd4_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd4.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd4_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd4.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd8.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile 
("fetchadd8.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_xchg1(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg2(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg4(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg8(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_acq(ptr, new, old) \ +({ \ + __u64 
ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_mf() asm volatile ("mf" ::: "memory") +#define ia64_mfa() asm volatile ("mf.a" ::: "memory") + +#define ia64_invala() asm volatile ("invala" ::: "memory") + +#define ia64_thash(addr) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ + ia64_intri_res; \ +}) + +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory") + +#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory"); + +#define ia64_nop(x) asm volatile ("nop %0"::"i"(x)); + +#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory") + +#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory") + + +#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") + +#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") + +#define ia64_tpa(addr) \ +({ \ + __u64 ia64_pa; \ + asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : 
"r"(addr) : "memory"); \ + ia64_pa; \ +}) + +#define __ia64_set_dbr(index, val) \ + asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_ibr(index, val) \ + asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_pkr(index, val) \ + asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_pmc(index, val) \ + asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_pmd(index, val) \ + asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_rr(index, val) \ + asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); + +#define ia64_get_cpuid(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ + ia64_intri_res; \ +}) + +#define __ia64_get_dbr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_ibr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_pkr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_pmc(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + + +#define ia64_get_pmd(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_rr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ + ia64_intri_res; \ +}) + +#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") + + +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") + +#define ia64_ssm(mask) asm 
volatile ("ssm %0":: "i"((mask)) : "memory") +#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") +#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") +#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") + +#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) + +#define ia64_ptcga(addr, size) \ + asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory") + +#define ia64_ptcl(addr, size) \ + asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory") + +#define ia64_ptri(addr, size) \ + asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory") + +#define ia64_ptrd(addr, size) \ + asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") + +/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */ + +#define ia64_lfhint_none 0 +#define ia64_lfhint_nt1 1 +#define ia64_lfhint_nt2 2 +#define ia64_lfhint_nta 3 + +#define ia64_lfetch(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.nta [%0]" : : "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_excl(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch.excl [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_fault(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch.fault [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); 
\ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_fault_excl(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \ + break; \ + } \ +}) + +#define ia64_intrin_local_irq_restore(x) \ +do { \ + asm volatile (" cmp.ne p6,p7=%0,r0;;" \ + "(p6) ssm psr.i;" \ + "(p7) rsm psr.i;;" \ + "(p6) srlz.d" \ + :: "r"((x)) : "p6", "p7", "memory"); \ +} while (0) + +#endif /* _ASM_IA64_GCC_INTRIN_H */ diff --git a/include/asm-ia64/ia64regs.h b/include/asm-ia64/ia64regs.h new file mode 100644 index 000000000000..1757f1c11ad4 --- /dev/null +++ b/include/asm-ia64/ia64regs.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2002,2003 Intel Corp. + * Jun Nakajima <jun.nakajima@intel.com> + * Suresh Siddha <suresh.b.siddha@intel.com> + */ + +#ifndef _ASM_IA64_IA64REGS_H +#define _ASM_IA64_IA64REGS_H + +/* + * Register Names for getreg() and setreg(). + * + * The "magic" numbers happen to match the values used by the Intel compiler's + * getreg()/setreg() intrinsics. 
+ */ + +/* Special Registers */ + +#define _IA64_REG_IP 1016 /* getreg only */ +#define _IA64_REG_PSR 1019 +#define _IA64_REG_PSR_L 1019 + +/* General Integer Registers */ + +#define _IA64_REG_GP 1025 /* R1 */ +#define _IA64_REG_R8 1032 /* R8 */ +#define _IA64_REG_R9 1033 /* R9 */ +#define _IA64_REG_SP 1036 /* R12 */ +#define _IA64_REG_TP 1037 /* R13 */ + +/* Application Registers */ + +#define _IA64_REG_AR_KR0 3072 +#define _IA64_REG_AR_KR1 3073 +#define _IA64_REG_AR_KR2 3074 +#define _IA64_REG_AR_KR3 3075 +#define _IA64_REG_AR_KR4 3076 +#define _IA64_REG_AR_KR5 3077 +#define _IA64_REG_AR_KR6 3078 +#define _IA64_REG_AR_KR7 3079 +#define _IA64_REG_AR_RSC 3088 +#define _IA64_REG_AR_BSP 3089 +#define _IA64_REG_AR_BSPSTORE 3090 +#define _IA64_REG_AR_RNAT 3091 +#define _IA64_REG_AR_FCR 3093 +#define _IA64_REG_AR_EFLAG 3096 +#define _IA64_REG_AR_CSD 3097 +#define _IA64_REG_AR_SSD 3098 +#define _IA64_REG_AR_CFLAG 3099 +#define _IA64_REG_AR_FSR 3100 +#define _IA64_REG_AR_FIR 3101 +#define _IA64_REG_AR_FDR 3102 +#define _IA64_REG_AR_CCV 3104 +#define _IA64_REG_AR_UNAT 3108 +#define _IA64_REG_AR_FPSR 3112 +#define _IA64_REG_AR_ITC 3116 +#define _IA64_REG_AR_PFS 3136 +#define _IA64_REG_AR_LC 3137 +#define _IA64_REG_AR_EC 3138 + +/* Control Registers */ + +#define _IA64_REG_CR_DCR 4096 +#define _IA64_REG_CR_ITM 4097 +#define _IA64_REG_CR_IVA 4098 +#define _IA64_REG_CR_PTA 4104 +#define _IA64_REG_CR_IPSR 4112 +#define _IA64_REG_CR_ISR 4113 +#define _IA64_REG_CR_IIP 4115 +#define _IA64_REG_CR_IFA 4116 +#define _IA64_REG_CR_ITIR 4117 +#define _IA64_REG_CR_IIPA 4118 +#define _IA64_REG_CR_IFS 4119 +#define _IA64_REG_CR_IIM 4120 +#define _IA64_REG_CR_IHA 4121 +#define _IA64_REG_CR_LID 4160 +#define _IA64_REG_CR_IVR 4161 /* getreg only */ +#define _IA64_REG_CR_TPR 4162 +#define _IA64_REG_CR_EOI 4163 +#define _IA64_REG_CR_IRR0 4164 /* getreg only */ +#define _IA64_REG_CR_IRR1 4165 /* getreg only */ +#define _IA64_REG_CR_IRR2 4166 /* getreg only */ +#define _IA64_REG_CR_IRR3 4167 /* 
getreg only */ +#define _IA64_REG_CR_ITV 4168 +#define _IA64_REG_CR_PMV 4169 +#define _IA64_REG_CR_CMCV 4170 +#define _IA64_REG_CR_LRR0 4176 +#define _IA64_REG_CR_LRR1 4177 + +/* Indirect Registers for getindreg() and setindreg() */ + +#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */ +#define _IA64_REG_INDR_DBR 9001 +#define _IA64_REG_INDR_IBR 9002 +#define _IA64_REG_INDR_PKR 9003 +#define _IA64_REG_INDR_PMC 9004 +#define _IA64_REG_INDR_PMD 9005 +#define _IA64_REG_INDR_RR 9006 + +#endif /* _ASM_IA64_IA64REGS_H */ diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index 19408747bd17..743049ca0851 100644 --- a/include/asm-ia64/intrinsics.h +++ b/include/asm-ia64/intrinsics.h @@ -8,8 +8,17 @@ * David Mosberger-Tang <davidm@hpl.hp.com> */ +#ifndef __ASSEMBLY__ #include <linux/config.h> +/* include compiler specific intrinsics */ +#include <asm/ia64regs.h> +#ifdef __INTEL_COMPILER +# include <asm/intel_intrin.h> +#else +# include <asm/gcc_intrin.h> +#endif + /* * Force an unresolved reference if someone tries to use * ia64_fetch_and_add() with a bad value. 
@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ({ \ switch (sz) { \ case 4: \ - __asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \ - : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \ + tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \ break; \ \ case 8: \ - __asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \ - : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \ + tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \ break; \ \ default: \ @@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); (__typeof__(*(v))) (_tmp); /* return old value */ \ }) -#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */ +#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */ /* * This function doesn't exist, so you'll get a linker error if * something tries to do an invalid xchg(). */ -extern void __xchg_called_with_bad_pointer (void); - -static __inline__ unsigned long -__xchg (unsigned long x, volatile void *ptr, int size) -{ - unsigned long result; - - switch (size) { - case 1: - __asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - - case 2: - __asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - - case 4: - __asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - - case 8: - __asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - } - __xchg_called_with_bad_pointer(); - return x; -} +extern void ia64_xchg_called_with_bad_pointer (void); + +#define __xchg(x,ptr,size) \ +({ \ + unsigned long __xchg_result; \ + \ + switch (size) { \ + case 1: \ + __xchg_result = ia64_xchg1((__u8 *)ptr, x); \ + break; \ + \ + case 2: \ + __xchg_result = ia64_xchg2((__u16 *)ptr, x); \ + break; \ + \ + case 4: \ + __xchg_result = ia64_xchg4((__u32 *)ptr, 
x); \ + break; \ + \ + case 8: \ + __xchg_result = ia64_xchg8((__u64 *)ptr, x); \ + break; \ + default: \ + ia64_xchg_called_with_bad_pointer(); \ + } \ + __xchg_result; \ +}) #define xchg(ptr,x) \ ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr)))) @@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size) * This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). */ -extern long __cmpxchg_called_with_bad_pointer(void); +extern long ia64_cmpxchg_called_with_bad_pointer (void); #define ia64_cmpxchg(sem,ptr,old,new,size) \ ({ \ - __typeof__(ptr) _p_ = (ptr); \ - __typeof__(new) _n_ = (new); \ __u64 _o_, _r_; \ \ switch (size) { \ @@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void); case 8: _o_ = (__u64) (long) (old); break; \ default: break; \ } \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \ switch (size) { \ case 1: \ - __asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \ break; \ \ case 2: \ - __asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \ break; \ \ case 4: \ - __asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \ break; \ \ case 8: \ - __asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \ break; \ \ default: \ - _r_ = __cmpxchg_called_with_bad_pointer(); \ + _r_ = ia64_cmpxchg_called_with_bad_pointer(); \ break; \ } \ (__typeof__(old)) _r_; \ }) -#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr))) -#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg("rel", (ptr), (o), (n), 
sizeof(*(ptr))) +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr))) +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) /* for compatibility with other platforms: */ #define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n) @@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void); if (_cmpxchg_bugcheck_count-- <= 0) { \ void *ip; \ extern int printk(const char *fmt, ...); \ - asm ("mov %0=ip" : "=r"(ip)); \ + ip = ia64_getreg(_IA64_REG_IP); \ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \ break; \ } \ @@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void); # define CMPXCHG_BUGCHECK(v) #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ +#endif #endif /* _ASM_IA64_INTRINSICS_H */ diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index 1297c6bba42b..297efb06c347 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h @@ -52,6 +52,7 @@ extern unsigned int num_io_spaces; # ifdef __KERNEL__ +#include <asm/intrinsics.h> #include <asm/machvec.h> #include <asm/page.h> #include <asm/system.h> @@ -85,7 +86,7 @@ phys_to_virt (unsigned long address) * Memory fence w/accept. This should never be used in code that is * not IA-64 specific. */ -#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") +#define __ia64_mf_a() ia64_mfa() static inline const unsigned long __ia64_get_io_port_base (void) diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index a277c8ff9595..471a2c91cd29 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -155,7 +155,7 @@ struct ia64_machine_vector { ia64_mv_readw_t *readw; ia64_mv_readl_t *readl; ia64_mv_readq_t *readq; -}; +} __attribute__((__aligned__(16))); /* align attrib? 
see above comment */ #define MACHVEC_INIT(name) \ { \ diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h index 95e786212982..0255260f61bc 100644 --- a/include/asm-ia64/mmu_context.h +++ b/include/asm-ia64/mmu_context.h @@ -158,9 +158,7 @@ reload_context (mm_context_t context) ia64_set_rr(0x4000000000000000, rr2); ia64_set_rr(0x6000000000000000, rr3); ia64_set_rr(0x8000000000000000, rr4); - ia64_insn_group_barrier(); ia64_srlz_i(); /* srlz.i implies srlz.d */ - ia64_insn_group_barrier(); } static inline void diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 44b3f419c854..56f5c49a4e95 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -9,6 +9,7 @@ #include <linux/config.h> +#include <asm/intrinsics.h> #include <asm/types.h> /* @@ -143,7 +144,7 @@ get_order (unsigned long size) double d = size - 1; long order; - __asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d)); + order = ia64_getf_exp(d); order = order - PAGE_SHIFT - 0xffff + 1; if (order < 0) order = 0; diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 5640226e8a15..e3152bc4fb39 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector /* Initialize the processor controlled caches */ static inline s64 -ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict) +ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict); + PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest); return iprv.status; } diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h index 26afeeb46ea6..b8e81aa3bffa 100644 --- a/include/asm-ia64/perfmon.h +++ b/include/asm-ia64/perfmon.h @@ -70,64 +70,70 @@ typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type * Request structure used to define a context */ typedef struct { - 
pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */ - unsigned long ctx_flags; /* noblock/block */ - unsigned int ctx_nextra_sets; /* number of extra event sets (you always get 1) */ - int ctx_fd; /* return arg: unique identification for context */ - void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */ - unsigned long ctx_reserved[11]; /* for future use */ + pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */ + unsigned long ctx_flags; /* noblock/block */ + unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */ + unsigned short ctx_reserved1; /* for future use */ + int ctx_fd; /* return arg: unique identification for context */ + void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */ + unsigned long ctx_reserved2[11];/* for future use */ } pfarg_context_t; /* * Request structure used to write/read a PMC or PMD */ typedef struct { - unsigned int reg_num; /* which register */ - unsigned int reg_set; /* event set for this register */ + unsigned int reg_num; /* which register */ + unsigned short reg_set; /* event set for this register */ + unsigned short reg_reserved1; /* for future use */ - unsigned long reg_value; /* initial pmc/pmd value */ - unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */ + unsigned long reg_value; /* initial pmc/pmd value */ + unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */ - unsigned long reg_long_reset; /* reset after buffer overflow notification */ - unsigned long reg_short_reset; /* reset after counter overflow */ + unsigned long reg_long_reset; /* reset after buffer overflow notification */ + unsigned long reg_short_reset; /* reset after counter overflow */ - unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */ - unsigned long reg_random_seed; /* seed value when randomization is used */ - unsigned long reg_random_mask; /* bitmask used to 
limit random value */ - unsigned long reg_last_reset_val;/* return: PMD last reset value */ + unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */ + unsigned long reg_random_seed; /* seed value when randomization is used */ + unsigned long reg_random_mask; /* bitmask used to limit random value */ + unsigned long reg_last_reset_val;/* return: PMD last reset value */ unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */ - unsigned long reg_smpl_eventid; /* opaque sampling event identifier */ + unsigned long reg_smpl_eventid; /* opaque sampling event identifier */ - unsigned long reserved[3]; /* for future use */ + unsigned long reg_reserved2[3]; /* for future use */ } pfarg_reg_t; typedef struct { - unsigned int dbreg_num; /* which debug register */ - unsigned int dbreg_set; /* event set for this register */ - unsigned long dbreg_value; /* value for debug register */ - unsigned long dbreg_flags; /* return: dbreg error */ - unsigned long dbreg_reserved[1]; /* for future use */ + unsigned int dbreg_num; /* which debug register */ + unsigned short dbreg_set; /* event set for this register */ + unsigned short dbreg_reserved1; /* for future use */ + unsigned long dbreg_value; /* value for debug register */ + unsigned long dbreg_flags; /* return: dbreg error */ + unsigned long dbreg_reserved2[1]; /* for future use */ } pfarg_dbreg_t; typedef struct { unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */ - unsigned int ft_reserved; /* reserved for future use */ - unsigned long reserved[4]; /* for future use */ + unsigned int ft_reserved; /* reserved for future use */ + unsigned long reserved[4]; /* for future use */ } pfarg_features_t; typedef struct { - pid_t load_pid; /* process to load the context into */ - unsigned int load_set; /* first event set to load */ - unsigned long load_reserved[2]; /* for future use */ + pid_t load_pid; /* process to load the context into */ + unsigned short load_set; /* 
first event set to load */ + unsigned short load_reserved1; /* for future use */ + unsigned long load_reserved2[3]; /* for future use */ } pfarg_load_t; typedef struct { int msg_type; /* generic message header */ int msg_ctx_fd; /* generic message header */ - unsigned long msg_tstamp; /* for perf tuning */ - unsigned int msg_active_set; /* active set at the time of overflow */ unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */ + unsigned short msg_active_set; /* active set at the time of overflow */ + unsigned short msg_reserved1; /* for future use */ + unsigned int msg_reserved2; /* for future use */ + unsigned long msg_tstamp; /* for perf tuning/debug */ } pfm_ovfl_msg_t; typedef struct { @@ -192,25 +198,28 @@ extern void pfm_handle_work(void); #define PFM_PMD_LONG_RESET 1 #define PFM_PMD_SHORT_RESET 2 -typedef struct { - unsigned int notify_user:1; /* notify user program of overflow */ - unsigned int reset_pmds :2; /* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */ - unsigned int block:1; /* block monitored task on kernel exit */ - unsigned int stop_monitoring:1; /* will mask monitoring via PMCx.plm */ - unsigned int reserved:26; /* for future use */ +typedef union { + unsigned int val; + struct { + unsigned int notify_user:1; /* notify user program of overflow */ + unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */ + unsigned int block_task:1; /* block monitored task on kernel exit */ + unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */ + unsigned int reserved:28; /* for future use */ + } bits; } pfm_ovfl_ctrl_t; typedef struct { - unsigned long ovfl_pmds[4]; /* bitmask of overflowed pmds */ - unsigned long ovfl_notify[4]; /* bitmask of overflow pmds which asked for notification */ - unsigned long pmd_value; /* current 64-bit value of 1st pmd which overflowed */ - unsigned long pmd_last_reset; /* last reset value of 1st pmd which overflowed */ - unsigned long pmd_eventid; /* eventid associated with 1st pmd which 
overflowed */ - unsigned int active_set; /* event set active at the time of the overflow */ - unsigned int reserved1; - unsigned long smpl_pmds[4]; - unsigned long smpl_pmds_values[PMU_MAX_PMDS]; - pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */ + unsigned char ovfl_pmd; /* index of overflowed PMD */ + unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */ + unsigned short active_set; /* event set active at the time of the overflow */ + pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */ + + unsigned long pmd_last_reset; /* last reset value of the PMD */ + unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */ + unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */ + unsigned long pmd_value; /* current 64-bit value of the PMD */ + unsigned long pmd_eventid; /* eventid associated with PMD */ } pfm_ovfl_arg_t; @@ -223,7 +232,7 @@ typedef struct _pfm_buffer_fmt_t { int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg); int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size); int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg); - int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs); + int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp); int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs); diff --git a/include/asm-ia64/perfmon_default_smpl.h b/include/asm-ia64/perfmon_default_smpl.h index 77709625f96f..1c63c7cf7f49 100644 --- a/include/asm-ia64/perfmon_default_smpl.h +++ 
b/include/asm-ia64/perfmon_default_smpl.h @@ -16,7 +16,9 @@ */ typedef struct { unsigned long buf_size; /* size of the buffer in bytes */ - unsigned long reserved[3]; /* for future use */ + unsigned int flags; /* buffer specific flags */ + unsigned int res1; /* for future use */ + unsigned long reserved[2]; /* for future use */ } pfm_default_smpl_arg_t; /* @@ -46,28 +48,27 @@ typedef struct { /* * Entry header in the sampling buffer. The header is directly followed - * with the PMDs saved in increasing index order: PMD4, PMD5, .... How - * many PMDs are present depends on how the session was programmed. + * with the values of the PMD registers of interest saved in increasing + * index order: PMD4, PMD5, and so on. How many PMDs are present depends + * on how the session was programmed. * - * XXX: in this version of the entry, only up to 64 registers can be - * recorded. This should be enough for quite some time. Always check - * sampling format before parsing entries! + * In the case where multiple counters overflow at the same time, multiple + * entries are written consecutively. * - * In the case where multiple counters overflow at the same time, the - * last_reset_value member indicates the initial value of the - * overflowed PMD with the smallest index. For instance, if PMD2 and - * PMD5 have overflowed, the last_reset_value member contains the - * initial value of PMD2. + * last_reset_value member indicates the initial value of the overflowed PMD. 
 */ typedef struct { - int pid; /* current process at PMU interrupt point */ - int cpu; /* cpu on which the overfow occured */ - unsigned long last_reset_val; /* initial value of 1st overflowed PMD */ - unsigned long ip; /* where did the overflow interrupt happened */ - unsigned long ovfl_pmds; /* which PMDS registers overflowed (64 max) */ - unsigned long tstamp; /* ar.itc on the CPU that took the overflow */ - unsigned int set; /* event set active when overflow ocurred */ - unsigned int reserved1; /* for future use */ + int pid; /* active process at PMU interrupt point */ + unsigned char reserved1[3]; /* reserved for future use */ + unsigned char ovfl_pmd; /* index of overflowed PMD */ + + unsigned long last_reset_val; /* initial value of overflowed PMD */ + unsigned long ip; /* where the overflow interrupt happened */ + unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */ + + unsigned short cpu; /* cpu on which the overflow occurred */ + unsigned short set; /* event set active when overflow occurred */ + unsigned int reserved2; /* for future use */ } pfm_default_smpl_entry_t; #define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long) */ diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 669e44bf8012..c6b4af2b3643 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -15,8 +15,9 @@ #include <linux/config.h> -#include <asm/ptrace.h> +#include <asm/intrinsics.h> #include <asm/kregs.h> +#include <asm/ptrace.h> #include <asm/ustack.h> #define IA64_NUM_DBG_REGS 8 @@ -356,38 +357,41 @@ extern unsigned long get_wchan (struct task_struct *p); /* Return stack pointer of blocked task TSK. 
*/ #define KSTK_ESP(tsk) ((tsk)->thread.ksp) -static inline unsigned long -ia64_get_kr (unsigned long regnum) -{ - unsigned long r = 0; - - switch (regnum) { - case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break; - case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break; - case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break; - case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break; - case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break; - case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break; - case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break; - case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break; - } - return r; -} +extern void ia64_getreg_unknown_kr (void); +extern void ia64_setreg_unknown_kr (void); + +#define ia64_get_kr(regnum) \ +({ \ + unsigned long r = 0; \ + \ + switch (regnum) { \ + case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \ + case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \ + case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \ + case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \ + case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \ + case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \ + case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \ + case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \ + default: ia64_getreg_unknown_kr(); break; \ + } \ + r; \ +}) -static inline void -ia64_set_kr (unsigned long regnum, unsigned long r) -{ - switch (regnum) { - case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break; - case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break; - case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break; - case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break; - case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break; - case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break; - case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break; - case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break; - } -} +#define ia64_set_kr(regnum, r) \ +({ \ + switch (regnum) { \ + case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \ + 
case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \ + case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \ + case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \ + case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \ + case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \ + case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \ + case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \ + default: ia64_setreg_unknown_kr(); break; \ + } \ +}) /* * The following three macros can't be inline functions because we don't have struct @@ -423,8 +427,8 @@ extern void ia32_save_state (struct task_struct *task); extern void ia32_load_state (struct task_struct *task); #endif -#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory"); -#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory"); +#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) +#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) /* load fp 0.0 into fph */ static inline void @@ -450,78 +454,14 @@ ia64_load_fpu (struct ia64_fpreg *fph) { ia64_fph_disable(); } -static inline void -ia64_fc (void *addr) -{ - asm volatile ("fc %0" :: "r"(addr) : "memory"); -} - -static inline void -ia64_sync_i (void) -{ - asm volatile (";; sync.i" ::: "memory"); -} - -static inline void -ia64_srlz_i (void) -{ - asm volatile (";; srlz.i ;;" ::: "memory"); -} - -static inline void -ia64_srlz_d (void) -{ - asm volatile (";; srlz.d" ::: "memory"); -} - -static inline __u64 -ia64_get_rr (__u64 reg_bits) -{ - __u64 r; - asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory"); - return r; -} - -static inline void -ia64_set_rr (__u64 reg_bits, __u64 rr_val) -{ - asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory"); -} - -static inline __u64 -ia64_get_dcr (void) -{ - __u64 r; - asm volatile ("mov %0=cr.dcr" : "=r"(r)); - return r; -} - -static inline void -ia64_set_dcr (__u64 val) -{ - asm volatile ("mov cr.dcr=%0;;" :: 
"r"(val) : "memory"); - ia64_srlz_d(); -} - -static inline __u64 -ia64_get_lid (void) -{ - __u64 r; - asm volatile ("mov %0=cr.lid" : "=r"(r)); - return r; -} - -static inline void -ia64_invala (void) -{ - asm volatile ("invala" ::: "memory"); -} - static inline __u64 ia64_clear_ic (void) { __u64 psr; - asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory"); + psr = ia64_getreg(_IA64_REG_PSR); + ia64_stop(); + ia64_rsm(IA64_PSR_I | IA64_PSR_IC); + ia64_srlz_i(); return psr; } @@ -531,7 +471,9 @@ ia64_clear_ic (void) static inline void ia64_set_psr (__u64 psr) { - asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory"); + ia64_stop(); + ia64_setreg(_IA64_REG_PSR_L, psr); + ia64_srlz_d(); } /* @@ -543,14 +485,13 @@ ia64_itr (__u64 target_mask, __u64 tr_num, __u64 vmaddr, __u64 pte, __u64 log_page_size) { - asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory"); - asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory"); + ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); + ia64_setreg(_IA64_REG_CR_IFA, vmaddr); + ia64_stop(); if (target_mask & 0x1) - asm volatile ("itr.i itr[%0]=%1" - :: "r"(tr_num), "r"(pte) : "memory"); + ia64_itri(tr_num, pte); if (target_mask & 0x2) - asm volatile (";;itr.d dtr[%0]=%1" - :: "r"(tr_num), "r"(pte) : "memory"); + ia64_itrd(tr_num, pte); } /* @@ -561,13 +502,14 @@ static inline void ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 log_page_size) { - asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory"); - asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory"); + ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); + ia64_setreg(_IA64_REG_CR_IFA, vmaddr); + ia64_stop(); /* as per EAS2.6, itc must be the last instruction in an instruction group */ if (target_mask & 0x1) - asm volatile ("itc.i %0;;" :: "r"(pte) : "memory"); + ia64_itci(pte); if (target_mask & 0x2) - asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory"); + ia64_itcd(pte); } /* @@ 
-578,16 +520,17 @@ static inline void ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size) { if (target_mask & 0x1) - asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2)); + ia64_ptri(vmaddr, (log_size << 2)); if (target_mask & 0x2) - asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2)); + ia64_ptrd(vmaddr, (log_size << 2)); } /* Set the interrupt vector address. The address must be suitably aligned (32KB). */ static inline void ia64_set_iva (void *ivt_addr) { - asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory"); + ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr); + ia64_srlz_i(); } /* Set the page table address and control bits. */ @@ -595,79 +538,33 @@ static inline void ia64_set_pta (__u64 pta) { /* Note: srlz.i implies srlz.d */ - asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory"); -} - -static inline __u64 -ia64_get_cpuid (__u64 regnum) -{ - __u64 r; - - asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum)); - return r; + ia64_setreg(_IA64_REG_CR_PTA, pta); + ia64_srlz_i(); } static inline void ia64_eoi (void) { - asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory"); + ia64_setreg(_IA64_REG_CR_EOI, 0); + ia64_srlz_d(); } -static inline void -ia64_set_lrr0 (unsigned long val) -{ - asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory"); -} +#define cpu_relax() ia64_hint(ia64_hint_pause) static inline void -ia64_hint_pause (void) +ia64_set_lrr0 (unsigned long val) { - asm volatile ("hint @pause" ::: "memory"); + ia64_setreg(_IA64_REG_CR_LRR0, val); + ia64_srlz_d(); } -#define cpu_relax() ia64_hint_pause() - static inline void ia64_set_lrr1 (unsigned long val) { - asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory"); -} - -static inline void -ia64_set_pmv (__u64 val) -{ - asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory"); -} - -static inline __u64 -ia64_get_pmc (__u64 regnum) -{ - __u64 retval; - - asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum)); - return retval; -} - -static 
inline void -ia64_set_pmc (__u64 regnum, __u64 value) -{ - asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value)); -} - -static inline __u64 -ia64_get_pmd (__u64 regnum) -{ - __u64 retval; - - asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum)); - return retval; + ia64_setreg(_IA64_REG_CR_LRR1, val); + ia64_srlz_d(); } -static inline void -ia64_set_pmd (__u64 regnum, __u64 value) -{ - asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value)); -} /* * Given the address to which a spill occurred, return the unat bit @@ -713,199 +610,58 @@ thread_saved_pc (struct task_struct *t) * Get the current instruction/program counter value. */ #define current_text_addr() \ - ({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; }) - -/* - * Set the correctable machine check vector register - */ -static inline void -ia64_set_cmcv (__u64 val) -{ - asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory"); -} - -/* - * Read the correctable machine check vector register - */ -static inline __u64 -ia64_get_cmcv (void) -{ - __u64 val; - - asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory"); - return val; -} + ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; }) static inline __u64 ia64_get_ivr (void) { __u64 r; - asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r)); - return r; -} - -static inline void -ia64_set_tpr (__u64 val) -{ - asm volatile ("mov cr.tpr=%0" :: "r"(val)); -} - -static inline __u64 -ia64_get_tpr (void) -{ - __u64 r; - asm volatile ("mov %0=cr.tpr" : "=r"(r)); - return r; -} - -static inline void -ia64_set_irr0 (__u64 val) -{ - asm volatile("mov cr.irr0=%0;;" :: "r"(val) : "memory"); ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr0 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... 
*/ - asm volatile("mov %0=cr.irr0" : "=r"(val)); - return val; -} - -static inline void -ia64_set_irr1 (__u64 val) -{ - asm volatile("mov cr.irr1=%0;;" :: "r"(val) : "memory"); + r = ia64_getreg(_IA64_REG_CR_IVR); ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr1 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... */ - asm volatile("mov %0=cr.irr1" : "=r"(val)); - return val; -} - -static inline void -ia64_set_irr2 (__u64 val) -{ - asm volatile("mov cr.irr2=%0;;" :: "r"(val) : "memory"); - ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr2 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... */ - asm volatile("mov %0=cr.irr2" : "=r"(val)); - return val; -} - -static inline void -ia64_set_irr3 (__u64 val) -{ - asm volatile("mov cr.irr3=%0;;" :: "r"(val) : "memory"); - ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr3 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... 
*/ - asm volatile ("mov %0=cr.irr3" : "=r"(val)); - return val; -} - -static inline __u64 -ia64_get_gp(void) -{ - __u64 val; - - asm ("mov %0=gp" : "=r"(val)); - return val; -} - -static inline void -ia64_set_ibr (__u64 regnum, __u64 value) -{ - asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value)); + return r; } static inline void ia64_set_dbr (__u64 regnum, __u64 value) { - asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value)); + __ia64_set_dbr(regnum, value); #ifdef CONFIG_ITANIUM - asm volatile (";; srlz.d"); + ia64_srlz_d(); #endif } static inline __u64 -ia64_get_ibr (__u64 regnum) -{ - __u64 retval; - - asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum)); - return retval; -} - -static inline __u64 ia64_get_dbr (__u64 regnum) { __u64 retval; - asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum)); + retval = __ia64_get_dbr(regnum); #ifdef CONFIG_ITANIUM - asm volatile (";; srlz.d"); + ia64_srlz_d(); #endif return retval; } /* XXX remove the handcoded version once we have a sufficiently clever compiler... 
*/ #ifdef SMART_COMPILER -# define ia64_rotr(w,n) \ - ({ \ - __u64 _w = (w), _n = (n); \ - \ - (_w >> _n) | (_w << (64 - _n)); \ +# define ia64_rotr(w,n) \ + ({ \ + __u64 __ia64_rotr_w = (w), _n = (n); \ + \ + (__ia64_rotr_w >> _n) | (__ia64_rotr_w << (64 - _n)); \ }) #else -# define ia64_rotr(w,n) \ - ({ \ - __u64 result; \ - asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n)); \ - result; \ +# define ia64_rotr(w,n) \ + ({ \ + __u64 __ia64_rotr_w; \ + __ia64_rotr_w = ia64_shrp((w), (w), (n)); \ + __ia64_rotr_w; \ }) #endif #define ia64_rotl(w,n) ia64_rotr((w),(64)-(n)) -static inline __u64 -ia64_thash (__u64 addr) -{ - __u64 result; - asm ("thash %0=%1" : "=r"(result) : "r" (addr)); - return result; -} - -static inline __u64 -ia64_tpa (__u64 addr) -{ - __u64 result; - asm ("tpa %0=%1" : "=r"(result) : "r"(addr)); - return result; -} - /* * Take a mapped kernel address and return the equivalent address * in the region 7 identity mapped virtual area. @@ -914,7 +670,7 @@ static inline void * ia64_imva (void *addr) { void *result; - asm ("tpa %0=%1" : "=r"(result) : "r"(addr)); + result = (void *) ia64_tpa(addr); return __va(result); } @@ -926,13 +682,13 @@ ia64_imva (void *addr) static inline void prefetch (const void *x) { - __asm__ __volatile__ ("lfetch [%0]" : : "r"(x)); + ia64_lfetch(ia64_lfhint_none, x); } static inline void prefetchw (const void *x) { - __asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x)); + ia64_lfetch_excl(ia64_lfhint_none, x); } #define spin_lock_prefetch(x) prefetchw(x) diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h index b0427fa5bccf..6ece5061dc19 100644 --- a/include/asm-ia64/rwsem.h +++ b/include/asm-ia64/rwsem.h @@ -23,6 +23,8 @@ #include <linux/list.h> #include <linux/spinlock.h> +#include <asm/intrinsics.h> + /* * the semaphore definition */ @@ -81,9 +83,8 @@ init_rwsem (struct rw_semaphore *sem) static inline void __down_read (struct rw_semaphore *sem) { - int result; - __asm__ __volatile__ ("fetchadd4.acq 
%0=[%1],1" : - "=r"(result) : "r"(&sem->count) : "memory"); + int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1); + if (result < 0) rwsem_down_read_failed(sem); } @@ -111,9 +112,8 @@ __down_write (struct rw_semaphore *sem) static inline void __up_read (struct rw_semaphore *sem) { - int result; - __asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" : - "=r"(result) : "r"(&sem->count) : "memory"); + int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1); + if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0) rwsem_wake(sem); } diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index a2c7f1c09050..855c24712736 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h @@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size, extern unsigned long sal_platform_features; +struct sal_ret_values { + long r8; long r9; long r10; long r11; +}; + #endif /* __ASSEMBLY__ */ #endif /* _ASM_IA64_PAL_H */ diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h index 16de6f10c1e1..eca7d714a8fb 100644 --- a/include/asm-ia64/siginfo.h +++ b/include/asm-ia64/siginfo.h @@ -79,7 +79,6 @@ typedef struct siginfo { * si_code is non-zero and __ISR_VALID is set in si_flags. 
*/ #define si_isr _sifields._sigfault._isr -#define si_pfm_ovfl _sifields._sigprof._pfm_ovfl_counters /* * Flag values for si_flags: diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 0f114d98c2ee..adb4716f352c 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -106,7 +106,7 @@ hard_smp_processor_id (void) unsigned long bits; } lid; - lid.bits = ia64_get_lid(); + lid.bits = ia64_getreg(_IA64_REG_CR_LID); return lid.f.id << 8 | lid.f.eid; } diff --git a/include/asm-ia64/sn/sn2/io.h b/include/asm-ia64/sn/sn2/io.h index fc30f1f4c5c8..3a3b1e214164 100644 --- a/include/asm-ia64/sn/sn2/io.h +++ b/include/asm-ia64/sn/sn2/io.h @@ -11,11 +11,23 @@ extern void * sn_io_addr(unsigned long port); /* Forward definition */ extern void sn_mmiob(void); /* Forward definition */ +#include <asm/intrinsics.h> -#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") +#define __sn_mf_a() ia64_mfa() extern void sn_dma_flush(unsigned long); +#define __sn_inb ___sn_inb +#define __sn_inw ___sn_inw +#define __sn_inl ___sn_inl +#define __sn_outb ___sn_outb +#define __sn_outw ___sn_outw +#define __sn_outl ___sn_outl +#define __sn_readb ___sn_readb +#define __sn_readw ___sn_readw +#define __sn_readl ___sn_readl +#define __sn_readq ___sn_readq + /* * The following routines are SN Platform specific, called when * a reference is made to inX/outX set macros. 
SN Platform @@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long); */ static inline unsigned int -__sn_inb (unsigned long port) +___sn_inb (unsigned long port) { volatile unsigned char *addr; unsigned char ret = -1; @@ -40,7 +52,7 @@ __sn_inb (unsigned long port) } static inline unsigned int -__sn_inw (unsigned long port) +___sn_inw (unsigned long port) { volatile unsigned short *addr; unsigned short ret = -1; @@ -54,7 +66,7 @@ __sn_inw (unsigned long port) } static inline unsigned int -__sn_inl (unsigned long port) +___sn_inl (unsigned long port) { volatile unsigned int *addr; unsigned int ret = -1; @@ -68,7 +80,7 @@ __sn_inl (unsigned long port) } static inline void -__sn_outb (unsigned char val, unsigned long port) +___sn_outb (unsigned char val, unsigned long port) { volatile unsigned char *addr; @@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port) } static inline void -__sn_outw (unsigned short val, unsigned long port) +___sn_outw (unsigned short val, unsigned long port) { volatile unsigned short *addr; @@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port) } static inline void -__sn_outl (unsigned int val, unsigned long port) +___sn_outl (unsigned int val, unsigned long port) { volatile unsigned int *addr; @@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port) */ static inline unsigned char -__sn_readb (void *addr) +___sn_readb (void *addr) { unsigned char val; @@ -121,7 +133,7 @@ __sn_readb (void *addr) } static inline unsigned short -__sn_readw (void *addr) +___sn_readw (void *addr) { unsigned short val; @@ -132,7 +144,7 @@ __sn_readw (void *addr) } static inline unsigned int -__sn_readl (void *addr) +___sn_readl (void *addr) { unsigned int val; @@ -143,7 +155,7 @@ __sn_readl (void *addr) } static inline unsigned long -__sn_readq (void *addr) +___sn_readq (void *addr) { unsigned long val; diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h index 74dd5a6d2460..a2831ceb16a8 100644 --- 
a/include/asm-ia64/sn/sn_cpuid.h +++ b/include/asm-ia64/sn/sn_cpuid.h @@ -89,7 +89,7 @@ #ifndef CONFIG_SMP #define cpu_logical_id(cpu) 0 -#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff) +#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) #endif /* @@ -98,8 +98,8 @@ */ #define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff) #define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3) -#define get_nasid() ((ia64_get_lid() >> 16) & 0xfff) -#define get_slice() ((ia64_get_lid() >> 28) & 0xf) +#define get_nasid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff) +#define get_slice() ((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf) #define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff) /* diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index 3c0d89837b02..3a5f08f4c6f2 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h @@ -9,11 +9,13 @@ * This file is used for SMP configurations only. */ +#include <linux/compiler.h> #include <linux/kernel.h> -#include <asm/system.h> -#include <asm/bitops.h> #include <asm/atomic.h> +#include <asm/bitops.h> +#include <asm/intrinsics.h> +#include <asm/system.h> typedef struct { volatile unsigned int lock; @@ -102,8 +104,8 @@ typedef struct { do { \ rwlock_t *__read_lock_ptr = (rw); \ \ - while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \ - ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ + while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ + ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ while (*(volatile int *)__read_lock_ptr < 0) \ cpu_relax(); \ } \ @@ -112,7 +114,7 @@ do { \ #define _raw_read_unlock(rw) \ do { \ rwlock_t *__read_lock_ptr = (rw); \ - ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ + ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #define _raw_write_lock(rw) \ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index f4951838e69d..c0a638402858 
100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -55,12 +55,6 @@ extern struct ia64_boot_param { __u64 initrd_size; } *ia64_boot_param; -static inline void -ia64_insn_group_barrier (void) -{ - __asm__ __volatile__ (";;" ::: "memory"); -} - /* * Macros to force memory ordering. In these descriptions, "previous" * and "subsequent" refer to program order; "visible" means that all @@ -83,7 +77,7 @@ ia64_insn_group_barrier (void) * it's (presumably) much slower than mf and (b) mf.a is supported for * sequential memory pages only. */ -#define mb() __asm__ __volatile__ ("mf" ::: "memory") +#define mb() ia64_mf() #define rmb() mb() #define wmb() mb() #define read_barrier_depends() do { } while(0) @@ -119,22 +113,26 @@ ia64_insn_group_barrier (void) /* clearing psr.i is implicitly serialized (visible by next insn) */ /* setting psr.i requires data serialization */ -#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \ - "rsm psr.i;;" \ - : "=r" (x) :: "memory") -#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory") -#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \ - "(p6) ssm psr.i;" \ - "(p7) rsm psr.i;;" \ - "(p6) srlz.d" \ - :: "r" ((x) & IA64_PSR_I) \ - : "p6", "p7", "memory") +#define __local_irq_save(x) \ +do { \ + (x) = ia64_getreg(_IA64_REG_PSR); \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_disable() \ +do { \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) #ifdef CONFIG_IA64_DEBUG_IRQ extern unsigned long last_cli_ip; -# define __save_ip() __asm__ ("mov %0=ip" : "=r" (last_cli_ip)) +# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) # define local_irq_save(x) \ do { \ @@ -164,14 +162,14 @@ do { \ # define local_irq_restore(x) __local_irq_restore(x) #endif /* !CONFIG_IA64_DEBUG_IRQ */ -#define local_irq_enable() __asm__ __volatile__ (";; 
ssm psr.i;; srlz.d" ::: "memory") -#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory") +#define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) +#define local_save_flags(flags) ((flags) = ia64_getreg(_IA64_REG_PSR)) #define irqs_disabled() \ ({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (flags & IA64_PSR_I) == 0; \ + unsigned long __ia64_id_flags; \ + local_save_flags(__ia64_id_flags); \ + (__ia64_id_flags & IA64_PSR_I) == 0; \ }) #ifdef __KERNEL__ diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h index 5bf5bd8f148e..414aae060440 100644 --- a/include/asm-ia64/timex.h +++ b/include/asm-ia64/timex.h @@ -10,6 +10,7 @@ * Also removed cacheflush_time as it's entirely unused. */ +#include <asm/intrinsics.h> #include <asm/processor.h> typedef unsigned long cycles_t; @@ -32,7 +33,7 @@ get_cycles (void) { cycles_t ret; - __asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret)); + ret = ia64_getreg(_IA64_REG_AR_ITC); return ret; } diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index dd49222e8f08..049c69845b23 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h @@ -10,6 +10,7 @@ #include <linux/mm.h> +#include <asm/intrinsics.h> #include <asm/mmu_context.h> #include <asm/page.h> @@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); #else if (vma->vm_mm == current->active_mm) - asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory"); + ia64_ptcl(addr, (PAGE_SHIFT << 2)); else vma->vm_mm->context = 0; #endif diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 09325eb6503d..f65623c70fb1 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -334,73 +334,20 @@ waitpid (int pid, int * wait_stat, int flags) } -static inline int -execve (const char *filename, char *const av[], char *const ep[]) -{ - 
register long r8 asm("r8"); - register long r10 asm("r10"); - register long r15 asm("r15") = __NR_execve; - register long out0 asm("out0") = (long)filename; - register long out1 asm("out1") = (long)av; - register long out2 asm("out2") = (long)ep; - - asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t" - : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1), "=r" (out2) - : "2" (r15), "3" (out0), "4" (out1), "5" (out2) - : "memory", "out3", "out4", "out5", "out6", "out7", - /* Non-stacked integer registers, minus r8, r10, r15, r13 */ - "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18", - "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", - "r28", "r29", "r30", "r31", - /* Predicate registers. */ - "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", - /* Non-rotating fp registers. */ - "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - /* Branch registers. */ - "b6", "b7" ); - return r8; -} - -static inline pid_t -clone (unsigned long flags, void *sp) -{ - register long r8 asm("r8"); - register long r10 asm("r10"); - register long r15 asm("r15") = __NR_clone; - register long out0 asm("out0") = (long)flags; - register long out1 asm("out1") = (long)sp; - long retval; - - /* clone clobbers current, hence the "r13" in the clobbers list */ - asm volatile ( "break " __stringify(__BREAK_SYSCALL) ";;\n\t" - : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1) - : "2" (r15), "3" (out0), "4" (out1) - : "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13", - /* Non-stacked integer registers, minus r8, r10, r15, r13 */ - "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18", - "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", - "r28", "r29", "r30", "r31", - /* Predicate registers. */ - "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", - /* Non-rotating fp registers. */ - "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - /* Branch registers. 
*/ - "b6", "b7" ); - retval = r8; - return retval;; - -} +extern int execve (const char *filename, char *const av[], char *const ep[]); +extern pid_t clone (unsigned long flags, void *sp); #endif /* __KERNEL_SYSCALLS__ */ /* * "Conditional" syscalls * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on - * all toolchains, so we just do it by hand. Note, this macro can only be used in the - * file which defines sys_ni_syscall, i.e., in kernel/sys.c. + * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in + * kernel/sys.c. This version causes warnings because the declaration isn't a + * proper prototype, but we can't use __typeof__ either, because not all cond_syscall() + * declarations have prototypes at the moment. */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); +#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall"))); #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3fd526160f1a..94a0f27e331c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -373,6 +373,11 @@ extern int acpi_mp_config; #define acpi_mp_config 0 +static inline int acpi_boot_init(void) +{ + return 0; +} + #endif /*!CONFIG_ACPI_BOOT*/ @@ -423,6 +428,13 @@ int ec_write(u8 addr, u8 val); int acpi_blacklisted(void); +#else + +static inline int acpi_blacklisted(void) +{ + return 0; +} + #endif /*CONFIG_ACPI*/ #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/device.h b/include/linux/device.h index 7b49400adf31..8d6266f2e3c3 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -58,7 +58,8 @@ struct bus_type { struct device * (*add) (struct device * parent, char * bus_id); int (*hotplug) (struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); - + int (*suspend)(struct device * dev, u32 state); + int (*resume)(struct device * dev); }; extern int 
bus_register(struct bus_type * bus); @@ -372,8 +373,6 @@ extern struct bus_type platform_bus_type; extern struct device legacy_bus; /* drivers/base/power.c */ -extern int device_suspend(u32 state, u32 level); -extern void device_resume(u32 level); extern void device_shutdown(void); diff --git a/include/linux/ide.h b/include/linux/ide.h index 82ca6da75b3f..a3ee36b438ca 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1241,8 +1241,6 @@ typedef struct ide_driver_s { #define DRIVER(drive) ((drive)->driver) extern int generic_ide_ioctl(struct block_device *, unsigned, unsigned long); -extern int generic_ide_suspend(struct device *dev, u32 state, u32 level); -extern int generic_ide_resume(struct device *dev, u32 level); /* * IDE modules. diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index c2b4fd735f40..9555dd4d69fc 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -92,7 +92,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char * buf, size_t count, loff_ * Convert an unsigned long value into ASCII and copy it to the user buffer @buf, * updating *offset appropriately. Returns bytes written or -EFAULT. */ -ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset); +ssize_t oprofilefs_ulong_to_user(unsigned long val, char * buf, size_t count, loff_t * offset); /** * Read an ASCII string for a number from a userspace buffer and fill *val on success. 
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index fec8ad62b567..d97edad0effc 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -45,7 +45,7 @@ struct tc_stats struct tc_estimator { - char interval; + signed char interval; unsigned char ewma_log; }; diff --git a/include/linux/pm.h b/include/linux/pm.h index e4c795f71cea..3017bdef5f03 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -186,9 +186,46 @@ static inline void pm_dev_idle(struct pm_dev *dev) {} #endif /* CONFIG_PM */ + +/* + * Callbacks for platform drivers to implement. + */ extern void (*pm_idle)(void); extern void (*pm_power_off)(void); +enum { + PM_SUSPEND_ON, + PM_SUSPEND_STANDBY, + PM_SUSPEND_MEM, + PM_SUSPEND_DISK, + PM_SUSPEND_MAX, +}; + +enum { + PM_DISK_FIRMWARE = 1, + PM_DISK_PLATFORM, + PM_DISK_SHUTDOWN, + PM_DISK_REBOOT, + PM_DISK_MAX, +}; + + +struct pm_ops { + u32 pm_disk_mode; + int (*prepare)(u32 state); + int (*enter)(u32 state); + int (*finish)(u32 state); +}; + +extern void pm_set_ops(struct pm_ops *); + +extern int pm_suspend(u32 state); + + +/* + * Device power management + */ + struct device; struct dev_pm_info { @@ -203,10 +240,10 @@ struct dev_pm_info { extern void device_pm_set_parent(struct device * dev, struct device * parent); -extern int device_pm_suspend(u32 state); -extern int device_pm_power_down(u32 state); -extern void device_pm_power_up(void); -extern void device_pm_resume(void); +extern int device_suspend(u32 state); +extern int device_power_down(u32 state); +extern void device_power_up(void); +extern void device_resume(void); #endif /* __KERNEL__ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 28788d8a65ff..132db86c961a 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -8,8 +8,7 @@ #include <linux/notifier.h> #include <linux/config.h> #include <linux/init.h> - -extern unsigned char software_suspend_enabled; +#include <linux/pm.h> #ifdef CONFIG_SOFTWARE_SUSPEND /* page 
backup entry */ @@ -46,22 +45,9 @@ extern int shrink_mem(void); /* mm/page_alloc.c */ extern void drain_local_pages(void); -/* kernel/suspend.c */ -extern int software_suspend(void); - -extern int register_suspend_notifier(struct notifier_block *); -extern int unregister_suspend_notifier(struct notifier_block *); - extern unsigned int nr_copy_pages __nosavedata; extern suspend_pagedir_t *pagedir_nosave __nosavedata; -/* Communication between kernel/suspend.c and arch/i386/suspend.c */ - -extern void do_magic_resume_1(void); -extern void do_magic_resume_2(void); -extern void do_magic_suspend_1(void); -extern void do_magic_suspend_2(void); - /* Communication between acpi and arch/i386/suspend.c */ extern void do_suspend_lowlevel(int resume); @@ -72,32 +58,17 @@ static inline int software_suspend(void) { return -EPERM; } -#define register_suspend_notifier(a) do { } while(0) -#define unregister_suspend_notifier(a) do { } while(0) #endif /* CONFIG_SOFTWARE_SUSPEND */ #ifdef CONFIG_PM extern void refrigerator(unsigned long); -extern int freeze_processes(void); -extern void thaw_processes(void); - -extern int pm_prepare_console(void); -extern void pm_restore_console(void); #else static inline void refrigerator(unsigned long flag) { } -static inline int freeze_processes(void) -{ - return 0; -} -static inline void thaw_processes(void) -{ - -} #endif /* CONFIG_PM */ #endif /* _LINUX_SWSUSP_H */ diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index 4bc3e22b5104..2a90db8d41de 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -31,10 +31,8 @@ struct sysdev_class { /* Default operations for these types of devices */ int (*shutdown)(struct sys_device *); - int (*save)(struct sys_device *, u32 state); int (*suspend)(struct sys_device *, u32 state); int (*resume)(struct sys_device *); - int (*restore)(struct sys_device *); struct kset kset; }; @@ -52,10 +50,8 @@ struct sysdev_driver { int (*add)(struct sys_device *); int (*remove)(struct sys_device *); 
int (*shutdown)(struct sys_device *); - int (*save)(struct sys_device *, u32 state); int (*suspend)(struct sys_device *, u32 state); int (*resume)(struct sys_device *); - int (*restore)(struct sys_device *); }; |
