path: root/include/asm-cris
author    Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 20:21:22 -0800
committer Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 20:21:22 -0800
commit    0a528ace2ea013fe60efaf633084b154962bfdbb (patch)
tree      4e038736f3e5ff9eece35f5c2a8efcbb4487a477 /include/asm-cris
parent    8b29e8edf0ed8f63ecb35b16946f222854d74ad0 (diff)
v2.4.10.5 -> v2.4.10.6
- various: fix some module exports uncovered by stricter error checking
- Urban Widmark: make smbfs use same error define names as samba and win32
- Greg KH: USB update
- Tom Rini: MPC8xx ppc update
- Matthew Wilcox: rd.c page cache flushing fix
- Richard Gooch: devfs race fix: rwsem for symlinks
- Björn Wesen: Cris arch update
- Nikita Danilov: reiserfs cleanup
- Tim Waugh: parport update
- Peter Rival: update alpha SMP bootup to match wait_init_idle fixes
- Trond Myklebust: lockd/grace period fix
Diffstat (limited to 'include/asm-cris')
-rw-r--r--  include/asm-cris/bitops.h    |  69
-rw-r--r--  include/asm-cris/checksum.h  |   1
-rw-r--r--  include/asm-cris/current.h   |   2
-rw-r--r--  include/asm-cris/delay.h     |   8
-rw-r--r--  include/asm-cris/io.h        |  16
-rw-r--r--  include/asm-cris/irq.h       |  47
-rw-r--r--  include/asm-cris/module.h    |   2
-rw-r--r--  include/asm-cris/pgtable.h   |   7
-rw-r--r--  include/asm-cris/processor.h |   3
-rw-r--r--  include/asm-cris/system.h    |  34
-rw-r--r--  include/asm-cris/tlb.h       |   1
-rw-r--r--  include/asm-cris/uaccess.h   | 201
12 files changed, 235 insertions, 156 deletions
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index c00c224fb55c..86b07d546759 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -19,6 +19,10 @@
#include <asm/system.h>
+/* We use generic_ffs so get it; include guards resolve the possible
+ mutual inclusion. */
+#include <linux/bitops.h>
+
/*
* Some hacks to defeat gcc over-optimizations..
*/
@@ -215,33 +219,62 @@ static __inline__ int test_bit(int nr, const void *addr)
*/
/*
+ * Helper functions for the core of the ff[sz] functions, wrapping the
+ * syntactically awkward asms. The asms compute the number of leading
+ * zeroes of a bits-in-byte and byte-in-word and word-in-dword-swapped
+ * number. They differ in that the first function also inverts all bits
+ * in the input.
+ */
+static __inline__ unsigned long cris_swapnwbrlz(unsigned long w)
+{
+ /* Let's just say we return the result in the same register as the
+ input. Saying we clobber the input but can return the result
+ in another register:
+ ! __asm__ ("swapnwbr %2\n\tlz %2,%0"
+ ! : "=r,r" (res), "=r,X" (dummy) : "1,0" (w));
+ confuses gcc (sched.c, gcc from cris-dist-1.14). */
+
+ unsigned long res;
+ __asm__ ("swapnwbr %0 \n\t"
+ "lz %0,%0"
+ : "=r" (res) : "0" (w));
+ return res;
+}
+
+static __inline__ unsigned long cris_swapwbrlz(unsigned long w)
+{
+ unsigned res;
+ __asm__ ("swapwbr %0 \n\t"
+ "lz %0,%0"
+ : "=r" (res)
+ : "0" (w));
+ return res;
+}
+
+/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
-static __inline__ unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long w)
{
- unsigned long result = 0;
-
- while(word & 1) {
- result++;
- word >>= 1;
- }
- return result;
+ /* The generic_ffs function is used to avoid the asm when the
+ argument is a constant. */
+ return __builtin_constant_p (w)
+ ? (~w ? (unsigned long) generic_ffs ((int) ~w) - 1 : 32)
+ : cris_swapnwbrlz (w);
}
/*
- * Find first one in word. Undefined if no one exists,
- * so code should check against 0UL first..
+ * Somewhat like ffz but the equivalent of generic_ffs: in contrast to
+ * ffz we return the first one-bit *plus one*.
*/
-static __inline__ unsigned long find_first_one(unsigned long word)
+static __inline__ unsigned long ffs(unsigned long w)
{
- unsigned long result = 0;
-
- while(!(word & 1)) {
- result++;
- word >>= 1;
- }
- return result;
+ /* The generic_ffs function is used to avoid the asm when the
+ argument is a constant. */
+ return __builtin_constant_p (w)
+ ? (unsigned long) generic_ffs ((int) w)
+ : w ? cris_swapwbrlz (w) + 1 : 0;
}
/**
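A note on the ffz/ffs rewrite above: swapwbr bit-reverses the whole 32-bit word (bits within bytes, bytes within words, words within the dword), so the lz that follows turns a leading-zero count into a trailing-zero count of the original value; swapnwbr additionally inverts the input, which makes the result the trailing-one count, i.e. ffz. A minimal reference sketch of the same semantics in plain C, essentially the loops this patch removes (the *_ref names are hypothetical, for illustration only):

	/* Reference semantics only; the asm versions above are the real code. */
	static inline unsigned long ffz_ref(unsigned long w)
	{
		unsigned long n = 0;		/* index of first zero bit */
		while (w & 1) {			/* undefined for w == ~0UL, as documented */
			n++;
			w >>= 1;
		}
		return n;
	}

	static inline unsigned long ffs_ref(unsigned long w)
	{
		unsigned long n = 1;		/* first one-bit, counted from 1 */
		if (!w)
			return 0;		/* ffs(0) == 0 by convention */
		while (!(w & 1)) {
			n++;
			w >>= 1;
		}
		return n;
	}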
diff --git a/include/asm-cris/checksum.h b/include/asm-cris/checksum.h
index 4ed0f832b19d..589eb323eba3 100644
--- a/include/asm-cris/checksum.h
+++ b/include/asm-cris/checksum.h
@@ -1,4 +1,3 @@
-/* $Id: checksum.h,v 1.4 2001/06/28 03:58:36 hp Exp $ */
/* TODO: csum_tcpudp_magic could be speeded up, and csum_fold as well */
#ifndef _CRIS_CHECKSUM_H
diff --git a/include/asm-cris/current.h b/include/asm-cris/current.h
index 6b00b86b6a9d..c5cc44d537d6 100644
--- a/include/asm-cris/current.h
+++ b/include/asm-cris/current.h
@@ -6,7 +6,7 @@ struct task_struct;
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
- __asm__("and.d sp,%0; ":"=r" (current) : "0" (~8191UL));
+ __asm__("and.d $sp,%0; ":"=r" (current) : "0" (~8191UL));
return current;
}
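Register prefix aside, the masking in get_current() relies on the 2.4 layout where each task's task_struct sits at the bottom of its 8 KiB kernel stack and the whole block is 8 KiB aligned, so clearing the low 13 bits of $sp yields the task_struct pointer. A rough restatement for illustration only, using the rdsp() helper defined in asm-cris/system.h elsewhere in this patch:

	/* Illustration only: same trick expressed via rdsp() instead of inline asm. */
	static inline struct task_struct *get_current_ref(void)
	{
		return (struct task_struct *)(rdsp() & ~8191UL);	/* 8191 == 8 KiB - 1 */
	}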
diff --git a/include/asm-cris/delay.h b/include/asm-cris/delay.h
index a9a848dd3b07..632c369c41b9 100644
--- a/include/asm-cris/delay.h
+++ b/include/asm-cris/delay.h
@@ -1,5 +1,3 @@
-/* $Id: delay.h,v 1.5 2001/06/28 04:59:25 hp Exp $ */
-
#ifndef _CRIS_DELAY_H
#define _CRIS_DELAY_H
@@ -21,12 +19,12 @@ extern void __do_delay(void); /* Special register call calling convention */
extern __inline__ void __delay(int loops)
{
__asm__ __volatile__ (
- "move.d %0,r9\n\t"
+ "move.d %0,$r9\n\t"
"beq 2f\n\t"
- "subq 1,r9\n\t"
+ "subq 1,$r9\n\t"
"1:\n\t"
"bne 1b\n\t"
- "subq 1,r9\n"
+ "subq 1,$r9\n"
"2:"
: : "g" (loops) : "r9");
}
diff --git a/include/asm-cris/io.h b/include/asm-cris/io.h
index 01c8b1dad1d9..607b6291b366 100644
--- a/include/asm-cris/io.h
+++ b/include/asm-cris/io.h
@@ -10,16 +10,16 @@
#ifdef CONFIG_SVINTO_SIM
/* Let's use the ucsim interface since it lets us do write(2, ...) */
#define SIMCOUT(s,len) \
- asm ("moveq 4,r1 \n\t" \
- "moveq 2,r10 \n\t" \
- "move.d %0,r11 \n\t" \
- "move.d %1,r12 \n\t" \
- "push irp \n\t" \
- "move 0f,irp \n\t" \
+ asm ("moveq 4,$r9 \n\t" \
+ "moveq 2,$r10 \n\t" \
+ "move.d %0,$r11 \n\t" \
+ "move.d %1,$r12 \n\t" \
+ "push $irp \n\t" \
+ "move 0f,$irp \n\t" \
"jump -6809 \n" \
"0: \n\t" \
- "pop irp" \
- : : "rm" (s), "rm" (len) : "r1","r10","r11","r12","memory")
+ "pop $irp" \
+ : : "rm" (s), "rm" (len) : "r9","r10","r11","r12","memory")
#define TRACE_ON() __extension__ \
({ int _Foofoo; __asm__ volatile ("bmod [%0],%0" : "=r" (_Foofoo) : "0" \
(255)); _Foofoo; })
diff --git a/include/asm-cris/irq.h b/include/asm-cris/irq.h
index b7eb9c363795..b1b5ada6ff5a 100644
--- a/include/asm-cris/irq.h
+++ b/include/asm-cris/irq.h
@@ -3,9 +3,6 @@
*
* Copyright (c) 2000, 2001 Axis Communications AB
*
- * Authors: Bjorn Wesen (bjornw@axis.com)
- *
- * $Id: irq.h,v 1.13 2001/07/06 18:52:08 hp Exp $
*/
#ifndef _ASM_IRQ_H
@@ -101,25 +98,25 @@ void set_break_vector(int n, irqvectptr addr);
/* SAVE_ALL saves registers so they match pt_regs */
#define SAVE_ALL \
- "move irp,[sp=sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \
- "push srp\n\t" /* push subroutine return pointer */ \
- "push dccr\n\t" /* push condition codes */ \
- "push mof\n\t" /* push multiply overflow reg */ \
+ "move $irp,[$sp=$sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \
+ "push $srp\n\t" /* push subroutine return pointer */ \
+ "push $dccr\n\t" /* push condition codes */ \
+ "push $mof\n\t" /* push multiply overflow reg */ \
"di\n\t" /* need to disable irq's at this point */\
- "subq 14*4,sp\n\t" /* make room for r0-r13 */ \
- "movem r13,[sp]\n\t" /* push the r0-r13 registers */ \
- "push r10\n\t" /* push orig_r10 */ \
- "clear.d [sp=sp-4]\n\t" /* frametype - this is a normal stackframe */
+ "subq 14*4,$sp\n\t" /* make room for r0-r13 */ \
+ "movem $r13,[$sp]\n\t" /* push the r0-r13 registers */ \
+ "push $r10\n\t" /* push orig_r10 */ \
+ "clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */
/* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq in irq.c */
#define BLOCK_IRQ(mask,nr) \
- "move.d " #mask ",r0\n\t" \
- "move.d r0,[0xb00000d8]\n\t"
+ "move.d " #mask ",$r0\n\t" \
+ "move.d $r0,[0xb00000d8]\n\t"
#define UNBLOCK_IRQ(mask) \
- "move.d " #mask ",r0\n\t" \
- "move.d r0,[0xb00000dc]\n\t"
+ "move.d " #mask ",$r0\n\t" \
+ "move.d $r0,[0xb00000dc]\n\t"
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
@@ -137,20 +134,20 @@ void sIRQ_NAME(nr); \
void BAD_IRQ_NAME(nr); \
__asm__ ( \
".text\n\t" \
- "_IRQ" #nr "_interrupt:\n\t" \
+ "IRQ" #nr "_interrupt:\n\t" \
SAVE_ALL \
- "_sIRQ" #nr "_interrupt:\n\t" /* shortcut for the multiple irq handler */ \
+ "sIRQ" #nr "_interrupt:\n\t" /* shortcut for the multiple irq handler */ \
BLOCK_IRQ(mask,nr) /* this must be done to prevent irq loops when we ei later */ \
- "moveq "#nr",r10\n\t" \
- "move.d sp,r11\n\t" \
- "jsr _do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \
+ "moveq "#nr",$r10\n\t" \
+ "move.d $sp,$r11\n\t" \
+ "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \
UNBLOCK_IRQ(mask) \
- "moveq 0,r9\n\t" /* make ret_from_intr realise we came from an irq */ \
- "jump _ret_from_intr\n\t" \
- "_bad_IRQ" #nr "_interrupt:\n\t" \
- "push r0\n\t" \
+ "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \
+ "jump ret_from_intr\n\t" \
+ "bad_IRQ" #nr "_interrupt:\n\t" \
+ "push $r0\n\t" \
BLOCK_IRQ(mask,nr) \
- "pop r0\n\t" \
+ "pop $r0\n\t" \
"reti\n\t" \
"nop\n");
diff --git a/include/asm-cris/module.h b/include/asm-cris/module.h
index 0fe7e7e77fb6..5853a11d6163 100644
--- a/include/asm-cris/module.h
+++ b/include/asm-cris/module.h
@@ -7,6 +7,6 @@
#define module_map(x) vmalloc(x)
#define module_unmap(x) vfree(x)
#define module_arch_init(x) (0)
-#define arch_init_modules(x) do { } while (0)
+#define arch_init_modules(x) do { } while (0)
#endif /* _ASM_CRIS_MODULE_H */
diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h
index 38cb45a278d9..afe60fc7d9e5 100644
--- a/include/asm-cris/pgtable.h
+++ b/include/asm-cris/pgtable.h
@@ -3,6 +3,9 @@
* HISTORY:
*
* $Log: pgtable.h,v $
+ * Revision 1.12 2001/08/11 00:28:00 bjornw
+ * PAGE_CHG_MASK and PAGE_NONE had somewhat untraditional values
+ *
* Revision 1.11 2001/04/04 14:38:36 bjornw
* Removed bad_pagetable handling and the _kernel functions
*
@@ -215,9 +218,9 @@ static inline void flush_tlb(void)
#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
#define _PAGE_TABLE (_PAGE_PRESENT | __READABLE | __WRITEABLE)
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_SILENT_WRITE)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | __READABLE)
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | __READABLE | _PAGE_WRITE | \
_PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | __READABLE) // | _PAGE_COW
diff --git a/include/asm-cris/processor.h b/include/asm-cris/processor.h
index dec7e2cefc7d..a9b11b89eced 100644
--- a/include/asm-cris/processor.h
+++ b/include/asm-cris/processor.h
@@ -12,13 +12,14 @@
#include <linux/config.h>
#include <asm/system.h>
+#include <asm/page.h>
#include <asm/ptrace.h>
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
-#define current_text_addr() ({void *pc; __asm__ ("move.d pc,%0" : "=rm" (pc)); pc; })
+#define current_text_addr() ({void *pc; __asm__ ("move.d $pc,%0" : "=rm" (pc)); pc; })
/* CRIS has no problems with write protection */
diff --git a/include/asm-cris/system.h b/include/asm-cris/system.h
index 8c5b38977603..95c6c4198da5 100644
--- a/include/asm-cris/system.h
+++ b/include/asm-cris/system.h
@@ -1,5 +1,3 @@
-/* $Id: system.h,v 1.4 2001/03/20 19:46:00 bjornw Exp $ */
-
#ifndef __ASM_CRIS_SYSTEM_H
#define __ASM_CRIS_SYSTEM_H
@@ -16,22 +14,30 @@ extern struct task_struct *resume(struct task_struct *prev, struct task_struct *
#define switch_to(prev,next,last) last = resume(prev,next, \
(int)&((struct task_struct *)0)->thread)
+/* read the CPU version register */
+
+static inline unsigned long rdvr(void) {
+ unsigned long vr;
+ __asm__ volatile ("move $vr,%0" : "=rm" (vr));
+ return vr;
+}
+
/* read/write the user-mode stackpointer */
-extern inline unsigned long rdusp(void) {
+static inline unsigned long rdusp(void) {
unsigned long usp;
- __asm__ __volatile__("move usp,%0" : "=rm" (usp));
+ __asm__ __volatile__("move $usp,%0" : "=rm" (usp));
return usp;
}
#define wrusp(usp) \
- __asm__ __volatile__("move %0,usp" : /* no outputs */ : "rm" (usp))
+ __asm__ __volatile__("move %0,$usp" : /* no outputs */ : "rm" (usp))
/* read the current stackpointer */
-extern inline unsigned long rdsp(void) {
+static inline unsigned long rdsp(void) {
unsigned long sp;
- __asm__ __volatile__("move.d sp,%0" : "=rm" (sp));
+ __asm__ __volatile__("move.d $sp,%0" : "=rm" (sp));
return sp;
}
@@ -51,18 +57,18 @@ struct __xchg_dummy { unsigned long a[100]; };
#if 0
/* use these and an oscilloscope to see the fraction of time we're running with IRQ's disabled */
/* it assumes the LED's are on port 0x90000000 of course. */
-#define sti() __asm__ __volatile__ ( "ei\n\tpush r0\n\tmoveq 0,r0\n\tmove.d r0,[0x90000000]\n\tpop r0" );
-#define cli() __asm__ __volatile__ ( "di\n\tpush r0\n\tmove.d 0x40000,r0\n\tmove.d r0,[0x90000000]\n\tpop r0");
-#define save_flags(x) __asm__ __volatile__ ("move ccr,%0" : "=rm" (x) : : "memory");
-#define restore_flags(x) __asm__ __volatile__ ("move %0,ccr\n\tbtstq 5,%0\n\tbpl 1f\n\tnop\n\tpush r0\n\tmoveq 0,r0\n\tmove.d r0,[0x90000000]\n\tpop r0\n1:\n" : : "r" (x) : "memory");
+#define sti() __asm__ __volatile__ ( "ei\n\tpush $r0\n\tmoveq 0,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0" );
+#define cli() __asm__ __volatile__ ( "di\n\tpush $r0\n\tmove.d 0x40000,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0");
+#define save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
+#define restore_flags(x) __asm__ __volatile__ ("move %0,$ccr\n\tbtstq 5,%0\n\tbpl 1f\n\tnop\n\tpush $r0\n\tmoveq 0,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0\n1:\n" : : "r" (x) : "memory");
#else
#define __cli() __asm__ __volatile__ ( "di");
#define __sti() __asm__ __volatile__ ( "ei" );
-#define __save_flags(x) __asm__ __volatile__ ("move ccr,%0" : "=rm" (x) : : "memory");
-#define __restore_flags(x) __asm__ __volatile__ ("move %0,ccr" : : "rm" (x) : "memory");
+#define __save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
+#define __restore_flags(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
/* For spinlocks etc */
-#define local_irq_save(x) __asm__ __volatile__ ("move ccr,%0\n\tdi" : "=rm" (x) : : "memory");
+#define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory");
#define local_irq_restore(x) restore_flags(x)
#define local_irq_disable() cli()
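For context on the $ccr-based macros above: local_irq_save() copies the condition-code register (which holds the interrupt-enable bit) and then executes "di", and local_irq_restore() puts the saved value back. The usual calling pattern is a short bracketed critical section, as in this minimal sketch (the function and counter are hypothetical):

	static void bump_counter(volatile unsigned long *counter)
	{
		unsigned long flags;

		local_irq_save(flags);		/* "move $ccr,flags" then "di" */
		(*counter)++;			/* short critical section, IRQs off */
		local_irq_restore(flags);	/* re-enables IRQs iff they were on */
	}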
diff --git a/include/asm-cris/tlb.h b/include/asm-cris/tlb.h
new file mode 100644
index 000000000000..69c0faa93194
--- /dev/null
+++ b/include/asm-cris/tlb.h
@@ -0,0 +1 @@
+#include <asm-generic/tlb.h>
diff --git a/include/asm-cris/uaccess.h b/include/asm-cris/uaccess.h
index d506983b11f7..8cc79775254c 100644
--- a/include/asm-cris/uaccess.h
+++ b/include/asm-cris/uaccess.h
@@ -3,6 +3,12 @@
* Hans-Peter Nilsson (hp@axis.com)
*
* $Log: uaccess.h,v $
+ * Revision 1.7 2001/10/02 12:44:52 hp
+ * Add support for 64-bit put_user/get_user
+ *
+ * Revision 1.6 2001/10/01 14:51:17 bjornw
+ * Added register prefixes and removed underscores
+ *
* Revision 1.5 2000/10/25 03:33:21 hp
* - Provide implementation for everything else but get_user and put_user;
* copying inline to/from user for constant length 0..16, 20, 24, and
@@ -36,7 +42,7 @@
Check regularly...
- Register r9 is chosen for temporaries, being a call-clobbered register
+ Register $r9 is chosen for temporaries, being a call-clobbered register
first in line to be used (notably for local blocks), not colliding with
parameter registers. */
@@ -182,6 +188,7 @@ do { \
case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \
case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \
case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \
+ case 8: __put_user_asm_64(x,ptr,retval); break; \
default: __put_user_bad(); \
} \
} while (0)
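With the new case 8 above, put_user()/get_user() also accept 64-bit objects, handled by the __put_user_asm_64/__get_user_asm_64 macros added further down. A hypothetical caller sketch ("ubuf" is an assumed user-space pointer handed in by a syscall; needs <asm/uaccess.h>):

	/* Hypothetical usage of the new 64-bit path. */
	static int roundtrip_u64(long long *ubuf)
	{
		long long out = 0;
		int err;

		err = put_user(0x0123456789abcdefLL, ubuf);	/* two move.d's, each with a fixup */
		if (!err)
			err = get_user(out, ubuf);		/* -EFAULT if the read faults */
		return err ? err : (out == 0x0123456789abcdefLL ? 0 : -1);
	}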
@@ -211,6 +218,22 @@ struct __large_struct { unsigned long buf[100]; };
: "=r" (err) \
: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
+#define __put_user_asm_64(x, addr, err) \
+ __asm__ __volatile__( \
+ " move.d %M1,[%2]\n" \
+ "2: move.d %H1,[%2+4]\n" \
+ "4:\n" \
+ " .section .fixup,\"ax\"\n" \
+ "3: move.d %3,%0\n" \
+ " jump 4b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .dword 2b,3b\n" \
+ " .dword 4b,3b\n" \
+ " .previous\n" \
+ : "=r" (err) \
+ : "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
+
#define __get_user_nocheck(x,ptr,size) \
({ \
@@ -239,6 +262,7 @@ do { \
case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \
case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \
case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \
+ case 8: __get_user_asm_64(x,ptr,retval); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
@@ -260,6 +284,23 @@ do { \
: "=r" (err), "=r" (x) \
: "r" (addr), "g" (-EFAULT), "0" (err))
+#define __get_user_asm_64(x, addr, err) \
+ __asm__ __volatile__( \
+ " move.d [%2],%M1\n" \
+ "2: move.d [%2+4],%H1\n" \
+ "4:\n" \
+ " .section .fixup,\"ax\"\n" \
+ "3: move.d %3,%0\n" \
+ " moveq 0,%1\n" \
+ " jump 4b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .dword 2b,3b\n" \
+ " .dword 4b,3b\n" \
+ " .previous\n" \
+ : "=r" (err), "=r" (x) \
+ : "r" (addr), "g" (-EFAULT), "0" (err))
+
/* More complex functions. Most are inline, but some call functions that
live in lib/usercopy.c */
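The two *_asm_64 macros above lean on the standard __ex_table mechanism: each instruction that may fault (labels 2 and 4) is paired with the address of the fixup code at label 3, which sets the error to -EFAULT (and, on the get side, clears the destination) before resuming after the access. Conceptually the page-fault handler consults that table roughly as in the sketch below; this is a simplified linear-scan illustration, the real lookup lives in the architecture's extable/fault code and the names are not taken from this patch:

	/* Simplified illustration of the __ex_table lookup on a faulting access. */
	struct exception_table_entry {
		unsigned long insn;	/* address of the instruction that may fault ("2b", "4b") */
		unsigned long fixup;	/* address to continue at instead ("3b") */
	};

	static unsigned long find_fixup(const struct exception_table_entry *first,
					const struct exception_table_entry *last,
					unsigned long faulting_pc)
	{
		for (; first <= last; first++)
			if (first->insn == faulting_pc)
				return first->fixup;	/* resume in the fixup code */
		return 0;				/* no entry: genuine kernel fault */
	}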
@@ -305,13 +346,13 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm__ __volatile__ (
" move.d %3,%0\n"
- " move.b [%2+],r9\n"
+ " move.b [%2+],$r9\n"
"1: beq 2f\n"
- " move.b r9,[%1+]\n"
+ " move.b $r9,[%1+]\n"
" subq 1,%0\n"
" bne 1b\n"
- " move.b [%2+],r9\n"
+ " move.b [%2+],$r9\n"
"2: sub.d %3,%0\n"
" neg.d %0,%0\n"
@@ -399,16 +440,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_1(to, from, ret) \
__asm_copy_user_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "2: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "2: move.b $r9,[%0+]\n", \
"3: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- "2: move.w r9,[%0+]\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ "2: move.w $r9,[%0+]\n" COPY, \
"3: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
@@ -418,16 +459,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_3(to, from, ret) \
__asm_copy_from_user_2x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "4: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "4: move.b $r9,[%0+]\n", \
"5: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- "2: move.d r9,[%0+]\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ "2: move.d $r9,[%0+]\n" COPY, \
"3: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
@@ -437,16 +478,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_5(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "4: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "4: move.b $r9,[%0+]\n", \
"5: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- "4: move.w r9,[%0+]\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ "4: move.w $r9,[%0+]\n" COPY, \
"5: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
@@ -456,16 +497,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_7(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "6: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "6: move.b $r9,[%0+]\n", \
"7: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- "4: move.d r9,[%0+]\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ "4: move.d $r9,[%0+]\n" COPY, \
"5: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
@@ -475,16 +516,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_9(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "6: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "6: move.b $r9,[%0+]\n", \
"7: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- "6: move.w r9,[%0+]\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ "6: move.w $r9,[%0+]\n" COPY, \
"7: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
@@ -494,16 +535,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_11(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "8: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "8: move.b $r9,[%0+]\n", \
"9: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- "6: move.d r9,[%0+]\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ "6: move.d $r9,[%0+]\n" COPY, \
"7: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
@@ -513,16 +554,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_13(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "8: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "8: move.b $r9,[%0+]\n", \
"9: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- "8: move.w r9,[%0+]\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ "8: move.w $r9,[%0+]\n" COPY, \
"9: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
@@ -532,16 +573,16 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_15(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- "10: move.b r9,[%0+]\n", \
+ " move.b [%1+],$r9\n" \
+ "10: move.b $r9,[%0+]\n", \
"11: addq 1,%2\n" \
" clear.b [%0+]\n", \
" .dword 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- "8: move.d r9,[%0+]\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ "8: move.d $r9,[%0+]\n" COPY, \
"9: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
@@ -551,8 +592,8 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_16x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- "10: move.d r9,[%0+]\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ "10: move.d $r9,[%0+]\n" COPY, \
"11: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 10b,11b\n" TENTRY)
@@ -562,8 +603,8 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_20x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- "12: move.d r9,[%0+]\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ "12: move.d $r9,[%0+]\n" COPY, \
"13: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \
" .dword 12b,13b\n" TENTRY)
@@ -575,15 +616,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_1(to, from, ret) \
__asm_copy_user_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n2:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n2:\n", \
"3: addq 1,%2\n", \
" .dword 2b,3b\n")
#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- " move.w r9,[%0+]\n2:\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ " move.w $r9,[%0+]\n2:\n" COPY, \
"3: addq 2,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
@@ -592,15 +633,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_3(to, from, ret) \
__asm_copy_to_user_2x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n4:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n4:\n", \
"5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- " move.d r9,[%0+]\n2:\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ " move.d $r9,[%0+]\n2:\n" COPY, \
"3: addq 4,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
@@ -609,15 +650,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_5(to, from, ret) \
__asm_copy_to_user_4x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n4:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n4:\n", \
"5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_4x_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- " move.w r9,[%0+]\n4:\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ " move.w $r9,[%0+]\n4:\n" COPY, \
"5: addq 2,%2\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
@@ -626,15 +667,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_7(to, from, ret) \
__asm_copy_to_user_6x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n6:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n6:\n", \
"7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_4x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- " move.d r9,[%0+]\n4:\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ " move.d $r9,[%0+]\n4:\n" COPY, \
"5: addq 4,%2\n" FIXUP, \
" .dword 4b,5b\n" TENTRY)
@@ -643,15 +684,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_9(to, from, ret) \
__asm_copy_to_user_8x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n6:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n6:\n", \
"7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_8x_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- " move.w r9,[%0+]\n6:\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ " move.w $r9,[%0+]\n6:\n" COPY, \
"7: addq 2,%2\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
@@ -660,15 +701,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_11(to, from, ret) \
__asm_copy_to_user_10x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n8:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n8:\n", \
"9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_8x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- " move.d r9,[%0+]\n6:\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ " move.d $r9,[%0+]\n6:\n" COPY, \
"7: addq 4,%2\n" FIXUP, \
" .dword 6b,7b\n" TENTRY)
@@ -677,15 +718,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_13(to, from, ret) \
__asm_copy_to_user_12x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n8:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n8:\n", \
"9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_12x_cont(to, from, ret, \
- " move.w [%1+],r9\n" \
- " move.w r9,[%0+]\n8:\n" COPY, \
+ " move.w [%1+],$r9\n" \
+ " move.w $r9,[%0+]\n8:\n" COPY, \
"9: addq 2,%2\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
@@ -694,15 +735,15 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_15(to, from, ret) \
__asm_copy_to_user_14x_cont(to, from, ret, \
- " move.b [%1+],r9\n" \
- " move.b r9,[%0+]\n10:\n", \
+ " move.b [%1+],$r9\n" \
+ " move.b $r9,[%0+]\n10:\n", \
"11: addq 1,%2\n", \
" .dword 10b,11b\n")
#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_12x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- " move.d r9,[%0+]\n8:\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ " move.d $r9,[%0+]\n8:\n" COPY, \
"9: addq 4,%2\n" FIXUP, \
" .dword 8b,9b\n" TENTRY)
@@ -711,8 +752,8 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_16x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- " move.d r9,[%0+]\n10:\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ " move.d $r9,[%0+]\n10:\n" COPY, \
"11: addq 4,%2\n" FIXUP, \
" .dword 10b,11b\n" TENTRY)
@@ -721,8 +762,8 @@ strncpy_from_user(char *dst, const char *src, long count)
#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_20x_cont(to, from, ret, \
- " move.d [%1+],r9\n" \
- " move.d r9,[%0+]\n12:\n" COPY, \
+ " move.d [%1+],$r9\n" \
+ " move.d $r9,[%0+]\n12:\n" COPY, \
"13: addq 4,%2\n" FIXUP, \
" .dword 12b,13b\n" TENTRY)
@@ -1034,17 +1075,17 @@ strnlen_user(const char *s, long n)
*/
__asm__ __volatile__ (
- " move.d %1,r9\n"
+ " move.d %1,$r9\n"
"0:\n"
" ble 1f\n"
- " subq 1,r9\n"
+ " subq 1,$r9\n"
" test.b [%0+]\n"
" bne 0b\n"
- " test.d r9\n"
+ " test.d $r9\n"
"1:\n"
" move.d %1,%0\n"
- " sub.d r9,%0\n"
+ " sub.d $r9,%0\n"
"2:\n"
" .section .fixup,\"ax\"\n"