author    Paul Mackerras <paulus@tango.paulus.ozlabs.org>    2002-02-11 20:41:44 +1100
committer Paul Mackerras <paulus@tango.paulus.ozlabs.org>    2002-02-11 20:41:44 +1100
commit    db7bfdb0276574b29618179004ced1de8dcf40c0 (patch)
tree      f65179bd228616f902065bc92a96ad394f4b0097 /arch/ppc/lib/string.S
parent    0dc68d77428413d0f417df3a378f857a2e798ebf (diff)
Import arch/ppc and include/asm-ppc changes from linuxppc_2_5 tree
Diffstat (limited to 'arch/ppc/lib/string.S')
-rw-r--r--  arch/ppc/lib/string.S | 80
1 file changed, 35 insertions(+), 45 deletions(-)
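
The bulk of this diff replaces the open-coded ".globl name" / "name:" pairs with the _GLOBAL() macro, pulled in through the new <asm/ppc_asm.h> include. As a rough sketch only (an assumption, not the exact kernel definition), _GLOBAL(n) expands to something along these lines:

    /* Assumed approximation of _GLOBAL() from <asm/ppc_asm.h>; the real
     * macro may also emit stabs debug entries, as the .stabs directives
     * added near the top of this file suggest. */
    #define _GLOBAL(n)	\
    	.globl n;	\
    n:

    /* So that, for example: */
    _GLOBAL(strcpy)		/* replaces ".globl strcpy" followed by "strcpy:" */

Centralising the entry-point boilerplate in one macro keeps the global directive, label, and any associated debug info consistent across the ppc assembly files.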
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
index 1b0ae15f1929..efab9d80b436 100644
--- a/arch/ppc/lib/string.S
+++ b/arch/ppc/lib/string.S
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.string.S 1.9 10/25/01 10:08:51 trini
+ * BK Id: %F% %I% %G% %U% %#%
*/
/*
* String handling functions for PowerPC.
@@ -11,11 +11,11 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-#include "../kernel/ppc_asm.tmpl"
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
+#include <asm/ppc_asm.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
@@ -65,13 +65,14 @@
.text
.text
+ .stabs "arch/ppc/lib/",N_SO,0,0,0f
+ .stabs "string.S",N_SO,0,0,0f
CACHELINE_BYTES = L1_CACHE_LINE_SIZE
LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE
CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1)
- .globl strcpy
-strcpy:
+_GLOBAL(strcpy)
addi r5,r3,-1
addi r4,r4,-1
1: lbzu r0,1(r4)
@@ -80,8 +81,7 @@ strcpy:
bne 1b
blr
- .globl strncpy
-strncpy:
+_GLOBAL(strncpy)
cmpwi 0,r5,0
beqlr
mtctr r5
@@ -93,8 +93,7 @@ strncpy:
bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
blr
- .globl strcat
-strcat:
+_GLOBAL(strcat)
addi r5,r3,-1
addi r4,r4,-1
1: lbzu r0,1(r5)
@@ -107,8 +106,7 @@ strcat:
bne 1b
blr
- .globl strcmp
-strcmp:
+_GLOBAL(strcmp)
addi r5,r3,-1
addi r4,r4,-1
1: lbzu r3,1(r5)
@@ -119,8 +117,7 @@ strcmp:
beq 1b
blr
- .globl strlen
-strlen:
+_GLOBAL(strlen)
addi r4,r3,-1
1: lbzu r0,1(r4)
cmpwi 0,r0,0
@@ -133,8 +130,7 @@ strlen:
* to set them to zero. This requires that the destination
* area is cacheable. -- paulus
*/
- .globl cacheable_memzero
-cacheable_memzero:
+_GLOBAL(cacheable_memzero)
mr r5,r4
li r4,0
addi r6,r3,-4
@@ -165,6 +161,12 @@ cacheable_memzero:
stw r4, 8(r6)
stw r4, 12(r6)
stw r4, 16(r6)
+#if CACHE_LINE_SIZE >= 32
+ stw r4, 20(r6)
+ stw r4, 24(r6)
+ stw r4, 28(r6)
+ stw r4, 32(r6)
+#endif /* CACHE_LINE_SIZE */
#endif
addi r6,r6,CACHELINE_BYTES
bdnz 10b
@@ -184,8 +186,7 @@ cacheable_memzero:
bdnz 8b
blr
- .globl memset
-memset:
+_GLOBAL(memset)
rlwimi r4,r4,8,16,23
rlwimi r4,r4,16,0,15
addi r6,r3,-4
@@ -210,8 +211,7 @@ memset:
bdnz 8b
blr
- .globl bcopy
-bcopy:
+_GLOBAL(bcopy)
mr r6,r3
mr r3,r4
mr r4,r6
@@ -224,8 +224,7 @@ bcopy:
* We only use this version if the source and dest don't overlap.
* -- paulus.
*/
- .global cacheable_memcpy
-cacheable_memcpy:
+_GLOBAL(cacheable_memcpy)
add r7,r3,r5 /* test if the src & dst overlap */
add r8,r4,r5
cmplw 0,r4,r7
@@ -299,14 +298,12 @@ cacheable_memcpy:
bdnz 40b
65: blr
- .globl memmove
-memmove:
+_GLOBAL(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
/* fall through */
- .globl memcpy
-memcpy:
+_GLOBAL(memcpy)
srwi. r7,r5,3
addi r6,r3,-4
addi r4,r4,-4
@@ -347,8 +344,7 @@ memcpy:
mtctr r7
b 1b
- .globl backwards_memcpy
-backwards_memcpy:
+_GLOBAL(backwards_memcpy)
rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
add r6,r3,r5
add r4,r4,r5
@@ -383,9 +379,8 @@ backwards_memcpy:
beq 2b
mtctr r7
b 1b
-
- .globl memcmp
-memcmp:
+
+_GLOBAL(memcmp)
cmpwi 0,r5,0
ble- 2f
mtctr r5
@@ -399,8 +394,7 @@ memcmp:
2: li r3,0
blr
- .global memchr
-memchr:
+_GLOBAL(memchr)
cmpwi 0,r5,0
ble- 2f
mtctr r5
@@ -412,8 +406,7 @@ memchr:
2: li r3,0
blr
- .globl __copy_tofrom_user
-__copy_tofrom_user:
+_GLOBAL(__copy_tofrom_user)
addi r4,r4,-4
addi r6,r3,-4
neg r0,r3
@@ -445,23 +438,23 @@ __copy_tofrom_user:
#if !defined(CONFIG_8xx)
/* Here we decide how far ahead to prefetch the source */
-#if MAX_L1_COPY_PREFETCH > 1
+#if MAX_COPY_PREFETCH > 1
/* Heuristically, for large transfers we prefetch
- MAX_L1_COPY_PREFETCH cachelines ahead. For small transfers
+ MAX_COPY_PREFETCH cachelines ahead. For small transfers
we prefetch 1 cacheline ahead. */
- cmpwi r0,MAX_L1_COPY_PREFETCH
+ cmpwi r0,MAX_COPY_PREFETCH
li r7,1
li r3,4
ble 111f
- li r7,MAX_L1_COPY_PREFETCH
+ li r7,MAX_COPY_PREFETCH
111: mtctr r7
112: dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
bdnz 112b
-#else /* MAX_L1_COPY_PREFETCH == 1 */
+#else /* MAX_COPY_PREFETCH == 1 */
li r3,CACHELINE_BYTES + 4
dcbt r11,r4
-#endif /* MAX_L1_COPY_PREFETCH */
+#endif /* MAX_COPY_PREFETCH */
#endif /* CONFIG_8xx */
mtctr r0
@@ -606,8 +599,7 @@ __copy_tofrom_user:
.long 114b,120b
.text
- .globl __clear_user
-__clear_user:
+_GLOBAL(__clear_user)
addi r6,r3,-4
li r3,0
li r5,0
@@ -644,8 +636,7 @@ __clear_user:
.long 8b,99b
.text
- .globl __strncpy_from_user
-__strncpy_from_user:
+_GLOBAL(__strncpy_from_user)
addi r6,r3,-1
addi r4,r4,-1
cmpwi 0,r5,0
@@ -668,8 +659,7 @@ __strncpy_from_user:
.text
/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
- .globl __strnlen_user
-__strnlen_user:
+_GLOBAL(__strnlen_user)
addi r7,r3,-1
subf r6,r7,r5 /* top+1 - str */
cmplw 0,r4,r6