author     Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 19:08:33 -0800
committer  Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 19:08:33 -0800
commit     2d80cb2a5e022225b9512ccc98f7979cc4b92ce8 (patch)
tree       a7f247ba424b5366ce443b3d9cb5b4c7f6538e3f /include
parent     75b566af5cc6f64f9ab5b66608ff8ce18098a2b4 (diff)
v2.4.5.8 -> v2.4.5.9
- make sure "sync()" doesn't effectively lock up the machine by overloading all the IO resources
- fix up some network memory allocations that don't want to wait on IO
- merge with Alan (including MIPS update)
- Jeff Garzik: network driver updates
- Al Viro: System V FS update (write capability, page cache, mondo cleanups)
- Kai Germaschewski: ISDN cleanups, TURBOPAM driver by Stelian Pop
- Ben Fennema: UDF update (time handling, i_blocks fix)
- Neil Brown: md error handling improvements, knfsd file handle compatibility
- Paul Mackerras: PPC update
- Jakub Jelinek: fix up kernel linker scripts to accept .rodata better
- Patrick Mochel: fix PME handling in pci_enable_wake()
- Chris Mason: reiserfs PF_MEMALLOC handling
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/elf.h  2
-rw-r--r--  include/asm-i386/uaccess.h  6
-rw-r--r--  include/asm-mips/addrspace.h  18
-rw-r--r--  include/asm-mips/asmmacro.h  200
-rw-r--r--  include/asm-mips/atomic.h  217
-rw-r--r--  include/asm-mips/bcache.h  44
-rw-r--r--  include/asm-mips/bitops.h  566
-rw-r--r--  include/asm-mips/bootinfo.h  152
-rw-r--r--  include/asm-mips/bugs.h  7
-rw-r--r--  include/asm-mips/cache.h  22
-rw-r--r--  include/asm-mips/checksum.h  223
-rw-r--r--  include/asm-mips/cpu.h  154
-rw-r--r--  include/asm-mips/current.h  19
-rw-r--r--  include/asm-mips/delay.h  15
-rw-r--r--  include/asm-mips/div64.h  109
-rw-r--r--  include/asm-mips/elf.h  15
-rw-r--r--  include/asm-mips/errno.h  4
-rw-r--r--  include/asm-mips/fcntl.h  26
-rw-r--r--  include/asm-mips/fpu_emulator.h  44
-rw-r--r--  include/asm-mips/hardirq.h  56
-rw-r--r--  include/asm-mips/hdreg.h  15
-rw-r--r--  include/asm-mips/hw_irq.h  19
-rw-r--r--  include/asm-mips/ide.h  102
-rw-r--r--  include/asm-mips/inst.h  77
-rw-r--r--  include/asm-mips/io.h  110
-rw-r--r--  include/asm-mips/ioctl.h  9
-rw-r--r--  include/asm-mips/ioctls.h  1
-rw-r--r--  include/asm-mips/irq.h  24
-rw-r--r--  include/asm-mips/jazzdma.h  40
-rw-r--r--  include/asm-mips/keyboard.h  16
-rw-r--r--  include/asm-mips/mc146818rtc.h  9
-rw-r--r--  include/asm-mips/mips32_cache.h  288
-rw-r--r--  include/asm-mips/mipsregs.h  219
-rw-r--r--  include/asm-mips/mmu_context.h  22
-rw-r--r--  include/asm-mips/orion.h  14
-rw-r--r--  include/asm-mips/param.h  4
-rw-r--r--  include/asm-mips/pci.h  29
-rw-r--r--  include/asm-mips/pgalloc.h  124
-rw-r--r--  include/asm-mips/pgtable.h  115
-rw-r--r--  include/asm-mips/pmc/ev64120.h  59
-rw-r--r--  include/asm-mips/pmc/ev64120int.h  32
-rw-r--r--  include/asm-mips/processor.h  9
-rw-r--r--  include/asm-mips/ptrace.h  24
-rw-r--r--  include/asm-mips/resource.h  9
-rw-r--r--  include/asm-mips/riscos-syscall.h  979
-rw-r--r--  include/asm-mips/semaphore-helper.h  60
-rw-r--r--  include/asm-mips/semaphore.h  230
-rw-r--r--  include/asm-mips/serial.h  130
-rw-r--r--  include/asm-mips/sgi/sgint23.h  22
-rw-r--r--  include/asm-mips/sgialib.h  27
-rw-r--r--  include/asm-mips/shmbuf.h  2
-rw-r--r--  include/asm-mips/smp.h  31
-rw-r--r--  include/asm-mips/sni.h  30
-rw-r--r--  include/asm-mips/socket.h  2
-rw-r--r--  include/asm-mips/spinlock.h  33
-rw-r--r--  include/asm-mips/stackframe.h  85
-rw-r--r--  include/asm-mips/stat.h  26
-rw-r--r--  include/asm-mips/system.h  213
-rw-r--r--  include/asm-mips/termios.h  5
-rw-r--r--  include/asm-mips/time.h  66
-rw-r--r--  include/asm-mips/tlb.h  1
-rw-r--r--  include/asm-mips/tx3912.h  576
-rw-r--r--  include/asm-mips/unaligned.h  186
-rw-r--r--  include/asm-mips/unistd.h  1068
-rw-r--r--  include/asm-mips/watch.h  7
-rw-r--r--  include/asm-mips/wbflush.h  2
-rw-r--r--  include/asm-ppc/bootinfo.h  23
-rw-r--r--  include/asm-ppc/highmem.h  6
-rw-r--r--  include/asm-ppc/machdep.h  23
-rw-r--r--  include/asm-ppc/mmu.h  46
-rw-r--r--  include/asm-ppc/mmu_context.h  144
-rw-r--r--  include/asm-ppc/pgtable.h  245
-rw-r--r--  include/asm-ppc/processor.h  6
-rw-r--r--  include/asm-ppc/prom.h  27
-rw-r--r--  include/asm-ppc/time.h  4
-rw-r--r--  include/asm-sh/hitachi_se.h  2
-rw-r--r--  include/asm-sparc/hardirq.h  2
-rw-r--r--  include/asm-sparc/vaddrs.h  2
-rw-r--r--  include/asm-sparc64/starfire.h  2
-rw-r--r--  include/linux/agp_backend.h  7
-rw-r--r--  include/linux/atmdev.h  7
-rw-r--r--  include/linux/isdn.h  19
-rw-r--r--  include/linux/isdn/tpam.h  56
-rw-r--r--  include/linux/isdnif.h  3
-rw-r--r--  include/linux/nfsd/nfsd.h  8
-rw-r--r--  include/linux/nfsd/nfsfh.h  2
-rw-r--r--  include/linux/pci.h  2
-rw-r--r--  include/linux/pci_ids.h  55
-rw-r--r--  include/linux/sysv_fs.h  229
-rw-r--r--  include/linux/sysv_fs_i.h  2
-rw-r--r--  include/linux/sysv_fs_sb.h  57
-rw-r--r--  include/linux/udf_fs.h  4
-rw-r--r--  include/linux/udf_fs_i.h  3
-rw-r--r--  include/linux/udf_udf.h  4
94 files changed, 5198 insertions, 2842 deletions
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index c8d826232e19..9b14bcf6c95e 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -55,7 +55,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* Wow, the "main" arch needs arch dependent functions too.. :) */
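The only change is operand order, but it matters on a 32-bit build: with the usual i386 TASK_SIZE of 0xC0000000, computing 2 * TASK_SIZE first overflows unsigned long and ELF_ET_DYN_BASE comes out as 0x2aaaaaaa instead of the intended 0x80000000. A minimal userspace sketch of the arithmetic (the TASK_SIZE value is assumed; nothing here is kernel code):

#include <stdio.h>

#define TASK_SIZE 0xC0000000UL	/* assumed: the usual i386 value */

int main(void)
{
	unsigned long old_base = 2 * TASK_SIZE / 3;	/* multiply first: wraps in 32 bits */
	unsigned long new_base = TASK_SIZE / 3 * 2;	/* divide first: no overflow        */

	printf("old: %#lx\n", old_base);	/* 0x2aaaaaaa on a 32-bit build */
	printf("new: %#lx\n", new_base);	/* 0x80000000                   */
	return 0;
}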
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index ec4823866cb9..74c43e03f00e 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -129,12 +129,6 @@ extern void __put_user_4(void);
extern void __put_user_bad(void);
-#define __put_user_x(size,ret,x,ptr) \
- __asm__ __volatile__("call __put_user_" #size \
- :"=a" (ret) \
- :"0" (ptr),"d" (x) \
- :"cx")
-
#define put_user(x,ptr) \
__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
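Only the duplicate __put_user_x() definition goes away; put_user() itself keeps working through __put_user_check(). A hedged sketch of the interface as callers see it (report_value() and its arguments are hypothetical):

/* Hypothetical caller, not from this patch. */
static int report_value(int *uptr, int value)
{
	if (put_user(value, uptr))	/* non-zero (-EFAULT) if uptr is not writable */
		return -EFAULT;
	return 0;
}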
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h
index 39259486e6fa..0d1bf3246db0 100644
--- a/include/asm-mips/addrspace.h
+++ b/include/asm-mips/addrspace.h
@@ -4,6 +4,7 @@
* for more details.
*
* Copyright (C) 1996 by Ralf Baechle
+ * Copyright (C) 2000 by Maciej W. Rozycki
*
* Defitions for the address spaces of the MIPS CPUs.
*/
@@ -19,23 +20,40 @@
#define KSEG2 0xc0000000
#define KSEG3 0xe0000000
+#define K0BASE KSEG0
+
/*
* Returns the kernel segment base of a given address
*/
+#ifndef __ASSEMBLY__
#define KSEGX(a) (((unsigned long)(a)) & 0xe0000000)
+#else
+#define KSEGX(a) ((a) & 0xe0000000)
+#endif
/*
* Returns the physical address of a KSEG0/KSEG1 address
*/
+#ifndef __ASSEMBLY__
#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
+#else
+#define PHYSADDR(a) ((a) & 0x1fffffff)
+#endif
/*
* Map an address to a certain kernel segment
*/
+#ifndef __ASSEMBLY__
#define KSEG0ADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG0))
#define KSEG1ADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG1))
#define KSEG2ADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG2))
#define KSEG3ADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG3))
+#else
+#define KSEG0ADDR(a) (((a) & 0x1fffffff) | KSEG0)
+#define KSEG1ADDR(a) (((a) & 0x1fffffff) | KSEG1)
+#define KSEG2ADDR(a) (((a) & 0x1fffffff) | KSEG2)
+#define KSEG3ADDR(a) (((a) & 0x1fffffff) | KSEG3)
+#endif
/*
* Memory segments (64bit kernel mode addresses)
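The new __ASSEMBLY__ variants simply drop the C casts so the same macros work in assembler sources. A hedged sketch of the C-side helpers, assuming the standard 32-bit MIPS map (KSEG0 = 0x80000000, KSEG1 = 0xa0000000); kseg_example() and the sample address are made up:

static void kseg_example(void)
{
	unsigned long phys = 0x00123000;	/* some physical address */
	void *cached, *uncached;
	unsigned long back, seg;

	cached   = (void *) KSEG0ADDR(phys);	/* 0x80123000, cached accesses   */
	uncached = (void *) KSEG1ADDR(phys);	/* 0xa0123000, uncached accesses */
	back     = PHYSADDR(cached);		/* 0x00123000 again              */
	seg      = KSEGX(cached);		/* KSEG0, i.e. 0x80000000        */

	(void) uncached; (void) back; (void) seg;
}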
diff --git a/include/asm-mips/asmmacro.h b/include/asm-mips/asmmacro.h
index fb3692dd705c..7becc9bd1ae2 100644
--- a/include/asm-mips/asmmacro.h
+++ b/include/asm-mips/asmmacro.h
@@ -3,124 +3,122 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
* Copyright (C) 1998 Ralf Baechle
- *
- * $Id: asmmacro.h,v 1.3 1998/03/27 04:47:58 ralf Exp $
*/
-#ifndef __MIPS_ASMMACRO_H
-#define __MIPS_ASMMACRO_H
+#ifndef _ASM_ASMMACRO_H
+#define _ASM_ASMMACRO_H
#include <asm/offset.h>
#define FPU_SAVE_DOUBLE(thread, tmp) \
cfc1 tmp, fcr31; \
sdc1 $f0, (THREAD_FPU + 0x000)(thread); \
- sdc1 $f2, (THREAD_FPU + 0x008)(thread); \
- sdc1 $f4, (THREAD_FPU + 0x010)(thread); \
- sdc1 $f6, (THREAD_FPU + 0x018)(thread); \
- sdc1 $f8, (THREAD_FPU + 0x020)(thread); \
- sdc1 $f10, (THREAD_FPU + 0x028)(thread); \
- sdc1 $f12, (THREAD_FPU + 0x030)(thread); \
- sdc1 $f14, (THREAD_FPU + 0x038)(thread); \
- sdc1 $f16, (THREAD_FPU + 0x040)(thread); \
- sdc1 $f18, (THREAD_FPU + 0x048)(thread); \
- sdc1 $f20, (THREAD_FPU + 0x050)(thread); \
- sdc1 $f22, (THREAD_FPU + 0x058)(thread); \
- sdc1 $f24, (THREAD_FPU + 0x060)(thread); \
- sdc1 $f26, (THREAD_FPU + 0x068)(thread); \
- sdc1 $f28, (THREAD_FPU + 0x070)(thread); \
- sdc1 $f30, (THREAD_FPU + 0x078)(thread); \
- sw tmp, (THREAD_FPU + 0x080)(thread)
+ sdc1 $f2, (THREAD_FPU + 0x010)(thread); \
+ sdc1 $f4, (THREAD_FPU + 0x020)(thread); \
+ sdc1 $f6, (THREAD_FPU + 0x030)(thread); \
+ sdc1 $f8, (THREAD_FPU + 0x040)(thread); \
+ sdc1 $f10, (THREAD_FPU + 0x050)(thread); \
+ sdc1 $f12, (THREAD_FPU + 0x060)(thread); \
+ sdc1 $f14, (THREAD_FPU + 0x070)(thread); \
+ sdc1 $f16, (THREAD_FPU + 0x080)(thread); \
+ sdc1 $f18, (THREAD_FPU + 0x090)(thread); \
+ sdc1 $f20, (THREAD_FPU + 0x0a0)(thread); \
+ sdc1 $f22, (THREAD_FPU + 0x0b0)(thread); \
+ sdc1 $f24, (THREAD_FPU + 0x0c0)(thread); \
+ sdc1 $f26, (THREAD_FPU + 0x0d0)(thread); \
+ sdc1 $f28, (THREAD_FPU + 0x0e0)(thread); \
+ sdc1 $f30, (THREAD_FPU + 0x0f0)(thread); \
+ sw tmp, (THREAD_FPU + 0x100)(thread)
#define FPU_SAVE_SINGLE(thread,tmp) \
cfc1 tmp, fcr31; \
swc1 $f0, (THREAD_FPU + 0x000)(thread); \
- swc1 $f1, (THREAD_FPU + 0x004)(thread); \
- swc1 $f2, (THREAD_FPU + 0x008)(thread); \
- swc1 $f3, (THREAD_FPU + 0x00c)(thread); \
- swc1 $f4, (THREAD_FPU + 0x010)(thread); \
- swc1 $f5, (THREAD_FPU + 0x014)(thread); \
- swc1 $f6, (THREAD_FPU + 0x018)(thread); \
- swc1 $f7, (THREAD_FPU + 0x01c)(thread); \
- swc1 $f8, (THREAD_FPU + 0x020)(thread); \
- swc1 $f9, (THREAD_FPU + 0x024)(thread); \
- swc1 $f10, (THREAD_FPU + 0x028)(thread); \
- swc1 $f11, (THREAD_FPU + 0x02c)(thread); \
- swc1 $f12, (THREAD_FPU + 0x030)(thread); \
- swc1 $f13, (THREAD_FPU + 0x034)(thread); \
- swc1 $f14, (THREAD_FPU + 0x038)(thread); \
- swc1 $f15, (THREAD_FPU + 0x03c)(thread); \
- swc1 $f16, (THREAD_FPU + 0x040)(thread); \
- swc1 $f17, (THREAD_FPU + 0x044)(thread); \
- swc1 $f18, (THREAD_FPU + 0x048)(thread); \
- swc1 $f19, (THREAD_FPU + 0x04c)(thread); \
- swc1 $f20, (THREAD_FPU + 0x050)(thread); \
- swc1 $f21, (THREAD_FPU + 0x054)(thread); \
- swc1 $f22, (THREAD_FPU + 0x058)(thread); \
- swc1 $f23, (THREAD_FPU + 0x05c)(thread); \
- swc1 $f24, (THREAD_FPU + 0x060)(thread); \
- swc1 $f25, (THREAD_FPU + 0x064)(thread); \
- swc1 $f26, (THREAD_FPU + 0x068)(thread); \
- swc1 $f27, (THREAD_FPU + 0x06c)(thread); \
- swc1 $f28, (THREAD_FPU + 0x070)(thread); \
- swc1 $f29, (THREAD_FPU + 0x074)(thread); \
- swc1 $f30, (THREAD_FPU + 0x078)(thread); \
- swc1 $f31, (THREAD_FPU + 0x07c)(thread); \
- sw tmp, (THREAD_FPU + 0x080)(thread)
+ swc1 $f1, (THREAD_FPU + 0x008)(thread); \
+ swc1 $f2, (THREAD_FPU + 0x010)(thread); \
+ swc1 $f3, (THREAD_FPU + 0x018)(thread); \
+ swc1 $f4, (THREAD_FPU + 0x020)(thread); \
+ swc1 $f5, (THREAD_FPU + 0x028)(thread); \
+ swc1 $f6, (THREAD_FPU + 0x030)(thread); \
+ swc1 $f7, (THREAD_FPU + 0x038)(thread); \
+ swc1 $f8, (THREAD_FPU + 0x040)(thread); \
+ swc1 $f9, (THREAD_FPU + 0x048)(thread); \
+ swc1 $f10, (THREAD_FPU + 0x050)(thread); \
+ swc1 $f11, (THREAD_FPU + 0x058)(thread); \
+ swc1 $f12, (THREAD_FPU + 0x060)(thread); \
+ swc1 $f13, (THREAD_FPU + 0x068)(thread); \
+ swc1 $f14, (THREAD_FPU + 0x070)(thread); \
+ swc1 $f15, (THREAD_FPU + 0x078)(thread); \
+ swc1 $f16, (THREAD_FPU + 0x080)(thread); \
+ swc1 $f17, (THREAD_FPU + 0x088)(thread); \
+ swc1 $f18, (THREAD_FPU + 0x090)(thread); \
+ swc1 $f19, (THREAD_FPU + 0x098)(thread); \
+ swc1 $f20, (THREAD_FPU + 0x0a0)(thread); \
+ swc1 $f21, (THREAD_FPU + 0x0a8)(thread); \
+ swc1 $f22, (THREAD_FPU + 0x0b0)(thread); \
+ swc1 $f23, (THREAD_FPU + 0x0b8)(thread); \
+ swc1 $f24, (THREAD_FPU + 0x0c0)(thread); \
+ swc1 $f25, (THREAD_FPU + 0x0c8)(thread); \
+ swc1 $f26, (THREAD_FPU + 0x0d0)(thread); \
+ swc1 $f27, (THREAD_FPU + 0x0d8)(thread); \
+ swc1 $f28, (THREAD_FPU + 0x0e0)(thread); \
+ swc1 $f29, (THREAD_FPU + 0x0e8)(thread); \
+ swc1 $f30, (THREAD_FPU + 0x0f0)(thread); \
+ swc1 $f31, (THREAD_FPU + 0x0f8)(thread); \
+ sw tmp, (THREAD_FPU + 0x100)(thread)
#define FPU_RESTORE_DOUBLE(thread, tmp) \
- lw tmp, (THREAD_FPU + 0x080)(thread); \
+ lw tmp, (THREAD_FPU + 0x100)(thread); \
ldc1 $f0, (THREAD_FPU + 0x000)(thread); \
- ldc1 $f2, (THREAD_FPU + 0x008)(thread); \
- ldc1 $f4, (THREAD_FPU + 0x010)(thread); \
- ldc1 $f6, (THREAD_FPU + 0x018)(thread); \
- ldc1 $f8, (THREAD_FPU + 0x020)(thread); \
- ldc1 $f10, (THREAD_FPU + 0x028)(thread); \
- ldc1 $f12, (THREAD_FPU + 0x030)(thread); \
- ldc1 $f14, (THREAD_FPU + 0x038)(thread); \
- ldc1 $f16, (THREAD_FPU + 0x040)(thread); \
- ldc1 $f18, (THREAD_FPU + 0x048)(thread); \
- ldc1 $f20, (THREAD_FPU + 0x050)(thread); \
- ldc1 $f22, (THREAD_FPU + 0x058)(thread); \
- ldc1 $f24, (THREAD_FPU + 0x060)(thread); \
- ldc1 $f26, (THREAD_FPU + 0x068)(thread); \
- ldc1 $f28, (THREAD_FPU + 0x070)(thread); \
- ldc1 $f30, (THREAD_FPU + 0x078)(thread); \
+ ldc1 $f2, (THREAD_FPU + 0x010)(thread); \
+ ldc1 $f4, (THREAD_FPU + 0x020)(thread); \
+ ldc1 $f6, (THREAD_FPU + 0x030)(thread); \
+ ldc1 $f8, (THREAD_FPU + 0x040)(thread); \
+ ldc1 $f10, (THREAD_FPU + 0x050)(thread); \
+ ldc1 $f12, (THREAD_FPU + 0x060)(thread); \
+ ldc1 $f14, (THREAD_FPU + 0x070)(thread); \
+ ldc1 $f16, (THREAD_FPU + 0x080)(thread); \
+ ldc1 $f18, (THREAD_FPU + 0x090)(thread); \
+ ldc1 $f20, (THREAD_FPU + 0x0a0)(thread); \
+ ldc1 $f22, (THREAD_FPU + 0x0b0)(thread); \
+ ldc1 $f24, (THREAD_FPU + 0x0c0)(thread); \
+ ldc1 $f26, (THREAD_FPU + 0x0d0)(thread); \
+ ldc1 $f28, (THREAD_FPU + 0x0e0)(thread); \
+ ldc1 $f30, (THREAD_FPU + 0x0f0)(thread); \
ctc1 tmp, fcr31
#define FPU_RESTORE_SINGLE(thread,tmp) \
- lw tmp, (THREAD_FPU + 0x080)(thread); \
+ lw tmp, (THREAD_FPU + 0x100)(thread); \
lwc1 $f0, (THREAD_FPU + 0x000)(thread); \
- lwc1 $f1, (THREAD_FPU + 0x004)(thread); \
- lwc1 $f2, (THREAD_FPU + 0x008)(thread); \
- lwc1 $f3, (THREAD_FPU + 0x00c)(thread); \
- lwc1 $f4, (THREAD_FPU + 0x010)(thread); \
- lwc1 $f5, (THREAD_FPU + 0x014)(thread); \
- lwc1 $f6, (THREAD_FPU + 0x018)(thread); \
- lwc1 $f7, (THREAD_FPU + 0x01c)(thread); \
- lwc1 $f8, (THREAD_FPU + 0x020)(thread); \
- lwc1 $f9, (THREAD_FPU + 0x024)(thread); \
- lwc1 $f10, (THREAD_FPU + 0x028)(thread); \
- lwc1 $f11, (THREAD_FPU + 0x02c)(thread); \
- lwc1 $f12, (THREAD_FPU + 0x030)(thread); \
- lwc1 $f13, (THREAD_FPU + 0x034)(thread); \
- lwc1 $f14, (THREAD_FPU + 0x038)(thread); \
- lwc1 $f15, (THREAD_FPU + 0x03c)(thread); \
- lwc1 $f16, (THREAD_FPU + 0x040)(thread); \
- lwc1 $f17, (THREAD_FPU + 0x044)(thread); \
- lwc1 $f18, (THREAD_FPU + 0x048)(thread); \
- lwc1 $f19, (THREAD_FPU + 0x04c)(thread); \
- lwc1 $f20, (THREAD_FPU + 0x050)(thread); \
- lwc1 $f21, (THREAD_FPU + 0x054)(thread); \
- lwc1 $f22, (THREAD_FPU + 0x058)(thread); \
- lwc1 $f23, (THREAD_FPU + 0x05c)(thread); \
- lwc1 $f24, (THREAD_FPU + 0x060)(thread); \
- lwc1 $f25, (THREAD_FPU + 0x064)(thread); \
- lwc1 $f26, (THREAD_FPU + 0x068)(thread); \
- lwc1 $f27, (THREAD_FPU + 0x06c)(thread); \
- lwc1 $f28, (THREAD_FPU + 0x070)(thread); \
- lwc1 $f29, (THREAD_FPU + 0x074)(thread); \
- lwc1 $f30, (THREAD_FPU + 0x078)(thread); \
- lwc1 $f31, (THREAD_FPU + 0x07c)(thread); \
+ lwc1 $f1, (THREAD_FPU + 0x008)(thread); \
+ lwc1 $f2, (THREAD_FPU + 0x010)(thread); \
+ lwc1 $f3, (THREAD_FPU + 0x018)(thread); \
+ lwc1 $f4, (THREAD_FPU + 0x020)(thread); \
+ lwc1 $f5, (THREAD_FPU + 0x028)(thread); \
+ lwc1 $f6, (THREAD_FPU + 0x030)(thread); \
+ lwc1 $f7, (THREAD_FPU + 0x038)(thread); \
+ lwc1 $f8, (THREAD_FPU + 0x040)(thread); \
+ lwc1 $f9, (THREAD_FPU + 0x048)(thread); \
+ lwc1 $f10, (THREAD_FPU + 0x050)(thread); \
+ lwc1 $f11, (THREAD_FPU + 0x058)(thread); \
+ lwc1 $f12, (THREAD_FPU + 0x060)(thread); \
+ lwc1 $f13, (THREAD_FPU + 0x068)(thread); \
+ lwc1 $f14, (THREAD_FPU + 0x070)(thread); \
+ lwc1 $f15, (THREAD_FPU + 0x078)(thread); \
+ lwc1 $f16, (THREAD_FPU + 0x080)(thread); \
+ lwc1 $f17, (THREAD_FPU + 0x088)(thread); \
+ lwc1 $f18, (THREAD_FPU + 0x090)(thread); \
+ lwc1 $f19, (THREAD_FPU + 0x098)(thread); \
+ lwc1 $f20, (THREAD_FPU + 0x0a0)(thread); \
+ lwc1 $f21, (THREAD_FPU + 0x0a8)(thread); \
+ lwc1 $f22, (THREAD_FPU + 0x0b0)(thread); \
+ lwc1 $f23, (THREAD_FPU + 0x0b8)(thread); \
+ lwc1 $f24, (THREAD_FPU + 0x0c0)(thread); \
+ lwc1 $f25, (THREAD_FPU + 0x0c8)(thread); \
+ lwc1 $f26, (THREAD_FPU + 0x0d0)(thread); \
+ lwc1 $f27, (THREAD_FPU + 0x0d8)(thread); \
+ lwc1 $f28, (THREAD_FPU + 0x0e0)(thread); \
+ lwc1 $f29, (THREAD_FPU + 0x0e8)(thread); \
+ lwc1 $f30, (THREAD_FPU + 0x0f0)(thread); \
+ lwc1 $f31, (THREAD_FPU + 0x0f8)(thread); \
ctc1 tmp, fcr31
#define CPU_SAVE_NONSCRATCH(thread) \
@@ -148,4 +146,4 @@
lw fp, THREAD_REG30(thread); \
lw ra, THREAD_REG31(thread)
-#endif /* !(__MIPS_ASMMACRO_H) */
+#endif /* _ASM_ASMMACRO_H */
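Behind the renumbering: the old macros packed the saved FP registers back to back (control word at THREAD_FPU + 0x80), while the new ones give each of the 32 registers its own 8-byte slot and move the control word to THREAD_FPU + 0x100. An illustration of the implied layout (fpr_slot() and FCR31_SLOT are made-up names; the real offsets come from asm/offset.h):

/* Illustration only, not part of the patch. */
static unsigned long fpr_slot(int n)	/* n = 0..31 */
{
	return 0x08UL * n;		/* $f2 -> 0x10, $f30 -> 0xf0 */
}

#define FCR31_SLOT	0x100		/* saved fcr31 follows the 32 slots */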
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index fbd2daee70fc..63667f0c81f1 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -9,25 +9,35 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997 by Ralf Baechle
- *
- * $Id: atomic.h,v 1.6 1999/07/26 19:42:42 harald Exp $
+ * Copyright (C) 1996, 1997, 2000 by Ralf Baechle
*/
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H
#include <linux/config.h>
-#ifdef CONFIG_SMP
typedef struct { volatile int counter; } atomic_t;
-#else
-typedef struct { int counter; } atomic_t;
-#endif
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
+/*
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_read(v) ((v)->counter)
+
+/*
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_set(v,i) ((v)->counter = (i))
#if !defined(CONFIG_CPU_HAS_LLSC)
@@ -37,8 +47,15 @@ typedef struct { int counter; } atomic_t;
/*
* The MIPS I implementation is only atomic with respect to
* interrupts. R3000 based multiprocessor machines are rare anyway ...
+ *
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
*/
-extern __inline__ void atomic_add(int i, volatile atomic_t * v)
+extern __inline__ void atomic_add(int i, atomic_t * v)
{
int flags;
@@ -48,7 +65,15 @@ extern __inline__ void atomic_add(int i, volatile atomic_t * v)
restore_flags(flags);
}
-extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
+/*
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+extern __inline__ void atomic_sub(int i, atomic_t * v)
{
int flags;
@@ -86,21 +111,6 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
return temp;
}
-extern __inline__ void atomic_clear_mask(unsigned long mask, unsigned long * v)
-{
- unsigned long temp;
- int flags;
-
- save_flags(flags);
- cli();
- temp = *v;
- temp &= ~mask;
- *v = temp;
- restore_flags(flags);
-
- return;
-}
-
#else
/*
@@ -109,40 +119,45 @@ extern __inline__ void atomic_clear_mask(unsigned long mask, unsigned long * v)
*/
/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
*/
-#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
-
-extern __inline__ void atomic_add(int i, volatile atomic_t * v)
+extern __inline__ void atomic_add(int i, atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
- "addu\t%0,%2\n\t"
- "sc\t%0,%1\n\t"
- "beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ "1: ll %0, %1 # atomic_add\n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
}
-extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
+/*
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+extern __inline__ void atomic_sub(int i, atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
- "subu\t%0,%2\n\t"
- "sc\t%0,%1\n\t"
- "beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ "1: ll %0, %1 # atomic_sub\n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
}
/*
@@ -153,18 +168,17 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
unsigned long temp, result;
__asm__ __volatile__(
- ".set\tnoreorder\n"
- "1:\tll\t%1,%2\n\t"
- "addu\t%0,%1,%3\n\t"
- "sc\t%0,%2\n\t"
- "beqz\t%0,1b\n\t"
- "addu\t%0,%1,%3\n\t"
- ".set\treorder"
- :"=&r" (result),
- "=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ ".set push # atomic_add_return\n"
+ ".set noreorder \n"
+ "1: ll %1, %2 \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqz %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ ".set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
return result;
}
@@ -174,18 +188,17 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
unsigned long temp, result;
__asm__ __volatile__(
- ".set\tnoreorder\n"
- "1:\tll\t%1,%2\n\t"
- "subu\t%0,%1,%3\n\t"
- "sc\t%0,%2\n\t"
- "beqz\t%0,1b\n\t"
- "subu\t%0,%1,%3\n\t"
- ".set\treorder"
- :"=&r" (result),
- "=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ ".set push \n"
+ ".set noreorder # atomic_sub_return\n"
+ "1: ll %1, %2 \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ ".set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
return result;
}
@@ -194,11 +207,71 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
+/*
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(1, (v)) == 0)
+
+/*
+ * atomic_dec_and_test - decrement by 1 and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+/*
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_inc(v) atomic_add(1,(v))
+
+/*
+ * atomic_dec - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_dec(v) atomic_sub(1,(v))
+
+/*
+ * atomic_add_negative - add and test if negative
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ *
+ * Currently not implemented for MIPS.
+ */
+
#endif /* defined(__KERNEL__) */
-#endif /* __ASM_MIPS_ATOMIC_H */
+#endif /* __ASM_ATOMIC_H */
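A hedged sketch of how the interfaces documented above are typically used, here as a reference count; example_refcount and the two helpers are hypothetical, only the atomic_* calls come from this header:

static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
	atomic_inc(&example_refcount);
}

static int example_put(void)
{
	/* True when the count reaches zero: the usual "last user frees it" test. */
	return atomic_dec_and_test(&example_refcount);
}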
diff --git a/include/asm-mips/bcache.h b/include/asm-mips/bcache.h
index e3507bb042e9..e7c8071b003e 100644
--- a/include/asm-mips/bcache.h
+++ b/include/asm-mips/bcache.h
@@ -1,14 +1,19 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1997, 1999 by Ralf Baechle
+ * Copyright (c) 1997, 1999, 2000 by Ralf Baechle
+ * Copyright (c) 2000 by Silicon Graphics, Inc.
*/
#ifndef _ASM_BCACHE_H
#define _ASM_BCACHE_H
+#include <linux/config.h>
+
+/* Some R4000 / R4400 / R4600 / R5000 machines may have a non-dma-coherent,
+ chipset implemented caches. On machines with other CPUs the CPU does the
+ cache thing itself. */
struct bcache_ops {
void (*bc_enable)(void);
void (*bc_disable)(void);
@@ -19,6 +24,39 @@ struct bcache_ops {
extern void indy_sc_init(void);
extern void sni_pcimt_sc_init(void);
+#ifdef CONFIG_BOARD_SCACHE
+
extern struct bcache_ops *bcops;
+extern inline void bc_enable(void)
+{
+ bcops->bc_enable();
+}
+
+extern inline void bc_disable(void)
+{
+ bcops->bc_disable();
+}
+
+extern inline void bc_wback_inv(unsigned long page, unsigned long size)
+{
+ bcops->bc_wback_inv(page, size);
+}
+
+extern inline void bc_inv(unsigned long page, unsigned long size)
+{
+ bcops->bc_inv(page, size);
+}
+
+#else /* !defined(CONFIG_BOARD_SCACHE) */
+
+/* Not R4000 / R4400 / R4600 / R5000. */
+
+#define bc_enable() do { } while (0)
+#define bc_disable() do { } while (0)
+#define bc_wback_inv(page, size) do { } while (0)
+#define bc_inv(page, size) do { } while (0)
+
+#endif /* !defined(CONFIG_BOARD_SCACHE) */
+
#endif /* _ASM_BCACHE_H */
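A hedged sketch of how a board with an external second-level cache might hook in when CONFIG_BOARD_SCACHE is set; the example_sc_* functions are hypothetical stand-ins, only struct bcache_ops and the bcops pointer come from this header:

static void example_sc_enable(void) { /* set the chipset's SC enable bit */ }
static void example_sc_disable(void) { /* clear it */ }
static void example_sc_wback_inv(unsigned long page, unsigned long size) { /* write back and invalidate the range */ }
static void example_sc_inv(unsigned long page, unsigned long size) { /* invalidate the range */ }

static struct bcache_ops example_sc_ops = {
	.bc_enable	= example_sc_enable,
	.bc_disable	= example_sc_disable,
	.bc_wback_inv	= example_sc_wback_inv,
	.bc_inv		= example_sc_inv,
};

/* The board's cache setup code would then do:  bcops = &example_sc_ops;  */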
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 326294ab26ac..0c4e2e8df7a1 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -1,10 +1,10 @@
-/* $Id: bitops.h,v 1.7 1999/08/19 22:56:33 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1994 - 1997, 1999 Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
@@ -19,6 +19,12 @@
#include <linux/config.h>
/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/*
* Only disable interrupt for kernel mode stuff to keep usermode stuff
* that dares to use kernel include files alive.
*/
@@ -35,25 +41,6 @@
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */
-/*
- * Note that the bit operations are defined on arrays of 32 bit sized
- * elements. With respect to a future 64 bit implementation it is
- * wrong to use long *. Use u32 * or int *.
- */
-extern __inline__ void set_bit(int nr, void *addr);
-extern __inline__ void clear_bit(int nr, void *addr);
-extern __inline__ void change_bit(int nr, void *addr);
-extern __inline__ int test_and_set_bit(int nr, void *addr);
-extern __inline__ int test_and_clear_bit(int nr, void *addr);
-extern __inline__ int test_and_change_bit(int nr, void *addr);
-
-extern __inline__ int test_bit(int nr, const void *addr);
-#ifndef __MIPSEB__
-extern __inline__ int find_first_zero_bit (void *addr, unsigned size);
-#endif
-extern __inline__ int find_next_zero_bit (void * addr, int size, int offset);
-extern __inline__ unsigned long ffz(unsigned long word);
-
#if defined(CONFIG_CPU_HAS_LLSC)
#include <asm/mipsregs.h>
@@ -64,92 +51,281 @@ extern __inline__ unsigned long ffz(unsigned long word);
*/
/*
- * The following functions will only work for the R4000!
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
*/
+extern __inline__ void
+set_bit(int nr, volatile void *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1:\tll\t%0, %1\t\t# set_bit\n\t"
+ "or\t%0, %2\n\t"
+ "sc\t%0, %1\n\t"
+ "beqz\t%0, 1b"
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & 0x1f)), "m" (*m));
+}
-extern __inline__ void set_bit(int nr, void *addr)
+/*
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+extern __inline__ void __set_bit(int nr, volatile void * addr)
{
- int mask, mw;
+ unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
- addr += ((nr >> 3) & ~3);
- mask = 1 << (nr & 0x1f);
- do {
- mw = load_linked(addr);
- } while (!store_conditional(addr, mw|mask));
+ *m |= 1UL << (nr & 31);
}
-extern __inline__ void clear_bit(int nr, void *addr)
+/*
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+extern __inline__ void
+clear_bit(int nr, volatile void *addr)
{
- int mask, mw;
+ unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1:\tll\t%0, %1\t\t# clear_bit\n\t"
+ "and\t%0, %2\n\t"
+ "sc\t%0, %1\n\t"
+ "beqz\t%0, 1b\n\t"
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
+}
- addr += ((nr >> 3) & ~3);
- mask = 1 << (nr & 0x1f);
- do {
- mw = load_linked(addr);
- }
- while (!store_conditional(addr, mw & ~mask));
+/*
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+extern __inline__ void
+change_bit(int nr, volatile void *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1:\tll\t%0, %1\t\t# change_bit\n\t"
+ "xor\t%0, %2\n\t"
+ "sc\t%0, %1\n\t"
+ "beqz\t%0, 1b"
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
-extern __inline__ void change_bit(int nr, void *addr)
+/*
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+extern __inline__ void __change_bit(int nr, volatile void * addr)
{
- int mask, mw;
+ unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
- addr += ((nr >> 3) & ~3);
- mask = 1 << (nr & 0x1f);
- do {
- mw = load_linked(addr);
- } while (!store_conditional(addr, mw ^ mask));
+ *m ^= 1UL << (nr & 31);
}
-extern __inline__ int test_and_set_bit(int nr, void *addr)
+/*
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+extern __inline__ int
+test_and_set_bit(int nr, volatile void *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\t\t# test_and_set_bit\n"
+ "1:\tll\t%0, %1\n\t"
+ "or\t%2, %0, %3\n\t"
+ "sc\t%2, %1\n\t"
+ "beqz\t%2, 1b\n\t"
+ " and\t%2, %0, %3\n\t"
+ ".set\treorder"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x1f)), "m" (*m)
+ : "memory");
+
+ return res != 0;
+}
+
+/*
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
- int mask, retval, mw;
+ int mask, retval;
+ volatile int *a = addr;
- addr += ((nr >> 3) & ~3);
+ a += nr >> 5;
mask = 1 << (nr & 0x1f);
- do {
- mw = load_linked(addr);
- retval = (mask & mw) != 0;
- } while (!store_conditional(addr, mw|mask));
+ retval = (mask & *a) != 0;
+ *a |= mask;
return retval;
}
-extern __inline__ int test_and_clear_bit(int nr, void *addr)
+/*
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+extern __inline__ int
+test_and_clear_bit(int nr, volatile void *addr)
{
- int mask, retval, mw;
+ unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\t\t# test_and_clear_bit\n"
+ "1:\tll\t%0, %1\n\t"
+ "or\t%2, %0, %3\n\t"
+ "xor\t%2, %3\n\t"
+ "sc\t%2, %1\n\t"
+ "beqz\t%2, 1b\n\t"
+ " and\t%2, %0, %3\n\t"
+ ".set\treorder"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x1f)), "m" (*m)
+ : "memory");
+
+ return res != 0;
+}
+
+/*
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
- addr += ((nr >> 3) & ~3);
+ a += nr >> 5;
mask = 1 << (nr & 0x1f);
- do {
- mw = load_linked(addr);
- retval = (mask & mw) != 0;
- }
- while (!store_conditional(addr, mw & ~mask));
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
return retval;
}
-extern __inline__ int test_and_change_bit(int nr, void *addr)
+/*
+ * test_and_change_bit - Change a bit and return its new value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+extern __inline__ int
+test_and_change_bit(int nr, volatile void *addr)
+{
+ unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
+ unsigned long temp, res;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\t\t# test_and_change_bit\n"
+ "1:\tll\t%0, %1\n\t"
+ "xor\t%2, %0, %3\n\t"
+ "sc\t%2, %1\n\t"
+ "beqz\t%2, 1b\n\t"
+ " and\t%2, %0, %3\n\t"
+ ".set\treorder"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x1f)), "m" (*m)
+ : "memory");
+
+ return res != 0;
+}
+
+/*
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
- int mask, retval, mw;
+ int mask, retval;
+ volatile int *a = addr;
- addr += ((nr >> 3) & ~3);
+ a += nr >> 5;
mask = 1 << (nr & 0x1f);
- do {
- mw = load_linked(addr);
- retval = (mask & mw) != 0;
- } while (!store_conditional(addr, mw ^ mask));
+ retval = (mask & *a) != 0;
+ *a ^= mask;
return retval;
}
#else /* MIPS I */
-extern __inline__ void set_bit(int nr, void * addr)
+/*
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+extern __inline__ void set_bit(int nr, volatile void * addr)
{
int mask;
- int *a = addr;
+ volatile int *a = addr;
__bi_flags;
a += nr >> 5;
@@ -159,10 +335,39 @@ extern __inline__ void set_bit(int nr, void * addr)
__bi_restore_flags(flags);
}
-extern __inline__ void clear_bit(int nr, void * addr)
+/*
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+extern __inline__ void __set_bit(int nr, volatile void * addr)
{
int mask;
- int *a = addr;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a |= mask;
+}
+
+/*
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+extern __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ int mask;
+ volatile int *a = addr;
__bi_flags;
a += nr >> 5;
@@ -172,10 +377,19 @@ extern __inline__ void clear_bit(int nr, void * addr)
__bi_restore_flags(flags);
}
-extern __inline__ void change_bit(int nr, void * addr)
+/*
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+extern __inline__ void change_bit(int nr, volatile void * addr)
{
int mask;
- int *a = addr;
+ volatile int *a = addr;
__bi_flags;
a += nr >> 5;
@@ -185,10 +399,34 @@ extern __inline__ void change_bit(int nr, void * addr)
__bi_restore_flags(flags);
}
-extern __inline__ int test_and_set_bit(int nr, void * addr)
+/*
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+extern __inline__ void __change_bit(int nr, volatile void * addr)
+{
+ unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
+
+ *m ^= 1UL << (nr & 31);
+}
+
+/*
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
- int *a = addr;
+ volatile int *a = addr;
__bi_flags;
a += nr >> 5;
@@ -201,10 +439,40 @@ extern __inline__ int test_and_set_bit(int nr, void * addr)
return retval;
}
-extern __inline__ int test_and_clear_bit(int nr, void * addr)
+/*
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
+/*
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
- int *a = addr;
+ volatile int *a = addr;
__bi_flags;
a += nr >> 5;
@@ -217,10 +485,40 @@ extern __inline__ int test_and_clear_bit(int nr, void * addr)
return retval;
}
-extern __inline__ int test_and_change_bit(int nr, void * addr)
+/*
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
- int *a = addr;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+
+ return retval;
+}
+
+/*
+ * test_and_change_bit - Change a bit and return its new value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
__bi_flags;
a += nr >> 5;
@@ -233,14 +531,41 @@ extern __inline__ int test_and_change_bit(int nr, void * addr)
return retval;
}
+/*
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+
+ return retval;
+}
+
#undef __bi_flags
-#undef __bi_cli()
-#undef __bi_save_flags(x)
-#undef __bi_restore_flags(x)
+#undef __bi_cli
+#undef __bi_save_flags
+#undef __bi_restore_flags
#endif /* MIPS I */
-extern __inline__ int test_bit(int nr, const void *addr)
+/*
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+extern __inline__ int test_bit(int nr, volatile void *addr)
{
return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
@@ -249,6 +574,14 @@ extern __inline__ int test_bit(int nr, const void *addr)
/* Little endian versions. */
+/*
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
+ */
extern __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
unsigned long dummy;
@@ -285,18 +618,20 @@ extern __inline__ int find_first_zero_bit (void *addr, unsigned size)
".set\tat\n\t"
".set\treorder\n"
"2:"
- : "=r" (res),
- "=r" (dummy),
- "=r" (addr)
- : "0" ((signed int) 0),
- "1" ((unsigned int) 0xffffffff),
- "2" (addr),
- "r" (size)
+ : "=r" (res), "=r" (dummy), "=r" (addr)
+ : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
+ "2" (addr), "r" (size)
: "$1");
return res;
}
+/*
+ * find_next_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
@@ -320,11 +655,8 @@ extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
".set\tat\n\t"
".set\treorder\n"
"1:"
- : "=r" (set),
- "=r" (dummy)
- : "0" (0),
- "1" (1 << bit),
- "r" (*p)
+ : "=r" (set), "=r" (dummy)
+ : "0" (0), "1" (1 << bit), "r" (*p)
: "$1");
if (set < (32 - bit))
return set + offset;
@@ -341,8 +673,10 @@ extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
#endif /* !(__MIPSEB__) */
/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
*/
extern __inline__ unsigned long ffz(unsigned long word)
{
@@ -370,8 +704,11 @@ extern __inline__ unsigned long ffz(unsigned long word)
#ifdef __KERNEL__
-/*
- * ffs: find first bit set. This is defined the same way as
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
@@ -379,8 +716,10 @@ extern __inline__ unsigned long ffz(unsigned long word)
#define ffs(x) generic_ffs(x)
/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
*/
#define hweight32(x) generic_hweight32(x)
@@ -390,14 +729,12 @@ extern __inline__ unsigned long ffz(unsigned long word)
#endif /* __KERNEL__ */
#ifdef __MIPSEB__
-/* For now I steal the Sparc C versions, no need for speed, just need to
- * get it working.
- */
-/* find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
+/*
+ * find_next_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
*/
-
extern __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
@@ -438,6 +775,18 @@ found_middle:
* holds on the Sparc as it does for the ALPHA.
*/
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/*
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
+ */
+extern int find_first_zero_bit (void *addr, unsigned size);
+#endif
+
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
@@ -451,7 +800,6 @@ extern __inline__ int ext2_set_bit(int nr,void * addr)
int mask, retval, flags;
unsigned char *ADDR = (unsigned char *) addr;
- ADDR += nr >> 3;
mask = 1 << (nr & 0x07);
save_and_cli(flags);
retval = (mask & *ADDR) != 0;
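A hedged usage sketch for the bit operations above, implementing a one-bit busy flag; example_flags and the helpers are hypothetical, only the bitops and the smp_mb__*_clear_bit() macros come from this header:

static unsigned long example_flags;	/* bit 0 = device busy */
#define EXAMPLE_BUSY	0

static int example_try_start(void)
{
	/* Atomic: only one caller can see the 0 -> 1 transition. */
	if (test_and_set_bit(EXAMPLE_BUSY, &example_flags))
		return 0;		/* already busy */
	return 1;
}

static void example_finish(void)
{
	/* clear_bit() carries no barrier of its own, hence the helper. */
	smp_mb__before_clear_bit();
	clear_bit(EXAMPLE_BUSY, &example_flags);
}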
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index f1c30ece0f25..128de8dc893c 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -18,14 +18,23 @@
#define MACH_GROUP_ARC 3 /* Wreckstation Tyne, rPC44, possibly other */
#define MACH_GROUP_SNI_RM 4 /* Siemens Nixdorf RM series */
#define MACH_GROUP_ACN 5
-#define MACH_GROUP_SGI 6 /* Silicon Graphics workstations and servers */
+#define MACH_GROUP_SGI 6 /* Silicon Graphics */
#define MACH_GROUP_COBALT 7 /* Cobalt servers */
-#define MACH_GROUP_NEC_DDB 8 /* NEC DDB */
-#define MACH_GROUP_BAGET 9 /* Baget */
-#define MACH_GROUP_ORION 10 /* CoSine Orion */
-
-#define GROUP_NAMES { "unknown", "Jazz", "Digital", "ARC", \
- "SNI", "ACN", "SGI", "Cobalt", "NEC DDB", "Baget", "Orion" }
+#define MACH_GROUP_NEC_DDB 8 /* NEC DDB */
+#define MACH_GROUP_BAGET 9 /* Baget */
+#define MACH_GROUP_COSINE 10 /* CoSine Orion */
+#define MACH_GROUP_GALILEO 11 /* Galileo Eval Boards */
+#define MACH_GROUP_MOMENCO 12 /* Momentum Boards */
+#define MACH_GROUP_ITE 13 /* ITE Semi Eval Boards */
+#define MACH_GROUP_PHILIPS 14
+#define MACH_GROUP_GLOBESPAN 15 /* Globespan PVR Referrence Board */
+#define MACH_GROUP_SIBYTE 16 /* Sibyte Eval Boards */
+#define MACH_GROUP_TOSHIBA 17 /* Toshiba Reference Systems TSBREF */
+#define MACH_GROUP_ALCHEMY 18 /* Alchemy Semi Eval Boards*/
+
+#define GROUP_NAMES { "unknown", "Jazz", "Digital", "ARC", "SNI", "ACN", \
+ "SGI", "Cobalt", "NEC DDB", "Baget", "Cosine", "Galileo", "Momentum", \
+ "ITE", "Philips", "Globepspan", "SiByte", "Toshiba", "Alchemy" }
/*
* Valid machtype values for group unknown (low order halfword of mips_machtype)
@@ -87,9 +96,11 @@
/*
* Valid machtype for group SGI
*/
-#define MACH_SGI_INDY 0 /* R4?K and R5K Indy workstaions */
+#define MACH_SGI_INDY 0 /* R4?K and R5K Indy workstations */
+#define MACH_SGI_CHALLENGE_S 1 /* The Challenge S server */
+#define MACH_SGI_INDIGO2 2 /* The Indigo2 system */
-#define GROUP_SGI_NAMES { "Indy" }
+#define GROUP_SGI_NAMES { "Indy", "Challenge S", "Indigo2" }
/*
* Valid machtype for group COBALT
@@ -103,8 +114,9 @@
*/
#define MACH_NEC_DDB5074 0 /* NEC DDB Vrc-5074 */
#define MACH_NEC_DDB5476 1 /* NEC DDB Vrc-5476 */
+#define MACH_NEC_DDB5477 2 /* NEC DDB Vrc-5477 */
-#define GROUP_NEC_DDB_NAMES { "Vrc-5074", "Vrc-5476"}
+#define GROUP_NEC_DDB_NAMES { "Vrc-5074", "Vrc-5476", "Vrc-5477"}
/*
* Valid machtype for group BAGET
@@ -115,6 +127,74 @@
#define GROUP_BAGET_NAMES { "BT23-201", "BT23-202" }
/*
+ * Cosine boards.
+ */
+#define MACH_COSINE_ORION 0
+
+#define GROUP_COSINE_NAMES { "Orion" }
+
+/*
+ * Valid machtype for group GALILEO
+ */
+#define MACH_EV96100 0 /* EV96100 */
+#define MACH_EV64120A 1 /* EV64120A */
+
+#define GROUP_GALILEO_NAMES { "EV96100" , "EV64120A" }
+
+/*
+ * Valid machtype for group MOMENCO
+ */
+#define MACH_MOMENCO_OCELOT 0
+
+#define GROUP_MOMENCO_NAMES { "Ocelot" }
+
+
+/*
+ * Valid machtype for group ITE
+ */
+#define MACH_QED_4N_S01B 0 /* ITE8172 based eval board */
+
+#define GROUP_ITE_NAMES { "QED-4N-S01B" } /* the actual board name */
+
+/*
+ * Valid machtype for group Globespan
+ */
+#define MACH_IVR 0 /* IVR eval board */
+
+#define GROUP_GLOBESPAN_NAMES { "IVR" } /* the actual board name */
+
+/*
+ * Valid machtype for group PHILIPS
+ */
+#define MACH_PHILIPS_NINO 0 /* Nino */
+#define MACH_PHILIPS_VELO 1 /* Velo */
+
+#define GROUP_PHILIPS_NAMES { "Nino" , "Velo" }
+
+/*
+ * Valid machtype for group SIBYTE
+ */
+#define MACH_SWARM 0
+
+#define GROUP_SIBYTE_NAMES {"SWARM" }
+
+/*
+ * Valid machtypes for group Toshiba
+ */
+#define MACH_PALLAS 0
+#define MACH_TOPAS 1
+#define MACH_JMR 2
+
+#define GROUP_TOSHIBA_NAMES { "Pallas", "TopasCE", "JMR" }
+
+/*
+ * Valid machtype for group Alchemy
+ */
+#define MACH_PB1000 0 /* Au1000-based eval board */
+
+#define GROUP_ALCHEMY_NAMES { "PB1000" } /* the actual board name */
+
+/*
* Valid cputype values
*/
#define CPU_UNKNOWN 0
@@ -145,17 +225,36 @@
#define CPU_R5000A 25
#define CPU_R4640 26
#define CPU_NEVADA 27 /* RM5230, RM5260 */
-#define CPU_LAST 27
+#define CPU_RM7000 28
+#define CPU_R5432 29
+#define CPU_4KC 30
+#define CPU_5KC 31
+#define CPU_R4310 32
+#define CPU_SB1 33
+#define CPU_TX3912 34
+#define CPU_TX3922 35
+#define CPU_TX3927 36
+#define CPU_AU1000 37
+#define CPU_4KEC 37
+#define CPU_4KSC 38
+#define CPU_LAST 39
#define CPU_NAMES { "unknown", "R2000", "R3000", "R3000A", "R3041", "R3051", \
"R3052", "R3081", "R3081E", "R4000PC", "R4000SC", "R4000MC", \
"R4200", "R4400PC", "R4400SC", "R4400MC", "R4600", "R6000", \
"R6000A", "R8000", "R10000", "R4300", "R4650", "R4700", "R5000", \
- "R5000A", "R4640", "Nevada" }
+ "R5000A", "R4640", "Nevada", "RM7000", "R5432", "MIPS 4Kc", \
+ "MIPS 5Kc", "R4310", "SiByte SB1", "TX3912", "TX3922", "TX3927", \
+ "Au1000", "MIPS 4KEc", "MIPS 4KSc" }
-#define CL_SIZE (80)
+#define COMMAND_LINE_SIZE 256
-#ifndef _LANGUAGE_ASSEMBLY
+#define BOOT_MEM_MAP_MAX 32
+#define BOOT_MEM_RAM 1
+#define BOOT_MEM_ROM_DATA 2
+#define BOOT_MEM_RESERVED 3
+
+#ifndef __ASSEMBLY__
/*
* Some machine parameters passed by the bootloaders.
@@ -183,12 +282,29 @@ typedef struct mips_arc_DisplayInfo { /* video adapter information */
* values in setup.c (or whereever suitable) so they are in
* .data section
*/
-extern unsigned long mips_memory_upper;
-extern unsigned long mips_cputype;
+extern struct mips_cpu mips_cpu;
extern unsigned long mips_machtype;
extern unsigned long mips_machgroup;
extern unsigned long mips_tlb_entries;
-#endif /* _LANGUAGE_ASSEMBLY */
+/*
+ * A memory map that's built upon what was determined
+ * or specified on the command line.
+ */
+struct boot_mem_map {
+ int nr_map;
+ struct {
+ unsigned long addr; /* start of memory segment */
+ unsigned long size; /* size of memory segment */
+ long type; /* type of memory segment */
+ } map[BOOT_MEM_MAP_MAX];
+};
+
+extern struct boot_mem_map boot_mem_map;
+
+extern void add_memory_region(unsigned long start, unsigned long size,
+ long type);
+
+#endif /* !__ASSEMBLY__ */
-#endif /* __ASM_MIPS_BOOTINFO_H */
+#endif /* _ASM_BOOTINFO_H */
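A hedged sketch of how board setup code is expected to feed memory into the new map; example_setup_memory() and the sizes are made up, only add_memory_region() and the BOOT_MEM_* types come from this header:

static void example_setup_memory(void)
{
	/* Say the board has 64 MB of RAM with the first 1 MB owned by firmware. */
	add_memory_region(0x00000000, 0x00100000, BOOT_MEM_RESERVED);
	add_memory_region(0x00100000, 0x03f00000, BOOT_MEM_RAM);
}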
diff --git a/include/asm-mips/bugs.h b/include/asm-mips/bugs.h
index 7a7871dd3c57..17b94e2693cc 100644
--- a/include/asm-mips/bugs.h
+++ b/include/asm-mips/bugs.h
@@ -1,9 +1,9 @@
-/* $Id: bugs.h,v 1.4 1999/08/18 23:37:49 ralf Exp $
- *
+/*
* Copyright (C) 1995 Waldorf Electronics
* Copyright (C) 1997, 1999 Ralf Baechle
*/
#include <asm/bootinfo.h>
+#include <asm/cpu.h>
/*
* This is included by init/main.c to check for architecture-dependent bugs.
@@ -16,7 +16,7 @@
static inline void check_wait(void)
{
printk("Checking for 'wait' instruction... ");
- switch(mips_cputype) {
+ switch(mips_cpu.cputype) {
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
@@ -30,6 +30,7 @@ static inline void check_wait(void)
case CPU_R4700:
case CPU_R5000:
case CPU_NEVADA:
+ case CPU_RM7000:
cpu_wait = r4k_wait;
printk(" available.\n");
break;
diff --git a/include/asm-mips/cache.h b/include/asm-mips/cache.h
index a6c80d31ee3e..28f08756ed0e 100644
--- a/include/asm-mips/cache.h
+++ b/include/asm-mips/cache.h
@@ -1,10 +1,9 @@
-/* $Id: cache.h,v 1.4 2000/02/04 07:40:53 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1997, 1998, 1999 Ralf Baechle
+ * Copyright (C) 1997, 98, 99, 2000 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_CACHE_H
@@ -12,6 +11,23 @@
#include <linux/config.h>
+#ifndef _LANGUAGE_ASSEMBLY
+/*
+ * Descriptor for a cache
+ */
+struct cache_desc {
+ int linesz;
+ int sets;
+ int ways;
+ int flags; /* Details like write thru/back, coherent, etc. */
+};
+#endif
+
+/*
+ * Flag definitions
+ */
+#define MIPS_CACHE_NOT_PRESENT 0x00000001
+
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_R6000)
#define L1_CACHE_BYTES 16
#else
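A hedged illustration of the new cache descriptor; the numbers describe a hypothetical 16 KB, 2-way, 32-byte-line cache and are not taken from any real probe code:

static struct cache_desc example_dcache = {
	32,	/* linesz: bytes per line                  */
	256,	/* sets: 16 KB / (32 bytes * 2 ways)       */
	2,	/* ways                                    */
	0	/* flags: cache present, nothing special   */
};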
diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h
index 25e303e8cb02..a9b5d7e5471a 100644
--- a/include/asm-mips/checksum.h
+++ b/include/asm-mips/checksum.h
@@ -1,10 +1,9 @@
-/* $Id: checksum.h,v 1.8 2000/02/18 00:24:48 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 1995, 1996, 1997, 1998, 2001 by Ralf Baechle
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
@@ -23,7 +22,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
/*
* this is a new version of the above that records errors it finds in *errp,
@@ -42,9 +41,9 @@ unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-extern inline unsigned int
-csum_and_copy_to_user (const char *src, char *dst,
- int len, int sum, int *err_ptr)
+extern inline unsigned int csum_and_copy_to_user (const char *src, char *dst,
+ int len, int sum,
+ int *err_ptr)
{
sum = csum_partial(src, len, sum);
@@ -63,22 +62,23 @@ csum_and_copy_to_user (const char *src, char *dst,
* this is obsolete and will go away.
*/
#define csum_partial_copy_fromuser csum_partial_copy
-unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int sum);
-
+unsigned int csum_partial_copy(const char *src, char *dst, int len,
+ unsigned int sum);
+
/*
* Fold a partial checksum without adding pseudo headers
*/
static inline unsigned short int csum_fold(unsigned int sum)
{
- __asm__("
- .set noat
- sll $1,%0,16
- addu %0,$1
- sltu $1,%0,$1
- srl %0,%0,16
- addu %0,$1
- xori %0,0xffff
- .set at"
+ __asm__(
+ ".set\tnoat\t\t\t# csum_fold\n\t"
+ "sll\t$1,%0,16\n\t"
+ "addu\t%0,$1\n\t"
+ "sltu\t$1,%0,$1\n\t"
+ "srl\t%0,%0,16\n\t"
+ "addu\t%0,$1\n\t"
+ "xori\t%0,0xffff\n\t"
+ ".set\tat"
: "=r" (sum)
: "0" (sum)
: "$1");
@@ -93,7 +93,7 @@ static inline unsigned short int csum_fold(unsigned int sum)
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
-static inline unsigned short ip_fast_csum(unsigned char * iph,
+static inline unsigned short ip_fast_csum(unsigned char *iph,
unsigned int ihl)
{
unsigned int sum;
@@ -102,37 +102,36 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
/*
* This is for 32-bit MIPS processors.
*/
- __asm__ __volatile__("
- .set noreorder
- .set noat
- lw %0,(%1)
- subu %2,4
- #blez %2,2f
- sll %2,2 # delay slot
+ __asm__ __volatile__(
+ ".set\tnoreorder\t\t\t# ip_fast_csum\n\t"
+ ".set\tnoat\n\t"
+ "lw\t%0, (%1)\n\t"
+ "subu\t%2, 4\n\t"
+ "#blez\t%2, 2f\n\t"
+ " sll\t%2, 2\n\t"
+ "lw\t%3, 4(%1)\n\t"
+ "addu\t%2, %1\n\t"
+ "addu\t%0, %3\n\t"
+ "sltu\t$1, %0, %3\n\t"
+ "lw\t%3, 8(%1)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %3\n\t"
+ "sltu\t$1, %0, %3\n\t"
+ "lw\t%3, 12(%1)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %3\n\t"
+ "sltu\t$1, %0, %3\n\t"
+ "addu\t%0, $1\n"
- lw %3,4(%1)
- addu %2,%1 # delay slot
- addu %0,%3
- sltu $1,%0,%3
- lw %3,8(%1)
- addu %0,$1
- addu %0,%3
- sltu $1,%0,%3
- lw %3,12(%1)
- addu %0,$1
- addu %0,%3
- sltu $1,%0,%3
- addu %0,$1
+ "1:\tlw\t%3, 16(%1)\n\t"
+ "addiu\t%1, 4\n\t"
+ "addu\t%0, %3\n\t"
+ "sltu\t$1, %0, %3\n\t"
+ "bne\t%2, %1, 1b\n\t"
+ " addu\t%0, $1\n"
-1: lw %3,16(%1)
- addiu %1,4
- addu %0,%3
- sltu $1,%0,%3
- bne %2,%1,1b
- addu %0,$1 # delay slot
-
-2: .set at
- .set reorder"
+ "2:\t.set\tat\n\t"
+ ".set\treorder"
: "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy)
: "1" (iph), "2" (ihl)
: "$1");
@@ -150,28 +149,28 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
unsigned short proto,
unsigned int sum)
{
- __asm__("
- .set noat
- addu %0,%2
- sltu $1,%0,%2
- addu %0,$1
+ __asm__(
+ ".set\tnoat\t\t\t# csum_tcpudp_nofold\n\t"
+ "addu\t%0, %2\n\t"
+ "sltu\t$1, %0, %2\n\t"
+ "addu\t%0, $1\n\t"
- addu %0,%3
- sltu $1,%0,%3
- addu %0,$1
+ "addu\t%0, %3\n\t"
+ "sltu\t$1, %0, %3\n\t"
+ "addu\t%0, $1\n\t"
- addu %0,%4
- sltu $1,%0,%4
- addu %0,$1
- .set at"
+ "addu\t%0, %4\n\t"
+ "sltu\t$1, %0, %4\n\t"
+ "addu\t%0, $1\n\t"
+ ".set\tat"
: "=r" (sum)
: "0" (daddr), "r"(saddr),
#ifdef __MIPSEL__
- "r" ((ntohs(len)<<16)+proto*256),
+ "r" ((ntohs(len)<<16)+proto*256),
#else
- "r" (((proto)<<16)+len),
+ "r" (((proto)<<16)+len),
#endif
- "r"(sum)
+ "r" (sum)
: "$1");
return sum;
@@ -187,7 +186,7 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
unsigned short proto,
unsigned int sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
@@ -206,64 +205,60 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
unsigned short proto,
unsigned int sum)
{
- __asm__("
- .set noreorder
- .set noat
- addu %0,%5 # proto (long in network byte order)
- sltu $1,%0,%5
- addu %0,$1
+ __asm__(
+ ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
+ ".set\tnoat\n\t"
+ "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
+ "sltu\t$1, %0, %5\n\t"
+ "addu\t%0, $1\n\t"
- addu %0,%6 # csum
- sltu $1,%0,%6
- lw %1,0(%2) # four words source address
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "addu\t%0, %6\t\t\t# csum\n\t"
+ "sltu\t$1, %0, %6\n\t"
+ "lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,4(%2)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "lw\t%1, 4(%2)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,8(%2)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "lw\t%1, 8(%2)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,12(%2)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "lw\t%1, 12(%2)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,0(%3)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "lw\t%1, 0(%3)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,4(%3)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "lw\t%1, 4(%3)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,8(%3)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
+ "lw\t%1, 8(%3)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
- lw %1,12(%3)
- addu %0,$1
- addu %0,%1
- sltu $1,%0,$1
- .set noat
- .set noreorder"
- : "=r" (sum),
- "=r" (proto)
- : "r" (saddr),
- "r" (daddr),
- "0" (htonl(len)),
- "1" (htonl(proto)),
- "r"(sum)
- : "$1");
+ "lw\t%1, 12(%3)\n\t"
+ "addu\t%0, $1\n\t"
+ "addu\t%0, %1\n\t"
+ "sltu\t$1, %0, $1\n\t"
+ ".set\tnoat\n\t"
+ ".set\tnoreorder"
+ : "=r" (sum), "=r" (proto)
+ : "r" (saddr), "r" (daddr),
+ "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)
+ : "$1");
return csum_fold(sum);
}
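
For reference, the csum_fold() assembly above is equivalent to the following portable C: the high and low 16-bit halves are added, the carry is folded back in, and the result is complemented. This standalone version only illustrates the arithmetic and is not code from the patch:

    #include <stdio.h>

    static unsigned short csum_fold_c(unsigned int sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);     /* fold high half into low half */
            sum += sum >> 16;                       /* add back the carry, if any */
            return (unsigned short)~sum;            /* one's complement of the result */
    }

    int main(void)
    {
            /* 0x5678 + 0x1234 = 0x68ac, complemented -> 9753 */
            printf("%04x\n", csum_fold_c(0x12345678));
            return 0;
    }
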
diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h
index 4d42be6f1334..73c4a711dddd 100644
--- a/include/asm-mips/cpu.h
+++ b/include/asm-mips/cpu.h
@@ -1,40 +1,132 @@
-/* $Id: cpu.h,v 1.1 1996/06/23 09:38:33 dm Exp $
+/*
* cpu.h: Values of the PRId register used to match up
* various MIPS cpu types.
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*/
-#ifndef _MIPS_CPU_H
-#define _MIPS_CPU_H
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+#include <asm/cache.h>
+/* Assigned Company values for bits 23:16 of the PRId Register
+ (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from
+ MTI, the PRId register is defined in this (backwards compatible)
+ way:
+
+ +----------------+----------------+----------------+----------------+
+ | Company Options| Company ID | Processor ID | Revision |
+ +----------------+----------------+----------------+----------------+
+ 31 24 23 16 15 8 7
+
+ I don't have docs for all the previous processors, but my impression is
+ that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64
+ spec.
+*/
+
+#define PRID_COMP_LEGACY 0x000000
+#define PRID_COMP_MIPS 0x010000
+#define PRID_COMP_ALCHEMY 0x030000
+/*
+ * Don't know who should be here...QED and Sandcraft, maybe?
+ */
+#define PRID_COMP_SIBYTE 0x040000
/*
* Assigned values for the product ID register. In order to detect a
* certain CPU type exactly eventually additional registers may need to
- * be examined.
- */
-#define PRID_IMP_R2000 0x0100
-#define PRID_IMP_R3000 0x0200
-#define PRID_IMP_R6000 0x0300
-#define PRID_IMP_R4000 0x0400
-#define PRID_IMP_R6000A 0x0600
-#define PRID_IMP_R10000 0x0900
-#define PRID_IMP_R4300 0x0b00
-#define PRID_IMP_R8000 0x1000
-#define PRID_IMP_R4600 0x2000
-#define PRID_IMP_R4700 0x2100
-#define PRID_IMP_R4640 0x2200
-#define PRID_IMP_R4650 0x2200 /* Same as R4640 */
-#define PRID_IMP_R5000 0x2300
-#define PRID_IMP_SONIC 0x2400
-#define PRID_IMP_MAGIC 0x2500
-#define PRID_IMP_RM7000 0x2700
-#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */
-
-#define PRID_IMP_UNKNOWN 0xff00
-
-#define PRID_REV_R4400 0x0040
-#define PRID_REV_R3000A 0x0030
-#define PRID_REV_R3000 0x0020
-#define PRID_REV_R2000A 0x0010
-
-#endif /* !(_MIPS_CPU_H) */
+ * be examined. These are valid when 23:16 == PRID_COMP_LEGACY
+ */
+#define PRID_IMP_R2000 0x0100
+#define PRID_IMP_AU1000 0x0100
+#define PRID_IMP_R3000 0x0200 /* Same as R2000A */
+#define PRID_IMP_R6000 0x0300 /* Same as R3000A */
+#define PRID_IMP_R4000 0x0400
+#define PRID_IMP_R6000A 0x0600
+#define PRID_IMP_R10000 0x0900
+#define PRID_IMP_R4300 0x0b00
+#define PRID_IMP_R12000 0x0e00
+#define PRID_IMP_R8000 0x1000
+#define PRID_IMP_R4600 0x2000
+#define PRID_IMP_R4700 0x2100
+#define PRID_IMP_TX39 0x2200
+#define PRID_IMP_R4640 0x2200
+#define PRID_IMP_R4650 0x2200 /* Same as R4640 */
+#define PRID_IMP_R5000 0x2300
+#define PRID_IMP_R5432 0x5400
+#define PRID_IMP_SONIC 0x2400
+#define PRID_IMP_MAGIC 0x2500
+#define PRID_IMP_RM7000 0x2700
+#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */
+#define PRID_IMP_4KC 0x8000
+#define PRID_IMP_5KC 0x8100
+#define PRID_IMP_4KEC 0x8400
+#define PRID_IMP_4KSC 0x8600
+
+
+#define PRID_IMP_UNKNOWN 0xff00
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
+ */
+
+#define PRID_IMP_SB1 0x0100
+
+/*
+ * Definitions for 7:0 on legacy processors
+ */
+
+
+#define PRID_REV_R4400 0x0040
+#define PRID_REV_R3000A 0x0030
+#define PRID_REV_R3000 0x0020
+#define PRID_REV_R2000A 0x0010
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
+
+#ifndef _LANGUAGE_ASSEMBLY
+/*
+ * Capability and feature descriptor structure for MIPS CPU
+ */
+struct mips_cpu {
+ unsigned int processor_id;
+ unsigned int cputype; /* Old "mips_cputype" code */
+ int isa_level;
+ int options;
+ int tlbsize;
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+};
+
+#endif
+
+/*
+ * ISA Level encodings
+ */
+#define MIPS_CPU_ISA_I 0x00000001
+#define MIPS_CPU_ISA_II 0x00000002
+#define MIPS_CPU_ISA_III 0x00000003
+#define MIPS_CPU_ISA_IV 0x00000004
+#define MIPS_CPU_ISA_V 0x00000005
+#define MIPS_CPU_ISA_M32 0x00000020
+#define MIPS_CPU_ISA_M64 0x00000040
+
+/*
+ * CPU Option encodings
+ */
+#define MIPS_CPU_TLB 0x00000001 /* CPU has TLB */
+/* Leave a spare bit for variant MMU types... */
+#define MIPS_CPU_4KEX 0x00000004 /* "R4K" exception model */
+#define MIPS_CPU_4KTLB 0x00000008 /* "R4K" TLB handler */
+#define MIPS_CPU_FPU 0x00000010 /* CPU has FPU */
+#define MIPS_CPU_32FPR 0x00000020 /* 32 dbl. prec. FP registers */
+#define MIPS_CPU_COUNTER 0x00000040 /* Cycle count/compare */
+#define MIPS_CPU_WATCH 0x00000080 /* watchpoint registers */
+#define MIPS_CPU_MIPS16 0x00000100 /* code compression */
+#define MIPS_CPU_DIVEC 0x00000200 /* dedicated interrupt vector */
+#define MIPS_CPU_VCE 0x00000400 /* virt. coherence conflict possible */
+#define MIPS_CPU_CACHE_CDEX 0x00000800 /* Create_Dirty_Exclusive CACHE op */
+
+#endif /* _ASM_CPU_H */
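
A small standalone illustration of how the PRId layout documented above is decoded; the field positions and the PRID_* values come from the header, while the sample PRId value and the helper program are invented:

    #include <stdio.h>

    #define PRID_COMP_MIPS  0x010000        /* company ID field, bits 23:16 */
    #define PRID_IMP_4KC    0x8000          /* processor ID field, bits 15:8 */

    int main(void)
    {
            unsigned int prid = 0x00018001;         /* made-up sample PRId value */
            unsigned int company = prid & 0x00ff0000;
            unsigned int imp     = prid & 0x0000ff00;
            unsigned int rev     = prid & 0x000000ff;

            if (company == PRID_COMP_MIPS && imp == PRID_IMP_4KC)
                    printf("MIPS Technologies 4Kc, revision %u\n", rev);
            return 0;
    }
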
diff --git a/include/asm-mips/current.h b/include/asm-mips/current.h
index 3015ce4bd256..2c776757e74a 100644
--- a/include/asm-mips/current.h
+++ b/include/asm-mips/current.h
@@ -1,5 +1,4 @@
-/* $Id: current.h,v 1.5 1999/07/26 19:42:43 harald Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -16,21 +15,5 @@
register struct task_struct *current asm("$28");
#endif /* _LANGUAGE_C */
-#ifdef _LANGUAGE_ASSEMBLY
-
-/*
- * Special variant for use by exception handlers when the stack pointer
- * is not loaded.
- */
-#define _GET_CURRENT(reg) \
- lui reg, %hi(kernelsp); \
- .set push; \
- .set reorder; \
- lw reg, %lo(kernelsp)(reg); \
- .set pop; \
- ori reg, 8191; \
- xori reg, 8191
-
-#endif
#endif /* _ASM_CURRENT_H */
diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h
index 7628fe4a8cdb..50024e3481c1 100644
--- a/include/asm-mips/delay.h
+++ b/include/asm-mips/delay.h
@@ -1,17 +1,18 @@
-/* $Id: delay.h,v 1.2 1999/01/04 16:09:20 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 by Waldorf Electronics
- * Copyright (C) 1995 - 1998 by Ralf Baechle
+ * Copyright (C) 1995 - 1998, 2001 by Ralf Baechle
*/
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H
#include <linux/config.h>
+extern unsigned long loops_per_jiffy;
+
extern __inline__ void
__delay(unsigned long loops)
{
@@ -34,21 +35,21 @@ __delay(unsigned long loops)
* first constant multiplications gets optimized away if the delay is
* a constant)
*/
-extern __inline__ void __udelay(unsigned long usecs, unsigned long lps)
+extern __inline__ void __udelay(unsigned long usecs, unsigned long lpj)
{
unsigned long lo;
- usecs *= 0x000010c6; /* 2**32 / 1000000 */
+ usecs *= 0x00068db8; /* 2**32 / (1000000 / HZ) */
__asm__("multu\t%2,%3"
:"=h" (usecs), "=l" (lo)
- :"r" (usecs),"r" (lps));
+ :"r" (usecs),"r" (lpj));
__delay(usecs);
}
#ifdef CONFIG_SMP
#define __udelay_val cpu_data[smp_processor_id()].udelay_val
#else
-#define __udelay_val loops_per_sec
+#define __udelay_val loops_per_jiffy
#endif
#define udelay(usecs) __udelay((usecs),__udelay_val)
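
The magic constant in __udelay() is a 0.32 fixed-point scale factor: assuming HZ is 100, 0x68db8 is roughly 2^32/10000, so usecs * 0x68db8 (truncated to 32 bits) expresses the request as a fraction of one jiffy, and the multu against loops_per_jiffy keeps the upper 32 bits of the product, i.e. the number of delay loops. A standalone check of that arithmetic with invented numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int usecs = 100;               /* request: 100 microseconds */
            unsigned int lpj = 250000;              /* made-up loops_per_jiffy */

            unsigned int frac = usecs * 0x00068db8u;        /* usecs/10000 in 0.32 fixed point */
            unsigned int loops = (unsigned int)
                    (((unsigned long long)frac * lpj) >> 32);

            /* Expect about usecs * lpj / 10000, i.e. roughly 2500 delay loops. */
            printf("delay loops = %u\n", loops);
            return 0;
    }
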
diff --git a/include/asm-mips/div64.h b/include/asm-mips/div64.h
index 58a7b437e800..9ff7bd640799 100644
--- a/include/asm-mips/div64.h
+++ b/include/asm-mips/div64.h
@@ -1,4 +1,7 @@
-/* $Id: div64.h,v 1.1 2000/01/28 23:18:43 ralf Exp $
+/*
+ * include/asm-mips/div64.h
+ *
+ * Copyright (C) 2000 Maciej W. Rozycki
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -7,14 +10,104 @@
#ifndef _ASM_DIV64_H
#define _ASM_DIV64_H
+#include <asm/sgidefs.h>
+
/*
- * Hey, we're already 64-bit, no
- * need to play games..
+ * No traps on overflows for any of these...
*/
-#define do_div(n,base) ({ \
- int __res; \
- __res = ((unsigned long) n) % (unsigned) base; \
- n = ((unsigned long) n) / (unsigned) base; \
- __res; })
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2)
+
+#define do_div64_32(res, high, low, base) ({ \
+ unsigned long __quot, __mod; \
+ unsigned long __cf, __tmp, __i; \
+ \
+ __asm__(".set push\n\t" \
+ ".set noat\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
+ " li %4,0x21\n" \
+ "0:\n\t" \
+ "sll $1,%0,0x1\n\t" \
+ "srl %3,%0,0x1f\n\t" \
+ "or %0,$1,$2\n\t" \
+ "sll %1,%1,0x1\n\t" \
+ "sll %2,%2,0x1\n" \
+ "1:\n\t" \
+ "bnez %3,2f\n\t" \
+ "sltu $2,%0,%z5\n\t" \
+ "bnez $2,3f\n\t" \
+ "2:\n\t" \
+ " addiu %4,%4,-1\n\t" \
+ "subu %0,%0,%z5\n\t" \
+ "addiu %2,%2,1\n" \
+ "3:\n\t" \
+ "bnez %4,0b\n\t" \
+ " srl $2,%1,0x1f\n\t" \
+ ".set pop" \
+ : "=&r" (__mod), "=&r" (__tmp), "=&r" (__quot), "=&r" (__cf), \
+ "=&r" (__i) \
+ : "Jr" (base), "0" (high), "1" (low), "2" (0), "3" (0) \
+ /* Aarrgh! Ran out of gcc's limit on constraints... */ \
+ : "$1", "$2"); \
+ \
+ (res) = __quot; \
+ __mod; })
+
+#define do_div(n, base) ({ \
+ unsigned long long __quot; \
+ unsigned long __upper, __low, __high, __mod; \
+ \
+ __quot = (n); \
+ __high = __quot >> 32; \
+ __low = __quot; \
+ __upper = __high; \
+ \
+ if (__high) \
+ __asm__("divu $0,%z2,%z3" \
+ : "=h" (__upper), "=l" (__high) \
+ : "Jr" (__high), "Jr" (base)); \
+ \
+ __mod = do_div64_32(__low, __upper, __low, base); \
+ \
+ __quot = __high; \
+ __quot = __quot << 32 | __low; \
+ (n) = __quot; \
+ __mod; })
+
+#else
+
+#define do_div64_32(res, high, low, base) ({ \
+ unsigned long __quot, __mod, __r0; \
+ \
+ __asm__("dsll32 %2,%z3,0\n\t" \
+ "or %2,%2,%z4\n\t" \
+ "ddivu $0,%2,%z5" \
+ : "=h" (__mod), "=l" (__quot), "=&r" (__r0) \
+ : "Jr" (high), "Jr" (low), "Jr" (base)); \
+ \
+ (res) = __quot; \
+ __mod; })
+
+#define do_div(n, base) ({ \
+ unsigned long long __quot; \
+ unsigned long __mod, __r0; \
+ \
+ __quot = (n); \
+ \
+ __asm__("dsll32 %2,%M3,0\n\t" \
+ "or %2,%2,%L3\n\t" \
+ "ddivu $0,%2,%z4\n\t" \
+ "mflo %L1\n\t" \
+ "dsra32 %M1,%L1,0\n\t" \
+ "dsll32 %L1,%L1,0\n\t" \
+ "dsra32 %L1,%L1,0" \
+ : "=h" (__mod), "=r" (__quot), "=&r" (__r0) \
+ : "r" (n), "Jr" (base)); \
+ \
+ (n) = __quot; \
+ __mod; })
+
+#endif
#endif /* _ASM_DIV64_H */
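
Whichever variant is selected, do_div() divides the 64-bit value in place and hands the 32-bit remainder back as the macro's value. A host-side sketch of that calling convention, using plain C division instead of the MIPS assembly, purely to show how callers are meant to use it:

    #include <stdio.h>

    /* Same interface as the kernel macro above, portable C for illustration. */
    #define do_div_example(n, base) ({                              \
            unsigned int __rem = (unsigned int)((n) % (base));      \
            (n) /= (base);                                          \
            __rem; })

    int main(void)
    {
            unsigned long long bytes = 10ULL * 1024 * 1024 * 1024 + 123;
            unsigned int rem = do_div_example(bytes, 1024);

            /* Prints "10485760 KiB, 123 bytes left over". */
            printf("%llu KiB, %u bytes left over\n", bytes, rem);
            return 0;
    }
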
diff --git a/include/asm-mips/elf.h b/include/asm-mips/elf.h
index adaba397ae2e..9f13f7299ce4 100644
--- a/include/asm-mips/elf.h
+++ b/include/asm-mips/elf.h
@@ -25,8 +25,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
- if ((__h->e_machine != EM_MIPS) && \
- (__h->e_machine != EM_MIPS_RS4_BE)) \
+ if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_flags & EF_MIPS_ARCH) \
__res = 0; \
@@ -35,17 +34,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
})
/* This one accepts IRIX binaries. */
-#define irix_elf_check_arch(hdr) \
-({ \
- int __res = 1; \
- struct elfhdr *__h = (hdr); \
- \
- if ((__h->e_machine != EM_MIPS) && \
- (__h->e_machine != EM_MIPS_RS4_BE)) \
- __res = 0; \
- \
- __res; \
-})
+#define irix_elf_check_arch(hdr) ((hdr)->e_machine == EM_MIPS)
/*
* These are used to set parameters in the core dumps.
diff --git a/include/asm-mips/errno.h b/include/asm-mips/errno.h
index 0763d0e99e31..2956e4ce3b84 100644
--- a/include/asm-mips/errno.h
+++ b/include/asm-mips/errno.h
@@ -1,11 +1,9 @@
/*
- * include/asm-mips/errno.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1999 by Ralf Baechle
+ * Copyright (C) 1995, 1999, 2001 by Ralf Baechle
*/
#ifndef _ASM_ERRNO_H
#define _ASM_ERRNO_H
diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h
index b14cc9063f14..f7a6ada7fffb 100644
--- a/include/asm-mips/fcntl.h
+++ b/include/asm-mips/fcntl.h
@@ -1,13 +1,12 @@
-/* $Id: fcntl.h,v 1.4 1998/09/19 19:19:36 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
*/
-#ifndef __ASM_MIPS_FCNTL_H
-#define __ASM_MIPS_FCNTL_H
+#ifndef __ASM_FCNTL_H
+#define __ASM_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
located on an ext2 file system */
@@ -44,6 +43,10 @@
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_GETLK64 33 /* using 'struct flock64' */
+#define F_SETLK64 34
+#define F_SETLKW64 35
+
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
@@ -76,10 +79,19 @@ typedef struct flock {
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
- long l_sysid; /* XXXXXXXXXXXXXXXXXXXXXXXXX */
+ long l_sysid; /* ABI junk, unused on Linux */
__kernel_pid_t l_pid;
- long pad[4]; /* ZZZZZZZZZZZZZZZZZZZZZZZZZZ */
+ long pad[4]; /* ABI junk, unused on Linux */
} flock_t;
+typedef struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+} flock64_t;
+
#define F_LINUX_SPECIFIC_BASE 1024
-#endif /* __ASM_MIPS_FCNTL_H */
+
+#endif /* __ASM_FCNTL_H */
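
The new F_GETLK64/F_SETLK64/F_SETLKW64 commands pair with struct flock64 so a 32-bit program can lock byte ranges past the 2 GB mark. A sketch of such a request from userspace; whether the C library exposes these names directly depends on feature-test macros such as _LARGEFILE64_SOURCE, so treat this as an illustration rather than a portable program:

    #define _LARGEFILE64_SOURCE 1
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            struct flock64 fl = { 0 };
            int fd = open("/tmp/lockfile", O_RDWR | O_CREAT, 0644);

            fl.l_type   = F_WRLCK;          /* exclusive write lock */
            fl.l_whence = SEEK_SET;
            fl.l_start  = 0x100000000LL;    /* offset beyond 2^32 needs the 64-bit interface */
            fl.l_len    = 4096;

            if (fd < 0 || fcntl(fd, F_SETLK64, &fl) < 0)
                    perror("F_SETLK64");
            close(fd);
            return 0;
    }
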
diff --git a/include/asm-mips/fpu_emulator.h b/include/asm-mips/fpu_emulator.h
new file mode 100644
index 000000000000..70800480b87e
--- /dev/null
+++ b/include/asm-mips/fpu_emulator.h
@@ -0,0 +1,44 @@
+/*
+ * Definitions for the Algorithmics FPU Emulator port into MIPS Linux
+ */
+/**************************************************************************
+ *
+ * include/asm-mips/fpu_emulator.h
+ *
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ *************************************************************************/
+/*
+ * Further private data for which no space exists in mips_fpu_soft_struct.
+ * This should be subsumed into the mips_fpu_soft_struct structure as
+ * defined in processor.h as soon as the absurd wired absolute assembler
+ * offsets become dynamic at compile time.
+ */
+
+struct mips_fpu_emulator_private {
+ unsigned int eir;
+ struct {
+ unsigned int emulated;
+ unsigned int loads;
+ unsigned int stores;
+ unsigned int cp1ops;
+ unsigned int cp1xops;
+ unsigned int errors;
+ } stats;
+};
diff --git a/include/asm-mips/hardirq.h b/include/asm-mips/hardirq.h
index eead2fb87e42..28340dff3f55 100644
--- a/include/asm-mips/hardirq.h
+++ b/include/asm-mips/hardirq.h
@@ -1,5 +1,4 @@
-/* $Id: hardirq.h,v 1.8 2000/03/02 02:37:13 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -13,6 +12,7 @@
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/irq.h>
+#include <linux/spinlock.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
@@ -38,14 +38,60 @@ typedef struct {
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
-#define irq_enter(cpu) (local_irq_count(cpu)++)
-#define irq_exit(cpu) (local_irq_count(cpu)--)
+#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
+#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
#define synchronize_irq() barrier();
#else
-#error No habla MIPS SMP
+#include <asm/atomic.h>
+#include <linux/spinlock.h>
+#include <asm/smp.h>
+
+extern int global_irq_holder;
+extern spinlock_t global_irq_lock;
+
+static inline int irqs_running (void)
+{
+ int i;
+
+ for (i = 0; i < smp_num_cpus; i++)
+ if (local_irq_count(i))
+ return 1;
+ return 0;
+}
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (global_irq_holder == cpu) {
+ global_irq_holder = NO_PROC_ID;
+ spin_unlock(&global_irq_lock);
+ }
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ return !local_irq_count(cpu) && !spin_is_locked(&global_irq_lock);
+}
+
+#define hardirq_endlock(cpu) do { } while (0)
+
+static inline void irq_enter(int cpu, int irq)
+{
+ ++local_irq_count(cpu);
+
+ while (spin_is_locked(&global_irq_lock))
+ barrier();
+}
+
+static inline void irq_exit(int cpu, int irq)
+{
+ --local_irq_count(cpu);
+}
+
+extern void synchronize_irq(void);
#endif /* CONFIG_SMP */
#endif /* _ASM_HARDIRQ_H */
diff --git a/include/asm-mips/hdreg.h b/include/asm-mips/hdreg.h
index 189dfc55c3cb..4b90cf6bf98a 100644
--- a/include/asm-mips/hdreg.h
+++ b/include/asm-mips/hdreg.h
@@ -1,18 +1,15 @@
-/* $Id: hdreg.h,v 1.4 1998/05/08 21:05:26 davem Exp $
- *
- * linux/include/asm-mips/hdreg.h
- *
+/*
* Copyright (C) 1994-1996 Linus Torvalds & authors
+ * Copyright (C) 2001 Ralf Baechle
*/
/*
* This file contains the MIPS architecture specific IDE code.
*/
-#ifndef __ASM_MIPS_HDREG_H
-#define __ASM_MIPS_HDREG_H
-
-typedef unsigned short ide_ioreg_t;
+#ifndef _ASM_HDREG_H
+#define _ASM_HDREG_H
-#endif /* __ASM_MIPS_HDREG_H */
+typedef unsigned long ide_ioreg_t;
+#endif /* _ASM_HDREG_H */
diff --git a/include/asm-mips/hw_irq.h b/include/asm-mips/hw_irq.h
index 1bf6629b4ee8..8dfa57d9be94 100644
--- a/include/asm-mips/hw_irq.h
+++ b/include/asm-mips/hw_irq.h
@@ -1,5 +1,16 @@
-/* This exists merely to satisfy <linux/irq.h>. There is
- nothing that would go here of general interest.
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 2001 by Ralf Baechle
+ */
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
- Everything of consequence is in arch/alpha/kernel/irq_impl.h,
- to be used only in arch/alpha/kernel/. */
+/* This may not be appropriate for all machines, we'll see ... */
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
+{
+}
+
+#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-mips/ide.h b/include/asm-mips/ide.h
index ed20997b1ce3..10da72198120 100644
--- a/include/asm-mips/ide.h
+++ b/include/asm-mips/ide.h
@@ -8,16 +8,13 @@
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
-/*
- * This file contains the MIPS architecture specific IDE code.
- */
-
#ifndef __ASM_IDE_H
#define __ASM_IDE_H
#ifdef __KERNEL__
#include <linux/config.h>
+#include <asm/io.h>
#ifndef MAX_HWIFS
# ifdef CONFIG_BLK_DEV_IDEPCI
@@ -93,13 +90,21 @@ static __inline__ void ide_init_default_hwifs(void)
typedef union {
unsigned all : 8; /* all of the bits together */
struct {
+#ifdef __MIPSEB__
+ unsigned bit7 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned head : 4; /* always zeros here */
+#else
unsigned head : 4; /* always zeros here */
unsigned unit : 1; /* drive select number, 0 or 1 */
unsigned bit5 : 1; /* always 1 */
unsigned lba : 1; /* using LBA instead of CHS */
unsigned bit7 : 1; /* always 1 */
+#endif
} b;
- } select_t;
+} select_t;
static __inline__ int ide_request_irq(unsigned int irq, void (*handler)(int,void *, struct pt_regs *),
unsigned long flags, const char *device, void *dev_id)
@@ -129,11 +134,96 @@ static __inline__ void ide_release_region(ide_ioreg_t from,
ide_ops->ide_release_region(from, extent);
}
+#undef SUPPORT_VLB_SYNC
+#define SUPPORT_VLB_SYNC 0
+
+#if defined(__MIPSEB__)
+
+#define T_CHAR (0x0000) /* char: don't touch */
+#define T_SHORT (0x4000) /* short: 12 -> 21 */
+#define T_INT (0x8000) /* int: 1234 -> 4321 */
+#define T_TEXT (0xc000) /* text: 12 -> 21 */
+
+#define T_MASK_TYPE (0xc000)
+#define T_MASK_COUNT (0x3fff)
+
+#define D_CHAR(cnt) (T_CHAR | (cnt))
+#define D_SHORT(cnt) (T_SHORT | (cnt))
+#define D_INT(cnt) (T_INT | (cnt))
+#define D_TEXT(cnt) (T_TEXT | (cnt))
+
+static u_short driveid_types[] = {
+ D_SHORT(10), /* config - vendor2 */
+ D_TEXT(20), /* serial_no */
+ D_SHORT(3), /* buf_type - ecc_bytes */
+ D_TEXT(48), /* fw_rev - model */
+ D_CHAR(2), /* max_multsect - vendor3 */
+ D_SHORT(1), /* dword_io */
+ D_CHAR(2), /* vendor4 - capability */
+ D_SHORT(1), /* reserved50 */
+ D_CHAR(4), /* vendor5 - tDMA */
+ D_SHORT(4), /* field_valid - cur_sectors */
+ D_INT(1), /* cur_capacity */
+ D_CHAR(2), /* multsect - multsect_valid */
+ D_INT(1), /* lba_capacity */
+ D_SHORT(194) /* dma_1word - reservedyy */
+};
+
+#define num_driveid_types (sizeof(driveid_types)/sizeof(*driveid_types))
+
+static __inline__ void ide_fix_driveid(struct hd_driveid *id)
+{
+ u_char *p = (u_char *)id;
+ int i, j, cnt;
+ u_char t;
+
+ for (i = 0; i < num_driveid_types; i++) {
+ cnt = driveid_types[i] & T_MASK_COUNT;
+ switch (driveid_types[i] & T_MASK_TYPE) {
+ case T_CHAR:
+ p += cnt;
+ break;
+ case T_SHORT:
+ for (j = 0; j < cnt; j++) {
+ t = p[0];
+ p[0] = p[1];
+ p[1] = t;
+ p += 2;
+ }
+ break;
+ case T_INT:
+ for (j = 0; j < cnt; j++) {
+ t = p[0];
+ p[0] = p[3];
+ p[3] = t;
+ t = p[1];
+ p[1] = p[2];
+ p[2] = t;
+ p += 4;
+ }
+ break;
+ case T_TEXT:
+ for (j = 0; j < cnt; j += 2) {
+ t = p[0];
+ p[0] = p[1];
+ p[1] = t;
+ p += 2;
+ }
+ break;
+ };
+ }
+}
+
+#else /* !defined(__MIPSEB__) */
+
+#define ide_fix_driveid(id) do {} while (0)
+
+#endif
+
/*
* The following are not needed for the non-m68k ports
*/
#define ide_ack_intr(hwif) (1)
-#define ide_fix_driveid(id) do {} while (0)
#define ide_release_lock(lock) do {} while (0)
#define ide_get_lock(lock, hdlr, data) do {} while (0)
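
Each driveid_types[] entry above packs a field type into the top two bits and an element count into the remaining fourteen; ide_fix_driveid() walks struct hd_driveid with that table and byte-swaps 16-bit and 32-bit fields while leaving byte and text fields untouched. A standalone illustration of the encoding and of the 16-bit swap, with invented sample data:

    #include <stdio.h>

    #define T_SHORT         (0x4000)
    #define T_MASK_TYPE     (0xc000)
    #define T_MASK_COUNT    (0x3fff)
    #define D_SHORT(cnt)    (T_SHORT | (cnt))

    int main(void)
    {
            unsigned short entry = D_SHORT(10);     /* "ten 16-bit words follow" */
            unsigned char buf[2] = { 0x34, 0x12 };  /* 0x1234 stored little-endian */
            unsigned char t;

            printf("type %#x, count %u\n",
                   entry & T_MASK_TYPE, entry & T_MASK_COUNT);

            /* The T_SHORT case swaps each byte pair in place, as above. */
            t = buf[0]; buf[0] = buf[1]; buf[1] = t;
            printf("swapped: %02x %02x\n", buf[0], buf[1]);         /* 12 34 */
            return 0;
    }
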
diff --git a/include/asm-mips/inst.h b/include/asm-mips/inst.h
index 8d7328f6d3d6..6ad517241768 100644
--- a/include/asm-mips/inst.h
+++ b/include/asm-mips/inst.h
@@ -5,10 +5,10 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996 by Ralf Baechle
+ * Copyright (C) 1996, 2000 by Ralf Baechle
*/
-#ifndef __ASM_MIPS_INST_H
-#define __ASM_MIPS_INST_H
+#ifndef _ASM_INST_H
+#define _ASM_INST_H
/*
* Major opcodes; before MIPS IV cop1x was called cop3.
@@ -21,7 +21,7 @@ enum major_op {
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
daddi_op, daddiu_op, ldl_op, ldr_op,
- major_1c_op, major_1d_op, major_1e_op, major_1f_op,
+ major_1c_op, jalx_op, major_1e_op, major_1f_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
@@ -80,6 +80,13 @@ enum cop_op {
};
/*
+ * rt field of cop.bc_op opcodes
+ */
+enum bcop_op {
+ bcf_op, bct_op, bcfl_op, bctl_op
+};
+
+/*
* func field of cop0 coi opcodes.
*/
enum cop0_coi_func {
@@ -301,4 +308,64 @@ union mips_instruction {
struct ma_format ma_format;
};
-#endif /* __ASM_MIPS_INST_H */
+/* HACHACHAHCAHC ... */
+
+/* In case some other massaging is needed, keep MIPSInst as wrapper */
+
+#define MIPSInst(x) x
+
+#define I_OPCODE_SFT 26
+#define MIPSInst_OPCODE(x) (MIPSInst(x) >> I_OPCODE_SFT)
+
+#define I_JTARGET_SFT 0
+#define MIPSInst_JTARGET(x) (MIPSInst(x) & 0x03ffffff)
+
+#define I_RS_SFT 21
+#define MIPSInst_RS(x) ((MIPSInst(x) & 0x03e00000) >> I_RS_SFT)
+
+#define I_RT_SFT 16
+#define MIPSInst_RT(x) ((MIPSInst(x) & 0x001f0000) >> I_RT_SFT)
+
+#define I_IMM_SFT 0
+#define MIPSInst_SIMM(x) ((int)((short)(MIPSInst(x) & 0xffff)))
+#define MIPSInst_UIMM(x) (MIPSInst(x) & 0xffff)
+
+#define I_CACHEOP_SFT 18
+#define MIPSInst_CACHEOP(x) ((MIPSInst(x) & 0x001c0000) >> I_CACHEOP_SFT)
+
+#define I_CACHESEL_SFT 16
+#define MIPSInst_CACHESEL(x) ((MIPSInst(x) & 0x00030000) >> I_CACHESEL_SFT)
+
+#define I_RD_SFT 11
+#define MIPSInst_RD(x) ((MIPSInst(x) & 0x0000f800) >> I_RD_SFT)
+
+#define I_RE_SFT 6
+#define MIPSInst_RE(x) ((MIPSInst(x) & 0x000007c0) >> I_RE_SFT)
+
+#define I_FUNC_SFT 0
+#define MIPSInst_FUNC(x) (MIPSInst(x) & 0x0000003f)
+
+#define I_FFMT_SFT 21
+#define MIPSInst_FFMT(x) ((MIPSInst(x) & 0x01e00000) >> I_FFMT_SFT)
+
+#define I_FT_SFT 16
+#define MIPSInst_FT(x) ((MIPSInst(x) & 0x001f0000) >> I_FT_SFT)
+
+#define I_FS_SFT 11
+#define MIPSInst_FS(x) ((MIPSInst(x) & 0x0000f800) >> I_FS_SFT)
+
+#define I_FD_SFT 6
+#define MIPSInst_FD(x) ((MIPSInst(x) & 0x000007c0) >> I_FD_SFT)
+
+#define I_FR_SFT 21
+#define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT)
+
+#define I_FMA_FUNC_SFT 2
+#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT)
+
+#define I_FMA_FFMT_SFT 0
+#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003)
+
+typedef unsigned int mips_instruction;
+
+#endif /* _ASM_INST_H */
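
The MIPSInst_* accessors above simply mask and shift fields out of a 32-bit instruction word. A tiny standalone decode of the word 0x00851021, which encodes addu $2,$4,$5; the macro bodies are copied from the header and the rest is illustration:

    #include <stdio.h>

    #define MIPSInst(x)          x
    #define MIPSInst_OPCODE(x)   (MIPSInst(x) >> 26)
    #define MIPSInst_RS(x)       ((MIPSInst(x) & 0x03e00000) >> 21)
    #define MIPSInst_RT(x)       ((MIPSInst(x) & 0x001f0000) >> 16)
    #define MIPSInst_RD(x)       ((MIPSInst(x) & 0x0000f800) >> 11)
    #define MIPSInst_FUNC(x)     (MIPSInst(x) & 0x0000003f)

    int main(void)
    {
            unsigned int insn = 0x00851021;         /* addu $2,$4,$5 */

            /* Prints: opcode 0 rs 4 rt 5 rd 2 func 0x21 */
            printf("opcode %u rs %u rt %u rd %u func %#x\n",
                   MIPSInst_OPCODE(insn), MIPSInst_RS(insn), MIPSInst_RT(insn),
                   MIPSInst_RD(insn), MIPSInst_FUNC(insn));
            return 0;
    }
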
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 3af11a591730..2c96842a6277 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -1,5 +1,4 @@
-/* $Id: io.h,v 1.13 2000/02/24 00:13:19 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -7,16 +6,38 @@
* Copyright (C) 1994, 1995 Waldorf GmbH
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 FSMLabs, Inc.
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#include <linux/config.h>
+#include <linux/pagemap.h>
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+
/*
* Slowdown I/O port space accesses for antique hardware.
*/
#undef CONF_SLOWDOWN_IO
-#include <asm/addrspace.h>
+/*
+ * Sane hardware offers swapping of I/O space accesses in hardware; less
+ * sane hardware forces software to fiddle with this ...
+ */
+#if defined(CONFIG_SWAP_IO_SPACE) && defined(__MIPSEB__)
+
+#define __ioswab8(x) (x)
+#define __ioswab16(x) swab16(x)
+#define __ioswab32(x) swab32(x)
+
+#else
+
+#define __ioswab8(x) (x)
+#define __ioswab16(x) (x)
+#define __ioswab32(x) (x)
+
+#endif
/*
* This file contains the definitions for the MIPS counterpart of the
@@ -91,9 +112,6 @@ extern inline void * phys_to_virt(unsigned long address)
return (void *)KSEG0ADDR(address);
}
-extern void * ioremap(unsigned long phys_addr, unsigned long size);
-extern void iounmap(void *addr);
-
/*
* IO bus memory addresses are also 1:1 with the physical address
*/
@@ -113,54 +131,35 @@ extern inline void * bus_to_virt(unsigned long address)
*/
extern unsigned long isa_slot_offset;
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- *
- * On MIPS, we have the whole physical address space mapped at all
- * times, so "ioremap()" and "iounmap()" do not need to do anything.
- * (This isn't true for all machines but we still handle these cases
- * with wired TLB entries anyway ...)
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- */
-extern inline void * ioremap(unsigned long offset, unsigned long size)
-{
- return (void *) KSEG1ADDR(offset);
-}
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
+extern inline void *ioremap(unsigned long offset, unsigned long size)
{
- return (void *) KSEG1ADDR(offset);
+ return __ioremap(offset, size, _CACHE_UNCACHED);
}
-extern inline void iounmap(void *addr)
+extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
+ return __ioremap(offset, size, _CACHE_UNCACHED);
}
+extern void iounmap(void *addr);
+
/*
* XXX We need system specific versions of these to handle EISA address bits
* 24-31 on SNI.
* XXX more SNI hacks.
*/
#define readb(addr) (*(volatile unsigned char *)(addr))
-#define readw(addr) (*(volatile unsigned short *)(addr))
-#define readl(addr) (*(volatile unsigned int *)(addr))
+#define readw(addr) __ioswab16((*(volatile unsigned short *)(addr)))
+#define readl(addr) __ioswab32((*(volatile unsigned int *)(addr)))
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define writeb(b,addr) (*(volatile unsigned char *)(addr)) = (b)
-#define writew(b,addr) (*(volatile unsigned short *)(addr)) = (b)
-#define writel(b,addr) (*(volatile unsigned int *)(addr)) = (b)
+#define writew(b,addr) (*(volatile unsigned short *)(addr)) = (__ioswab16(b))
+#define writel(b,addr) (*(volatile unsigned int *)(addr)) = (__ioswab32(b))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
@@ -226,12 +225,12 @@ extern inline void __out##s(unsigned int value, unsigned int port) {
#define __OUT2(m) \
__asm__ __volatile__ ("s" #m "\t%0,%1(%2)"
-#define __OUT(m,s) \
-__OUT1(s) __OUT2(m) : : "r" (value), "i" (0), "r" (mips_io_port_base+port)); } \
-__OUT1(s##c) __OUT2(m) : : "r" (value), "ir" (port), "r" (mips_io_port_base)); } \
-__OUT1(s##_p) __OUT2(m) : : "r" (value), "i" (0), "r" (mips_io_port_base+port)); \
+#define __OUT(m,s,w) \
+__OUT1(s) __OUT2(m) : : "r" (__ioswab##w(value)), "i" (0), "r" (mips_io_port_base+port)); } \
+__OUT1(s##c) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io_port_base)); } \
+__OUT1(s##_p) __OUT2(m) : : "r" (__ioswab##w(value)), "i" (0), "r" (mips_io_port_base+port)); \
SLOW_DOWN_IO; } \
-__OUT1(s##c_p) __OUT2(m) : : "r" (value), "ir" (port), "r" (mips_io_port_base)); \
+__OUT1(s##c_p) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io_port_base)); \
SLOW_DOWN_IO; }
#define __IN1(t,s) \
@@ -243,11 +242,11 @@ extern __inline__ t __in##s(unsigned int port) { t _v;
#define __IN2(m) \
__asm__ __volatile__ ("l" #m "\t%0,%1(%2)"
-#define __IN(t,m,s) \
-__IN1(t,s) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); return _v; } \
-__IN1(t,s##c) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); return _v; } \
-__IN1(t,s##_p) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); SLOW_DOWN_IO; return _v; } \
-__IN1(t,s##c_p) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); SLOW_DOWN_IO; return _v; }
+#define __IN(t,m,s,w) \
+__IN1(t,s) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); return __ioswab##w(_v); } \
+__IN1(t,s##c) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); return __ioswab##w(_v); } \
+__IN1(t,s##_p) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); SLOW_DOWN_IO; return __ioswab##w(_v); } \
+__IN1(t,s##c_p) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); SLOW_DOWN_IO; return __ioswab##w(_v); }
#define __INS1(s) \
extern inline void __ins##s(unsigned int port, void * addr, unsigned long count) {
@@ -268,11 +267,13 @@ __asm__ __volatile__ ( \
#define __INS(m,s,i) \
__INS1(s) __INS2(m) \
: "=r" (addr), "=r" (count) \
- : "0" (addr), "1" (count), "i" (0), "r" (mips_io_port_base+port), "I" (i) \
+ : "0" (addr), "1" (count), "i" (0), \
+ "r" (mips_io_port_base+port), "I" (i) \
: "$1");} \
__INS1(s##c) __INS2(m) \
: "=r" (addr), "=r" (count) \
- : "0" (addr), "1" (count), "ir" (port), "r" (mips_io_port_base), "I" (i) \
+ : "0" (addr), "1" (count), "ir" (port), \
+ "r" (mips_io_port_base), "I" (i) \
: "$1");}
#define __OUTS1(s) \
@@ -301,13 +302,13 @@ __OUTS1(s##c) __OUTS2(m) \
: "0" (addr), "1" (count), "ir" (port), "r" (mips_io_port_base), "I" (i) \
: "$1");}
-__IN(unsigned char,b,b)
-__IN(unsigned short,h,w)
-__IN(unsigned int,w,l)
+__IN(unsigned char,b,b,8)
+__IN(unsigned short,h,w,16)
+__IN(unsigned int,w,l,32)
-__OUT(b,b)
-__OUT(h,w)
-__OUT(w,l)
+__OUT(b,b,8)
+__OUT(h,w,16)
+__OUT(w,l,32)
__INS(b,b,1)
__INS(h,w,2)
@@ -317,6 +318,7 @@ __OUTS(b,b,1)
__OUTS(h,w,2)
__OUTS(w,l,4)
+
/*
* Note that due to the way __builtin_constant_p() works, you
* - can't use it inside an inline function (it will never be true)
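
The net effect of the CONFIG_SWAP_IO_SPACE path above is that readw()/writew() and the port accessors pass their data through swab16()/swab32() on big-endian kernels. A standalone, host-side illustration of what the 16-bit swap does to a register value (not kernel code):

    #include <stdio.h>

    /* Same transform as __ioswab16()/swab16() on the byte-swapping path. */
    static unsigned short swab16_example(unsigned short x)
    {
            return (unsigned short)((x << 8) | (x >> 8));
    }

    int main(void)
    {
            unsigned short raw = 0x3412;    /* value as the little-endian device presents it */

            /* A big-endian kernel built with CONFIG_SWAP_IO_SPACE would see: */
            printf("readw -> %#06x\n", swab16_example(raw));        /* 0x1234 */
            return 0;
    }
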
diff --git a/include/asm-mips/ioctl.h b/include/asm-mips/ioctl.h
index 360b22ce337e..ab54abc3d6db 100644
--- a/include/asm-mips/ioctl.h
+++ b/include/asm-mips/ioctl.h
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1996 by Ralf Baechle
+ * Copyright (C) 1995, 1996, 2001 by Ralf Baechle
*/
#ifndef __ASM_MIPS_IOCTL_H
#define __ASM_MIPS_IOCTL_H
@@ -40,11 +40,6 @@
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
/*
- * We to additionally limit parameters to a maximum 255 bytes.
- */
-#define _IOC_SLMASK 0xff
-
-/*
* Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
* And this turns out useful to catch old ioctl numbers in header
* files for us.
@@ -65,7 +60,7 @@
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
- (((size) & _IOC_SLMASK) << _IOC_SIZESHIFT))
+ ((size) << _IOC_SIZESHIFT))
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
diff --git a/include/asm-mips/ioctls.h b/include/asm-mips/ioctls.h
index 5e268a1c5787..59689c43076d 100644
--- a/include/asm-mips/ioctls.h
+++ b/include/asm-mips/ioctls.h
@@ -58,6 +58,7 @@
#define FIONCLEX 0x6602 /* these numbers need to be adjusted. */
#define FIOASYNC 0x667d
#define FIONBIO 0x667e
+#define FIOQSIZE 0x667f
#if defined(__USE_MISC) || defined (__KERNEL__)
#define TIOCGLTC (tIOC | 116) /* get special local chars */
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index c7bafd3c879f..f89e071c355a 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -1,25 +1,39 @@
-/* $Id: irq.h,v 1.6 2000/01/26 00:07:45 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
- * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 2001 by Ralf Baechle
*/
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
-#define NR_IRQS 64
+#include <linux/config.h>
+
+#define NR_IRQS 64 /* Largest number of ints of all machines. */
#define TIMER_IRQ 0
-extern int (*irq_cannonicalize)(int irq);
+#ifdef CONFIG_I8259
+static inline int irq_cannonicalize(int irq)
+{
+ return ((irq == 2) ? 9 : irq);
+}
+#else
+#define irq_cannonicalize(irq) (irq) /* Sane hardware, sane code ... */
+#endif
struct irqaction;
extern int i8259_setup_irq(int irq, struct irqaction * new);
extern void disable_irq(unsigned int);
+
+#ifdef CONFIG_ROTTEN_IRQ
#define disable_irq_nosync disable_irq
+#else
+extern void disable_irq_nosync(unsigned int);
+#endif
+
extern void enable_irq(unsigned int);
/* Machine specific interrupt initialization */
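
The i8259 flavour of irq_cannonicalize() follows the PC convention that the slave controller is cascaded through IRQ 2, so a request for IRQ 2 is redirected to IRQ 9 while everything else passes through unchanged. A trivial standalone check of that mapping:

    #include <stdio.h>

    static int irq_cannonicalize_example(int irq)
    {
            return (irq == 2) ? 9 : irq;    /* i8259 cascade line */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   irq_cannonicalize_example(2),    /* 9 */
                   irq_cannonicalize_example(9),    /* 9 */
                   irq_cannonicalize_example(5));   /* 5 */
            return 0;
    }
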
diff --git a/include/asm-mips/jazzdma.h b/include/asm-mips/jazzdma.h
index 634cfecd38af..0a205b77e505 100644
--- a/include/asm-mips/jazzdma.h
+++ b/include/asm-mips/jazzdma.h
@@ -1,29 +1,28 @@
/*
* Helpfile for jazzdma.c -- Mips Jazz R4030 DMA controller support
- *
- * $Id:$
*/
-#ifndef __ASM_MIPS_JAZZDMA_H
-#define __ASM_MIPS_JAZZDMA_H
+#ifndef _ASM_JAZZDMA_H
+#define _ASM_JAZZDMA_H
/*
* Prototypes and macros
*/
-unsigned long vdma_init(unsigned long memory_start, unsigned long memory_end);
-unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
-int vdma_free(unsigned long laddr);
-int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size);
-unsigned long vdma_phys2log(unsigned long paddr);
-unsigned long vdma_log2phys(unsigned long laddr);
-void vdma_stats(void); /* for debugging only */
+extern void vdma_init(void);
+extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
+extern int vdma_free(unsigned long laddr);
+extern int vdma_remap(unsigned long laddr, unsigned long paddr,
+ unsigned long size);
+extern unsigned long vdma_phys2log(unsigned long paddr);
+extern unsigned long vdma_log2phys(unsigned long laddr);
+extern void vdma_stats(void); /* for debugging only */
-void vdma_enable(int channel);
-void vdma_disable(int channel);
-void vdma_set_mode(int channel, int mode);
-void vdma_set_addr(int channel, long addr);
-void vdma_set_count(int channel, int count);
-int vdma_get_residue(int channel);
-int vdma_get_enable(int channel);
+extern void vdma_enable(int channel);
+extern void vdma_disable(int channel);
+extern void vdma_set_mode(int channel, int mode);
+extern void vdma_set_addr(int channel, long addr);
+extern void vdma_set_count(int channel, int count);
+extern int vdma_get_residue(int channel);
+extern int vdma_get_enable(int channel);
/*
* some definitions used by the driver functions
@@ -49,8 +48,7 @@ int vdma_get_enable(int channel);
/*
* VDMA pagetable entry description
*/
-typedef volatile struct VDMA_PGTBL_ENTRY
-{
+typedef volatile struct VDMA_PGTBL_ENTRY {
unsigned int frame; /* physical frame no. */
unsigned int owner; /* owner of this entry (0=free) */
} VDMA_PGTBL_ENTRY;
@@ -95,4 +93,4 @@ typedef volatile struct VDMA_PGTBL_ENTRY
#define R4030_MODE_BURST (1<<6) /* Rev. 2 only */
#define R4030_MODE_FAST_ACK (1<<7) /* Rev. 2 only */
-#endif /* __ASM_MIPS_JAZZDMA_H */
+#endif /* _ASM_JAZZDMA_H */
diff --git a/include/asm-mips/keyboard.h b/include/asm-mips/keyboard.h
index 7baed2d8c0b1..54d6d07fa6fb 100644
--- a/include/asm-mips/keyboard.h
+++ b/include/asm-mips/keyboard.h
@@ -13,10 +13,13 @@
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/config.h>
#include <asm/bootinfo.h>
#define DISABLE_KBD_DURING_INTERRUPTS 0
+#ifdef CONFIG_PC_KEYB
+
extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int pckbd_getkeycode(unsigned int scancode);
extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
@@ -35,6 +38,19 @@ extern void kbd_forward_char (int ch);
#define kbd_init_hw pckbd_init_hw
#define kbd_sysrq_xlate pckbd_sysrq_xlate
+#else
+
+extern int kbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int kbd_getkeycode(unsigned int scancode);
+extern int kbd_translate(unsigned char scancode, unsigned char *keycode,
+ char raw_mode);
+extern char kbd_unexpected_up(unsigned char keycode);
+extern void kbd_leds(unsigned char leds);
+extern void kbd_init_hw(void);
+extern unsigned char *kbd_sysrq_xlate;
+
+#endif
+
#define SYSRQ_KEY 0x54
/* Some stoneage hardware needs delays after some operations. */
diff --git a/include/asm-mips/mc146818rtc.h b/include/asm-mips/mc146818rtc.h
index f521b7239ad3..6e23e432f335 100644
--- a/include/asm-mips/mc146818rtc.h
+++ b/include/asm-mips/mc146818rtc.h
@@ -14,8 +14,12 @@
#include <asm/io.h>
#ifndef RTC_PORT
+#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
+#define RTC_PORT(x) (0x14014800 + (x))
+#else
#define RTC_PORT(x) (0x70 + (x))
#endif
+#endif
/*
 * The machines supported so far all access the RTC index register via
@@ -45,8 +49,13 @@ extern struct rtc_ops *rtc_ops;
#ifdef CONFIG_DECSTATION
#define RTC_IRQ 0
+#elif defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
+#include <asm/it8172/it8172_int.h>
+#define RTC_IRQ IT8172_RTC_IRQ
#else
#define RTC_IRQ 8
#endif
+#define RTC_DEC_YEAR 0x3f /* Where we store the real year on DECs. */
+
#endif /* _ASM_MC146818RTC_H */
diff --git a/include/asm-mips/mips32_cache.h b/include/asm-mips/mips32_cache.h
new file mode 100644
index 000000000000..2de18bd7cb71
--- /dev/null
+++ b/include/asm-mips/mips32_cache.h
@@ -0,0 +1,288 @@
+/*
+ * mips32_cache.h
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * Inline assembly cache operations.
+ *
+ * This file is the original r4cache.c file with modification that makes the
+ * cache handling more generic.
+ *
+ * FIXME: Handle split L2 caches.
+ *
+ */
+#ifndef _MIPS_R4KCACHE_H
+#define _MIPS_R4KCACHE_H
+
+#include <asm/asm.h>
+#include <asm/cacheops.h>
+
+extern inline void flush_icache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Index_Invalidate_I));
+}
+
+extern inline void flush_dcache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Index_Writeback_Inv_D));
+}
+
+extern inline void flush_scache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Index_Writeback_Inv_SD));
+}
+
+extern inline void flush_icache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_I));
+}
+
+extern inline void flush_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Writeback_Inv_D));
+}
+
+extern inline void invalidate_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_D));
+}
+
+extern inline void invalidate_scache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_SD));
+}
+
+extern inline void flush_scache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache %1, (%0)\n\t"
+ ".set mips0\n\t"
+ ".set reorder"
+ :
+ : "r" (addr),
+ "i" (Hit_Writeback_Inv_SD));
+}
+
+/*
+ * The next two are for badland addresses like signal trampolines.
+ */
+extern inline void protected_flush_icache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n"
+ "1:\tcache %1,(%0)\n"
+ "2:\t.set mips0\n\t"
+ ".set reorder\n\t"
+ ".section\t__ex_table,\"a\"\n\t"
+ STR(PTR)"\t1b,2b\n\t"
+ ".previous"
+ :
+ : "r" (addr),
+ "i" (Hit_Invalidate_I));
+}
+
+extern inline void protected_writeback_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ ".set mips3\n"
+ "1:\tcache %1,(%0)\n"
+ "2:\t.set mips0\n\t"
+ ".set reorder\n\t"
+ ".section\t__ex_table,\"a\"\n\t"
+ STR(PTR)"\t1b,2b\n\t"
+ ".previous"
+ :
+ : "r" (addr),
+ "i" (Hit_Writeback_D));
+}
+
+#define cache_unroll(base,op) \
+ __asm__ __volatile__(" \
+ .set noreorder; \
+ .set mips3; \
+ cache %1, (%0); \
+ .set mips0; \
+ .set reorder" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+
+extern inline void blast_dcache(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + dcache_size);
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_D);
+ start += dc_lsize;
+ }
+}
+
+extern inline void blast_dcache_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Hit_Writeback_Inv_D);
+ start += dc_lsize;
+ }
+}
+
+extern inline void blast_dcache_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_D);
+ start += dc_lsize;
+ }
+}
+
+extern inline void blast_icache(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + icache_size);
+
+ while(start < end) {
+ cache_unroll(start,Index_Invalidate_I);
+ start += ic_lsize;
+ }
+}
+
+extern inline void blast_icache_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Hit_Invalidate_I);
+ start += ic_lsize;
+ }
+}
+
+extern inline void blast_icache_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache_unroll(start,Index_Invalidate_I);
+ start += ic_lsize;
+ }
+}
+
+extern inline void blast_scache(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = KSEG0 + scache_size;
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_SD);
+ start += sc_lsize;
+ }
+}
+
+extern inline void blast_scache_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache_unroll(start,Hit_Writeback_Inv_SD);
+ start += sc_lsize;
+ }
+}
+
+extern inline void blast_scache_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_SD);
+ start += sc_lsize;
+ }
+}
+
+#endif /* !(_MIPS_R4KCACHE_H) */
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index 4991babbde0a..0c67d0d127ac 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1,14 +1,16 @@
-/* $Id: mipsregs.h,v 1.6 1999/07/26 19:42:43 harald Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994, 1995, 1996, 1997 by Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996, 1997, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2000 Silicon Graphics, Inc.
* Modified for further R[236]000 support by Paul M. Antoine, 1996.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
-#ifndef __ASM_MIPS_MIPSREGS_H
-#define __ASM_MIPS_MIPSREGS_H
+#ifndef _ASM_MIPSREGS_H
+#define _ASM_MIPSREGS_H
#include <linux/linkage.h>
@@ -70,6 +72,12 @@
#define CP0_IWATCH $18
#define CP0_DWATCH $19
+/*
+ * Coprocessor 0 Set 1 register names
+ */
+#define CP0_S1_DERRADDR0 $26
+#define CP0_S1_DERRADDR1 $27
+#define CP0_S1_INTCONTROL $20
/*
* Coprocessor 1 (FPU) register names
*/
@@ -77,6 +85,58 @@
#define CP1_STATUS $31
/*
+ * FPU Status Register Values
+ */
+/*
+ * Status Register Values
+ */
+
+#define FPU_CSR_FLUSH 0x01000000 /* flush denormalised results to 0 */
+#define FPU_CSR_COND 0x00800000 /* $fcc0 */
+#define FPU_CSR_COND0 0x00800000 /* $fcc0 */
+#define FPU_CSR_COND1 0x02000000 /* $fcc1 */
+#define FPU_CSR_COND2 0x04000000 /* $fcc2 */
+#define FPU_CSR_COND3 0x08000000 /* $fcc3 */
+#define FPU_CSR_COND4 0x10000000 /* $fcc4 */
+#define FPU_CSR_COND5 0x20000000 /* $fcc5 */
+#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
+#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
+
+/*
+ * X the exception cause indicator
+ * E the exception enable
+ * S the sticky/flag bit
+ */
+#define FPU_CSR_ALL_X 0x0003f000
+#define FPU_CSR_UNI_X 0x00020000
+#define FPU_CSR_INV_X 0x00010000
+#define FPU_CSR_DIV_X 0x00008000
+#define FPU_CSR_OVF_X 0x00004000
+#define FPU_CSR_UDF_X 0x00002000
+#define FPU_CSR_INE_X 0x00001000
+
+#define FPU_CSR_ALL_E 0x00000f80
+#define FPU_CSR_INV_E 0x00000800
+#define FPU_CSR_DIV_E 0x00000400
+#define FPU_CSR_OVF_E 0x00000200
+#define FPU_CSR_UDF_E 0x00000100
+#define FPU_CSR_INE_E 0x00000080
+
+#define FPU_CSR_ALL_S 0x0000007c
+#define FPU_CSR_INV_S 0x00000040
+#define FPU_CSR_DIV_S 0x00000020
+#define FPU_CSR_OVF_S 0x00000010
+#define FPU_CSR_UDF_S 0x00000008
+#define FPU_CSR_INE_S 0x00000004
+
+/* rounding mode */
+#define FPU_CSR_RN 0x0 /* nearest */
+#define FPU_CSR_RZ 0x1 /* towards zero */
+#define FPU_CSR_RU 0x2 /* towards +Infinity */
+#define FPU_CSR_RD 0x3 /* towards -Infinity */
+
+
+/*
* Values for PageMask register
*/
#define PM_4K 0x00000000
@@ -111,6 +171,16 @@
: "=r" (__res)); \
__res;})
+#define read_32bit_cp0_set1_register(source) \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\treorder\n\t" \
+ "cfc0\t%0,"STR(source)"\n\t" \
+ ".set\tpop" \
+ : "=r" (__res)); \
+ __res;})
+
/*
* For now use this only with interrupts disabled!
*/
@@ -129,12 +199,36 @@
"nop" \
: : "r" (value));
+#define write_32bit_cp0_set1_register(register,value) \
+ __asm__ __volatile__( \
+ "ctc0\t%0,"STR(register)"\n\t" \
+ "nop" \
+ : : "r" (value));
+
#define write_64bit_cp0_register(register,value) \
__asm__ __volatile__( \
".set\tmips3\n\t" \
"dmtc0\t%0,"STR(register)"\n\t" \
".set\tmips0" \
: : "r" (value))
+
+#ifdef CONFIG_CPU_MIPS32
+/*
+ * This should be changed when we get a compiler that support the MIPS32 ISA.
+ */
+#define read_mips32_cp0_config1() \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ ".set\tnoat\n\t" \
+ ".word\t0x40018001\n\t" \
+ "move\t%0,$1\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ :"=r" (__res)); \
+ __res;})
+#endif
+
/*
* R4x00 interrupt enable / cause bits
*/
@@ -166,7 +260,31 @@
*/
#define __BUILD_SET_CP0(name,register) \
extern __inline__ unsigned int \
-set_cp0_##name(unsigned int change, unsigned int new) \
+set_cp0_##name(unsigned int set) \
+{ \
+ unsigned int res; \
+ \
+ res = read_32bit_cp0_register(register); \
+ res |= set; \
+ write_32bit_cp0_register(register, res); \
+ \
+ return res; \
+} \
+ \
+extern __inline__ unsigned int \
+clear_cp0_##name(unsigned int clear) \
+{ \
+ unsigned int res; \
+ \
+ res = read_32bit_cp0_register(register); \
+ res &= ~clear; \
+ write_32bit_cp0_register(register, res); \
+ \
+ return res; \
+} \
+ \
+extern __inline__ unsigned int \
+change_cp0_##name(unsigned int change, unsigned int new) \
{ \
unsigned int res; \
\
@@ -186,42 +304,6 @@ __BUILD_SET_CP0(config,CP0_CONFIG)
#endif /* defined (_LANGUAGE_ASSEMBLY) */
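For callers, the one macro now yields three helpers per register instead of the old two-argument form. Assuming the header also instantiates __BUILD_SET_CP0(status, CP0_STATUS) (only the config instance is visible in these hunks), typical use looks like this; irq_example is just an illustrative name:

static inline void irq_example(void)
{
	set_cp0_status(ST0_IE);			/* turn bits on          */
	clear_cp0_status(ST0_IE);		/* turn bits off         */
	change_cp0_status(ST0_IM, STATUSF_IP7);	/* rewrite a whole field */
}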
/*
- * Inline code for use of the ll and sc instructions
- *
- * FIXME: This instruction is only available on MIPS ISA >=2.
- * Since these operations are only being used for atomic operations
- * the easiest workaround for the R[23]00 is to disable interrupts.
- * This fails for R3000 SMP machines which use that many different
- * technologies as replacement that it is difficult to create even
- * just a hook for for all machines to hook into. The only good
- * thing is that there is currently no R3000 SMP machine on the
- * Linux/MIPS target list ...
- */
-#define load_linked(addr) \
-({ \
- unsigned int __res; \
- \
- __asm__ __volatile__( \
- "ll\t%0,(%1)" \
- : "=r" (__res) \
- : "r" ((unsigned long) (addr))); \
- \
- __res; \
-})
-
-#define store_conditional(addr,value) \
-({ \
- int __res; \
- \
- __asm__ __volatile__( \
- "sc\t%0,(%2)" \
- : "=r" (__res) \
- : "0" (value), "r" (addr)); \
- \
- __res; \
-})
-
-/*
* Bitfields in the R4xx0 cp0 status register
*/
#define ST0_IE 0x00000001
@@ -253,11 +335,44 @@ __BUILD_SET_CP0(config,CP0_CONFIG)
/*
* Bits specific to the R4640/R4650
*/
-#define ST0_UM <1 << 4)
+#define ST0_UM (1 << 4)
#define ST0_IL (1 << 23)
#define ST0_DL (1 << 24)
/*
+ * Bitfields in the TX39 family CP0 Configuration Register 3
+ */
+#define TX39_CONF_ICS_SHIFT 19
+#define TX39_CONF_ICS_MASK 0x00380000
+#define TX39_CONF_ICS_1KB 0x00000000
+#define TX39_CONF_ICS_2KB 0x00080000
+#define TX39_CONF_ICS_4KB 0x00100000
+#define TX39_CONF_ICS_8KB 0x00180000
+#define TX39_CONF_ICS_16KB 0x00200000
+
+#define TX39_CONF_DCS_SHIFT 16
+#define TX39_CONF_DCS_MASK 0x00070000
+#define TX39_CONF_DCS_1KB 0x00000000
+#define TX39_CONF_DCS_2KB 0x00010000
+#define TX39_CONF_DCS_4KB 0x00020000
+#define TX39_CONF_DCS_8KB 0x00030000
+#define TX39_CONF_DCS_16KB 0x00040000
+
+#define TX39_CONF_CWFON 0x00004000
+#define TX39_CONF_WBON 0x00002000
+#define TX39_CONF_RF_SHIFT 10
+#define TX39_CONF_RF_MASK 0x00000c00
+#define TX39_CONF_DOZE 0x00000200
+#define TX39_CONF_HALT 0x00000100
+#define TX39_CONF_LOCK 0x00000080
+#define TX39_CONF_ICE 0x00000020
+#define TX39_CONF_DCE 0x00000010
+#define TX39_CONF_IRSIZE_SHIFT 2
+#define TX39_CONF_IRSIZE_MASK 0x0000000c
+#define TX39_CONF_DRSIZE_SHIFT 0
+#define TX39_CONF_DRSIZE_MASK 0x00000003
+
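Since the ICS/DCS encodings above step in powers of two starting at 1KB, decoding them is a pair of shifts; a small sketch, where conf3 is the raw register value read elsewhere:

static inline unsigned long tx39_icache_bytes(unsigned int conf3)
{
	return 1024UL << ((conf3 & TX39_CONF_ICS_MASK) >> TX39_CONF_ICS_SHIFT);
}

static inline unsigned long tx39_dcache_bytes(unsigned int conf3)
{
	return 1024UL << ((conf3 & TX39_CONF_DCS_MASK) >> TX39_CONF_DCS_SHIFT);
}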
+/*
* Status register bits available in all MIPS CPUs.
*/
#define ST0_IM 0x0000ff00
@@ -277,6 +392,22 @@ __BUILD_SET_CP0(config,CP0_CONFIG)
#define STATUSF_IP6 (1 << 14)
#define STATUSB_IP7 15
#define STATUSF_IP7 (1 << 15)
+#define STATUSB_IP8 0
+#define STATUSF_IP8 (1 << 0)
+#define STATUSB_IP9 1
+#define STATUSF_IP9 (1 << 1)
+#define STATUSB_IP10 2
+#define STATUSF_IP10 (1 << 2)
+#define STATUSB_IP11 3
+#define STATUSF_IP11 (1 << 3)
+#define STATUSB_IP12 4
+#define STATUSF_IP12 (1 << 4)
+#define STATUSB_IP13 5
+#define STATUSF_IP13 (1 << 5)
+#define STATUSB_IP14 6
+#define STATUSF_IP14 (1 << 6)
+#define STATUSB_IP15 7
+#define STATUSF_IP15 (1 << 7)
#define ST0_CH 0x00040000
#define ST0_SR 0x00100000
#define ST0_BEV 0x00400000
@@ -405,4 +536,4 @@ extern asmlinkage unsigned int read_perf_cntl(unsigned int counter);
extern asmlinkage void write_perf_cntl(unsigned int counter, unsigned int val);
#endif
-#endif /* __ASM_MIPS_MIPSREGS_H */
+#endif /* _ASM_MIPSREGS_H */
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 9be6976e896d..1e8e9614a012 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -1,5 +1,4 @@
-/* $Id: mmu_context.h,v 1.7 2000/02/04 07:40:53 ralf Exp $
- *
+/*
* Switch a MMU context.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -13,11 +12,12 @@
#define _ASM_MMU_CONTEXT_H
#include <linux/config.h>
+#include <linux/slab.h>
#include <asm/pgalloc.h>
/* Fuck. The f-word is here so you can grep for it :-) */
extern unsigned long asid_cache;
-extern pgd_t *current_pgd;
+extern pgd_t *current_pgd[];
#if defined(CONFIG_CPU_R3000)
@@ -60,7 +60,19 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
extern inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+#ifndef CONFIG_SMP
mm->context = 0;
+#else
+ mm->context = (unsigned long)kmalloc(smp_num_cpus *
+ sizeof(unsigned long), GFP_KERNEL);
+ /*
+ * Init the "context" values so that a tlbpid allocation
+ * happens on the first switch.
+ */
+ if (mm->context == 0)
+ return -ENOMEM;
+ memset((void *)mm->context, 0, smp_num_cpus * sizeof(unsigned long));
+#endif
return 0;
}
@@ -73,7 +85,7 @@ extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if ((next->context ^ asid) & ASID_VERSION_MASK)
get_new_mmu_context(next, asid);
- current_pgd = next->pgd;
+ current_pgd[cpu] = next->pgd;
set_entryhi(next->context);
}
@@ -96,7 +108,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, asid_cache);
- current_pgd = next->pgd;
+ current_pgd[smp_processor_id()] = next->pgd;
set_entryhi(next->context);
}
diff --git a/include/asm-mips/orion.h b/include/asm-mips/orion.h
deleted file mode 100644
index 6df02086a64d..000000000000
--- a/include/asm-mips/orion.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Orion/Galileo specific header file.
- * -- Cort <cort@fsmlabs.com>
- */
-#ifndef __LINUX_MIPS_ORION_H
-#define __LINUX_MIPS_ORION_H
-
-/* base address for the GT-64120 internal registers */
-#define GT64120_BASE (0x14000000)
-/* GT64120 and PCI_0 interrupt cause register */
-#define GT64120_CAUSE_LOW *(unsigned long *)(GT64120_BASE + 0xc18)
-#define GT64120_CAUSE_HIGH *(unsigned long *)(GT64120_BASE + 0xc1c)
-
-#endif /* __LINUX_MIPS_ORION_H */
diff --git a/include/asm-mips/param.h b/include/asm-mips/param.h
index 5487778ca7b9..d4e4c7d73316 100644
--- a/include/asm-mips/param.h
+++ b/include/asm-mips/param.h
@@ -64,4 +64,8 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#ifdef __KERNEL__
+# define CLOCKS_PER_SEC 100 /* frequency at which times() counts */
+#endif
+
#endif /* _ASM_PARAM_H */
diff --git a/include/asm-mips/pci.h b/include/asm-mips/pci.h
index 2bbffa9b94f6..fdcedada3c14 100644
--- a/include/asm-mips/pci.h
+++ b/include/asm-mips/pci.h
@@ -1,5 +1,4 @@
-/* $Id: pci.h,v 1.10 2000/03/23 02:26:00 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -13,7 +12,8 @@
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
-#define pcibios_assign_all_busses() 0
+//#define pcibios_assign_all_busses() 0
+#define pcibios_assign_all_busses() 1
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -201,8 +201,27 @@ extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
#endif
}
-/* Return the index of the PCI controller for device PDEV. */
-#define pci_controller_num(PDEV) (0)
+/* Return whether the given PCI device DMA address mask can
+ * be supported properly. For example, if your device can
+ * only drive the low 24-bits during PCI bus mastering, then
+ * you would pass 0x00ffffff as the mask to this function.
+ */
+extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+{
+ /*
+	 * We fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+
+ return 1;
+}
+
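A sketched call from a driver probe path; mydev_check_dma and the 24-bit mask are illustrative, not anything this patch adds:

static int mydev_check_dma(struct pci_dev *pdev)
{
	/* masks tighter than the 16MB GFP_DMA window are rejected above */
	if (!pci_dma_supported(pdev, 0x00ffffff))
		return -EIO;
	return 0;
}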
+
+/* Return the index of the PCI controller for device pdev. */
+#define pci_controller_num(pdev) (0)
/*
* These macros should be used after a pci_map_sg call has been done
diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h
index e70b32fcbc89..66c12260fe6d 100644
--- a/include/asm-mips/pgalloc.h
+++ b/include/asm-mips/pgalloc.h
@@ -1,16 +1,16 @@
-/* $Id: pgalloc.h,v 1.3 2000/02/23 00:41:38 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994 - 2000 by Ralf Baechle at alii
+ * Copyright (C) 1994 - 2001 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
#include <linux/config.h>
+#include <linux/mm.h>
/* TLB flushing:
*
@@ -33,9 +33,7 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
+ * Allocate and free page tables.
*/
#define pgd_quicklist (current_cpu_data.pgd_quick)
@@ -43,6 +41,13 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
+
+/*
+ * Initialize new page directory with pointers to invalid ptes
+ */
+extern void pgd_init(unsigned long page);
+
extern __inline__ pgd_t *get_pgd_slow(void)
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
@@ -82,7 +87,6 @@ extern __inline__ void free_pgd_slow(pgd_t *pgd)
}
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern __inline__ pte_t *get_pte_fast(void)
{
@@ -123,94 +127,54 @@ extern __inline__ void free_pmd_slow(pmd_t *pmd)
}
extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pte_kernel(pmd_t *pmd);
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc(mm) get_pgd_fast()
-
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
- if (pmd_none(*pmd)) {
- pte_t *page = get_pte_fast();
- if (page) {
- pmd_val(*pmd) = (unsigned long)page;
- return page + address;
- }
- return get_pte_kernel_slow(pmd, address);
- }
- if (pmd_bad(*pmd)) {
- __bad_pte_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pte)
+ clear_page(pte);
+ return pte;
}
-extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
- if (pmd_none(*pmd)) {
- pte_t *page = get_pte_fast();
- if (page) {
- pmd_val(*pmd) = (unsigned long)page;
- return page + address;
- }
- return get_pte_slow(pmd, address);
- }
- if (pmd_bad(*pmd)) {
- __bad_pte(pmd);
- return NULL;
+ unsigned long *ret;
+
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
}
- return (pte_t *) pmd_page(*pmd) + address;
+ return (pte_t *)ret;
}
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free(pmd_t * pmd)
+extern __inline__ void pte_free_fast(pte_t *pte)
{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
}
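The quicklist threads free pte pages through their own first word, which is why the allocator repairs slot 0 from slot 1 after popping a page. A stand-alone sketch of the pattern, with made-up names:

static unsigned long *quick_head;		/* toy freelist head */

static void quick_push(unsigned long *page)
{
	*page = (unsigned long)quick_head;	/* link lives in word 0 */
	quick_head = page;
}

static unsigned long *quick_pop(void)
{
	unsigned long *page = quick_head;

	if (page) {
		quick_head = (unsigned long *)*page;
		page[0] = page[1];	/* word 1 still holds the "empty" value */
	}
	return page;
}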
-extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
+extern __inline__ void pte_free_slow(pte_t *pte)
{
- return (pmd_t *) pgd;
+ free_page((unsigned long)pte);
}
-#define pmd_free_kernel pmd_free
-#define pmd_alloc_kernel pmd_alloc
+#define pte_free(pte) pte_free_slow(pte)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc(mm) get_pgd_fast()
-extern int do_check_pgt_cache(int, int);
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
- struct task_struct * p;
- pgd_t *pgd;
-#ifdef CONFIG_SMP
- int i;
-#endif
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
-#ifndef CONFIG_SMP
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-#else
- /* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
- callee, so we can modify pgd caches of other CPUs as well. -jj */
- for (i = 0; i < NR_CPUS; i++)
- for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-#endif
-}
+extern int do_check_pgt_cache(int, int);
#endif /* _ASM_PGALLOC_H */
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 9cb9241c2b39..c96dea2e3619 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994 - 1999 by Ralf Baechle at alii
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000 by Ralf Baechle at alii
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_PGTABLE_H
@@ -25,32 +25,32 @@
* - flush_cache_page(mm, vmaddr) flushes a single page
* - flush_cache_range(mm, start, end) flushes a range of pages
* - flush_page_to_ram(page) write back kernel page to ram
+ * - flush_icache_range(start, end) flush a range of instructions
*/
extern void (*_flush_cache_all)(void);
+extern void (*___flush_cache_all)(void);
extern void (*_flush_cache_mm)(struct mm_struct *mm);
extern void (*_flush_cache_range)(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void (*_flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
extern void (*_flush_cache_sigtramp)(unsigned long addr);
extern void (*_flush_page_to_ram)(struct page * page);
+extern void (*_flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*_flush_icache_page)(struct vm_area_struct *vma,
+ struct page *page);
#define flush_dcache_page(page) do { } while (0)
#define flush_cache_all() _flush_cache_all()
+#define __flush_cache_all() ___flush_cache_all()
#define flush_cache_mm(mm) _flush_cache_mm(mm)
#define flush_cache_range(mm,start,end) _flush_cache_range(mm,start,end)
#define flush_cache_page(vma,page) _flush_cache_page(vma, page)
#define flush_cache_sigtramp(addr) _flush_cache_sigtramp(addr)
#define flush_page_to_ram(page) _flush_page_to_ram(page)
-#define flush_icache_range(start, end) flush_cache_all()
-
-#define flush_icache_page(vma, page) \
-do { \
- unsigned long addr; \
- addr = (unsigned long) page_address(page); \
- _flush_cache_page(vma, addr); \
-} while (0)
+#define flush_icache_range(start, end) _flush_icache_range(start,end)
+#define flush_icache_page(vma, page) _flush_icache_page(vma, page)
/*
@@ -59,6 +59,16 @@ do { \
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask);
+/*
+ * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
+ * starting at the top and working down. This is for populating the
+ * TLB before trap_init() puts the TLB miss handler in place. It
+ * should be used only for entries matching the actual page tables,
+ * to prevent inconsistencies.
+ */
+extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask);
+
/* Basically we have the same two-level (which is the logical three level
* Linux page table layout folded) page tables as the i386. Some day
@@ -130,13 +140,25 @@ extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#define _CACHE_CACHABLE_NONCOHERENT 0
#else
-
#define _PAGE_R4KBUG (1<<5) /* workaround for r4k bug */
#define _PAGE_GLOBAL (1<<6)
#define _PAGE_VALID (1<<7)
#define _PAGE_SILENT_READ (1<<7) /* synonym */
#define _PAGE_DIRTY (1<<8) /* The MIPS dirty bit */
#define _PAGE_SILENT_WRITE (1<<8)
+#define _CACHE_MASK (7<<9)
+
+#if defined(CONFIG_CPU_SB1)
+
+/* No penalty for being coherent on the SB1, so just
+ use it for "noncoherent" spaces, too. Shouldn't hurt. */
+
+#define _CACHE_UNCACHED (2<<9)
+#define _CACHE_CACHABLE_COW (5<<9)
+#define _CACHE_CACHABLE_NONCOHERENT (5<<9)
+
+#else
+
#define _CACHE_CACHABLE_NO_WA (0<<9) /* R4600 only */
#define _CACHE_CACHABLE_WA (1<<9) /* R4600 only */
#define _CACHE_UNCACHED (2<<9) /* R4[0246]00 */
@@ -145,26 +167,36 @@ extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#define _CACHE_CACHABLE_COW (5<<9) /* R4[04]00 only */
#define _CACHE_CACHABLE_CUW (6<<9) /* R4[04]00 only */
#define _CACHE_CACHABLE_ACCELERATED (7<<9) /* R10000 only */
-#define _CACHE_MASK (7<<9)
#endif
+#endif
#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#ifdef CONFIG_MIPS_UNCACHED
+#define PAGE_CACHABLE_DEFAULT _CACHE_UNCACHED
+#else
+#ifdef CONFIG_CPU_SB1
+#define PAGE_CACHABLE_DEFAULT _CACHE_CACHABLE_COW
+#else
+#define PAGE_CACHABLE_DEFAULT _CACHE_CACHABLE_NONCOHERENT
+#endif
+#endif
+
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _CACHE_CACHABLE_NONCOHERENT)
+ PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
- _CACHE_CACHABLE_NONCOHERENT)
+ PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
- _CACHE_CACHABLE_NONCOHERENT)
+ PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
- _CACHE_CACHABLE_NONCOHERENT)
+ PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _CACHE_UNCACHED)
+ PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_CACHE_UNCACHED)
@@ -200,21 +232,9 @@ extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
-/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern pte_t __bad_page(void);
-extern pte_t *__bad_pagetable(void);
-
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) \
(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
@@ -269,6 +289,13 @@ extern inline void pte_clear(pte_t *ptep)
}
/*
+ * (pmds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+
+/*
* Empty pgd/pmd entries point to the invalid_pte_table.
*/
extern inline int pmd_none(pmd_t pmd)
@@ -284,7 +311,7 @@ extern inline int pmd_bad(pmd_t pmd)
extern inline int pmd_present(pmd_t pmd)
{
- return pmd_val(pmd);
+ return (pmd_val(pmd) != (unsigned long) invalid_pte_table);
}
extern inline void pmd_clear(pmd_t *pmdp)
@@ -303,7 +330,7 @@ extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline void pgd_clear(pgd_t *pgdp) { }
/*
- * Permanent address of a page. On MIPS64 we never have highmem, so this
+ * Permanent address of a page. On MIPS we never have highmem, so this
* is simple.
*/
#define page_address(page) ((page)->virtual)
@@ -390,7 +417,7 @@ extern inline pte_t pte_mkyoung(pte_t pte)
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
- return __pte(((physpage & PAGE_MASK) - PAGE_OFFSET) | pgprot_val(pgprot));
+ return __pte(physpage | pgprot_val(pgprot));
}
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -424,19 +451,6 @@ extern inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
-/*
- * Initialize new page directory with pointers to invalid ptes
- */
-extern void pgd_init(unsigned long page);
-
-extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pte_kernel(pmd_t *pmd);
-
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
-
extern int do_check_pgt_cache(int, int);
extern pgd_t swapper_pg_dir[1024];
@@ -639,6 +653,19 @@ extern inline void set_wired(unsigned long val)
: : "r" (val));
}
+extern inline unsigned long get_info(void)
+{
+ unsigned long val;
+
+ __asm__(
+ ".set push\n\t"
+ ".set reorder\n\t"
+ "mfc0 %0, $7\n\t"
+ ".set pop"
+ : "=r" (val));
+ return val;
+}
+
/* CP0_TAGLO and CP0_TAGHI registers */
extern inline unsigned long get_taglo(void)
{
diff --git a/include/asm-mips/pmc/ev64120.h b/include/asm-mips/pmc/ev64120.h
new file mode 100644
index 000000000000..74ad8c5105e3
--- /dev/null
+++ b/include/asm-mips/pmc/ev64120.h
@@ -0,0 +1,59 @@
+/*
+ * This is a direct copy of the ev96100.h file, with a global search and
+ * replace. The numbers are the same.
+ *
+ * The reason I'm duplicating this is so that the 64120/96100
+ * defines won't be confusing in the source code.
+ */
+#ifndef _ASM_PMC_CP7000_H
+#define _ASM_PMC_CP7000_H
+
+#include <asm/addrspace.h>
+
+/*
+ * GT64120 config space base address
+ */
+#define GT64120_BASE (KSEG1ADDR(0x14000000))
+#define MIPS_GT_BASE GT64120_BASE
+
+/*
+ * PCI Bus allocation
+ */
+#define GT_PCI_MEM_BASE 0x12000000
+#define GT_PCI_MEM_SIZE 0x02000000
+#define GT_PCI_IO_BASE 0x10000000
+#define GT_PCI_IO_SIZE 0x02000000
+#define GT_ISA_IO_BASE PCI_IO_BASE
+
+/*
+ * Duart I/O ports.
+ */
+#define EV64120_COM1_BASE_ADDR (0x1d000000 + 0x20)
+#define EV64120_COM2_BASE_ADDR (0x1d000000 + 0x00)
+
+
+/*
+ * EV64120 interrupt controller register base.
+ */
+#define EV64120_ICTRL_REGS_BASE (KSEG1ADDR(0x1f000000))
+
+/*
+ * EV64120 UART register base.
+ */
+#define EV64120_UART0_REGS_BASE (KSEG1ADDR(EV64120_COM1_BASE_ADDR))
+#define EV64120_UART1_REGS_BASE (KSEG1ADDR(EV64120_COM2_BASE_ADDR))
+#define EV64120_BASE_BAUD ( 3686400 / 16 )
+
+
+/*
+ * Because of an error/peculiarity in the Galileo chip, we need to swap the
+ * bytes when running bigendian.
+ */
+
+#define GT_WRITE(ofs, data) \
+ *(volatile u32 *)(MIPS_GT_BASE+ofs) = cpu_to_le32(data)
+#define GT_READ(ofs, data) \
+ *data = le32_to_cpu(*(volatile u32 *)(MIPS_GT_BASE+ofs))
+
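For instance, reading the GT64120/PCI_0 interrupt cause register whose 0xc18 offset the deleted orion.h used to hard-code; gt64120_cause_low is an illustrative name:

static inline u32 gt64120_cause_low(void)
{
	u32 cause;

	GT_READ(0xc18, &cause);		/* byte-swapped on big-endian by the macro */
	return cause;
}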
+
+#endif /* _ASM_PMC_CP7000_H */
diff --git a/include/asm-mips/pmc/ev64120int.h b/include/asm-mips/pmc/ev64120int.h
new file mode 100644
index 000000000000..463f6b39dcaf
--- /dev/null
+++ b/include/asm-mips/pmc/ev64120int.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_PMC_CP7000INT_H
+#define _ASM_PMC_CP7000INT_H
+
+#define INT_CAUSE_MAIN 0
+#define INT_CAUSE_HIGH 1
+
+#define MAX_CAUSE_REGS 4
+#define MAX_CAUSE_REG_WIDTH 32
+
+void hook_irq_handler (int int_cause , int bit_num , void *isr_ptr);
+int disable_galileo_irq (int int_cause , int bit_num);
+int enable_galileo_irq (int int_cause , int bit_num);
+
+extern struct tq_struct irq_handlers[MAX_CAUSE_REGS][MAX_CAUSE_REG_WIDTH];
+
+/*
+ * PCI interrupts will come in on either the INTA or INTD interrupt lines,
+ * which are mapped to the #2 and #5 interrupt pins of the MIPS. On our
+ * boards they all come in on IntD or they all come in on IntA; they are
+ * never mixed. There can be numerous PCI interrupts, so we keep a list of
+ * the "requested" interrupt numbers and walk the list whenever we get an
+ * IntA/D.
+ *
+ * All PCI interrupts have numbers >= 20 by arbitrary convention. Any
+ * interrupt < 8 is maskable on the MIPS.
+ */
+
+#define TIMER 4
+#define INTA 2
+#define INTD 5
+
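A sketched registration; the cause-register bit (29) and my_isr are purely hypothetical, and the handler shape is assumed to follow the usual 2.4 interrupt-handler signature:

extern void my_isr(int irq, void *dev_id, struct pt_regs *regs);

static void wire_up_example(void)
{
	hook_irq_handler(INT_CAUSE_MAIN, 29, (void *)my_isr);
	enable_galileo_irq(INT_CAUSE_MAIN, 29);
}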
+#endif /* _ASM_PMC_CP7000INT_H */
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index decd449f39a8..b0273cca1735 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -1,11 +1,10 @@
-/* $Id: processor.h,v 1.25 2000/02/05 06:47:37 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 Waldorf GMBH
- * Copyright (C) 1995, 1996, 1997, 1998 Ralf Baechle
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001 Ralf Baechle
* Copyright (C) 1996 Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
@@ -30,6 +29,7 @@
#include <asm/system.h>
struct mips_cpuinfo {
+ unsigned long udelay_val;
unsigned long *pgd_quick;
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
@@ -44,7 +44,6 @@ extern void r3081_wait(void);
extern void r4k_wait(void);
extern char cyclecounter_available; /* only available from R4000 upwards. */
extern char dedicated_iv_available; /* some embedded MIPS like Nevada */
-extern char vce_available; /* Supports VCED / VCEI exceptions */
extern struct mips_cpuinfo boot_cpu_data;
extern unsigned int vced_count, vcei_count;
@@ -83,7 +82,7 @@ extern struct task_struct *last_task_used_math;
* for a 64 bit kernel expandable to 8192EB, of which the current MIPS
* implementations will "only" be able to use 1TB ...
*/
-#define TASK_SIZE (0x80000000UL)
+#define TASK_SIZE (0x7fff8000UL)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index b395fe1be56a..39ac6cea4c49 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -1,16 +1,15 @@
-/* $Id: ptrace.h,v 1.7 1999/09/28 22:27:17 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994, 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
*
* Machine dependent structs and defines to help the user use
* the ptrace system call.
*/
-#ifndef __ASM_MIPS_PTRACE_H
-#define __ASM_MIPS_PTRACE_H
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
#include <asm/isadep.h>
#include <linux/types.h>
@@ -52,6 +51,19 @@ struct pt_regs {
#endif /* !(_LANGUAGE_ASSEMBLY) */
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+/* #define PTRACE_GETREGS 12 */
+/* #define PTRACE_SETREGS 13 */
+/* #define PTRACE_GETFPREGS 14 */
+/* #define PTRACE_SETFPREGS 15 */
+/* #define PTRACE_GETFPXREGS 18 */
+/* #define PTRACE_SETFPXREGS 19 */
+
+#define PTRACE_SETOPTIONS 21
+
+/* options set using PTRACE_SETOPTIONS */
+#define PTRACE_O_TRACESYSGOOD 0x00000001
+
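The option behaves as TRACESYSGOOD does elsewhere in Linux: once set, syscall stops report SIGTRAP with bit 0x80 or'ed into the stop signal, so a tracer can tell them from real breakpoints. A user-space sketch, assuming request 21 is what the C library passes through:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static int is_syscall_stop(pid_t pid)
{
	int status;

	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);
	ptrace(PTRACE_SYSCALL, pid, 0, 0);	/* resume until the next syscall stop */
	waitpid(pid, &status, 0);
	return WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80);
}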
#ifdef _LANGUAGE_ASSEMBLY
#include <asm/offset.h>
#endif
@@ -71,4 +83,4 @@ extern void show_regs(struct pt_regs *);
#endif
-#endif /* __ASM_MIPS_PTRACE_H */
+#endif /* _ASM_PTRACE_H */
diff --git a/include/asm-mips/resource.h b/include/asm-mips/resource.h
index 718e983e61f2..286b71b7057e 100644
--- a/include/asm-mips/resource.h
+++ b/include/asm-mips/resource.h
@@ -1,10 +1,9 @@
-/* $Id: resource.h,v 1.4 2000/01/27 23:45:30 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1996, 1998 by Ralf Baechle
+ * Copyright (C) 1995, 96, 98, 2000 by Ralf Baechle
*/
#ifndef _ASM_RESOURCE_H
#define _ASM_RESOURCE_H
@@ -26,14 +25,14 @@
#define RLIM_NLIMITS 11 /* Number of limit flavors. */
+#ifdef __KERNEL__
+
/*
* SuS says limits have to be unsigned.
* Which makes a ton more sense anyway.
*/
#define RLIM_INFINITY 0x7fffffffUL
-#ifdef __KERNEL__
-
#define INIT_RLIMITS \
{ \
{ RLIM_INFINITY, RLIM_INFINITY }, \
diff --git a/include/asm-mips/riscos-syscall.h b/include/asm-mips/riscos-syscall.h
new file mode 100644
index 000000000000..8cb87df377bb
--- /dev/null
+++ b/include/asm-mips/riscos-syscall.h
@@ -0,0 +1,979 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
+ */
+#ifndef _ASM_RISCOS_SYSCALL_H
+#define _ASM_RISCOS_SYSCALL_H
+
+/*
+ * The syscalls 0 - 3999 are reserved for down-to-the-root syscall
+ * compatibility with RISC/os and IRIX. We'll see how to deal with the
+ * various "real" BSD variants like Ultrix, NetBSD ...
+ */
+
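Since the four families simply partition the number space in blocks of 1000, telling them apart needs nothing beyond the bases documented in this file; a trivial illustrative classifier:

static inline const char *riscos_abi_family(unsigned int nr)
{
	if (nr < 1000)
		return "SVR4";
	if (nr < 2000)
		return "SysV";
	if (nr < 3000)
		return "BSD 4.3";
	if (nr < 4000)
		return "POSIX";
	return "native Linux";
}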
+/*
+ * SVR4 syscalls are in the range from 1 to 999
+ */
+#define __NR_SVR4 0
+#define __NR_SVR4_syscall (__NR_SVR4 + 0)
+#define __NR_SVR4_exit (__NR_SVR4 + 1)
+#define __NR_SVR4_fork (__NR_SVR4 + 2)
+#define __NR_SVR4_read (__NR_SVR4 + 3)
+#define __NR_SVR4_write (__NR_SVR4 + 4)
+#define __NR_SVR4_open (__NR_SVR4 + 5)
+#define __NR_SVR4_close (__NR_SVR4 + 6)
+#define __NR_SVR4_wait (__NR_SVR4 + 7)
+#define __NR_SVR4_creat (__NR_SVR4 + 8)
+#define __NR_SVR4_link (__NR_SVR4 + 9)
+#define __NR_SVR4_unlink (__NR_SVR4 + 10)
+#define __NR_SVR4_exec (__NR_SVR4 + 11)
+#define __NR_SVR4_chdir (__NR_SVR4 + 12)
+#define __NR_SVR4_gtime (__NR_SVR4 + 13)
+#define __NR_SVR4_mknod (__NR_SVR4 + 14)
+#define __NR_SVR4_chmod (__NR_SVR4 + 15)
+#define __NR_SVR4_chown (__NR_SVR4 + 16)
+#define __NR_SVR4_sbreak (__NR_SVR4 + 17)
+#define __NR_SVR4_stat (__NR_SVR4 + 18)
+#define __NR_SVR4_lseek (__NR_SVR4 + 19)
+#define __NR_SVR4_getpid (__NR_SVR4 + 20)
+#define __NR_SVR4_mount (__NR_SVR4 + 21)
+#define __NR_SVR4_umount (__NR_SVR4 + 22)
+#define __NR_SVR4_setuid (__NR_SVR4 + 23)
+#define __NR_SVR4_getuid (__NR_SVR4 + 24)
+#define __NR_SVR4_stime (__NR_SVR4 + 25)
+#define __NR_SVR4_ptrace (__NR_SVR4 + 26)
+#define __NR_SVR4_alarm (__NR_SVR4 + 27)
+#define __NR_SVR4_fstat (__NR_SVR4 + 28)
+#define __NR_SVR4_pause (__NR_SVR4 + 29)
+#define __NR_SVR4_utime (__NR_SVR4 + 30)
+#define __NR_SVR4_stty (__NR_SVR4 + 31)
+#define __NR_SVR4_gtty (__NR_SVR4 + 32)
+#define __NR_SVR4_access (__NR_SVR4 + 33)
+#define __NR_SVR4_nice (__NR_SVR4 + 34)
+#define __NR_SVR4_statfs (__NR_SVR4 + 35)
+#define __NR_SVR4_sync (__NR_SVR4 + 36)
+#define __NR_SVR4_kill (__NR_SVR4 + 37)
+#define __NR_SVR4_fstatfs (__NR_SVR4 + 38)
+#define __NR_SVR4_setpgrp (__NR_SVR4 + 39)
+#define __NR_SVR4_cxenix (__NR_SVR4 + 40)
+#define __NR_SVR4_dup (__NR_SVR4 + 41)
+#define __NR_SVR4_pipe (__NR_SVR4 + 42)
+#define __NR_SVR4_times (__NR_SVR4 + 43)
+#define __NR_SVR4_profil (__NR_SVR4 + 44)
+#define __NR_SVR4_plock (__NR_SVR4 + 45)
+#define __NR_SVR4_setgid (__NR_SVR4 + 46)
+#define __NR_SVR4_getgid (__NR_SVR4 + 47)
+#define __NR_SVR4_sig (__NR_SVR4 + 48)
+#define __NR_SVR4_msgsys (__NR_SVR4 + 49)
+#define __NR_SVR4_sysmips (__NR_SVR4 + 50)
+#define __NR_SVR4_sysacct (__NR_SVR4 + 51)
+#define __NR_SVR4_shmsys (__NR_SVR4 + 52)
+#define __NR_SVR4_semsys (__NR_SVR4 + 53)
+#define __NR_SVR4_ioctl (__NR_SVR4 + 54)
+#define __NR_SVR4_uadmin (__NR_SVR4 + 55)
+#define __NR_SVR4_exch (__NR_SVR4 + 56)
+#define __NR_SVR4_utssys (__NR_SVR4 + 57)
+#define __NR_SVR4_fsync (__NR_SVR4 + 58)
+#define __NR_SVR4_exece (__NR_SVR4 + 59)
+#define __NR_SVR4_umask (__NR_SVR4 + 60)
+#define __NR_SVR4_chroot (__NR_SVR4 + 61)
+#define __NR_SVR4_fcntl (__NR_SVR4 + 62)
+#define __NR_SVR4_ulimit (__NR_SVR4 + 63)
+#define __NR_SVR4_reserved1 (__NR_SVR4 + 64)
+#define __NR_SVR4_reserved2 (__NR_SVR4 + 65)
+#define __NR_SVR4_reserved3 (__NR_SVR4 + 66)
+#define __NR_SVR4_reserved4 (__NR_SVR4 + 67)
+#define __NR_SVR4_reserved5 (__NR_SVR4 + 68)
+#define __NR_SVR4_reserved6 (__NR_SVR4 + 69)
+#define __NR_SVR4_advfs (__NR_SVR4 + 70)
+#define __NR_SVR4_unadvfs (__NR_SVR4 + 71)
+#define __NR_SVR4_unused1 (__NR_SVR4 + 72)
+#define __NR_SVR4_unused2 (__NR_SVR4 + 73)
+#define __NR_SVR4_rfstart (__NR_SVR4 + 74)
+#define __NR_SVR4_unused3 (__NR_SVR4 + 75)
+#define __NR_SVR4_rdebug (__NR_SVR4 + 76)
+#define __NR_SVR4_rfstop (__NR_SVR4 + 77)
+#define __NR_SVR4_rfsys (__NR_SVR4 + 78)
+#define __NR_SVR4_rmdir (__NR_SVR4 + 79)
+#define __NR_SVR4_mkdir (__NR_SVR4 + 80)
+#define __NR_SVR4_getdents (__NR_SVR4 + 81)
+#define __NR_SVR4_libattach (__NR_SVR4 + 82)
+#define __NR_SVR4_libdetach (__NR_SVR4 + 83)
+#define __NR_SVR4_sysfs (__NR_SVR4 + 84)
+#define __NR_SVR4_getmsg (__NR_SVR4 + 85)
+#define __NR_SVR4_putmsg (__NR_SVR4 + 86)
+#define __NR_SVR4_poll (__NR_SVR4 + 87)
+#define __NR_SVR4_lstat (__NR_SVR4 + 88)
+#define __NR_SVR4_symlink (__NR_SVR4 + 89)
+#define __NR_SVR4_readlink (__NR_SVR4 + 90)
+#define __NR_SVR4_setgroups (__NR_SVR4 + 91)
+#define __NR_SVR4_getgroups (__NR_SVR4 + 92)
+#define __NR_SVR4_fchmod (__NR_SVR4 + 93)
+#define __NR_SVR4_fchown (__NR_SVR4 + 94)
+#define __NR_SVR4_sigprocmask (__NR_SVR4 + 95)
+#define __NR_SVR4_sigsuspend (__NR_SVR4 + 96)
+#define __NR_SVR4_sigaltstack (__NR_SVR4 + 97)
+#define __NR_SVR4_sigaction (__NR_SVR4 + 98)
+#define __NR_SVR4_sigpending (__NR_SVR4 + 99)
+#define __NR_SVR4_setcontext (__NR_SVR4 + 100)
+#define __NR_SVR4_evsys (__NR_SVR4 + 101)
+#define __NR_SVR4_evtrapret (__NR_SVR4 + 102)
+#define __NR_SVR4_statvfs (__NR_SVR4 + 103)
+#define __NR_SVR4_fstatvfs (__NR_SVR4 + 104)
+#define __NR_SVR4_reserved7 (__NR_SVR4 + 105)
+#define __NR_SVR4_nfssys (__NR_SVR4 + 106)
+#define __NR_SVR4_waitid (__NR_SVR4 + 107)
+#define __NR_SVR4_sigsendset (__NR_SVR4 + 108)
+#define __NR_SVR4_hrtsys (__NR_SVR4 + 109)
+#define __NR_SVR4_acancel (__NR_SVR4 + 110)
+#define __NR_SVR4_async (__NR_SVR4 + 111)
+#define __NR_SVR4_priocntlset (__NR_SVR4 + 112)
+#define __NR_SVR4_pathconf (__NR_SVR4 + 113)
+#define __NR_SVR4_mincore (__NR_SVR4 + 114)
+#define __NR_SVR4_mmap (__NR_SVR4 + 115)
+#define __NR_SVR4_mprotect (__NR_SVR4 + 116)
+#define __NR_SVR4_munmap (__NR_SVR4 + 117)
+#define __NR_SVR4_fpathconf (__NR_SVR4 + 118)
+#define __NR_SVR4_vfork (__NR_SVR4 + 119)
+#define __NR_SVR4_fchdir (__NR_SVR4 + 120)
+#define __NR_SVR4_readv (__NR_SVR4 + 121)
+#define __NR_SVR4_writev (__NR_SVR4 + 122)
+#define __NR_SVR4_xstat (__NR_SVR4 + 123)
+#define __NR_SVR4_lxstat (__NR_SVR4 + 124)
+#define __NR_SVR4_fxstat (__NR_SVR4 + 125)
+#define __NR_SVR4_xmknod (__NR_SVR4 + 126)
+#define __NR_SVR4_clocal (__NR_SVR4 + 127)
+#define __NR_SVR4_setrlimit (__NR_SVR4 + 128)
+#define __NR_SVR4_getrlimit (__NR_SVR4 + 129)
+#define __NR_SVR4_lchown (__NR_SVR4 + 130)
+#define __NR_SVR4_memcntl (__NR_SVR4 + 131)
+#define __NR_SVR4_getpmsg (__NR_SVR4 + 132)
+#define __NR_SVR4_putpmsg (__NR_SVR4 + 133)
+#define __NR_SVR4_rename (__NR_SVR4 + 134)
+#define __NR_SVR4_nuname (__NR_SVR4 + 135)
+#define __NR_SVR4_setegid (__NR_SVR4 + 136)
+#define __NR_SVR4_sysconf (__NR_SVR4 + 137)
+#define __NR_SVR4_adjtime (__NR_SVR4 + 138)
+#define __NR_SVR4_sysinfo (__NR_SVR4 + 139)
+#define __NR_SVR4_reserved8 (__NR_SVR4 + 140)
+#define __NR_SVR4_seteuid (__NR_SVR4 + 141)
+#define __NR_SVR4_PYRAMID_statis (__NR_SVR4 + 142)
+#define __NR_SVR4_PYRAMID_tuning (__NR_SVR4 + 143)
+#define __NR_SVR4_PYRAMID_forcerr (__NR_SVR4 + 144)
+#define __NR_SVR4_PYRAMID_mpcntl (__NR_SVR4 + 145)
+#define __NR_SVR4_reserved9 (__NR_SVR4 + 146)
+#define __NR_SVR4_reserved10 (__NR_SVR4 + 147)
+#define __NR_SVR4_reserved11 (__NR_SVR4 + 148)
+#define __NR_SVR4_reserved12 (__NR_SVR4 + 149)
+#define __NR_SVR4_reserved13 (__NR_SVR4 + 150)
+#define __NR_SVR4_reserved14 (__NR_SVR4 + 151)
+#define __NR_SVR4_reserved15 (__NR_SVR4 + 152)
+#define __NR_SVR4_reserved16 (__NR_SVR4 + 153)
+#define __NR_SVR4_reserved17 (__NR_SVR4 + 154)
+#define __NR_SVR4_reserved18 (__NR_SVR4 + 155)
+#define __NR_SVR4_reserved19 (__NR_SVR4 + 156)
+#define __NR_SVR4_reserved20 (__NR_SVR4 + 157)
+#define __NR_SVR4_reserved21 (__NR_SVR4 + 158)
+#define __NR_SVR4_reserved22 (__NR_SVR4 + 159)
+#define __NR_SVR4_reserved23 (__NR_SVR4 + 160)
+#define __NR_SVR4_reserved24 (__NR_SVR4 + 161)
+#define __NR_SVR4_reserved25 (__NR_SVR4 + 162)
+#define __NR_SVR4_reserved26 (__NR_SVR4 + 163)
+#define __NR_SVR4_reserved27 (__NR_SVR4 + 164)
+#define __NR_SVR4_reserved28 (__NR_SVR4 + 165)
+#define __NR_SVR4_reserved29 (__NR_SVR4 + 166)
+#define __NR_SVR4_reserved30 (__NR_SVR4 + 167)
+#define __NR_SVR4_reserved31 (__NR_SVR4 + 168)
+#define __NR_SVR4_reserved32 (__NR_SVR4 + 169)
+#define __NR_SVR4_reserved33 (__NR_SVR4 + 170)
+#define __NR_SVR4_reserved34 (__NR_SVR4 + 171)
+#define __NR_SVR4_reserved35 (__NR_SVR4 + 172)
+#define __NR_SVR4_reserved36 (__NR_SVR4 + 173)
+#define __NR_SVR4_reserved37 (__NR_SVR4 + 174)
+#define __NR_SVR4_reserved38 (__NR_SVR4 + 175)
+#define __NR_SVR4_reserved39 (__NR_SVR4 + 176)
+#define __NR_SVR4_reserved40 (__NR_SVR4 + 177)
+#define __NR_SVR4_reserved41 (__NR_SVR4 + 178)
+#define __NR_SVR4_reserved42 (__NR_SVR4 + 179)
+#define __NR_SVR4_reserved43 (__NR_SVR4 + 180)
+#define __NR_SVR4_reserved44 (__NR_SVR4 + 181)
+#define __NR_SVR4_reserved45 (__NR_SVR4 + 182)
+#define __NR_SVR4_reserved46 (__NR_SVR4 + 183)
+#define __NR_SVR4_reserved47 (__NR_SVR4 + 184)
+#define __NR_SVR4_reserved48 (__NR_SVR4 + 185)
+#define __NR_SVR4_reserved49 (__NR_SVR4 + 186)
+#define __NR_SVR4_reserved50 (__NR_SVR4 + 187)
+#define __NR_SVR4_reserved51 (__NR_SVR4 + 188)
+#define __NR_SVR4_reserved52 (__NR_SVR4 + 189)
+#define __NR_SVR4_reserved53 (__NR_SVR4 + 190)
+#define __NR_SVR4_reserved54 (__NR_SVR4 + 191)
+#define __NR_SVR4_reserved55 (__NR_SVR4 + 192)
+#define __NR_SVR4_reserved56 (__NR_SVR4 + 193)
+#define __NR_SVR4_reserved57 (__NR_SVR4 + 194)
+#define __NR_SVR4_reserved58 (__NR_SVR4 + 195)
+#define __NR_SVR4_reserved59 (__NR_SVR4 + 196)
+#define __NR_SVR4_reserved60 (__NR_SVR4 + 197)
+#define __NR_SVR4_reserved61 (__NR_SVR4 + 198)
+#define __NR_SVR4_reserved62 (__NR_SVR4 + 199)
+#define __NR_SVR4_reserved63 (__NR_SVR4 + 200)
+#define __NR_SVR4_aread (__NR_SVR4 + 201)
+#define __NR_SVR4_awrite (__NR_SVR4 + 202)
+#define __NR_SVR4_listio (__NR_SVR4 + 203)
+#define __NR_SVR4_mips_acancel (__NR_SVR4 + 204)
+#define __NR_SVR4_astatus (__NR_SVR4 + 205)
+#define __NR_SVR4_await (__NR_SVR4 + 206)
+#define __NR_SVR4_areadv (__NR_SVR4 + 207)
+#define __NR_SVR4_awritev (__NR_SVR4 + 208)
+#define __NR_SVR4_MIPS_reserved1 (__NR_SVR4 + 209)
+#define __NR_SVR4_MIPS_reserved2 (__NR_SVR4 + 210)
+#define __NR_SVR4_MIPS_reserved3 (__NR_SVR4 + 211)
+#define __NR_SVR4_MIPS_reserved4 (__NR_SVR4 + 212)
+#define __NR_SVR4_MIPS_reserved5 (__NR_SVR4 + 213)
+#define __NR_SVR4_MIPS_reserved6 (__NR_SVR4 + 214)
+#define __NR_SVR4_MIPS_reserved7 (__NR_SVR4 + 215)
+#define __NR_SVR4_MIPS_reserved8 (__NR_SVR4 + 216)
+#define __NR_SVR4_MIPS_reserved9 (__NR_SVR4 + 217)
+#define __NR_SVR4_MIPS_reserved10 (__NR_SVR4 + 218)
+#define __NR_SVR4_MIPS_reserved11 (__NR_SVR4 + 219)
+#define __NR_SVR4_MIPS_reserved12 (__NR_SVR4 + 220)
+#define __NR_SVR4_CDC_reserved1 (__NR_SVR4 + 221)
+#define __NR_SVR4_CDC_reserved2 (__NR_SVR4 + 222)
+#define __NR_SVR4_CDC_reserved3 (__NR_SVR4 + 223)
+#define __NR_SVR4_CDC_reserved4 (__NR_SVR4 + 224)
+#define __NR_SVR4_CDC_reserved5 (__NR_SVR4 + 225)
+#define __NR_SVR4_CDC_reserved6 (__NR_SVR4 + 226)
+#define __NR_SVR4_CDC_reserved7 (__NR_SVR4 + 227)
+#define __NR_SVR4_CDC_reserved8 (__NR_SVR4 + 228)
+#define __NR_SVR4_CDC_reserved9 (__NR_SVR4 + 229)
+#define __NR_SVR4_CDC_reserved10 (__NR_SVR4 + 230)
+#define __NR_SVR4_CDC_reserved11 (__NR_SVR4 + 231)
+#define __NR_SVR4_CDC_reserved12 (__NR_SVR4 + 232)
+#define __NR_SVR4_CDC_reserved13 (__NR_SVR4 + 233)
+#define __NR_SVR4_CDC_reserved14 (__NR_SVR4 + 234)
+#define __NR_SVR4_CDC_reserved15 (__NR_SVR4 + 235)
+#define __NR_SVR4_CDC_reserved16 (__NR_SVR4 + 236)
+#define __NR_SVR4_CDC_reserved17 (__NR_SVR4 + 237)
+#define __NR_SVR4_CDC_reserved18 (__NR_SVR4 + 238)
+#define __NR_SVR4_CDC_reserved19 (__NR_SVR4 + 239)
+#define __NR_SVR4_CDC_reserved20 (__NR_SVR4 + 240)
+
+/*
+ * SYS V syscalls are in the range from 1000 to 1999
+ */
+#define __NR_SYSV 1000
+#define __NR_SYSV_syscall (__NR_SYSV + 0)
+#define __NR_SYSV_exit (__NR_SYSV + 1)
+#define __NR_SYSV_fork (__NR_SYSV + 2)
+#define __NR_SYSV_read (__NR_SYSV + 3)
+#define __NR_SYSV_write (__NR_SYSV + 4)
+#define __NR_SYSV_open (__NR_SYSV + 5)
+#define __NR_SYSV_close (__NR_SYSV + 6)
+#define __NR_SYSV_wait (__NR_SYSV + 7)
+#define __NR_SYSV_creat (__NR_SYSV + 8)
+#define __NR_SYSV_link (__NR_SYSV + 9)
+#define __NR_SYSV_unlink (__NR_SYSV + 10)
+#define __NR_SYSV_execv (__NR_SYSV + 11)
+#define __NR_SYSV_chdir (__NR_SYSV + 12)
+#define __NR_SYSV_time (__NR_SYSV + 13)
+#define __NR_SYSV_mknod (__NR_SYSV + 14)
+#define __NR_SYSV_chmod (__NR_SYSV + 15)
+#define __NR_SYSV_chown (__NR_SYSV + 16)
+#define __NR_SYSV_brk (__NR_SYSV + 17)
+#define __NR_SYSV_stat (__NR_SYSV + 18)
+#define __NR_SYSV_lseek (__NR_SYSV + 19)
+#define __NR_SYSV_getpid (__NR_SYSV + 20)
+#define __NR_SYSV_mount (__NR_SYSV + 21)
+#define __NR_SYSV_umount (__NR_SYSV + 22)
+#define __NR_SYSV_setuid (__NR_SYSV + 23)
+#define __NR_SYSV_getuid (__NR_SYSV + 24)
+#define __NR_SYSV_stime (__NR_SYSV + 25)
+#define __NR_SYSV_ptrace (__NR_SYSV + 26)
+#define __NR_SYSV_alarm (__NR_SYSV + 27)
+#define __NR_SYSV_fstat (__NR_SYSV + 28)
+#define __NR_SYSV_pause (__NR_SYSV + 29)
+#define __NR_SYSV_utime (__NR_SYSV + 30)
+#define __NR_SYSV_stty (__NR_SYSV + 31)
+#define __NR_SYSV_gtty (__NR_SYSV + 32)
+#define __NR_SYSV_access (__NR_SYSV + 33)
+#define __NR_SYSV_nice (__NR_SYSV + 34)
+#define __NR_SYSV_statfs (__NR_SYSV + 35)
+#define __NR_SYSV_sync (__NR_SYSV + 36)
+#define __NR_SYSV_kill (__NR_SYSV + 37)
+#define __NR_SYSV_fstatfs (__NR_SYSV + 38)
+#define __NR_SYSV_setpgrp (__NR_SYSV + 39)
+#define __NR_SYSV_syssgi (__NR_SYSV + 40)
+#define __NR_SYSV_dup (__NR_SYSV + 41)
+#define __NR_SYSV_pipe (__NR_SYSV + 42)
+#define __NR_SYSV_times (__NR_SYSV + 43)
+#define __NR_SYSV_profil (__NR_SYSV + 44)
+#define __NR_SYSV_plock (__NR_SYSV + 45)
+#define __NR_SYSV_setgid (__NR_SYSV + 46)
+#define __NR_SYSV_getgid (__NR_SYSV + 47)
+#define __NR_SYSV_sig (__NR_SYSV + 48)
+#define __NR_SYSV_msgsys (__NR_SYSV + 49)
+#define __NR_SYSV_sysmips (__NR_SYSV + 50)
+#define __NR_SYSV_acct (__NR_SYSV + 51)
+#define __NR_SYSV_shmsys (__NR_SYSV + 52)
+#define __NR_SYSV_semsys (__NR_SYSV + 53)
+#define __NR_SYSV_ioctl (__NR_SYSV + 54)
+#define __NR_SYSV_uadmin (__NR_SYSV + 55)
+#define __NR_SYSV_sysmp (__NR_SYSV + 56)
+#define __NR_SYSV_utssys (__NR_SYSV + 57)
+#define __NR_SYSV_USG_reserved1 (__NR_SYSV + 58)
+#define __NR_SYSV_execve (__NR_SYSV + 59)
+#define __NR_SYSV_umask (__NR_SYSV + 60)
+#define __NR_SYSV_chroot (__NR_SYSV + 61)
+#define __NR_SYSV_fcntl (__NR_SYSV + 62)
+#define __NR_SYSV_ulimit (__NR_SYSV + 63)
+#define __NR_SYSV_SAFARI4_reserved1 (__NR_SYSV + 64)
+#define __NR_SYSV_SAFARI4_reserved2 (__NR_SYSV + 65)
+#define __NR_SYSV_SAFARI4_reserved3 (__NR_SYSV + 66)
+#define __NR_SYSV_SAFARI4_reserved4 (__NR_SYSV + 67)
+#define __NR_SYSV_SAFARI4_reserved5 (__NR_SYSV + 68)
+#define __NR_SYSV_SAFARI4_reserved6 (__NR_SYSV + 69)
+#define __NR_SYSV_advfs (__NR_SYSV + 70)
+#define __NR_SYSV_unadvfs (__NR_SYSV + 71)
+#define __NR_SYSV_rmount (__NR_SYSV + 72)
+#define __NR_SYSV_rumount (__NR_SYSV + 73)
+#define __NR_SYSV_rfstart (__NR_SYSV + 74)
+#define __NR_SYSV_getrlimit64 (__NR_SYSV + 75)
+#define __NR_SYSV_setrlimit64 (__NR_SYSV + 76)
+#define __NR_SYSV_nanosleep (__NR_SYSV + 77)
+#define __NR_SYSV_lseek64 (__NR_SYSV + 78)
+#define __NR_SYSV_rmdir (__NR_SYSV + 79)
+#define __NR_SYSV_mkdir (__NR_SYSV + 80)
+#define __NR_SYSV_getdents (__NR_SYSV + 81)
+#define __NR_SYSV_sginap (__NR_SYSV + 82)
+#define __NR_SYSV_sgikopt (__NR_SYSV + 83)
+#define __NR_SYSV_sysfs (__NR_SYSV + 84)
+#define __NR_SYSV_getmsg (__NR_SYSV + 85)
+#define __NR_SYSV_putmsg (__NR_SYSV + 86)
+#define __NR_SYSV_poll (__NR_SYSV + 87)
+#define __NR_SYSV_sigreturn (__NR_SYSV + 88)
+#define __NR_SYSV_accept (__NR_SYSV + 89)
+#define __NR_SYSV_bind (__NR_SYSV + 90)
+#define __NR_SYSV_connect (__NR_SYSV + 91)
+#define __NR_SYSV_gethostid (__NR_SYSV + 92)
+#define __NR_SYSV_getpeername (__NR_SYSV + 93)
+#define __NR_SYSV_getsockname (__NR_SYSV + 94)
+#define __NR_SYSV_getsockopt (__NR_SYSV + 95)
+#define __NR_SYSV_listen (__NR_SYSV + 96)
+#define __NR_SYSV_recv (__NR_SYSV + 97)
+#define __NR_SYSV_recvfrom (__NR_SYSV + 98)
+#define __NR_SYSV_recvmsg (__NR_SYSV + 99)
+#define __NR_SYSV_select (__NR_SYSV + 100)
+#define __NR_SYSV_send (__NR_SYSV + 101)
+#define __NR_SYSV_sendmsg (__NR_SYSV + 102)
+#define __NR_SYSV_sendto (__NR_SYSV + 103)
+#define __NR_SYSV_sethostid (__NR_SYSV + 104)
+#define __NR_SYSV_setsockopt (__NR_SYSV + 105)
+#define __NR_SYSV_shutdown (__NR_SYSV + 106)
+#define __NR_SYSV_socket (__NR_SYSV + 107)
+#define __NR_SYSV_gethostname (__NR_SYSV + 108)
+#define __NR_SYSV_sethostname (__NR_SYSV + 109)
+#define __NR_SYSV_getdomainname (__NR_SYSV + 110)
+#define __NR_SYSV_setdomainname (__NR_SYSV + 111)
+#define __NR_SYSV_truncate (__NR_SYSV + 112)
+#define __NR_SYSV_ftruncate (__NR_SYSV + 113)
+#define __NR_SYSV_rename (__NR_SYSV + 114)
+#define __NR_SYSV_symlink (__NR_SYSV + 115)
+#define __NR_SYSV_readlink (__NR_SYSV + 116)
+#define __NR_SYSV_lstat (__NR_SYSV + 117)
+#define __NR_SYSV_nfsmount (__NR_SYSV + 118)
+#define __NR_SYSV_nfssvc (__NR_SYSV + 119)
+#define __NR_SYSV_getfh (__NR_SYSV + 120)
+#define __NR_SYSV_async_daemon (__NR_SYSV + 121)
+#define __NR_SYSV_exportfs (__NR_SYSV + 122)
+#define __NR_SYSV_setregid (__NR_SYSV + 123)
+#define __NR_SYSV_setreuid (__NR_SYSV + 124)
+#define __NR_SYSV_getitimer (__NR_SYSV + 125)
+#define __NR_SYSV_setitimer (__NR_SYSV + 126)
+#define __NR_SYSV_adjtime (__NR_SYSV + 127)
+#define __NR_SYSV_BSD_getime (__NR_SYSV + 128)
+#define __NR_SYSV_sproc (__NR_SYSV + 129)
+#define __NR_SYSV_prctl (__NR_SYSV + 130)
+#define __NR_SYSV_procblk (__NR_SYSV + 131)
+#define __NR_SYSV_sprocsp (__NR_SYSV + 132)
+#define __NR_SYSV_sgigsc (__NR_SYSV + 133)
+#define __NR_SYSV_mmap (__NR_SYSV + 134)
+#define __NR_SYSV_munmap (__NR_SYSV + 135)
+#define __NR_SYSV_mprotect (__NR_SYSV + 136)
+#define __NR_SYSV_msync (__NR_SYSV + 137)
+#define __NR_SYSV_madvise (__NR_SYSV + 138)
+#define __NR_SYSV_pagelock (__NR_SYSV + 139)
+#define __NR_SYSV_getpagesize (__NR_SYSV + 140)
+#define __NR_SYSV_quotactl (__NR_SYSV + 141)
+#define __NR_SYSV_libdetach (__NR_SYSV + 142)
+#define __NR_SYSV_BSDgetpgrp (__NR_SYSV + 143)
+#define __NR_SYSV_BSDsetpgrp (__NR_SYSV + 144)
+#define __NR_SYSV_vhangup (__NR_SYSV + 145)
+#define __NR_SYSV_fsync (__NR_SYSV + 146)
+#define __NR_SYSV_fchdir (__NR_SYSV + 147)
+#define __NR_SYSV_getrlimit (__NR_SYSV + 148)
+#define __NR_SYSV_setrlimit (__NR_SYSV + 149)
+#define __NR_SYSV_cacheflush (__NR_SYSV + 150)
+#define __NR_SYSV_cachectl (__NR_SYSV + 151)
+#define __NR_SYSV_fchown (__NR_SYSV + 152)
+#define __NR_SYSV_fchmod (__NR_SYSV + 153)
+#define __NR_SYSV_wait3 (__NR_SYSV + 154)
+#define __NR_SYSV_socketpair (__NR_SYSV + 155)
+#define __NR_SYSV_sysinfo (__NR_SYSV + 156)
+#define __NR_SYSV_nuname (__NR_SYSV + 157)
+#define __NR_SYSV_xstat (__NR_SYSV + 158)
+#define __NR_SYSV_lxstat (__NR_SYSV + 159)
+#define __NR_SYSV_fxstat (__NR_SYSV + 160)
+#define __NR_SYSV_xmknod (__NR_SYSV + 161)
+#define __NR_SYSV_ksigaction (__NR_SYSV + 162)
+#define __NR_SYSV_sigpending (__NR_SYSV + 163)
+#define __NR_SYSV_sigprocmask (__NR_SYSV + 164)
+#define __NR_SYSV_sigsuspend (__NR_SYSV + 165)
+#define __NR_SYSV_sigpoll (__NR_SYSV + 166)
+#define __NR_SYSV_swapctl (__NR_SYSV + 167)
+#define __NR_SYSV_getcontext (__NR_SYSV + 168)
+#define __NR_SYSV_setcontext (__NR_SYSV + 169)
+#define __NR_SYSV_waitsys (__NR_SYSV + 170)
+#define __NR_SYSV_sigstack (__NR_SYSV + 171)
+#define __NR_SYSV_sigaltstack (__NR_SYSV + 172)
+#define __NR_SYSV_sigsendset (__NR_SYSV + 173)
+#define __NR_SYSV_statvfs (__NR_SYSV + 174)
+#define __NR_SYSV_fstatvfs (__NR_SYSV + 175)
+#define __NR_SYSV_getpmsg (__NR_SYSV + 176)
+#define __NR_SYSV_putpmsg (__NR_SYSV + 177)
+#define __NR_SYSV_lchown (__NR_SYSV + 178)
+#define __NR_SYSV_priocntl (__NR_SYSV + 179)
+#define __NR_SYSV_ksigqueue (__NR_SYSV + 180)
+#define __NR_SYSV_readv (__NR_SYSV + 181)
+#define __NR_SYSV_writev (__NR_SYSV + 182)
+#define __NR_SYSV_truncate64 (__NR_SYSV + 183)
+#define __NR_SYSV_ftruncate64 (__NR_SYSV + 184)
+#define __NR_SYSV_mmap64 (__NR_SYSV + 185)
+#define __NR_SYSV_dmi (__NR_SYSV + 186)
+#define __NR_SYSV_pread (__NR_SYSV + 187)
+#define __NR_SYSV_pwrite (__NR_SYSV + 188)
+
+/*
+ * BSD 4.3 syscalls are in the range from 2000 to 2999
+ */
+#define __NR_BSD43 2000
+#define __NR_BSD43_syscall (__NR_BSD43 + 0)
+#define __NR_BSD43_exit (__NR_BSD43 + 1)
+#define __NR_BSD43_fork (__NR_BSD43 + 2)
+#define __NR_BSD43_read (__NR_BSD43 + 3)
+#define __NR_BSD43_write (__NR_BSD43 + 4)
+#define __NR_BSD43_open (__NR_BSD43 + 5)
+#define __NR_BSD43_close (__NR_BSD43 + 6)
+#define __NR_BSD43_wait (__NR_BSD43 + 7)
+#define __NR_BSD43_creat (__NR_BSD43 + 8)
+#define __NR_BSD43_link (__NR_BSD43 + 9)
+#define __NR_BSD43_unlink (__NR_BSD43 + 10)
+#define __NR_BSD43_exec (__NR_BSD43 + 11)
+#define __NR_BSD43_chdir (__NR_BSD43 + 12)
+#define __NR_BSD43_time (__NR_BSD43 + 13)
+#define __NR_BSD43_mknod (__NR_BSD43 + 14)
+#define __NR_BSD43_chmod (__NR_BSD43 + 15)
+#define __NR_BSD43_chown (__NR_BSD43 + 16)
+#define __NR_BSD43_sbreak (__NR_BSD43 + 17)
+#define __NR_BSD43_oldstat (__NR_BSD43 + 18)
+#define __NR_BSD43_lseek (__NR_BSD43 + 19)
+#define __NR_BSD43_getpid (__NR_BSD43 + 20)
+#define __NR_BSD43_oldmount (__NR_BSD43 + 21)
+#define __NR_BSD43_umount (__NR_BSD43 + 22)
+#define __NR_BSD43_setuid (__NR_BSD43 + 23)
+#define __NR_BSD43_getuid (__NR_BSD43 + 24)
+#define __NR_BSD43_stime (__NR_BSD43 + 25)
+#define __NR_BSD43_ptrace (__NR_BSD43 + 26)
+#define __NR_BSD43_alarm (__NR_BSD43 + 27)
+#define __NR_BSD43_oldfstat (__NR_BSD43 + 28)
+#define __NR_BSD43_pause (__NR_BSD43 + 29)
+#define __NR_BSD43_utime (__NR_BSD43 + 30)
+#define __NR_BSD43_stty (__NR_BSD43 + 31)
+#define __NR_BSD43_gtty (__NR_BSD43 + 32)
+#define __NR_BSD43_access (__NR_BSD43 + 33)
+#define __NR_BSD43_nice (__NR_BSD43 + 34)
+#define __NR_BSD43_ftime (__NR_BSD43 + 35)
+#define __NR_BSD43_sync (__NR_BSD43 + 36)
+#define __NR_BSD43_kill (__NR_BSD43 + 37)
+#define __NR_BSD43_stat (__NR_BSD43 + 38)
+#define __NR_BSD43_oldsetpgrp (__NR_BSD43 + 39)
+#define __NR_BSD43_lstat (__NR_BSD43 + 40)
+#define __NR_BSD43_dup (__NR_BSD43 + 41)
+#define __NR_BSD43_pipe (__NR_BSD43 + 42)
+#define __NR_BSD43_times (__NR_BSD43 + 43)
+#define __NR_BSD43_profil (__NR_BSD43 + 44)
+#define __NR_BSD43_msgsys (__NR_BSD43 + 45)
+#define __NR_BSD43_setgid (__NR_BSD43 + 46)
+#define __NR_BSD43_getgid (__NR_BSD43 + 47)
+#define __NR_BSD43_ssig (__NR_BSD43 + 48)
+#define __NR_BSD43_reserved1 (__NR_BSD43 + 49)
+#define __NR_BSD43_reserved2 (__NR_BSD43 + 50)
+#define __NR_BSD43_sysacct (__NR_BSD43 + 51)
+#define __NR_BSD43_phys (__NR_BSD43 + 52)
+#define __NR_BSD43_lock (__NR_BSD43 + 53)
+#define __NR_BSD43_ioctl (__NR_BSD43 + 54)
+#define __NR_BSD43_reboot (__NR_BSD43 + 55)
+#define __NR_BSD43_mpxchan (__NR_BSD43 + 56)
+#define __NR_BSD43_symlink (__NR_BSD43 + 57)
+#define __NR_BSD43_readlink (__NR_BSD43 + 58)
+#define __NR_BSD43_execve (__NR_BSD43 + 59)
+#define __NR_BSD43_umask (__NR_BSD43 + 60)
+#define __NR_BSD43_chroot (__NR_BSD43 + 61)
+#define __NR_BSD43_fstat (__NR_BSD43 + 62)
+#define __NR_BSD43_reserved3 (__NR_BSD43 + 63)
+#define __NR_BSD43_getpagesize (__NR_BSD43 + 64)
+#define __NR_BSD43_mremap (__NR_BSD43 + 65)
+#define __NR_BSD43_vfork (__NR_BSD43 + 66)
+#define __NR_BSD43_vread (__NR_BSD43 + 67)
+#define __NR_BSD43_vwrite (__NR_BSD43 + 68)
+#define __NR_BSD43_sbrk (__NR_BSD43 + 69)
+#define __NR_BSD43_sstk (__NR_BSD43 + 70)
+#define __NR_BSD43_mmap (__NR_BSD43 + 71)
+#define __NR_BSD43_vadvise (__NR_BSD43 + 72)
+#define __NR_BSD43_munmap (__NR_BSD43 + 73)
+#define __NR_BSD43_mprotect (__NR_BSD43 + 74)
+#define __NR_BSD43_madvise (__NR_BSD43 + 75)
+#define __NR_BSD43_vhangup (__NR_BSD43 + 76)
+#define __NR_BSD43_vlimit (__NR_BSD43 + 77)
+#define __NR_BSD43_mincore (__NR_BSD43 + 78)
+#define __NR_BSD43_getgroups (__NR_BSD43 + 79)
+#define __NR_BSD43_setgroups (__NR_BSD43 + 80)
+#define __NR_BSD43_getpgrp (__NR_BSD43 + 81)
+#define __NR_BSD43_setpgrp (__NR_BSD43 + 82)
+#define __NR_BSD43_setitimer (__NR_BSD43 + 83)
+#define __NR_BSD43_wait3 (__NR_BSD43 + 84)
+#define __NR_BSD43_swapon (__NR_BSD43 + 85)
+#define __NR_BSD43_getitimer (__NR_BSD43 + 86)
+#define __NR_BSD43_gethostname (__NR_BSD43 + 87)
+#define __NR_BSD43_sethostname (__NR_BSD43 + 88)
+#define __NR_BSD43_getdtablesize (__NR_BSD43 + 89)
+#define __NR_BSD43_dup2 (__NR_BSD43 + 90)
+#define __NR_BSD43_getdopt (__NR_BSD43 + 91)
+#define __NR_BSD43_fcntl (__NR_BSD43 + 92)
+#define __NR_BSD43_select (__NR_BSD43 + 93)
+#define __NR_BSD43_setdopt (__NR_BSD43 + 94)
+#define __NR_BSD43_fsync (__NR_BSD43 + 95)
+#define __NR_BSD43_setpriority (__NR_BSD43 + 96)
+#define __NR_BSD43_socket (__NR_BSD43 + 97)
+#define __NR_BSD43_connect (__NR_BSD43 + 98)
+#define __NR_BSD43_oldaccept (__NR_BSD43 + 99)
+#define __NR_BSD43_getpriority (__NR_BSD43 + 100)
+#define __NR_BSD43_send (__NR_BSD43 + 101)
+#define __NR_BSD43_recv (__NR_BSD43 + 102)
+#define __NR_BSD43_sigreturn (__NR_BSD43 + 103)
+#define __NR_BSD43_bind (__NR_BSD43 + 104)
+#define __NR_BSD43_setsockopt (__NR_BSD43 + 105)
+#define __NR_BSD43_listen (__NR_BSD43 + 106)
+#define __NR_BSD43_vtimes (__NR_BSD43 + 107)
+#define __NR_BSD43_sigvec (__NR_BSD43 + 108)
+#define __NR_BSD43_sigblock (__NR_BSD43 + 109)
+#define __NR_BSD43_sigsetmask (__NR_BSD43 + 110)
+#define __NR_BSD43_sigpause (__NR_BSD43 + 111)
+#define __NR_BSD43_sigstack (__NR_BSD43 + 112)
+#define __NR_BSD43_oldrecvmsg (__NR_BSD43 + 113)
+#define __NR_BSD43_oldsendmsg (__NR_BSD43 + 114)
+#define __NR_BSD43_vtrace (__NR_BSD43 + 115)
+#define __NR_BSD43_gettimeofday (__NR_BSD43 + 116)
+#define __NR_BSD43_getrusage (__NR_BSD43 + 117)
+#define __NR_BSD43_getsockopt (__NR_BSD43 + 118)
+#define __NR_BSD43_reserved4 (__NR_BSD43 + 119)
+#define __NR_BSD43_readv (__NR_BSD43 + 120)
+#define __NR_BSD43_writev (__NR_BSD43 + 121)
+#define __NR_BSD43_settimeofday (__NR_BSD43 + 122)
+#define __NR_BSD43_fchown (__NR_BSD43 + 123)
+#define __NR_BSD43_fchmod (__NR_BSD43 + 124)
+#define __NR_BSD43_oldrecvfrom (__NR_BSD43 + 125)
+#define __NR_BSD43_setreuid (__NR_BSD43 + 126)
+#define __NR_BSD43_setregid (__NR_BSD43 + 127)
+#define __NR_BSD43_rename (__NR_BSD43 + 128)
+#define __NR_BSD43_truncate (__NR_BSD43 + 129)
+#define __NR_BSD43_ftruncate (__NR_BSD43 + 130)
+#define __NR_BSD43_flock (__NR_BSD43 + 131)
+#define __NR_BSD43_semsys (__NR_BSD43 + 132)
+#define __NR_BSD43_sendto (__NR_BSD43 + 133)
+#define __NR_BSD43_shutdown (__NR_BSD43 + 134)
+#define __NR_BSD43_socketpair (__NR_BSD43 + 135)
+#define __NR_BSD43_mkdir (__NR_BSD43 + 136)
+#define __NR_BSD43_rmdir (__NR_BSD43 + 137)
+#define __NR_BSD43_utimes (__NR_BSD43 + 138)
+#define __NR_BSD43_sigcleanup (__NR_BSD43 + 139)
+#define __NR_BSD43_adjtime (__NR_BSD43 + 140)
+#define __NR_BSD43_oldgetpeername (__NR_BSD43 + 141)
+#define __NR_BSD43_gethostid (__NR_BSD43 + 142)
+#define __NR_BSD43_sethostid (__NR_BSD43 + 143)
+#define __NR_BSD43_getrlimit (__NR_BSD43 + 144)
+#define __NR_BSD43_setrlimit (__NR_BSD43 + 145)
+#define __NR_BSD43_killpg (__NR_BSD43 + 146)
+#define __NR_BSD43_shmsys (__NR_BSD43 + 147)
+#define __NR_BSD43_quota (__NR_BSD43 + 148)
+#define __NR_BSD43_qquota (__NR_BSD43 + 149)
+#define __NR_BSD43_oldgetsockname (__NR_BSD43 + 150)
+#define __NR_BSD43_sysmips (__NR_BSD43 + 151)
+#define __NR_BSD43_cacheflush (__NR_BSD43 + 152)
+#define __NR_BSD43_cachectl (__NR_BSD43 + 153)
+#define __NR_BSD43_debug (__NR_BSD43 + 154)
+#define __NR_BSD43_reserved5 (__NR_BSD43 + 155)
+#define __NR_BSD43_reserved6 (__NR_BSD43 + 156)
+#define __NR_BSD43_nfs_mount (__NR_BSD43 + 157)
+#define __NR_BSD43_nfs_svc (__NR_BSD43 + 158)
+#define __NR_BSD43_getdirentries (__NR_BSD43 + 159)
+#define __NR_BSD43_statfs (__NR_BSD43 + 160)
+#define __NR_BSD43_fstatfs (__NR_BSD43 + 161)
+#define __NR_BSD43_unmount (__NR_BSD43 + 162)
+#define __NR_BSD43_async_daemon (__NR_BSD43 + 163)
+#define __NR_BSD43_nfs_getfh (__NR_BSD43 + 164)
+#define __NR_BSD43_getdomainname (__NR_BSD43 + 165)
+#define __NR_BSD43_setdomainname (__NR_BSD43 + 166)
+#define __NR_BSD43_pcfs_mount (__NR_BSD43 + 167)
+#define __NR_BSD43_quotactl (__NR_BSD43 + 168)
+#define __NR_BSD43_oldexportfs (__NR_BSD43 + 169)
+#define __NR_BSD43_smount (__NR_BSD43 + 170)
+#define __NR_BSD43_mipshwconf (__NR_BSD43 + 171)
+#define __NR_BSD43_exportfs (__NR_BSD43 + 172)
+#define __NR_BSD43_nfsfh_open (__NR_BSD43 + 173)
+#define __NR_BSD43_libattach (__NR_BSD43 + 174)
+#define __NR_BSD43_libdetach (__NR_BSD43 + 175)
+#define __NR_BSD43_accept (__NR_BSD43 + 176)
+#define __NR_BSD43_reserved7 (__NR_BSD43 + 177)
+#define __NR_BSD43_reserved8 (__NR_BSD43 + 178)
+#define __NR_BSD43_recvmsg (__NR_BSD43 + 179)
+#define __NR_BSD43_recvfrom (__NR_BSD43 + 180)
+#define __NR_BSD43_sendmsg (__NR_BSD43 + 181)
+#define __NR_BSD43_getpeername (__NR_BSD43 + 182)
+#define __NR_BSD43_getsockname (__NR_BSD43 + 183)
+#define __NR_BSD43_aread (__NR_BSD43 + 184)
+#define __NR_BSD43_awrite (__NR_BSD43 + 185)
+#define __NR_BSD43_listio (__NR_BSD43 + 186)
+#define __NR_BSD43_acancel (__NR_BSD43 + 187)
+#define __NR_BSD43_astatus (__NR_BSD43 + 188)
+#define __NR_BSD43_await (__NR_BSD43 + 189)
+#define __NR_BSD43_areadv (__NR_BSD43 + 190)
+#define __NR_BSD43_awritev (__NR_BSD43 + 191)
+
+/*
+ * POSIX syscalls are in the range from 3000 to 3999
+ */
+#define __NR_POSIX 3000
+#define __NR_POSIX_syscall (__NR_POSIX + 0)
+#define __NR_POSIX_exit (__NR_POSIX + 1)
+#define __NR_POSIX_fork (__NR_POSIX + 2)
+#define __NR_POSIX_read (__NR_POSIX + 3)
+#define __NR_POSIX_write (__NR_POSIX + 4)
+#define __NR_POSIX_open (__NR_POSIX + 5)
+#define __NR_POSIX_close (__NR_POSIX + 6)
+#define __NR_POSIX_wait (__NR_POSIX + 7)
+#define __NR_POSIX_creat (__NR_POSIX + 8)
+#define __NR_POSIX_link (__NR_POSIX + 9)
+#define __NR_POSIX_unlink (__NR_POSIX + 10)
+#define __NR_POSIX_exec (__NR_POSIX + 11)
+#define __NR_POSIX_chdir (__NR_POSIX + 12)
+#define __NR_POSIX_gtime (__NR_POSIX + 13)
+#define __NR_POSIX_mknod (__NR_POSIX + 14)
+#define __NR_POSIX_chmod (__NR_POSIX + 15)
+#define __NR_POSIX_chown (__NR_POSIX + 16)
+#define __NR_POSIX_sbreak (__NR_POSIX + 17)
+#define __NR_POSIX_stat (__NR_POSIX + 18)
+#define __NR_POSIX_lseek (__NR_POSIX + 19)
+#define __NR_POSIX_getpid (__NR_POSIX + 20)
+#define __NR_POSIX_mount (__NR_POSIX + 21)
+#define __NR_POSIX_umount (__NR_POSIX + 22)
+#define __NR_POSIX_setuid (__NR_POSIX + 23)
+#define __NR_POSIX_getuid (__NR_POSIX + 24)
+#define __NR_POSIX_stime (__NR_POSIX + 25)
+#define __NR_POSIX_ptrace (__NR_POSIX + 26)
+#define __NR_POSIX_alarm (__NR_POSIX + 27)
+#define __NR_POSIX_fstat (__NR_POSIX + 28)
+#define __NR_POSIX_pause (__NR_POSIX + 29)
+#define __NR_POSIX_utime (__NR_POSIX + 30)
+#define __NR_POSIX_stty (__NR_POSIX + 31)
+#define __NR_POSIX_gtty (__NR_POSIX + 32)
+#define __NR_POSIX_access (__NR_POSIX + 33)
+#define __NR_POSIX_nice (__NR_POSIX + 34)
+#define __NR_POSIX_statfs (__NR_POSIX + 35)
+#define __NR_POSIX_sync (__NR_POSIX + 36)
+#define __NR_POSIX_kill (__NR_POSIX + 37)
+#define __NR_POSIX_fstatfs (__NR_POSIX + 38)
+#define __NR_POSIX_getpgrp (__NR_POSIX + 39)
+#define __NR_POSIX_syssgi (__NR_POSIX + 40)
+#define __NR_POSIX_dup (__NR_POSIX + 41)
+#define __NR_POSIX_pipe (__NR_POSIX + 42)
+#define __NR_POSIX_times (__NR_POSIX + 43)
+#define __NR_POSIX_profil (__NR_POSIX + 44)
+#define __NR_POSIX_lock (__NR_POSIX + 45)
+#define __NR_POSIX_setgid (__NR_POSIX + 46)
+#define __NR_POSIX_getgid (__NR_POSIX + 47)
+#define __NR_POSIX_sig (__NR_POSIX + 48)
+#define __NR_POSIX_msgsys (__NR_POSIX + 49)
+#define __NR_POSIX_sysmips (__NR_POSIX + 50)
+#define __NR_POSIX_sysacct (__NR_POSIX + 51)
+#define __NR_POSIX_shmsys (__NR_POSIX + 52)
+#define __NR_POSIX_semsys (__NR_POSIX + 53)
+#define __NR_POSIX_ioctl (__NR_POSIX + 54)
+#define __NR_POSIX_uadmin (__NR_POSIX + 55)
+#define __NR_POSIX_exch (__NR_POSIX + 56)
+#define __NR_POSIX_utssys (__NR_POSIX + 57)
+#define __NR_POSIX_USG_reserved1 (__NR_POSIX + 58)
+#define __NR_POSIX_exece (__NR_POSIX + 59)
+#define __NR_POSIX_umask (__NR_POSIX + 60)
+#define __NR_POSIX_chroot (__NR_POSIX + 61)
+#define __NR_POSIX_fcntl (__NR_POSIX + 62)
+#define __NR_POSIX_ulimit (__NR_POSIX + 63)
+#define __NR_POSIX_SAFARI4_reserved1 (__NR_POSIX + 64)
+#define __NR_POSIX_SAFARI4_reserved2 (__NR_POSIX + 65)
+#define __NR_POSIX_SAFARI4_reserved3 (__NR_POSIX + 66)
+#define __NR_POSIX_SAFARI4_reserved4 (__NR_POSIX + 67)
+#define __NR_POSIX_SAFARI4_reserved5 (__NR_POSIX + 68)
+#define __NR_POSIX_SAFARI4_reserved6 (__NR_POSIX + 69)
+#define __NR_POSIX_advfs (__NR_POSIX + 70)
+#define __NR_POSIX_unadvfs (__NR_POSIX + 71)
+#define __NR_POSIX_rmount (__NR_POSIX + 72)
+#define __NR_POSIX_rumount (__NR_POSIX + 73)
+#define __NR_POSIX_rfstart (__NR_POSIX + 74)
+#define __NR_POSIX_reserved1 (__NR_POSIX + 75)
+#define __NR_POSIX_rdebug (__NR_POSIX + 76)
+#define __NR_POSIX_rfstop (__NR_POSIX + 77)
+#define __NR_POSIX_rfsys (__NR_POSIX + 78)
+#define __NR_POSIX_rmdir (__NR_POSIX + 79)
+#define __NR_POSIX_mkdir (__NR_POSIX + 80)
+#define __NR_POSIX_getdents (__NR_POSIX + 81)
+#define __NR_POSIX_sginap (__NR_POSIX + 82)
+#define __NR_POSIX_sgikopt (__NR_POSIX + 83)
+#define __NR_POSIX_sysfs (__NR_POSIX + 84)
+#define __NR_POSIX_getmsg (__NR_POSIX + 85)
+#define __NR_POSIX_putmsg (__NR_POSIX + 86)
+#define __NR_POSIX_poll (__NR_POSIX + 87)
+#define __NR_POSIX_sigreturn (__NR_POSIX + 88)
+#define __NR_POSIX_accept (__NR_POSIX + 89)
+#define __NR_POSIX_bind (__NR_POSIX + 90)
+#define __NR_POSIX_connect (__NR_POSIX + 91)
+#define __NR_POSIX_gethostid (__NR_POSIX + 92)
+#define __NR_POSIX_getpeername (__NR_POSIX + 93)
+#define __NR_POSIX_getsockname (__NR_POSIX + 94)
+#define __NR_POSIX_getsockopt (__NR_POSIX + 95)
+#define __NR_POSIX_listen (__NR_POSIX + 96)
+#define __NR_POSIX_recv (__NR_POSIX + 97)
+#define __NR_POSIX_recvfrom (__NR_POSIX + 98)
+#define __NR_POSIX_recvmsg (__NR_POSIX + 99)
+#define __NR_POSIX_select (__NR_POSIX + 100)
+#define __NR_POSIX_send (__NR_POSIX + 101)
+#define __NR_POSIX_sendmsg (__NR_POSIX + 102)
+#define __NR_POSIX_sendto (__NR_POSIX + 103)
+#define __NR_POSIX_sethostid (__NR_POSIX + 104)
+#define __NR_POSIX_setsockopt (__NR_POSIX + 105)
+#define __NR_POSIX_shutdown (__NR_POSIX + 106)
+#define __NR_POSIX_socket (__NR_POSIX + 107)
+#define __NR_POSIX_gethostname (__NR_POSIX + 108)
+#define __NR_POSIX_sethostname (__NR_POSIX + 109)
+#define __NR_POSIX_getdomainname (__NR_POSIX + 110)
+#define __NR_POSIX_setdomainname (__NR_POSIX + 111)
+#define __NR_POSIX_truncate (__NR_POSIX + 112)
+#define __NR_POSIX_ftruncate (__NR_POSIX + 113)
+#define __NR_POSIX_rename (__NR_POSIX + 114)
+#define __NR_POSIX_symlink (__NR_POSIX + 115)
+#define __NR_POSIX_readlink (__NR_POSIX + 116)
+#define __NR_POSIX_lstat (__NR_POSIX + 117)
+#define __NR_POSIX_nfs_mount (__NR_POSIX + 118)
+#define __NR_POSIX_nfs_svc (__NR_POSIX + 119)
+#define __NR_POSIX_nfs_getfh (__NR_POSIX + 120)
+#define __NR_POSIX_async_daemon (__NR_POSIX + 121)
+#define __NR_POSIX_exportfs (__NR_POSIX + 122)
+#define __NR_POSIX_SGI_setregid (__NR_POSIX + 123)
+#define __NR_POSIX_SGI_setreuid (__NR_POSIX + 124)
+#define __NR_POSIX_getitimer (__NR_POSIX + 125)
+#define __NR_POSIX_setitimer (__NR_POSIX + 126)
+#define __NR_POSIX_adjtime (__NR_POSIX + 127)
+#define __NR_POSIX_SGI_bsdgettime (__NR_POSIX + 128)
+#define __NR_POSIX_SGI_sproc (__NR_POSIX + 129)
+#define __NR_POSIX_SGI_prctl (__NR_POSIX + 130)
+#define __NR_POSIX_SGI_blkproc (__NR_POSIX + 131)
+#define __NR_POSIX_SGI_reserved1 (__NR_POSIX + 132)
+#define __NR_POSIX_SGI_sgigsc (__NR_POSIX + 133)
+#define __NR_POSIX_SGI_mmap (__NR_POSIX + 134)
+#define __NR_POSIX_SGI_munmap (__NR_POSIX + 135)
+#define __NR_POSIX_SGI_mprotect (__NR_POSIX + 136)
+#define __NR_POSIX_SGI_msync (__NR_POSIX + 137)
+#define __NR_POSIX_SGI_madvise (__NR_POSIX + 138)
+#define __NR_POSIX_SGI_mpin (__NR_POSIX + 139)
+#define __NR_POSIX_SGI_getpagesize (__NR_POSIX + 140)
+#define __NR_POSIX_SGI_libattach (__NR_POSIX + 141)
+#define __NR_POSIX_SGI_libdetach (__NR_POSIX + 142)
+#define __NR_POSIX_SGI_getpgrp (__NR_POSIX + 143)
+#define __NR_POSIX_SGI_setpgrp (__NR_POSIX + 144)
+#define __NR_POSIX_SGI_reserved2 (__NR_POSIX + 145)
+#define __NR_POSIX_SGI_reserved3 (__NR_POSIX + 146)
+#define __NR_POSIX_SGI_reserved4 (__NR_POSIX + 147)
+#define __NR_POSIX_SGI_reserved5 (__NR_POSIX + 148)
+#define __NR_POSIX_SGI_reserved6 (__NR_POSIX + 149)
+#define __NR_POSIX_cacheflush (__NR_POSIX + 150)
+#define __NR_POSIX_cachectl (__NR_POSIX + 151)
+#define __NR_POSIX_fchown (__NR_POSIX + 152)
+#define __NR_POSIX_fchmod (__NR_POSIX + 153)
+#define __NR_POSIX_wait3 (__NR_POSIX + 154)
+#define __NR_POSIX_mmap (__NR_POSIX + 155)
+#define __NR_POSIX_munmap (__NR_POSIX + 156)
+#define __NR_POSIX_madvise (__NR_POSIX + 157)
+#define __NR_POSIX_BSD_getpagesize (__NR_POSIX + 158)
+#define __NR_POSIX_setreuid (__NR_POSIX + 159)
+#define __NR_POSIX_setregid (__NR_POSIX + 160)
+#define __NR_POSIX_setpgid (__NR_POSIX + 161)
+#define __NR_POSIX_getgroups (__NR_POSIX + 162)
+#define __NR_POSIX_setgroups (__NR_POSIX + 163)
+#define __NR_POSIX_gettimeofday (__NR_POSIX + 164)
+#define __NR_POSIX_getrusage (__NR_POSIX + 165)
+#define __NR_POSIX_getrlimit (__NR_POSIX + 166)
+#define __NR_POSIX_setrlimit (__NR_POSIX + 167)
+#define __NR_POSIX_waitpid (__NR_POSIX + 168)
+#define __NR_POSIX_dup2 (__NR_POSIX + 169)
+#define __NR_POSIX_reserved2 (__NR_POSIX + 170)
+#define __NR_POSIX_reserved3 (__NR_POSIX + 171)
+#define __NR_POSIX_reserved4 (__NR_POSIX + 172)
+#define __NR_POSIX_reserved5 (__NR_POSIX + 173)
+#define __NR_POSIX_reserved6 (__NR_POSIX + 174)
+#define __NR_POSIX_reserved7 (__NR_POSIX + 175)
+#define __NR_POSIX_reserved8 (__NR_POSIX + 176)
+#define __NR_POSIX_reserved9 (__NR_POSIX + 177)
+#define __NR_POSIX_reserved10 (__NR_POSIX + 178)
+#define __NR_POSIX_reserved11 (__NR_POSIX + 179)
+#define __NR_POSIX_reserved12 (__NR_POSIX + 180)
+#define __NR_POSIX_reserved13 (__NR_POSIX + 181)
+#define __NR_POSIX_reserved14 (__NR_POSIX + 182)
+#define __NR_POSIX_reserved15 (__NR_POSIX + 183)
+#define __NR_POSIX_reserved16 (__NR_POSIX + 184)
+#define __NR_POSIX_reserved17 (__NR_POSIX + 185)
+#define __NR_POSIX_reserved18 (__NR_POSIX + 186)
+#define __NR_POSIX_reserved19 (__NR_POSIX + 187)
+#define __NR_POSIX_reserved20 (__NR_POSIX + 188)
+#define __NR_POSIX_reserved21 (__NR_POSIX + 189)
+#define __NR_POSIX_reserved22 (__NR_POSIX + 190)
+#define __NR_POSIX_reserved23 (__NR_POSIX + 191)
+#define __NR_POSIX_reserved24 (__NR_POSIX + 192)
+#define __NR_POSIX_reserved25 (__NR_POSIX + 193)
+#define __NR_POSIX_reserved26 (__NR_POSIX + 194)
+#define __NR_POSIX_reserved27 (__NR_POSIX + 195)
+#define __NR_POSIX_reserved28 (__NR_POSIX + 196)
+#define __NR_POSIX_reserved29 (__NR_POSIX + 197)
+#define __NR_POSIX_reserved30 (__NR_POSIX + 198)
+#define __NR_POSIX_reserved31 (__NR_POSIX + 199)
+#define __NR_POSIX_reserved32 (__NR_POSIX + 200)
+#define __NR_POSIX_reserved33 (__NR_POSIX + 201)
+#define __NR_POSIX_reserved34 (__NR_POSIX + 202)
+#define __NR_POSIX_reserved35 (__NR_POSIX + 203)
+#define __NR_POSIX_reserved36 (__NR_POSIX + 204)
+#define __NR_POSIX_reserved37 (__NR_POSIX + 205)
+#define __NR_POSIX_reserved38 (__NR_POSIX + 206)
+#define __NR_POSIX_reserved39 (__NR_POSIX + 207)
+#define __NR_POSIX_reserved40 (__NR_POSIX + 208)
+#define __NR_POSIX_reserved41 (__NR_POSIX + 209)
+#define __NR_POSIX_reserved42 (__NR_POSIX + 210)
+#define __NR_POSIX_reserved43 (__NR_POSIX + 211)
+#define __NR_POSIX_reserved44 (__NR_POSIX + 212)
+#define __NR_POSIX_reserved45 (__NR_POSIX + 213)
+#define __NR_POSIX_reserved46 (__NR_POSIX + 214)
+#define __NR_POSIX_reserved47 (__NR_POSIX + 215)
+#define __NR_POSIX_reserved48 (__NR_POSIX + 216)
+#define __NR_POSIX_reserved49 (__NR_POSIX + 217)
+#define __NR_POSIX_reserved50 (__NR_POSIX + 218)
+#define __NR_POSIX_reserved51 (__NR_POSIX + 219)
+#define __NR_POSIX_reserved52 (__NR_POSIX + 220)
+#define __NR_POSIX_reserved53 (__NR_POSIX + 221)
+#define __NR_POSIX_reserved54 (__NR_POSIX + 222)
+#define __NR_POSIX_reserved55 (__NR_POSIX + 223)
+#define __NR_POSIX_reserved56 (__NR_POSIX + 224)
+#define __NR_POSIX_reserved57 (__NR_POSIX + 225)
+#define __NR_POSIX_reserved58 (__NR_POSIX + 226)
+#define __NR_POSIX_reserved59 (__NR_POSIX + 227)
+#define __NR_POSIX_reserved60 (__NR_POSIX + 228)
+#define __NR_POSIX_reserved61 (__NR_POSIX + 229)
+#define __NR_POSIX_reserved62 (__NR_POSIX + 230)
+#define __NR_POSIX_reserved63 (__NR_POSIX + 231)
+#define __NR_POSIX_reserved64 (__NR_POSIX + 232)
+#define __NR_POSIX_reserved65 (__NR_POSIX + 233)
+#define __NR_POSIX_reserved66 (__NR_POSIX + 234)
+#define __NR_POSIX_reserved67 (__NR_POSIX + 235)
+#define __NR_POSIX_reserved68 (__NR_POSIX + 236)
+#define __NR_POSIX_reserved69 (__NR_POSIX + 237)
+#define __NR_POSIX_reserved70 (__NR_POSIX + 238)
+#define __NR_POSIX_reserved71 (__NR_POSIX + 239)
+#define __NR_POSIX_reserved72 (__NR_POSIX + 240)
+#define __NR_POSIX_reserved73 (__NR_POSIX + 241)
+#define __NR_POSIX_reserved74 (__NR_POSIX + 242)
+#define __NR_POSIX_reserved75 (__NR_POSIX + 243)
+#define __NR_POSIX_reserved76 (__NR_POSIX + 244)
+#define __NR_POSIX_reserved77 (__NR_POSIX + 245)
+#define __NR_POSIX_reserved78 (__NR_POSIX + 246)
+#define __NR_POSIX_reserved79 (__NR_POSIX + 247)
+#define __NR_POSIX_reserved80 (__NR_POSIX + 248)
+#define __NR_POSIX_reserved81 (__NR_POSIX + 249)
+#define __NR_POSIX_reserved82 (__NR_POSIX + 250)
+#define __NR_POSIX_reserved83 (__NR_POSIX + 251)
+#define __NR_POSIX_reserved84 (__NR_POSIX + 252)
+#define __NR_POSIX_reserved85 (__NR_POSIX + 253)
+#define __NR_POSIX_reserved86 (__NR_POSIX + 254)
+#define __NR_POSIX_reserved87 (__NR_POSIX + 255)
+#define __NR_POSIX_reserved88 (__NR_POSIX + 256)
+#define __NR_POSIX_reserved89 (__NR_POSIX + 257)
+#define __NR_POSIX_reserved90 (__NR_POSIX + 258)
+#define __NR_POSIX_reserved91 (__NR_POSIX + 259)
+#define __NR_POSIX_netboot (__NR_POSIX + 260)
+#define __NR_POSIX_netunboot (__NR_POSIX + 261)
+#define __NR_POSIX_rdump (__NR_POSIX + 262)
+#define __NR_POSIX_setsid (__NR_POSIX + 263)
+#define __NR_POSIX_getmaxsig (__NR_POSIX + 264)
+#define __NR_POSIX_sigpending (__NR_POSIX + 265)
+#define __NR_POSIX_sigprocmask (__NR_POSIX + 266)
+#define __NR_POSIX_sigsuspend (__NR_POSIX + 267)
+#define __NR_POSIX_sigaction (__NR_POSIX + 268)
+#define __NR_POSIX_MIPS_reserved1 (__NR_POSIX + 269)
+#define __NR_POSIX_MIPS_reserved2 (__NR_POSIX + 270)
+#define __NR_POSIX_MIPS_reserved3 (__NR_POSIX + 271)
+#define __NR_POSIX_MIPS_reserved4 (__NR_POSIX + 272)
+#define __NR_POSIX_MIPS_reserved5 (__NR_POSIX + 273)
+#define __NR_POSIX_MIPS_reserved6 (__NR_POSIX + 274)
+#define __NR_POSIX_MIPS_reserved7 (__NR_POSIX + 275)
+#define __NR_POSIX_MIPS_reserved8 (__NR_POSIX + 276)
+#define __NR_POSIX_MIPS_reserved9 (__NR_POSIX + 277)
+#define __NR_POSIX_MIPS_reserved10 (__NR_POSIX + 278)
+#define __NR_POSIX_MIPS_reserved11 (__NR_POSIX + 279)
+#define __NR_POSIX_TANDEM_reserved1 (__NR_POSIX + 280)
+#define __NR_POSIX_TANDEM_reserved2 (__NR_POSIX + 281)
+#define __NR_POSIX_TANDEM_reserved3 (__NR_POSIX + 282)
+#define __NR_POSIX_TANDEM_reserved4 (__NR_POSIX + 283)
+#define __NR_POSIX_TANDEM_reserved5 (__NR_POSIX + 284)
+#define __NR_POSIX_TANDEM_reserved6 (__NR_POSIX + 285)
+#define __NR_POSIX_TANDEM_reserved7 (__NR_POSIX + 286)
+#define __NR_POSIX_TANDEM_reserved8 (__NR_POSIX + 287)
+#define __NR_POSIX_TANDEM_reserved9 (__NR_POSIX + 288)
+#define __NR_POSIX_TANDEM_reserved10 (__NR_POSIX + 289)
+#define __NR_POSIX_TANDEM_reserved11 (__NR_POSIX + 290)
+#define __NR_POSIX_TANDEM_reserved12 (__NR_POSIX + 291)
+#define __NR_POSIX_TANDEM_reserved13 (__NR_POSIX + 292)
+#define __NR_POSIX_TANDEM_reserved14 (__NR_POSIX + 293)
+#define __NR_POSIX_TANDEM_reserved15 (__NR_POSIX + 294)
+#define __NR_POSIX_TANDEM_reserved16 (__NR_POSIX + 295)
+#define __NR_POSIX_TANDEM_reserved17 (__NR_POSIX + 296)
+#define __NR_POSIX_TANDEM_reserved18 (__NR_POSIX + 297)
+#define __NR_POSIX_TANDEM_reserved19 (__NR_POSIX + 298)
+#define __NR_POSIX_TANDEM_reserved20 (__NR_POSIX + 299)
+#define __NR_POSIX_SGI_reserved7 (__NR_POSIX + 300)
+#define __NR_POSIX_SGI_reserved8 (__NR_POSIX + 301)
+#define __NR_POSIX_SGI_reserved9 (__NR_POSIX + 302)
+#define __NR_POSIX_SGI_reserved10 (__NR_POSIX + 303)
+#define __NR_POSIX_SGI_reserved11 (__NR_POSIX + 304)
+#define __NR_POSIX_SGI_reserved12 (__NR_POSIX + 305)
+#define __NR_POSIX_SGI_reserved13 (__NR_POSIX + 306)
+#define __NR_POSIX_SGI_reserved14 (__NR_POSIX + 307)
+#define __NR_POSIX_SGI_reserved15 (__NR_POSIX + 308)
+#define __NR_POSIX_SGI_reserved16 (__NR_POSIX + 309)
+#define __NR_POSIX_SGI_reserved17 (__NR_POSIX + 310)
+#define __NR_POSIX_SGI_reserved18 (__NR_POSIX + 311)
+#define __NR_POSIX_SGI_reserved19 (__NR_POSIX + 312)
+#define __NR_POSIX_SGI_reserved20 (__NR_POSIX + 313)
+#define __NR_POSIX_SGI_reserved21 (__NR_POSIX + 314)
+#define __NR_POSIX_SGI_reserved22 (__NR_POSIX + 315)
+#define __NR_POSIX_SGI_reserved23 (__NR_POSIX + 316)
+#define __NR_POSIX_SGI_reserved24 (__NR_POSIX + 317)
+#define __NR_POSIX_SGI_reserved25 (__NR_POSIX + 318)
+#define __NR_POSIX_SGI_reserved26 (__NR_POSIX + 319)
+
+#endif /* _ASM_RISCOS_SYSCALL_H */
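The blocks above are consumed by the RISC/os personality dispatcher. A minimal sketch of turning a number from the POSIX block into a per-personality table index follows; only the 1000-wide block size comes from the comment above, the table name and bounds check are illustrative assumptions.

extern void *posix_call_table[];	/* hypothetical per-personality table */

static inline void *posix_syscall_entry(unsigned int nr)
{
	/* assumption: each personality owns a 1000-entry block */
	if (nr < __NR_POSIX || nr >= __NR_POSIX + 1000)
		return (void *) 0;	/* not a POSIX-personality call */

	return posix_call_table[nr - __NR_POSIX];
}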
diff --git a/include/asm-mips/semaphore-helper.h b/include/asm-mips/semaphore-helper.h
index b6eb8e4f997e..ad49f94f31b5 100644
--- a/include/asm-mips/semaphore-helper.h
+++ b/include/asm-mips/semaphore-helper.h
@@ -1,11 +1,11 @@
-/* $Id: semaphore-helper.h,v 1.6 1999/10/20 21:10:58 ralf Exp $
- *
+/*
* SMP- and interrupt-safe semaphores helper functions.
*
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- * (C) Copyright 1999 Ralf Baechle
- * (C) Copyright 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996 Linus Torvalds
+ * Copyright (C) 1999 Andrea Arcangeli
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
*/
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H
@@ -20,7 +20,7 @@ static inline void wake_one_more(struct semaphore * sem)
atomic_inc(&sem->waking);
}
-#if !defined(CONFIG_CPU_HAS_LLSC)
+#if !defined(CONFIG_CPU_HAS_LLSC) || defined(CONFIG_CPU_MIPS32)
/*
* It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
@@ -75,6 +75,7 @@ static inline int waking_non_zero_trylock(struct semaphore *sem)
ret = 0;
}
restore_flags(flags);
+
return ret;
}
@@ -92,7 +93,7 @@ waking_non_zero(struct semaphore *sem)
"sc\t%0, %2\n\t"
"beqz\t%0, 1b\n\t"
"2:"
- : "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
+ : "=r" (ret), "=r" (tmp), "=m" (sem->waking)
: "0"(0));
return ret;
@@ -133,29 +134,26 @@ waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
long ret, tmp;
- __asm__ __volatile__("
- .set push
- .set mips3
- .set noat
-0: lld %1, %2
- li %0, 0
- sll $1, %1, 0
- blez $1, 1f
- daddiu %1, %1, -1
- li %0, 1
- b 2f
-1:
- beqz %3, 2f
- li %0, %4
- dli $1, 0x0000000100000000
- daddu %1, %1, $1
-2:
- scd %1, %2
- beqz %1, 0b
-
- .set pop"
- : "=&r"(ret), "=&r"(tmp), "=m"(*sem)
- : "r"(signal_pending(tsk)), "i"(-EINTR));
+ __asm__ __volatile__(
+ ".set\tpush\n\t"
+ ".set\tmips3\n\t"
+ ".set\tnoat\n"
+ "0:\tlld\t%1, %2\n\t"
+ "li\t%0, 0\n\t"
+ "sll\t$1, %1, 0\n\t"
+ "blez\t$1, 1f\n\t"
+ "daddiu\t%1, %1, -1\n\t"
+ "li\t%0, 1\n\t"
+ "b\t2f\n"
+ "1:\tbeqz\t%3, 2f\n\t"
+ "li\t%0, %4\n\t"
+ "dli\t$1, 0x0000000100000000\n\t"
+ "daddu\t%1, %1, $1\n"
+ "2:\tscd\t%1, %2\n\t"
+ "beqz\t%1, 0b\n\t"
+ ".set\tpop"
+ : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
+ : "r" (signal_pending(tsk)), "i" (-EINTR));
return ret;
}
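For comparison, here is a rough C equivalent of the interrupt-disabling fallback used when the CPU lacks ll/sc; this is a sketch only, the in-tree version differs in detail.

static inline int waking_non_zero_sketch(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	save_and_cli(flags);			/* keep the check atomic on UP */
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 1;
	}
	restore_flags(flags);

	return ret;
}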
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index baffb07ba8dc..4897792a1e09 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,5 +1,4 @@
-/* $Id: semaphore.h,v 1.12 1999/12/08 22:05:10 harald Exp $
- *
+/*
* SMP- and interrupt-safe semaphores..
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +8,7 @@
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1998, 1999, 2000 Ralf Baechle
* (C) Copyright 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H
@@ -60,7 +60,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
atomic_set(&sem->waking, 0);
@@ -85,7 +85,7 @@ asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -94,7 +94,7 @@ extern inline void down(struct semaphore * sem)
__down(sem);
}
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
@@ -106,9 +106,9 @@ extern inline int down_interruptible(struct semaphore * sem)
return ret;
}
-#if !defined(CONFIG_CPU_HAS_LLSC)
+#if !defined(CONFIG_CPU_HAS_LLSC) || defined(CONFIG_CPU_MIPS32)
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
@@ -140,7 +140,7 @@ extern inline int down_trylock(struct semaphore * sem)
* }
* }
*/
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
{
long ret, tmp, tmp2, sub;
@@ -148,29 +148,25 @@ extern inline int down_trylock(struct semaphore * sem)
CHECK_MAGIC(sem->__magic);
#endif
- __asm__ __volatile__("
- .set mips3
-
- 0: lld %1, %4
- dli %3, 0x0000000100000000
- dsubu %1, %3
- li %0, 0
- bgez %1, 2f
- sll %2, %1, 0
- blez %2, 1f
- daddiu %1, %1, -1
- b 2f
- 1:
- daddu %1, %1, %3
- li %0, 1
- 2:
- scd %1, %4
- beqz %1, 0b
-
- .set mips0"
- : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
- : "m"(*sem)
- : "memory");
+ __asm__ __volatile__(
+ ".set\tmips3\t\t\t# down_trylock\n"
+ "0:\tlld\t%1, %4\n\t"
+ "dli\t%3, 0x0000000100000000\n\t"
+ "dsubu\t%1, %3\n\t"
+ "li\t%0, 0\n\t"
+ "bgez\t%1, 2f\n\t"
+ "sll\t%2, %1, 0\n\t"
+ "blez\t%2, 1f\n\t"
+ "daddiu\t%1, %1, -1\n\t"
+ "b\t2f\n\t"
+ "1:\tdaddu\t%1, %1, %3\n"
+ "li\t%0, 1\n"
+ "2:\tscd\t%1, %4\n\t"
+ "beqz\t%1, 0b\n\t"
+ ".set mips0"
+ : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
+ : "m"(*sem)
+ : "memory");
return ret;
}
@@ -181,7 +177,7 @@ extern inline int down_trylock(struct semaphore * sem)
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -190,174 +186,4 @@ extern inline void up(struct semaphore * sem)
__up(sem);
}
-/*
- * rw mutexes (should that be mutices? =) -- throw rw spinlocks and
- * semaphores together, and this is what we end up with...
- *
- * The lock is initialized to BIAS. This way, a writer subtracts BIAS ands
- * gets 0 for the case of an uncontended lock. Readers decrement by 1 and
- * see a positive value when uncontended, negative if there are writers
- * waiting (in which case it goes to sleep).
- *
- * The value 0x01000000 supports up to 128 processors and lots of processes.
- * BIAS must be chosen such that subtracting BIAS once per CPU will result
- * in the int remaining negative. In terms of fairness, this should result
- * in the lock flopping back and forth between readers and writers under
- * heavy use.
- *
- * Once we start supporting machines with more than 128 CPUs, we should go
- * for using a 64bit atomic type instead of 32bit as counter. We shall
- * probably go for bias 0x80000000 then, so that single sethi can set it.
- * */
-
-#define RW_LOCK_BIAS 0x01000000
-
-struct rw_semaphore {
- atomic_t count;
- /* bit 0 means read bias granted;
- bit 1 means write bias granted. */
- unsigned long granted; /* pedant: long req'd for set_bit */
- wait_queue_head_t wait;
- wait_queue_head_t write_bias_wait;
-#if WAITQUEUE_DEBUG
- long __magic;
- atomic_t readers;
- atomic_t writers;
-#endif
-};
-
-#if WAITQUEUE_DEBUG
-#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_INIT /* */
-#endif
-
-#define __RWSEM_INITIALIZER(name,count) \
- { ATOMIC_INIT(count), 0, \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
- __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
-
-#define __DECLARE_RWSEM_GENERIC(name,count) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
-
-#define DECLARE_RWSEM(name) \
- __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
-#define DECLARE_RWSEM_READ_LOCKED(name) \
- __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
-#define DECLARE_RWSEM_WRITE_LOCKED(name) \
- __DECLARE_RWSEM_GENERIC(name, 0)
-
-extern inline void init_rwsem(struct rw_semaphore *sem)
-{
- atomic_set(&sem->count, RW_LOCK_BIAS);
- sem->granted = 0;
- init_waitqueue_head(&sem->wait);
- init_waitqueue_head(&sem->write_bias_wait);
-#if WAITQUEUE_DEBUG
- sem->__magic = (long)&sem->__magic;
- atomic_set(&sem->readers, 0);
- atomic_set(&sem->writers, 0);
-#endif
-}
-
-/* The expensive part is outlined. */
-extern void __down_read(struct rw_semaphore *sem, int count);
-extern void __down_write(struct rw_semaphore *sem, int count);
-extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
-
-extern inline void down_read(struct rw_semaphore *sem)
-{
- int count;
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- count = atomic_dec_return(&sem->count);
- if (count < 0) {
- __down_read(sem, count);
- }
- mb();
-
-#if WAITQUEUE_DEBUG
- if (sem->granted & 2)
- BUG();
- if (atomic_read(&sem->writers))
- BUG();
- atomic_inc(&sem->readers);
-#endif
-}
-
-extern inline void down_write(struct rw_semaphore *sem)
-{
- int count;
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
- if (count) {
- __down_write(sem, count);
- }
- mb();
-
-#if WAITQUEUE_DEBUG
- if (atomic_read(&sem->writers))
- BUG();
- if (atomic_read(&sem->readers))
- BUG();
- if (sem->granted & 3)
- BUG();
- atomic_inc(&sem->writers);
-#endif
-}
-
-/* When a reader does a release, the only significant case is when
- there was a writer waiting, and we've bumped the count to 0: we must
- wake the writer up. */
-
-extern inline void up_read(struct rw_semaphore *sem)
-{
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
- if (sem->granted & 2)
- BUG();
- if (atomic_read(&sem->writers))
- BUG();
- atomic_dec(&sem->readers);
-#endif
-
- mb();
- if (atomic_inc_return(&sem->count) == 0)
- __rwsem_wake(sem, 0);
-}
-
-/*
- * Releasing the writer is easy -- just release it and wake up any sleepers.
- */
-extern inline void up_write(struct rw_semaphore *sem)
-{
- int count;
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
- if (sem->granted & 3)
- BUG();
- if (atomic_read(&sem->readers))
- BUG();
- if (atomic_read(&sem->writers) != 1)
- BUG();
- atomic_dec(&sem->writers);
-#endif
-
- mb();
- count = atomic_add_return(RW_LOCK_BIAS, &sem->count);
- if (count - RW_LOCK_BIAS < 0 && count >= 0) {
- /* Only do the wake if we're no longer negative. */
- __rwsem_wake(sem, count);
- }
-}
-
#endif /* _ASM_SEMAPHORE_H */
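Typical use of the API declared above, for illustration only; the lock name and the caller are made up.

static DECLARE_MUTEX(config_sem);		/* count initialised to 1 */

static int update_config(void)
{
	if (down_interruptible(&config_sem))
		return -EINTR;			/* interrupted by a signal */

	/* ... critical section ... */

	up(&config_sem);
	return 0;
}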
diff --git a/include/asm-mips/serial.h b/include/asm-mips/serial.h
index f255684b4e8b..346b321e4cfc 100644
--- a/include/asm-mips/serial.h
+++ b/include/asm-mips/serial.h
@@ -1,5 +1,4 @@
-/* $Id: serial.h,v 1.9 2000/02/16 01:45:55 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -76,6 +75,81 @@
#define JAZZ_SERIAL_PORT_DEFNS
#endif
+#ifdef CONFIG_MIPS_ATLAS
+#include <asm/mips-boards/atlas.h>
+#include <asm/mips-boards/atlasint.h>
+#define ATLAS_SERIAL_PORT_DEFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { 0, ATLAS_BASE_BAUD, ATLAS_UART_REGS_BASE, ATLASINT_UART, STD_COM_FLAGS }, /* ttyS0 */
+#else
+#define ATLAS_SERIAL_PORT_DEFNS
+#endif
+
+/*
+ * Both Galileo boards have the same UART mappings.
+ */
+#if defined (CONFIG_MIPS_EV96100) || defined (CONFIG_MIPS_EV64120)
+#include <asm/galileo-boards/ev96100.h>
+#include <asm/galileo-boards/ev96100int.h>
+#define EV96100_SERIAL_PORT_DEFNS \
+ { baud_base: EV96100_BASE_BAUD, port: EV96100_UART0_REGS_BASE, \
+ irq: EV96100INT_UART_0, flags: STD_COM_FLAGS, type: 0x3, \
+ iomem_base: EV96100_UART0_REGS_BASE }, \
+ { baud_base: EV96100_BASE_BAUD, port: EV96100_UART1_REGS_BASE, \
+ irq: EV96100INT_UART_0, flags: STD_COM_FLAGS, type: 0x3, \
+ iomem_base: EV96100_UART1_REGS_BASE },
+#else
+#define EV96100_SERIAL_PORT_DEFNS
+#endif
+
+#ifdef CONFIG_MIPS_ITE8172
+#include <asm/it8172/it8172.h>
+#include <asm/it8172/it8172_int.h>
+#include <asm/it8712.h>
+#define ITE_SERIAL_PORT_DEFNS \
+ { baud_base: BASE_BAUD, port: (IT8172_PCI_IO_BASE + IT_UART_BASE), \
+ irq: IT8172_UART_IRQ, flags: STD_COM_FLAGS, type: 0x3 }, \
+ { baud_base: (24000000/(16*13)), port: (IT8172_PCI_IO_BASE + IT8712_UART1_PORT), \
+ irq: IT8172_SERIRQ_4, flags: STD_COM_FLAGS, type: 0x3 }, \
+ /* Smart Card Reader 0 */ \
+ { baud_base: BASE_BAUD, port: (IT8172_PCI_IO_BASE + IT_SCR0_BASE), \
+ irq: IT8172_SCR0_IRQ, flags: STD_COM_FLAGS, type: 0x3 }, \
+ /* Smart Card Reader 1 */ \
+ { baud_base: BASE_BAUD, port: (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \
+ irq: IT8172_SCR1_IRQ, flags: STD_COM_FLAGS, type: 0x3 },
+#else
+#define ITE_SERIAL_PORT_DEFNS
+#endif
+
+#ifdef CONFIG_MIPS_IVR
+#include <asm/it8172/it8172.h>
+#include <asm/it8172/it8172_int.h>
+#define IVR_SERIAL_PORT_DEFNS \
+ { baud_base: BASE_BAUD, port: (IT8172_PCI_IO_BASE + IT_UART_BASE), \
+ irq: IT8172_UART_IRQ, flags: STD_COM_FLAGS, type: 0x3 }, \
+ /* Smart Card Reader 1 */ \
+ { baud_base: BASE_BAUD, port: (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \
+ irq: IT8172_SCR1_IRQ, flags: STD_COM_FLAGS, type: 0x3 },
+#else
+#define IVR_SERIAL_PORT_DEFNS
+#endif
+
+#ifdef CONFIG_AU1000_UART
+#include <asm/au1000.h>
+#define AU1000_SERIAL_PORT_DEFNS \
+ { baud_base: 0, port: UART0_ADDR, irq: AU1000_UART0_INT, \
+ flags: STD_COM_FLAGS, type: 1 }, \
+ { baud_base: 0, port: UART1_ADDR, irq: AU1000_UART1_INT, \
+ flags: STD_COM_FLAGS, type: 1 }, \
+ { baud_base: 0, port: UART2_ADDR, irq: AU1000_UART2_INT, \
+ flags: STD_COM_FLAGS, type: 1 }, \
+ { baud_base: 0, port: UART3_ADDR, irq: AU1000_UART3_INT, \
+ flags: STD_COM_FLAGS, type: 1 },
+#else
+#define AU1000_SERIAL_PORT_DEFNS
+#endif
+
+#ifdef CONFIG_HAVE_STD_PC_SERIAL_PORT
#define STD_SERIAL_PORT_DEFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
@@ -83,7 +157,6 @@
{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
-
#ifdef CONFIG_SERIAL_MANY_PORTS
#define EXTRA_SERIAL_PORT_DEFNS \
{ 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
@@ -96,8 +169,8 @@
{ 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \
{ 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \
{ 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \
- { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
- { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
{ 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \
{ 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \
{ 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \
@@ -114,9 +187,14 @@
{ 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \
{ 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \
{ 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */
-#else
+#else /* CONFIG_SERIAL_MANY_PORTS */
#define EXTRA_SERIAL_PORT_DEFNS
-#endif
+#endif /* CONFIG_SERIAL_MANY_PORTS */
+
+#else /* CONFIG_HAVE_STD_PC_SERIAL_PORT */
+#define STD_SERIAL_PORT_DEFNS
+#define EXTRA_SERIAL_PORT_DEFNS
+#endif /* CONFIG_HAVE_STD_PC_SERIAL_PORT */
/* You can have up to four HUB6's in the system, but I've only
* included two cards here for a total of twelve ports.
@@ -151,8 +229,44 @@
#define MCA_SERIAL_PORT_DFNS
#endif
+#ifdef CONFIG_MOMENCO_OCELOT
+/* Ordinary NS16552 duart with a 20MHz crystal. */
+#define OCELOT_BASE_BAUD ( 20000000 / 16 )
+
+#define OCELOT_SERIAL1_IRQ 4
+#define OCELOT_SERIAL1_BASE 0xe0001020
+
+#define _OCELOT_SERIAL_INIT(int, base) \
+ { baud_base: OCELOT_BASE_BAUD, irq: int, flags: STD_COM_FLAGS, \
+ iomem_base: (u8 *) base, iomem_reg_shift: 2, \
+ io_type: SERIAL_IO_MEM }
+#define MOMENCO_OCELOT_SERIAL_PORT_DEFNS \
+ _OCELOT_SERIAL_INIT(OCELOT_SERIAL1_IRQ, OCELOT_SERIAL1_BASE)
+#else
+#define MOMENCO_OCELOT_SERIAL_PORT_DEFNS
+#endif
+
+#ifdef CONFIG_DDB5477
+#define DDB5477_SERIAL_PORT_DEFNS \
+ { baud_base: BASE_BAUD, irq: 12, flags: STD_COM_FLAGS, \
+ iomem_base: (u8*)0xbfa04200, iomem_reg_shift: 3, \
+ io_type: SERIAL_IO_MEM},\
+ { baud_base: BASE_BAUD, irq: 28, flags: STD_COM_FLAGS, \
+ iomem_base: (u8*)0xbfa04240, iomem_reg_shift: 3, \
+ io_type: SERIAL_IO_MEM},
+#else
+#define DDB5477_SERIAL_PORT_DEFNS
+#endif
+
#define SERIAL_PORT_DFNS \
+ IVR_SERIAL_PORT_DEFNS \
+ ITE_SERIAL_PORT_DEFNS \
+ ATLAS_SERIAL_PORT_DEFNS \
+ EV96100_SERIAL_PORT_DEFNS \
JAZZ_SERIAL_PORT_DEFNS \
STD_SERIAL_PORT_DEFNS \
EXTRA_SERIAL_PORT_DEFNS \
- HUB6_SERIAL_PORT_DFNS
+ HUB6_SERIAL_PORT_DFNS \
+ MOMENCO_OCELOT_SERIAL_PORT_DEFNS\
+ AU1000_SERIAL_PORT_DEFNS \
+ DDB5477_SERIAL_PORT_DEFNS
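The generic serial driver picks these up by expanding SERIAL_PORT_DFNS into its port table, roughly as sketched below; the surrounding declaration is from memory, not from this patch.

/* drivers/char/serial.c, approximately: */
static struct serial_state rs_table[] = {
	SERIAL_PORT_DFNS	/* defined above in <asm/serial.h> */
};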
diff --git a/include/asm-mips/sgi/sgint23.h b/include/asm-mips/sgi/sgint23.h
index d851d2f3011a..0641c133c626 100644
--- a/include/asm-mips/sgi/sgint23.h
+++ b/include/asm-mips/sgi/sgint23.h
@@ -1,5 +1,4 @@
-/* $Id: sgint23.h,v 1.4 1999/09/28 21:02:12 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -7,6 +6,7 @@
* sgint23.h: Defines for the SGI INT2 and INT3 chipsets.
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1997, 98, 1999, 2000 Ralf Baechle
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu) - INT2 corrections
*/
#ifndef _ASM_SGI_SGINT23_H
@@ -25,20 +25,8 @@
#define SGINT_HPCDMA 41 /* INDY has 11 HPCDMA irq _sources_ */
#define SGINT_END 52 /* End of 'spaces' */
-/* Individual interrupt definitions for the INDY and Indigo2
- */
-
-#define SGI_WD93_0_IRQ SGINT_LOCAL0 + 1 /* 1st onboard WD93 */
-#define SGI_WD93_1_IRQ SGINT_LOCAL0 + 2 /* 2nd onboard WD93 */
-#define SGI_ENET_IRQ SGINT_LOCAL0 + 3 /* onboard ethernet */
-
-#define SGI_PANEL_IRQ SGINT_LOCAL1 + 1 /* front panel */
-
-#define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */
-#define SGI_KEYBOARD_IRQ SGINT_LOCAL2 + 4 /* keyboard */
-#define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */
-
-/* Individual interrupt definitions for the INDY and Indigo2
+/*
+ * Individual interrupt definitions for the INDY and Indigo2
*/
#define SGI_WD93_0_IRQ SGINT_LOCAL0 + 1 /* 1st onboard WD93 */
@@ -215,4 +203,4 @@ extern volatile unsigned char *ioc_tclear;
extern void sgint_init(void);
extern void indy_timer_init(void);
-#endif /* !(_ASM_SGINT23_H) */
+#endif /* _ASM_SGI_SGINT23_H */
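Illustrative only: a driver claiming one of the Indy interrupt lines defined above. The handler and its body are made up.

static void indy_duart_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... service the on-board duart ... */
}

static int __init indy_duart_attach(void)
{
	return request_irq(SGI_SERIAL_IRQ, indy_duart_intr, SA_INTERRUPT,
			   "duart", NULL);
}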
diff --git a/include/asm-mips/sgialib.h b/include/asm-mips/sgialib.h
index 8a9e2f49e1bc..f53389c5b9dd 100644
--- a/include/asm-mips/sgialib.h
+++ b/include/asm-mips/sgialib.h
@@ -1,9 +1,8 @@
-/* $Id: sgialib.h,v 1.5 2000/03/19 01:28:58 ralf Exp $
+/*
* sgialib.h: SGI ARCS firmware interface library for the Linux kernel.
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*/
-
#ifndef _ASM_SGIALIB_H
#define _ASM_SGIALIB_H
@@ -21,7 +20,7 @@ extern int prom_flags;
 * Init the PROM library and its internal data structures. Called
* at boot time from head.S before start_kernel is invoked.
*/
-extern int prom_init(int argc, char **argv, char **envp, int *prom_vec);
+extern void prom_init(int argc, char **argv, char **envp, int *prom_vec);
/* Simple char-by-char console I/O. */
extern void prom_putchar(char c);
@@ -30,28 +29,12 @@ extern char prom_getchar(void);
/* Generic printf() using ARCS console I/O. */
extern void prom_printf(char *fmt, ...);
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS 32
-struct prom_pmemblock {
- unsigned long base; /* Within KSEG0. */
- unsigned int size; /* In bytes. */
- unsigned int type; /* free or prom memory */
-};
-
-/* Get next memory descriptor after CURR, returns first descriptor
- * in chain is CURR is NULL.
- */
-extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
#define PROM_NULL_MDESC ((struct linux_mdesc *) 0)
/* Called by prom_init to setup the physical memory pmemblock
* array.
*/
extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
-
-/* Returns pointer to PROM physical memory block array. */
-extern struct prom_pmemblock *prom_getpblock_array(void);
/* PROM device tree library routines. */
#define PROM_NULL_COMPONENT ((pcomponent *) 0)
@@ -81,9 +64,9 @@ extern pcomponent *prom_componentbypath(char *path);
*/
extern void prom_identify_arch(void);
-/* Environemt variable routines. */
+/* Environment variable routines. */
extern PCHAR ArcGetEnvironmentVariable(CHAR *name);
-extern LONG SetEnvironmentVariable(PCHAR name, PCHAR value);
+extern LONG ArcSetEnvironmentVariable(PCHAR name, PCHAR value);
/* ARCS command line acquisition and parsing. */
extern char *prom_getcmdline(void);
@@ -115,7 +98,7 @@ extern void prom_halt(void) __attribute__((noreturn));
extern void prom_powerdown(void) __attribute__((noreturn));
extern void prom_restart(void) __attribute__((noreturn));
extern void prom_reboot(void) __attribute__((noreturn));
-extern void prom_imode(void) __attribute__((noreturn));
+extern void ArcEnterInteractiveMode(void) __attribute__((noreturn));
extern long prom_cfgsave(void);
extern struct linux_sysid *prom_getsysid(void);
extern void prom_cacheflush(void);
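Example use of the renamed ARC wrapper, for illustration; "ConsoleOut" is a variable name SGI PROMs typically provide, not something defined in this header.

static void __init show_arcs_console(void)
{
	PCHAR console = ArcGetEnvironmentVariable("ConsoleOut");

	if (console)
		prom_printf("ARCS console device: %s\n", console);
}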
diff --git a/include/asm-mips/shmbuf.h b/include/asm-mips/shmbuf.h
index 077cfced31bb..37274d6d1e74 100644
--- a/include/asm-mips/shmbuf.h
+++ b/include/asm-mips/shmbuf.h
@@ -7,7 +7,7 @@
* between kernel and user space.
*
* Pad space is left for:
- * - 2 miscellaneous 64-bit values
+ * - 2 miscellaneous 32-bit values
*/
struct shmid64_ds {
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 601826429bbe..1bfcf41e09c2 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -1,6 +1,37 @@
#ifndef __ASM_MIPS_SMP_H
#define __ASM_MIPS_SMP_H
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/current.h>
+
+
+/* Mappings are straight across.  If we want to add support for
+   disabling CPUs and such, we'll have to do what the mips64 port
+   does here. */
#define cpu_logical_map(cpu) (cpu)
+#define cpu_number_map(cpu) (cpu)
+
+#define smp_processor_id() (current->processor)
+
+
+/* I've no idea what the real meaning of this is */
+#define PROC_CHANGE_PENALTY 20
+
+#define NO_PROC_ID (-1)
+
+struct smp_fn_call_struct {
+ spinlock_t lock;
+ atomic_t finished;
+ void (*fn)(void *);
+ void *data;
+};
+
+extern struct smp_fn_call_struct smp_fn_call;
+#endif /* CONFIG_SMP */
#endif /* __ASM_MIPS_SMP_H */
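A sketch, not taken from this patch, of how smp_fn_call could be used on the caller side of a cross-CPU function call: fill in fn/data under the lock, kick the other CPUs, then wait for the finished counter.

static int smp_call_function_sketch(void (*fn)(void *), void *data, int ncpus)
{
	spin_lock(&smp_fn_call.lock);

	atomic_set(&smp_fn_call.finished, 0);
	smp_fn_call.fn = fn;
	smp_fn_call.data = data;

	/* ... send an IPI to the other CPUs here ... */

	while (atomic_read(&smp_fn_call.finished) != ncpus)
		barrier();		/* spin until everyone has run fn */

	spin_unlock(&smp_fn_call.lock);
	return 0;
}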
diff --git a/include/asm-mips/sni.h b/include/asm-mips/sni.h
index 1e2391ddb0e4..fb961478e07d 100644
--- a/include/asm-mips/sni.h
+++ b/include/asm-mips/sni.h
@@ -30,6 +30,14 @@
#define PCIMT_ERRADDR 0xbfff0040
#define PCIMT_SYNDROME 0xbfff0048
#define PCIMT_ITPEND 0xbfff0050
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
#define PCIMT_IRQSEL 0xbfff0058
#define PCIMT_TESTMEM 0xbfff0060
#define PCIMT_ECCREG 0xbfff0068
@@ -73,16 +81,18 @@
* to the other interrupts generated by ASIC PCI.
*/
#define PCIMT_KEYBOARD_IRQ 1
-#define PCIMT_IRQ_ETHERNET 16
-#define PCIMT_IRQ_TEMPERATURE 17
-#define PCIMT_IRQ_EISA_NMI 18
-#define PCIMT_IRQ_POWER_OFF 19
-#define PCIMT_IRQ_BUTTON 20
-#define PCIMT_IRQ_INTA 21
-#define PCIMT_IRQ_INTB 22
-#define PCIMT_IRQ_INTC 23
-#define PCIMT_IRQ_INTD 24
-#define PCIMT_IRQ_SCSI 25
+#define PCIMT_IRQ_INT2 16 /* What is that? */
+#define PCIMT_IRQ_INTD 17
+#define PCIMT_IRQ_INTC 18
+#define PCIMT_IRQ_INTB 19
+#define PCIMT_IRQ_INTA 20
+#define PCIMT_IRQ_EISA 21
+#define PCIMT_IRQ_SCSI 22
+#define PCIMT_IRQ_ETHERNET 23
+#define PCIMT_IRQ_TEMPERATURE 24
+#define PCIMT_IRQ_EISA_NMI 25
+#define PCIMT_IRQ_POWER_OFF 26
+#define PCIMT_IRQ_BUTTON 27
/*
* Base address for the mapped 16mb EISA bus segment.
diff --git a/include/asm-mips/socket.h b/include/asm-mips/socket.h
index 201317fb6445..b8a570fa0af9 100644
--- a/include/asm-mips/socket.h
+++ b/include/asm-mips/socket.h
@@ -33,7 +33,7 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
#define SO_RCVLOWAT 0x1004 /* receive low-water mark */
#define SO_SNDTIMEO 0x1005 /* send timeout */
#define SO_RCVTIMEO 0x1006 /* receive timeout */
-#define SO_ACCEPTCONN 0x1007
+#define SO_ACCEPTCONN 0x1009
/* linux-specific, might as well be the same as on i386 */
#define SO_NO_CHECK 11
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 724d105200fc..61ebfa6603eb 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -1,5 +1,4 @@
-/* $Id: spinlock.h,v 1.8 2000/01/23 21:15:52 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -22,6 +21,9 @@ typedef struct {
#define spin_lock_init(x) do { (x)->lock = 0; } while(0);
+#define spin_is_locked(x) ((x)->lock != 0)
+#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
+
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
@@ -29,9 +31,6 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
-typedef struct { unsigned long a[100]; } __dummy_lock_t;
-#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-
static inline void spin_lock(spinlock_t *lock)
{
unsigned int tmp;
@@ -45,8 +44,8 @@ static inline void spin_lock(spinlock_t *lock)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(lock)), "=&r" (tmp)
- : "o" (__dummy_lock(lock))
+ : "=o" (lock->lock), "=&r" (tmp)
+ : "o" (lock->lock)
: "memory");
}
@@ -57,8 +56,8 @@ static inline void spin_unlock(spinlock_t *lock)
"sync\n\t"
"sw\t$0, %0\n\t"
".set\treorder"
- : "=o" (__dummy_lock(lock))
- : "o" (__dummy_lock(lock))
+ : "=o" (lock->lock)
+ : "o" (lock->lock)
: "memory");
}
@@ -92,8 +91,8 @@ static inline void read_lock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -111,8 +110,8 @@ static inline void read_unlock(rwlock_t *rw)
"sc\t%1, %0\n\t"
"beqz\t%1, 1b\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -129,8 +128,8 @@ static inline void write_lock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -141,8 +140,8 @@ static inline void write_unlock(rwlock_t *rw)
"sync\n\t"
"sw\t$0, %0\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw))
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock)
+ : "o" (rw->lock)
: "memory");
}
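Illustrative use of the primitives above; the names are made up, and the lock must be initialised with spin_lock_init() before first use.

static spinlock_t counter_lock;
static unsigned long counter;

static void bump_counter(void)
{
	spin_lock(&counter_lock);
	counter++;				/* protected update */
	spin_unlock(&counter_lock);
}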
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 490724b21a66..d46cecef9afc 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -1,13 +1,17 @@
/*
- * include/asm-mips/stackframe.h
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
- * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Paul M. Antoine.
- *
- * $Id: stackframe.h,v 1.10 1999/08/13 17:07:27 harald Exp $
+ * Copyright (C) 1994, 1995, 1996, 2001 Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
*/
-#ifndef __ASM_MIPS_STACKFRAME_H
-#define __ASM_MIPS_STACKFRAME_H
+#ifndef __ASM_STACKFRAME_H
+#define __ASM_STACKFRAME_H
+#include <asm/addrspace.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
#include <asm/asm.h>
#include <asm/offset.h>
#include <linux/config.h>
@@ -47,20 +51,45 @@
#define __str2(x) #x
#define __str(x) __str2(x)
-#define save_static(frame) \
- __asm__ __volatile__( \
- "sw\t$16,"__str(PT_R16)"(%0)\n\t" \
- "sw\t$17,"__str(PT_R17)"(%0)\n\t" \
- "sw\t$18,"__str(PT_R18)"(%0)\n\t" \
- "sw\t$19,"__str(PT_R19)"(%0)\n\t" \
- "sw\t$20,"__str(PT_R20)"(%0)\n\t" \
- "sw\t$21,"__str(PT_R21)"(%0)\n\t" \
- "sw\t$22,"__str(PT_R22)"(%0)\n\t" \
- "sw\t$23,"__str(PT_R23)"(%0)\n\t" \
- "sw\t$30,"__str(PT_R30)"(%0)\n\t" \
- : /* No outputs */ \
- : "r" (frame))
+#define save_static_function(symbol) \
+__asm__ ( \
+ ".globl\t" #symbol "\n\t" \
+ ".align\t2\n\t" \
+ ".type\t" #symbol ", @function\n\t" \
+ ".ent\t" #symbol ", 0\n" \
+ #symbol":\n\t" \
+ ".frame\t$29, 0, $31\n\t" \
+ "sw\t$16,"__str(PT_R16)"($29)\t\t\t# save_static_function\n\t" \
+ "sw\t$17,"__str(PT_R17)"($29)\n\t" \
+ "sw\t$18,"__str(PT_R18)"($29)\n\t" \
+ "sw\t$19,"__str(PT_R19)"($29)\n\t" \
+ "sw\t$20,"__str(PT_R20)"($29)\n\t" \
+ "sw\t$21,"__str(PT_R21)"($29)\n\t" \
+ "sw\t$22,"__str(PT_R22)"($29)\n\t" \
+ "sw\t$23,"__str(PT_R23)"($29)\n\t" \
+ "sw\t$30,"__str(PT_R30)"($29)\n\t" \
+ ".end\t" #symbol "\n\t" \
+ ".size\t" #symbol",. - " #symbol)
+
+/* Used in declaration of save_static functions. */
+#define static_unused static __attribute__((unused))
+
+
+#ifdef CONFIG_SMP
+# define GET_SAVED_SP \
+ mfc0 k0, CP0_CONTEXT; \
+ lui k1, %hi(kernelsp); \
+ srl k0, k0, 23; \
+ sll k0, k0, 2; \
+ addu k1, k0; \
+ lw k1, %lo(kernelsp)(k1);
+#else
+# define GET_SAVED_SP \
+ lui k1, %hi(kernelsp); \
+ lw k1, %lo(kernelsp)(k1);
+#endif
+
#define SAVE_SOME \
.set push; \
.set reorder; \
@@ -71,13 +100,12 @@
move k1, sp; \
.set reorder; \
/* Called from user mode, new stack. */ \
- lui k1, %hi(kernelsp); \
- lw k1, %lo(kernelsp)(k1); \
+ GET_SAVED_SP \
8: \
move k0, sp; \
subu sp, k1, PT_SIZE; \
sw k0, PT_R29(sp); \
- sw $3, PT_R3(sp); \
+ sw $3, PT_R3(sp); \
sw $0, PT_R0(sp); \
mfc0 v1, CP0_STATUS; \
sw $2, PT_R2(sp); \
@@ -208,6 +236,16 @@
#endif
+#define RESTORE_SP \
+ lw sp, PT_R29(sp); \
+
+#define RESTORE_ALL \
+ RESTORE_SOME; \
+ RESTORE_AT; \
+ RESTORE_TEMP; \
+ RESTORE_STATIC; \
+ RESTORE_SP
+
#define RESTORE_ALL_AND_RET \
RESTORE_SOME; \
RESTORE_AT; \
@@ -215,6 +253,7 @@
RESTORE_STATIC; \
RESTORE_SP_AND_RET
+
/*
* Move to kernel mode and disable interrupts.
* Set cp0 enable bit as sign that we're running on the kernel stack
@@ -248,4 +287,4 @@
xori t0,0x1e; \
mtc0 t0,CP0_STATUS
-#endif /* __ASM_MIPS_STACKFRAME_H */
+#endif /* __ASM_STACKFRAME_H */
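A C-level picture of what the SMP variant of GET_SAVED_SP computes; this is a sketch only, the real code is the assembler macro above. It assumes the CPU number was stashed in the upper bits of the CP0 Context register at boot, so (context >> 23) << 2 is a byte offset into a per-CPU array of kernel stack pointers.

extern unsigned long kernelsp[];		/* one saved sp per CPU */

static inline unsigned long get_saved_sp(unsigned long cp0_context)
{
	unsigned long offset = (cp0_context >> 23) << 2;	/* bytes */

	return *(unsigned long *)((char *)kernelsp + offset);
}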
diff --git a/include/asm-mips/stat.h b/include/asm-mips/stat.h
index e8892d3da493..f3f49a4ae386 100644
--- a/include/asm-mips/stat.h
+++ b/include/asm-mips/stat.h
@@ -17,8 +17,7 @@ struct __old_kernel_stat {
unsigned int st_ctime, st_res3;
unsigned int st_blksize;
int st_blocks;
- unsigned int st_flags;
- unsigned int st_gen;
+ unsigned int st_unused0[2];
};
struct stat {
@@ -45,11 +44,7 @@ struct stat {
long reserved2;
long st_blksize;
long st_blocks;
- char st_fstype[16]; /* Filesystem type name */
- long st_pad4[8];
- /* Linux specific fields */
- unsigned int st_flags;
- unsigned int st_gen;
+ long st_pad4[14];
};
/*
@@ -61,25 +56,36 @@ struct stat {
struct stat64 {
unsigned long st_dev;
unsigned long st_pad0[3]; /* Reserved for st_dev expansion */
- ino_t st_ino;
+
+ unsigned long long st_ino;
+
mode_t st_mode;
nlink_t st_nlink;
+
uid_t st_uid;
gid_t st_gid;
+
unsigned long st_rdev;
unsigned long st_pad1[3]; /* Reserved for st_rdev expansion */
+
long long st_size;
+
/*
* Actually this should be timestruc_t st_atime, st_mtime and st_ctime
* but we don't have it under Linux.
*/
time_t st_atime;
unsigned long reserved0; /* Reserved for st_atime expansion */
+
time_t st_mtime;
- unsigned long reserved1; /* Reserved for st_atime expansion */
+ unsigned long reserved1; /* Reserved for st_mtime expansion */
+
time_t st_ctime;
- unsigned long reserved2; /* Reserved for st_atime expansion */
+ unsigned long reserved2; /* Reserved for st_ctime expansion */
+
unsigned long st_blksize;
+ unsigned long st_pad2;
+
long long st_blocks;
};
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index c057c0925506..df0dac9c9d35 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -1,5 +1,4 @@
-/* $Id: system.h,v 1.20 1999/12/06 23:13:21 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -7,6 +6,12 @@
* Copyright (C) 1994 - 1999 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1994 - 1999 by Ralf Baechle
+ *
+ * Changed set_except_vector declaration to allow return of previous
+ * vector address value - necessary for "borrowing" vectors.
+ *
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
*/
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H
@@ -61,65 +66,76 @@ __cli(void)
: "$1", "memory");
}
-#define __save_flags(x) \
-__asm__ __volatile__( \
- ".set\tpush\n\t" \
- ".set\treorder\n\t" \
- "mfc0\t%0,$12\n\t" \
- ".set\tpop\n\t" \
- : "=r" (x) \
- : /* no inputs */ \
- : "memory")
+#define __save_flags(x) \
+__asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\treorder\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ ".set\tpop\n\t" \
+ : "=r" (x))
-#define __save_and_cli(x) \
-__asm__ __volatile__( \
- ".set\tpush\n\t" \
- ".set\treorder\n\t" \
- ".set\tnoat\n\t" \
- "mfc0\t%0,$12\n\t" \
- "ori\t$1,%0,1\n\t" \
- "xori\t$1,1\n\t" \
- ".set\tnoreorder\n\t" \
- "mtc0\t$1,$12\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- ".set\tpop\n\t" \
- : "=r" (x) \
- : /* no inputs */ \
+#define __save_and_cli(x) \
+__asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\treorder\n\t" \
+ ".set\tnoat\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ "ori\t$1,%0,1\n\t" \
+ "xori\t$1,1\n\t" \
+ ".set\tnoreorder\n\t" \
+ "mtc0\t$1,$12\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".set\tpop\n\t" \
+ : "=r" (x) \
+ : /* no inputs */ \
: "$1", "memory")
-extern void __inline__
-__restore_flags(int flags)
-{
- __asm__ __volatile__(
- ".set\tpush\n\t"
- ".set\treorder\n\t"
- "mfc0\t$8,$12\n\t"
- "li\t$9,0xff00\n\t"
- "and\t$8,$9\n\t"
- "nor\t$9,$0,$9\n\t"
- "and\t%0,$9\n\t"
- "or\t%0,$8\n\t"
- ".set\tnoreorder\n\t"
- "mtc0\t%0,$12\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- ".set\tpop\n\t"
- :
- : "r" (flags)
- : "$8", "$9", "memory");
-}
+#define __restore_flags(flags) \
+do { \
+ unsigned long __tmp1; \
+ \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\t\t\t# __restore_flags\n\t" \
+ ".set\tnoat\n\t" \
+ "mfc0\t$1, $12\n\t" \
+ "andi\t%0, 1\n\t" \
+ "ori\t$1, 1\n\t" \
+ "xori\t$1, 1\n\t" \
+ "or\t%0, $1\n\t" \
+ "mtc0\t%0, $12\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "=r" (__tmp1) \
+ : "0" (flags) \
+ : "$1", "memory"); \
+} while(0)
-/*
- * Non-SMP versions ...
- */
-#define sti() __sti()
-#define cli() __cli()
-#define save_flags(x) __save_flags(x)
-#define save_and_cli(x) __save_and_cli(x)
-#define restore_flags(x) __restore_flags(x)
+#ifdef CONFIG_SMP
+
+extern void __global_sti(void);
+extern void __global_cli(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+# define sti() __global_sti()
+# define cli() __global_cli()
+# define save_flags(x) do { x = __global_save_flags(); } while (0)
+# define restore_flags(x) __global_restore_flags(x)
+# define save_and_cli(x) do { save_flags(x); cli(); } while(0)
+
+#else /* Single processor */
+
+# define sti() __sti()
+# define cli() __cli()
+# define save_flags(x) __save_flags(x)
+# define save_and_cli(x) __save_and_cli(x)
+# define restore_flags(x) __restore_flags(x)
+
+#endif /* SMP */
/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x);
@@ -131,11 +147,14 @@ __restore_flags(int flags)
* These are probably defined overly paranoid ...
*/
#ifdef CONFIG_CPU_HAS_WB
+
#include <asm/wbflush.h>
-#define rmb()
-#define wmb() wbflush()
-#define mb() wbflush()
-#else
+#define rmb() do { } while(0)
+#define wmb() wbflush()
+#define mb() wbflush()
+
+#else /* CONFIG_CPU_HAS_WB */
+
#define mb() \
__asm__ __volatile__( \
"# prevent instructions being moved around\n\t" \
@@ -148,6 +167,17 @@ __asm__ __volatile__( \
: "memory")
#define rmb() mb()
#define wmb() mb()
+
+#endif /* CONFIG_CPU_HAS_WB */
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
#endif
#define set_mb(var, value) \
@@ -180,17 +210,17 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long dummy;
__asm__ __volatile__(
- ".set\tnoreorder\n\t"
+ ".set\tnoreorder\t\t\t# xchg_u32\n\t"
".set\tnoat\n\t"
- "ll\t%0,(%1)\n"
- "1:\tmove\t$1,%2\n\t"
- "sc\t$1,(%1)\n\t"
- "beqzl\t$1,1b\n\t"
- "ll\t%0,(%1)\n\t"
+ "ll\t%0, %3\n"
+ "1:\tmove\t$1, %2\n\t"
+ "sc\t$1, %1\n\t"
+ "beqzl\t$1, 1b\n\t"
+ " ll\t%0, %3\n\t"
".set\tat\n\t"
".set\treorder"
- : "=r" (val), "=r" (m), "=r" (dummy)
- : "1" (m), "2" (val)
+ : "=r" (val), "=o" (*m), "=r" (dummy)
+ : "o" (*m), "2" (val)
: "memory");
return val;
@@ -207,64 +237,25 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
#endif /* Processor-dependent optimization */
}
-/*
- * Only used for 64 bit kernel.
- */
-extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- "lld\t%0,(%1)\n"
- "1:\tmove\t$1,%2\n\t"
- "scd\t$1,(%1)\n\t"
- "beqzl\t$1,1b\n\t"
- "lld\t%0,(%1)\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (val), "=r" (m), "=r" (dummy)
- : "1" (m), "2" (val)
- : "memory");
-
- return val;
-}
-
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- *
- * This only works if the compiler isn't horribly bad at optimizing.
- * gcc-2.5.8 reportedly can't handle this, but I define that one to
- * be dead anyway.
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return xchg_u32(ptr, x);
-#if defined(__mips64)
- case 8:
- return xchg_u64(ptr, x);
-#endif
}
- __xchg_called_with_bad_pointer();
return x;
}
-extern void set_except_vector(int n, void *addr);
+extern void *set_except_vector(int n, void *addr);
extern void __die(const char *, struct pt_regs *, const char *where,
unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *where,
unsigned long line);
-extern int abs(int);
#define die(msg, regs) \
__die(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
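Example, assumed rather than taken from this patch, of the vector "borrowing" that the new set_except_vector() return value enables: install a replacement handler and remember the previous one so it can be chained to.

static void *saved_vector;

asmlinkage void my_exc_handler(void);	/* hypothetical low-level handler */

static void __init borrow_vector(void)
{
	saved_vector = set_except_vector(1, my_exc_handler);	/* e.g. code 1 */
	/* my_exc_handler can branch to saved_vector for cases it declines */
}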
diff --git a/include/asm-mips/termios.h b/include/asm-mips/termios.h
index f31509f58a36..d60a4fb4945b 100644
--- a/include/asm-mips/termios.h
+++ b/include/asm-mips/termios.h
@@ -1,10 +1,9 @@
-/* $Id: termios.h,v 1.8 2000/01/27 23:45:30 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1996 by Ralf Baechle
+ * Copyright (C) 1995, 1996, 2001 by Ralf Baechle
*/
#ifndef _ASM_TERMIOS_H
#define _ASM_TERMIOS_H
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h
new file mode 100644
index 000000000000..436bb7c360a1
--- /dev/null
+++ b/include/asm-mips/time.h
@@ -0,0 +1,66 @@
+/***********************************************************************
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * include/asm-mips/time.h
+ * header file for the new style time.c file and time services.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ ***********************************************************************
+ */
+
+#ifndef _ASM_TIME_H
+#define _ASM_TIME_H
+
+#include <linux/ptrace.h> /* for struct pt_regs */
+#include <linux/linkage.h> /* for asmlinkage */
+
+/*
+ * RTC ops. By default, they point to no-RTC functions.
+ * rtc_get_time - mktime(year, mon, day, hour, min, sec) in seconds.
+ * rtc_set_time - reverse the above translation
+ */
+extern unsigned long (*rtc_get_time)(void);
+extern int (*rtc_set_time)(unsigned long);
+
+/*
+ * do_gettimeoffset(). By default, this function pointer points to
+ * null_gettimeoffset(), which gives the same resolution as HZ.
+ * Higher-resolution versions are available.
+ */
+extern unsigned long (*do_gettimeoffset)(void);
+
+extern unsigned long null_gettimeoffset(void);
+extern unsigned long fixed_rate_gettimeoffset(void);
+extern unsigned long calibrate_div32_gettimeoffset(void);
+extern unsigned long calibrate_div64_gettimeoffset(void);
+
+/*
+ * high-level timer interrupt routines.
+ */
+extern void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*
+ * the corresponding low-level timer interrupt routine.
+ */
+asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs);
+
+/*
+ * board specific routines required by time_init().
+ * board_time_init defaults to NULL and can remain so.
+ * board_timer_setup must be set up properly in the machine setup routine.
+ */
+struct irqaction;
+extern void (*board_time_init)(void);
+extern void (*board_timer_setup)(struct irqaction *irq);
+
+/*
+ * mips_counter_frequency - must be set if you intend to use the
+ * counter as the timer interrupt source or use fixed_rate_gettimeoffset.
+ */
+extern unsigned int mips_counter_frequency;
+
+#endif /* _ASM_TIME_H */
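
The new asm-mips/time.h exposes the time services purely through function pointers that board support code is expected to fill in before time_init() runs. A minimal userspace sketch of that hook scheme follows; the myboard_* names are invented for illustration and the pointers are redeclared locally so the snippet is self-contained:

#include <stdio.h>
#include <time.h>

/* Mirrors the hook interface declared above (illustration only). */
static unsigned long (*rtc_get_time)(void);
static unsigned long (*do_gettimeoffset)(void);

static unsigned long myboard_rtc_get_time(void)
{
	return (unsigned long) time(NULL);	/* stand-in for a real RTC read */
}

static unsigned long null_gettimeoffset(void)
{
	return 0;				/* jiffy (HZ) resolution only */
}

static void myboard_time_init(void)		/* would be hung off board_time_init */
{
	rtc_get_time = myboard_rtc_get_time;
	do_gettimeoffset = null_gettimeoffset;
}

int main(void)
{
	myboard_time_init();
	printf("epoch seconds %lu, intra-tick offset %lu\n",
	       rtc_get_time(), do_gettimeoffset());
	return 0;
}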
diff --git a/include/asm-mips/tlb.h b/include/asm-mips/tlb.h
new file mode 100644
index 000000000000..69c0faa93194
--- /dev/null
+++ b/include/asm-mips/tlb.h
@@ -0,0 +1 @@
+#include <asm-generic/tlb.h>
diff --git a/include/asm-mips/tx3912.h b/include/asm-mips/tx3912.h
new file mode 100644
index 000000000000..64f81bea9034
--- /dev/null
+++ b/include/asm-mips/tx3912.h
@@ -0,0 +1,576 @@
+/*
+ * linux/include/asm-mips/tx3912.h
+ *
+ * Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register includes for TMPR3912/05 and PR31700 processors
+ */
+#ifndef __TX3912_H__
+#define __TX3912_H__
+
+#include <asm/addrspace.h>
+
+#define inb(addr) (*(volatile unsigned char *)(addr))
+#define inw(addr) (*(volatile unsigned short *)(addr))
+#define inl(addr) (*(volatile unsigned int *)(addr))
+#define outb(b,addr) (*(volatile unsigned char *)(addr)) = (b)
+#define outw(b,addr) (*(volatile unsigned short *)(addr)) = (b)
+#define outl(b,addr) (*(volatile unsigned int *)(addr)) = (b)
+
+
+/******************************************************************************
+*
+* 01 General macro definitions
+*
+******************************************************************************/
+
+#define REGISTER_BASE 0xb0c00000
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+ #define REG_AT(x) (*((volatile unsigned long *)(REGISTER_BASE + x)))
+
+#else
+
+ #define REG_AT(x) (REGISTER_BASE + x)
+
+#endif
+
+#define BIT(x) (1 << (x))
+
+/******************************************************************************
+*
+* 02 Bus Interface Unit
+*
+******************************************************************************/
+
+#define MemConfig0 REG_AT(0x000)
+#define MemConfig1 REG_AT(0x004)
+#define MemConfig2 REG_AT(0x008)
+#define MemConfig3 REG_AT(0x00c)
+#define MemConfig4 REG_AT(0x010)
+#define MemConfig5 REG_AT(0x014)
+#define MemConfig6 REG_AT(0x018)
+#define MemConfig7 REG_AT(0x01c)
+#define MemConfig8 REG_AT(0x020)
+
+/* Memory config register 1 */
+#define MEM1_ENCS1USER BIT(21)
+
+/* Memory config register 3 */
+#define MEM3_CARD1ACCVAL_MASK (BIT(24) | BIT(25) | BIT(26) | BIT(27))
+#define MEM3_CARD1IOEN BIT(4)
+
+/* Memory config register 4 */
+#define MEM4_ARBITRATIONEN BIT(29)
+#define MEM4_MEMPOWERDOWN BIT(16)
+#define MEM4_ENREFRESH1 BIT(15)
+#define MEM4_ENREFRESH0 BIT(14)
+#define MEM4_ENWATCH BIT(24)
+#define MEM4_WATCHTIMEVAL_MASK (0xf)
+#define MEM4_WATCHTIMEVAL_SHIFT (20)
+#define MEM4_WATCHTIME_VALUE (0xf)
+
+/*
+ ***********************************************************************
+ * *
+ * 06 Clock Module *
+ * *
+ ***********************************************************************
+ */
+#define TX3912_CLK_CTRL_BASE (REGISTER_BASE + 0x1c0)
+
+#define TX3912_CLK_CTRL_CHICLKDIV_MASK 0xff000000
+#define TX3912_CLK_CTRL_CHICLKDIV_SHIFT 24
+#define TX3912_CLK_CTRL_ENCLKTEST 0x00800000
+#define TX3912_CLK_CTRL_CLKTESTSELSIB 0x00400000
+#define TX3912_CLK_CTRL_CHIMCLKSEL 0x00200000
+#define TX3912_CLK_CTRL_CHICLKDIR 0x00100000
+#define TX3912_CLK_CTRL_ENCHIMCLK 0x00080000
+#define TX3912_CLK_CTRL_ENVIDCLK 0x00040000
+#define TX3912_CLK_CTRL_ENMBUSCLK 0x00020000
+#define TX3912_CLK_CTRL_ENSPICLK 0x00010000
+#define TX3912_CLK_CTRL_ENTIMERCLK 0x00008000
+#define TX3912_CLK_CTRL_ENFASTTIMERCLK 0x00004000
+#define TX3912_CLK_CTRL_SIBMCLKDIR 0x00002000
+#define TX3912_CLK_CTRL_RESERVED 0x00001000
+#define TX3912_CLK_CTRL_ENSIBMCLK 0x00000800
+#define TX3912_CLK_CTRL_SIBMCLKDIV_MASK 0x00000700
+#define TX3912_CLK_CTRL_SIBMCLKDIV_SHIFT 8
+#define TX3912_CLK_CTRL_CSERSEL 0x00000080
+#define TX3912_CLK_CTRL_CSERDIV_MASK 0x00000070
+#define TX3912_CLK_CTRL_CSERDIV_SHIFT 4
+#define TX3912_CLK_CTRL_ENCSERCLK 0x00000008
+#define TX3912_CLK_CTRL_ENIRCLK 0x00000004
+#define TX3912_CLK_CTRL_ENUARTACLK 0x00000002
+#define TX3912_CLK_CTRL_ENUARTBCLK 0x00000001
+
+
+
+
+/******************************************************************************
+*
+* 07 CHI module
+*
+******************************************************************************/
+
+#define CHIControl REG_AT(0x1D8)
+#define CHIPointerEnable REG_AT(0x1DC)
+#define CHIReceivePtrA REG_AT(0x1E0)
+#define CHIReceivePtrB REG_AT(0x1E4)
+#define CHITransmitPtrA REG_AT(0x1E8)
+#define CHITransmitPtrB REG_AT(0x1EC)
+#define CHISize REG_AT(0x1F0)
+#define CHIReceiveStart REG_AT(0x1F4)
+#define CHITransmitStart REG_AT(0x1F8)
+#define CHIHoldingReg REG_AT(0x1FC)
+
+/* CHI Control Register */
+/* <incomplete!> */
+#define CHI_RXEN BIT(2)
+#define CHI_TXEN BIT(1)
+#define CHI_ENCHI BIT(0)
+
+/******************************************************************************
+*
+* 08 Interrupt module
+*
+******************************************************************************/
+
+/* Register locations */
+
+#define IntStatus1 REG_AT(0x100)
+#define IntStatus2 REG_AT(0x104)
+#define IntStatus3 REG_AT(0x108)
+#define IntStatus4 REG_AT(0x10c)
+#define IntStatus5 REG_AT(0x110)
+#define IntStatus6 REG_AT(0x114)
+
+#define IntClear1 REG_AT(0x100)
+#define IntClear2 REG_AT(0x104)
+#define IntClear3 REG_AT(0x108)
+#define IntClear4 REG_AT(0x10c)
+#define IntClear5 REG_AT(0x110)
+#define IntClear6 REG_AT(0x114)
+
+#define IntEnable1 REG_AT(0x118)
+#define IntEnable2 REG_AT(0x11c)
+#define IntEnable3 REG_AT(0x120)
+#define IntEnable4 REG_AT(0x124)
+#define IntEnable5 REG_AT(0x128)
+#define IntEnable6 REG_AT(0x12c)
+
+/* Interrupt Status Register 1 at offset 100 */
+#define INT1_LCDINT BIT(31)
+#define INT1_DFINT BIT(30)
+#define INT1_CHIDMAHALF BIT(29)
+#define INT1_CHIDMAFULL BIT(28)
+#define INT1_CHIDMACNTINT BIT(27)
+#define INT1_CHIRXAINT BIT(26)
+#define INT1_CHIRXBINT BIT(25)
+#define INT1_CHIACTINT BIT(24)
+#define INT1_CHIERRINT BIT(23)
+#define INT1_SND0_5INT BIT(22)
+#define INT1_SND1_0INT BIT(21)
+#define INT1_TEL0_5INT BIT(20)
+#define INT1_TEL1_0INT BIT(19)
+#define INT1_SNDDMACNTINT BIT(18)
+#define INT1_TELDMACNTINT BIT(17)
+#define INT1_LSNDCLIPINT BIT(16)
+#define INT1_RSNDCLIPINT BIT(15)
+#define INT1_VALSNDPOSINT BIT(14)
+#define INT1_VALSNDNEGINT BIT(13)
+#define INT1_VALTELPOSINT BIT(12)
+#define INT1_VALTELNEGINT BIT(11)
+#define INT1_SNDININT BIT(10)
+#define INT1_TELININT BIT(9)
+#define INT1_SIBSF0INT BIT(8)
+#define INT1_SIBSF1INT BIT(7)
+#define INT1_SIBIRQPOSINT BIT(6)
+#define INT1_SIBIRQNEGINT BIT(5)
+
+/* Interrupt Status Register 2 at offset 104 */
+#define INT2_UARTARXINT BIT(31)
+#define INT2_UARTARXOVERRUN BIT(30)
+#define INT2_UARTAFRAMEINT BIT(29)
+#define INT2_UARTABREAKINT BIT(28)
+#define INT2_UARTATXINT BIT(26)
+#define INT2_UARTATXOVERRUN BIT(25)
+#define INT2_UARTAEMPTY BIT(24)
+
+#define INT2_UARTBRXINT BIT(21)
+#define INT2_UARTBRXOVERRUN BIT(20)
+#define INT2_UARTBFRAMEINT BIT(19)
+#define INT2_UARTBBREAKINT BIT(18)
+#define INT2_UARTBTXINT BIT(16)
+#define INT2_UARTBTXOVERRUN BIT(15)
+#define INT2_UARTBEMPTY BIT(14)
+
+#define INT2_UARTA_RX (BIT(31) | BIT(30) | BIT(29) | BIT(28) | BIT(27))
+#define INT2_UARTA_TX (BIT(26) | BIT(25) | BIT(24))
+#define INT2_UARTA_DMA (BIT(23) | BIT(22))
+
+#define INT2_UARTB_RX (BIT(21) | BIT(20) | BIT(19) | BIT(18) | BIT(17))
+#define INT2_UARTB_TX (BIT(16) | BIT(15) | BIT(14))
+#define INT2_UARTB_DMA (BIT(13) | BIT(12))
+
+/* Interrupt Status Register 5 */
+#define INT5_RTCINT BIT(31)
+#define INT5_ALARMINT BIT(30)
+#define INT5_PERIODICINT BIT(29)
+#define INT5_POSPWRINT BIT(27)
+#define INT5_NEGPWRINT BIT(26)
+#define INT5_POSPWROKINT BIT(25)
+#define INT5_NEGPWROKINT BIT(24)
+#define INT5_POSONBUTINT BIT(23)
+#define INT5_NEGONBUTINT BIT(22)
+#define INT5_SPIAVAILINT BIT(21) /* 0x0020 0000 */
+#define INT5_SPIERRINT BIT(20) /* 0x0010 0000 */
+#define INT5_SPIRCVINT BIT(19) /* 0x0008 0000 */
+#define INT5_SPIEMPTYINT BIT(18) /* 0x0004 0000 */
+#define INT5_IOPOSINT6 BIT(13)
+#define INT5_IOPOSINT5 BIT(12)
+#define INT5_IOPOSINT4 BIT(11)
+#define INT5_IOPOSINT3 BIT(10)
+#define INT5_IOPOSINT2 BIT(9)
+#define INT5_IOPOSINT1 BIT(8)
+#define INT5_IOPOSINT0 BIT(7)
+#define INT5_IONEGINT6 BIT(6)
+#define INT5_IONEGINT5 BIT(5)
+#define INT5_IONEGINT4 BIT(4)
+#define INT5_IONEGINT3 BIT(3)
+#define INT5_IONEGINT2 BIT(2)
+#define INT5_IONEGINT1 BIT(1)
+#define INT5_IONEGINT0 BIT(0)
+
+#define INT5_IONEGINT_SHIFT 0
+#define INT5_IONEGINT_MASK (0x7F<<INT5_IONEGINT_SHIFT)
+#define INT5_IOPOSINT_SHIFT 7
+#define INT5_IOPOSINT_MASK (0x7F<<INT5_IOPOSINT_SHIFT)
+
+/* Interrupt Status Register 6 */
+#define INT6_IRQHIGH BIT(31)
+#define INT6_IRQLOW BIT(30)
+#define INT6_INTVECT (BIT(5) | BIT(4) | BIT(3) | BIT(2))
+
+
+/* Interrupt Enable Register 6 */
+#define INT6_GLOBALEN BIT(18)
+#define INT6_PWROKINT BIT(15)
+#define INT6_ALARMINT BIT(14)
+#define INT6_PERIODICINT BIT(13)
+#define INT6_MBUSINT BIT(12)
+#define INT6_UARTARXINT BIT(11)
+#define INT6_UARTBRXINT BIT(10)
+#define INT6_MFIOPOSINT1619 BIT(9)
+#define INT6_IOPOSINT56 BIT(8)
+#define INT6_MFIONEGINT1619 BIT(7)
+#define INT6_IONEGINT56 BIT(6)
+#define INT6_MBUSDMAFULLINT BIT(5)
+#define INT6_SNDDMACNTINT BIT(4)
+#define INT6_TELDMACNTINT BIT(3)
+#define INT6_CHIDMACNTINT BIT(2)
+#define INT6_IOPOSNEGINT0 BIT(1)
+
+/******************************************************************************
+*
+* 09 GPIO and MFIO modules
+*
+******************************************************************************/
+
+#define IOControl REG_AT(0x180)
+#define MFIOOutput REG_AT(0x184)
+#define MFIODirection REG_AT(0x188)
+#define MFIOInput REG_AT(0x18c)
+#define MFIOSelect REG_AT(0x190)
+#define IOPowerDown REG_AT(0x194)
+#define MFIOPowerDown REG_AT(0x198)
+
+#define IODIN_MASK 0x0000007f
+#define IODIN_SHIFT 0
+#define IODOUT_MASK 0x00007f00
+#define IODOUT_SHIFT 8
+#define IODIREC_MASK 0x007f0000
+#define IODIREC_SHIFT 16
+#define IODEBSEL_MASK 0x7f000000
+#define IODEBSEL_SHIFT 24
+
+/******************************************************************************
+*
+* 10 IR module
+*
+******************************************************************************/
+
+#define IRControl1 REG_AT(0x0a0)
+#define IRControl2 REG_AT(0x0a4)
+
+/* IR Control 1 Register */
+#define IR_CARDRET BIT(24)
+#define IR_BAUDVAL_MASK 0x00ff0000
+#define IR_BAUDVAL_SHIFT 16
+#define IR_TESTIR BIT(4)
+#define IR_DTINVERT BIT(3)
+#define IR_RXPWR BIT(2)
+#define IR_ENSTATE BIT(1)
+#define IR_ENCONSM BIT(0)
+
+/* IR Control 2 Register */
+#define IR_PER_MASK 0xff000000
+#define IR_PER_SHIFT 24
+#define IR_ONTIME_MASK 0x00ff0000
+#define IR_ONTIME_SHIFT 16
+#define IR_DELAYVAL_MASK 0x0000ff00
+#define IR_DELAYVAL_SHIFT 8
+#define IR_WAITVAL_MASK 0x000000ff
+#define IR_WAITVAL_SHIFT 0
+
+/******************************************************************************
+*
+* 11 Magicbus Module
+*
+******************************************************************************/
+
+#define MbusCntrl1 REG_AT(0x0e0)
+#define MbusCntrl2 REG_AT(0x0e4)
+#define MbusDMACntrl1 REG_AT(0x0e8)
+#define MbusDMACntrl2 REG_AT(0x0ec)
+#define MbusDMACount REG_AT(0x0f0)
+#define MbusTxReg REG_AT(0x0f4)
+#define MbusRxReg REG_AT(0x0f8)
+
+#define MBUS_CLKPOL BIT(4)
+#define MBUS_SLAVE BIT(3)
+#define MBUS_FSLAVE BIT(2)
+#define MBUS_LONG BIT(1)
+#define MBUS_ENMBUS BIT(0)
+
+/******************************************************************************
+*
+* 12 Power module
+*
+******************************************************************************/
+
+#define PowerControl REG_AT(0x1C4)
+
+#define PWR_ONBUTN BIT(31)
+#define PWR_PWRINT BIT(30)
+#define PWR_PWROK BIT(29)
+#define PWR_VIDRF_MASK (BIT(28) | BIT(27))
+#define PWR_VIDRF_SHIFT 27
+#define PWR_SLOWBUS BIT(26)
+#define PWR_DIVMOD BIT(25)
+#define PWR_STPTIMERVAL_MASK (BIT(15) | BIT(14) | BIT(13) | BIT(12))
+#define PWR_STPTIMERVAL_SHIFT 12
+#define PWR_ENSTPTIMER BIT(11)
+#define PWR_ENFORCESHUTDWN BIT(10)
+#define PWR_FORCESHUTDWN BIT(9)
+#define PWR_FORCESHUTDWNOCC BIT(8)
+#define PWR_SELC2MS BIT(7)
+#define PWR_BPDBVCC3 BIT(5)
+#define PWR_STOPCPU BIT(4)
+#define PWR_DBNCONBUTN BIT(3)
+#define PWR_COLDSTART BIT(2)
+#define PWR_PWRCS BIT(1)
+#define PWR_VCCON BIT(0)
+
+/******************************************************************************
+*
+* 13 SIB (Serial Interconnect Bus) Module
+*
+******************************************************************************/
+
+/* Register locations */
+#define SIBSize REG_AT(0x060)
+#define SIBSoundRXStart REG_AT(0x064)
+#define SIBSoundTXStart REG_AT(0x068)
+#define SIBTelecomRXStart REG_AT(0x06C)
+#define SIBTelecomTXStart REG_AT(0x070)
+#define SIBControl REG_AT(0x074)
+#define SIBSoundTXRXHolding REG_AT(0x078)
+#define SIBTelecomTXRXHolding REG_AT(0x07C)
+#define SIBSubFrame0Control REG_AT(0x080)
+#define SIBSubFrame1Control REG_AT(0x084)
+#define SIBSubFrame0Status REG_AT(0x088)
+#define SIBSubFrame1Status REG_AT(0x08C)
+#define SIBDMAControl REG_AT(0x090)
+
+/* SIB Size Register */
+#define SIB_SNDSIZE_MASK 0x3ffc0000
+#define SIB_SNDSIZE_SHIFT 18
+#define SIB_TELSIZE_MASK 0x00003ffc
+#define SIB_TELSIZE_SHIFT 2
+
+/* SIB Control Register */
+#define SIB_SIBIRQ BIT(31)
+#define SIB_ENCNTTEST BIT(30)
+#define SIB_ENDMATEST BIT(29)
+#define SIB_SNDMONO BIT(28)
+#define SIB_RMONOSNDIN BIT(27)
+#define SIB_SIBSCLKDIV_MASK (BIT(26) | BIT(25) | BIT(24))
+#define SIB_SIBSCLKDIV_SHIFT 24
+#define SIB_TEL16 BIT(23)
+#define SIB_TELFSDIV_MASK 0x007f0000
+#define SIB_TELFSDIV_SHIFT 16
+#define SIB_SND16 BIT(15)
+#define SIB_SNDFSDIV_MASK 0x00007f00
+#define SIB_SNDFSDIV_SHIFT 8
+#define SIB_SELTELSF1 BIT(7)
+#define SIB_SELSNDSF1 BIT(6)
+#define SIB_ENTEL BIT(5)
+#define SIB_ENSND BIT(4)
+#define SIB_SIBLOOP BIT(3)
+#define SIB_ENSF1 BIT(2)
+#define SIB_ENSF0 BIT(1)
+#define SIB_ENSIB BIT(0)
+
+/* SIB Frame Format (SIBSubFrame0Status and SIBSubFrame1Status) */
+#define SIB_REGISTER_EXT BIT(31) /* Must be zero */
+#define SIB_ADDRESS_MASK 0x78000000
+#define SIB_ADDRESS_SHIFT 27
+#define SIB_WRITE BIT(26)
+#define SIB_AUD_VALID BIT(17)
+#define SIB_TEL_VALID BIT(16)
+#define SIB_DATA_MASK 0x00ff
+#define SIB_DATA_SHIFT 0
+
+/* SIB DMA Control Register */
+#define SIB_SNDBUFF1TIME BIT(31)
+#define SIB_SNDDMALOOP BIT(30)
+#define SIB_SNDDMAPTR_MASK 0x3ffc0000
+#define SIB_SNDDMAPTR_SHIFT 18
+#define SIB_ENDMARXSND BIT(17)
+#define SIB_ENDMATXSND BIT(16)
+#define SIB_TELBUFF1TIME BIT(15)
+#define SIB_TELDMALOOP BIT(14)
+#define SIB_TELDMAPTR_MASK 0x00003ffc
+#define SIB_TELDMAPTR_SHIFT 2
+#define SIB_ENDMARXTEL BIT(1)
+#define SIB_ENDMATXTEL BIT(0)
+
+/******************************************************************************
+*
+* 14 SPI module
+*
+******************************************************************************/
+
+#define SPIControl REG_AT(0x160)
+#define SPITransmit REG_AT(0x164)
+#define SPIReceive REG_AT(0x164)
+
+#define SPI_SPION BIT(17)
+#define SPI_EMPTY BIT(16)
+#define SPI_DELAYVAL_MASK (BIT(12) | BIT(13) | BIT(14) | BIT(15))
+#define SPI_DELAYVAL_SHIFT 12
+#define SPI_BAUDRATE_MASK (BIT(8) | BIT(9) | BIT(10) | BIT(11))
+#define SPI_BAUDRATE_SHIFT 8
+#define SPI_PHAPOL BIT(5)
+#define SPI_CLKPOL BIT(4)
+#define SPI_WORD BIT(2)
+#define SPI_LSB BIT(1)
+#define SPI_ENSPI BIT(0)
+
+/******************************************************************************
+*
+* 15 Timer module
+*
+******************************************************************************/
+
+#define RTChigh REG_AT(0x140)
+#define RTClow REG_AT(0x144)
+#define RTCalarmHigh REG_AT(0x148)
+#define RTCalarmLow REG_AT(0x14c)
+#define RTCtimerControl REG_AT(0x150)
+#define RTCperiodTimer REG_AT(0x154)
+
+/* RTC Timer Control */
+#define TIM_FREEZEPRE BIT(7)
+#define TIM_FREEZERTC BIT(6)
+#define TIM_FREEZETIMER BIT(5)
+#define TIM_ENPERTIMER BIT(4)
+#define TIM_RTCCLEAR BIT(3)
+
+#define RTC_HIGHMASK (0xFF)
+
+/* RTC Periodic Timer */
+#define TIM_PERCNT 0xFFFF0000
+#define TIM_PERVAL 0x0000FFFF
+
+/* For a system clock frequency of 36.864 MHz, the timer ticks once
+ every 868 ns (i.e. CLK/32), so 11520 counts give a 10 ms interval.
+ */
+#define PER_TIMER_COUNT (1152000/HZ)
+
+/*
+ ***********************************************************************
+ * *
+ * 15 UART Module *
+ * *
+ ***********************************************************************
+ */
+#define TX3912_UARTA_BASE (REGISTER_BASE + 0x0b0)
+#define TX3912_UARTB_BASE (REGISTER_BASE + 0x0c8)
+
+/*
+ * TX3912 UART register offsets
+ */
+#define TX3912_UART_CTRL1 0x00
+#define TX3912_UART_CTRL2 0x04
+#define TX3912_UART_DMA_CTRL1 0x08
+#define TX3912_UART_DMA_CTRL2 0x0c
+#define TX3912_UART_DMA_CNT 0x10
+#define TX3912_UART_DATA 0x14
+
+#define UartA_Ctrl1 REG_AT(0x0b0)
+#define UartA_Data REG_AT(0x0c4)
+
+/*
+ * Defines for UART Control Register 1
+ */
+#define TX3912_UART_CTRL1_UARTON 0x80000000
+#define UART_TX_EMPTY BIT(30)
+#define UART_PRX_HOLD_FULL BIT(29)
+#define UART_RX_HOLD_FULL BIT(28)
+#define UART_EN_DMA_RX BIT(15)
+#define UART_EN_DMA_TX BIT(14)
+#define UART_BREAK_HALT BIT(12)
+#define UART_DMA_LOOP BIT(10)
+#define UART_PULSE_THREE BIT(9)
+#define UART_PULSE_SIX BIT(8)
+#define UART_DT_INVERT BIT(7)
+#define UART_DIS_TXD BIT(6)
+#define UART_LOOPBACK BIT(4)
+#define TX3912_UART_CTRL1_ENUART 0x00000001
+
+#define SER_SEVEN_BIT BIT(3)
+#define SER_EIGHT_BIT 0
+#define SER_EVEN_PARITY (BIT(2) | BIT(1))
+#define SER_ODD_PARITY BIT(1)
+#define SER_NO_PARITY 0
+#define SER_TWO_STOP BIT(5)
+#define SER_ONE_STOP 0
+
+/*
+ * Defines for UART Control Register 2
+ *
+ * 3.6864MHz
+ * divisors = ----------- - 1
+ * (baud * 16)
+ */
+#define TX3912_UART_CTRL2_B230400 0x000 /* 0 */
+#define TX3912_UART_CTRL2_B115200 0x001 /* 1 */
+#define TX3912_UART_CTRL2_B76800 0x002 /* 2 */
+#define TX3912_UART_CTRL2_B57600 0x003 /* 3 */
+#define TX3912_UART_CTRL2_B38400 0x005 /* 5 */
+#define TX3912_UART_CTRL2_B19200 0x00b /* 11 */
+#define TX3912_UART_CTRL2_B9600 0x016 /* 22 */
+#define TX3912_UART_CTRL2_B4800 0x02f /* 47 */
+#define TX3912_UART_CTRL2_B2400 0x05f /* 95 */
+#define TX3912_UART_CTRL2_B1200 0x0bf /* 191 */
+#define TX3912_UART_CTRL2_B600 0x17f /* 383 */
+#define TX3912_UART_CTRL2_B300 0x2ff /* 767 */
+
+#endif /* __TX3912_H__ */
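
The UART Control Register 2 comment above gives the divisor formula divisor = 3686400 / (baud * 16) - 1; for example, 38400 baud yields 3686400 / 614400 - 1 = 5, which matches TX3912_UART_CTRL2_B38400. A small standalone sketch that applies the formula (tx3912_uart_divisor is an invented helper name, not part of the header):

#include <stdio.h>

/* divisor = 3.6864 MHz / (baud * 16) - 1, per the header comment */
static unsigned int tx3912_uart_divisor(unsigned int baud)
{
	return 3686400u / (baud * 16u) - 1u;
}

int main(void)
{
	unsigned int rates[] = { 230400, 115200, 57600, 38400, 19200, 2400 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6u baud -> divisor 0x%03x\n",
		       rates[i], tx3912_uart_divisor(rates[i]));
	return 0;
}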
diff --git a/include/asm-mips/unaligned.h b/include/asm-mips/unaligned.h
index 622e1e977400..2b0174925f22 100644
--- a/include/asm-mips/unaligned.h
+++ b/include/asm-mips/unaligned.h
@@ -1,10 +1,10 @@
-/* $Id$
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_UNALIGNED_H
#define _ASM_UNALIGNED_H
@@ -13,133 +13,147 @@ extern void __get_unaligned_bad_length(void);
extern void __put_unaligned_bad_length(void);
/*
- * Load quad unaligned.
+ * Load double unaligned.
+ *
+ * This could have been implemented in plain C like IA64 but egcs 1.0.3a
+ * inflates this to 23 instructions ...
*/
-extern __inline__ unsigned long ldq_u(const unsigned long long * __addr)
+extern inline unsigned long long __ldq_u(const unsigned long long * __addr)
{
unsigned long long __res;
- __asm__("uld\t%0,(%1)"
- :"=&r" (__res)
- :"r" (__addr));
+ __asm__("ulw\t%0, %1\n\t"
+ "ulw\t%D0, 4+%1"
+ : "=&r" (__res)
+ : "m" (*__addr));
return __res;
}
/*
- * Load long unaligned.
+ * Load word unaligned.
*/
-extern __inline__ unsigned long ldl_u(const unsigned int * __addr)
+extern inline unsigned long __ldl_u(const unsigned int * __addr)
{
unsigned long __res;
- __asm__("ulw\t%0,(%1)"
- :"=&r" (__res)
- :"r" (__addr));
+ __asm__("ulw\t%0,%1"
+ : "=&r" (__res)
+ : "m" (*__addr));
return __res;
}
/*
- * Load word unaligned.
+ * Load halfword unaligned.
*/
-extern __inline__ unsigned long ldw_u(const unsigned short * __addr)
+extern inline unsigned long __ldw_u(const unsigned short * __addr)
{
unsigned long __res;
- __asm__("ulh\t%0,(%1)"
- :"=&r" (__res)
- :"r" (__addr));
+ __asm__("ulh\t%0,%1"
+ : "=&r" (__res)
+ : "m" (*__addr));
return __res;
}
/*
- * Store quad ununaligned.
+ * Store doubleword unaligned.
*/
-extern __inline__ void stq_u(unsigned long __val, unsigned long long * __addr)
+extern inline void __stq_u(unsigned long __val, unsigned long long * __addr)
{
- __asm__ __volatile__(
- "usd\t%0,(%1)"
- : /* No results */
- :"r" (__val),
- "r" (__addr));
+ __asm__("usw\t%1, %0\n\t"
+ "usw\t%D1, 4+%0"
+ : "=m" (*__addr)
+ : "r" (__val));
}
/*
 * Store long unaligned.
*/
-extern __inline__ void stl_u(unsigned long __val, unsigned int * __addr)
+extern inline void __stl_u(unsigned long __val, unsigned int * __addr)
{
- __asm__ __volatile__(
- "usw\t%0,(%1)"
- : /* No results */
- :"r" (__val),
- "r" (__addr));
+ __asm__("usw\t%1, %0"
+ : "=m" (*__addr)
+ : "r" (__val));
}
/*
 * Store word unaligned.
*/
-extern __inline__ void stw_u(unsigned long __val, unsigned short * __addr)
+extern inline void __stw_u(unsigned long __val, unsigned short * __addr)
{
- __asm__ __volatile__(
- "ush\t%0,(%1)"
- : /* No results */
- :"r" (__val),
- "r" (__addr));
+ __asm__("ush\t%1, %0"
+ : "=m" (*__addr)
+ : "r" (__val));
}
-extern inline unsigned long __get_unaligned(const void *ptr, size_t size)
-{
- unsigned long val;
- switch (size) {
- case 1:
- val = *(const unsigned char *)ptr;
- break;
- case 2:
- val = ldw_u((const unsigned short *)ptr);
- break;
- case 4:
- val = ldl_u((const unsigned int *)ptr);
- break;
- case 8:
- val = ldq_u((const unsigned long long *)ptr);
- break;
- default:
- __get_unaligned_bad_length();
- break;
- }
- return val;
-}
-
-extern inline void __put_unaligned(unsigned long val, void *ptr, size_t size)
-{
- switch (size) {
- case 1:
- *(unsigned char *)ptr = (val);
- break;
- case 2:
- stw_u(val, (unsigned short *)ptr);
- break;
- case 4:
- stl_u(val, (unsigned int *)ptr);
- break;
- case 8:
- stq_u(val, (unsigned long long *)ptr);
- break;
- default:
- __put_unaligned_bad_length();
- break;
- }
-}
+/*
+ * get_unaligned - get value from possibly mis-aligned location
+ * @ptr: pointer to value
+ *
+ * This macro should be used for accessing values larger in size than
+ * single bytes at locations that are expected to be improperly aligned,
+ * e.g. retrieving a u16 value from a location not u16-aligned.
+ *
+ * Note that unaligned accesses can be very expensive on some architectures.
+ */
+#define get_unaligned(ptr) \
+({ \
+ __typeof__(*(ptr)) __val; \
+ \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __val = *(const unsigned char *)ptr; \
+ break; \
+ case 2: \
+ __val = __ldw_u((const unsigned short *)ptr); \
+ break; \
+ case 4: \
+ __val = __ldl_u((const unsigned int *)ptr); \
+ break; \
+ case 8: \
+ __val = __ldq_u((const unsigned long long *)ptr); \
+ break; \
+ default: \
+ __get_unaligned_bad_length(); \
+ break; \
+ } \
+ \
+ __val; \
+})
-/*
- * The main single-value unaligned transfer routines.
+/*
+ * put_unaligned - put value to a possibly mis-aligned location
+ * @val: value to place
+ * @ptr: pointer to location
+ *
+ * This macro should be used for placing values larger in size than
+ * single bytes at locations that are expected to be improperly aligned,
+ * e.g. writing a u16 value to a location not u16-aligned.
+ *
+ * Note that unaligned accesses can be very expensive on some architectures.
*/
-#define get_unaligned(ptr) \
- ((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr))))
-#define put_unaligned(x,ptr) \
- __put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
+#define put_unaligned(val,ptr) \
+do { \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ *(unsigned char *)(ptr) = (val); \
+ break; \
+ case 2: \
+ __stw_u(val, (unsigned short *)(ptr)); \
+ break; \
+ case 4: \
+ __stl_u(val, (unsigned int *)(ptr)); \
+ break; \
+ case 8: \
+ __stq_u(val, (unsigned long long *)(ptr)); \
+ break; \
+ default: \
+ __put_unaligned_bad_length(); \
+ break; \
+ } \
+} while(0)
#endif /* _ASM_UNALIGNED_H */
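
get_unaligned()/put_unaligned() above dispatch by operand size to the ulh/ulw/uld-based helpers. As a portable illustration of the intended usage pattern only, the sketch below uses a memcpy-based stand-in (demo_get_unaligned is an invented name, not the kernel macro) to read a 32-bit value from an odd offset in a byte buffer:

#include <stdio.h>
#include <string.h>

/* Portable stand-in for get_unaligned(); the kernel version expands to the
 * unaligned-load instructions shown in the patch instead. */
#define demo_get_unaligned(ptr, type) \
	({ type __v; memcpy(&__v, (ptr), sizeof(__v)); __v; })

int main(void)
{
	unsigned char packet[] = { 0xaa, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int word = demo_get_unaligned(packet + 1, unsigned int);

	printf("unaligned 32-bit load: 0x%08x\n", word);
	return 0;
}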
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index 46986ccfa2d9..97d031618666 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -1,990 +1,16 @@
-/* $Id: unistd.h,v 1.20 2000/02/18 00:24:48 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
*
* Changed system calls macros _syscall5 - _syscall7 to push args 5 to 7 onto
* the stack. Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A
*/
-#ifndef __ASM_MIPS_UNISTD_H
-#define __ASM_MIPS_UNISTD_H
-
-/*
- * The syscalls 0 - 3999 are reserved for a down to the root syscall
- * compatibility with RISC/os and IRIX. We'll see how to deal with the
- * various "real" BSD variants like Ultrix, NetBSD ...
- */
-
-/*
- * SVR4 syscalls are in the range from 1 to 999
- */
-#define __NR_SVR4 0
-#define __NR_SVR4_syscall (__NR_SVR4 + 0)
-#define __NR_SVR4_exit (__NR_SVR4 + 1)
-#define __NR_SVR4_fork (__NR_SVR4 + 2)
-#define __NR_SVR4_read (__NR_SVR4 + 3)
-#define __NR_SVR4_write (__NR_SVR4 + 4)
-#define __NR_SVR4_open (__NR_SVR4 + 5)
-#define __NR_SVR4_close (__NR_SVR4 + 6)
-#define __NR_SVR4_wait (__NR_SVR4 + 7)
-#define __NR_SVR4_creat (__NR_SVR4 + 8)
-#define __NR_SVR4_link (__NR_SVR4 + 9)
-#define __NR_SVR4_unlink (__NR_SVR4 + 10)
-#define __NR_SVR4_exec (__NR_SVR4 + 11)
-#define __NR_SVR4_chdir (__NR_SVR4 + 12)
-#define __NR_SVR4_gtime (__NR_SVR4 + 13)
-#define __NR_SVR4_mknod (__NR_SVR4 + 14)
-#define __NR_SVR4_chmod (__NR_SVR4 + 15)
-#define __NR_SVR4_chown (__NR_SVR4 + 16)
-#define __NR_SVR4_sbreak (__NR_SVR4 + 17)
-#define __NR_SVR4_stat (__NR_SVR4 + 18)
-#define __NR_SVR4_lseek (__NR_SVR4 + 19)
-#define __NR_SVR4_getpid (__NR_SVR4 + 20)
-#define __NR_SVR4_mount (__NR_SVR4 + 21)
-#define __NR_SVR4_umount (__NR_SVR4 + 22)
-#define __NR_SVR4_setuid (__NR_SVR4 + 23)
-#define __NR_SVR4_getuid (__NR_SVR4 + 24)
-#define __NR_SVR4_stime (__NR_SVR4 + 25)
-#define __NR_SVR4_ptrace (__NR_SVR4 + 26)
-#define __NR_SVR4_alarm (__NR_SVR4 + 27)
-#define __NR_SVR4_fstat (__NR_SVR4 + 28)
-#define __NR_SVR4_pause (__NR_SVR4 + 29)
-#define __NR_SVR4_utime (__NR_SVR4 + 30)
-#define __NR_SVR4_stty (__NR_SVR4 + 31)
-#define __NR_SVR4_gtty (__NR_SVR4 + 32)
-#define __NR_SVR4_access (__NR_SVR4 + 33)
-#define __NR_SVR4_nice (__NR_SVR4 + 34)
-#define __NR_SVR4_statfs (__NR_SVR4 + 35)
-#define __NR_SVR4_sync (__NR_SVR4 + 36)
-#define __NR_SVR4_kill (__NR_SVR4 + 37)
-#define __NR_SVR4_fstatfs (__NR_SVR4 + 38)
-#define __NR_SVR4_setpgrp (__NR_SVR4 + 39)
-#define __NR_SVR4_cxenix (__NR_SVR4 + 40)
-#define __NR_SVR4_dup (__NR_SVR4 + 41)
-#define __NR_SVR4_pipe (__NR_SVR4 + 42)
-#define __NR_SVR4_times (__NR_SVR4 + 43)
-#define __NR_SVR4_profil (__NR_SVR4 + 44)
-#define __NR_SVR4_plock (__NR_SVR4 + 45)
-#define __NR_SVR4_setgid (__NR_SVR4 + 46)
-#define __NR_SVR4_getgid (__NR_SVR4 + 47)
-#define __NR_SVR4_sig (__NR_SVR4 + 48)
-#define __NR_SVR4_msgsys (__NR_SVR4 + 49)
-#define __NR_SVR4_sysmips (__NR_SVR4 + 50)
-#define __NR_SVR4_sysacct (__NR_SVR4 + 51)
-#define __NR_SVR4_shmsys (__NR_SVR4 + 52)
-#define __NR_SVR4_semsys (__NR_SVR4 + 53)
-#define __NR_SVR4_ioctl (__NR_SVR4 + 54)
-#define __NR_SVR4_uadmin (__NR_SVR4 + 55)
-#define __NR_SVR4_exch (__NR_SVR4 + 56)
-#define __NR_SVR4_utssys (__NR_SVR4 + 57)
-#define __NR_SVR4_fsync (__NR_SVR4 + 58)
-#define __NR_SVR4_exece (__NR_SVR4 + 59)
-#define __NR_SVR4_umask (__NR_SVR4 + 60)
-#define __NR_SVR4_chroot (__NR_SVR4 + 61)
-#define __NR_SVR4_fcntl (__NR_SVR4 + 62)
-#define __NR_SVR4_ulimit (__NR_SVR4 + 63)
-#define __NR_SVR4_reserved1 (__NR_SVR4 + 64)
-#define __NR_SVR4_reserved2 (__NR_SVR4 + 65)
-#define __NR_SVR4_reserved3 (__NR_SVR4 + 66)
-#define __NR_SVR4_reserved4 (__NR_SVR4 + 67)
-#define __NR_SVR4_reserved5 (__NR_SVR4 + 68)
-#define __NR_SVR4_reserved6 (__NR_SVR4 + 69)
-#define __NR_SVR4_advfs (__NR_SVR4 + 70)
-#define __NR_SVR4_unadvfs (__NR_SVR4 + 71)
-#define __NR_SVR4_unused1 (__NR_SVR4 + 72)
-#define __NR_SVR4_unused2 (__NR_SVR4 + 73)
-#define __NR_SVR4_rfstart (__NR_SVR4 + 74)
-#define __NR_SVR4_unused3 (__NR_SVR4 + 75)
-#define __NR_SVR4_rdebug (__NR_SVR4 + 76)
-#define __NR_SVR4_rfstop (__NR_SVR4 + 77)
-#define __NR_SVR4_rfsys (__NR_SVR4 + 78)
-#define __NR_SVR4_rmdir (__NR_SVR4 + 79)
-#define __NR_SVR4_mkdir (__NR_SVR4 + 80)
-#define __NR_SVR4_getdents (__NR_SVR4 + 81)
-#define __NR_SVR4_libattach (__NR_SVR4 + 82)
-#define __NR_SVR4_libdetach (__NR_SVR4 + 83)
-#define __NR_SVR4_sysfs (__NR_SVR4 + 84)
-#define __NR_SVR4_getmsg (__NR_SVR4 + 85)
-#define __NR_SVR4_putmsg (__NR_SVR4 + 86)
-#define __NR_SVR4_poll (__NR_SVR4 + 87)
-#define __NR_SVR4_lstat (__NR_SVR4 + 88)
-#define __NR_SVR4_symlink (__NR_SVR4 + 89)
-#define __NR_SVR4_readlink (__NR_SVR4 + 90)
-#define __NR_SVR4_setgroups (__NR_SVR4 + 91)
-#define __NR_SVR4_getgroups (__NR_SVR4 + 92)
-#define __NR_SVR4_fchmod (__NR_SVR4 + 93)
-#define __NR_SVR4_fchown (__NR_SVR4 + 94)
-#define __NR_SVR4_sigprocmask (__NR_SVR4 + 95)
-#define __NR_SVR4_sigsuspend (__NR_SVR4 + 96)
-#define __NR_SVR4_sigaltstack (__NR_SVR4 + 97)
-#define __NR_SVR4_sigaction (__NR_SVR4 + 98)
-#define __NR_SVR4_sigpending (__NR_SVR4 + 99)
-#define __NR_SVR4_setcontext (__NR_SVR4 + 100)
-#define __NR_SVR4_evsys (__NR_SVR4 + 101)
-#define __NR_SVR4_evtrapret (__NR_SVR4 + 102)
-#define __NR_SVR4_statvfs (__NR_SVR4 + 103)
-#define __NR_SVR4_fstatvfs (__NR_SVR4 + 104)
-#define __NR_SVR4_reserved7 (__NR_SVR4 + 105)
-#define __NR_SVR4_nfssys (__NR_SVR4 + 106)
-#define __NR_SVR4_waitid (__NR_SVR4 + 107)
-#define __NR_SVR4_sigsendset (__NR_SVR4 + 108)
-#define __NR_SVR4_hrtsys (__NR_SVR4 + 109)
-#define __NR_SVR4_acancel (__NR_SVR4 + 110)
-#define __NR_SVR4_async (__NR_SVR4 + 111)
-#define __NR_SVR4_priocntlset (__NR_SVR4 + 112)
-#define __NR_SVR4_pathconf (__NR_SVR4 + 113)
-#define __NR_SVR4_mincore (__NR_SVR4 + 114)
-#define __NR_SVR4_mmap (__NR_SVR4 + 115)
-#define __NR_SVR4_mprotect (__NR_SVR4 + 116)
-#define __NR_SVR4_munmap (__NR_SVR4 + 117)
-#define __NR_SVR4_fpathconf (__NR_SVR4 + 118)
-#define __NR_SVR4_vfork (__NR_SVR4 + 119)
-#define __NR_SVR4_fchdir (__NR_SVR4 + 120)
-#define __NR_SVR4_readv (__NR_SVR4 + 121)
-#define __NR_SVR4_writev (__NR_SVR4 + 122)
-#define __NR_SVR4_xstat (__NR_SVR4 + 123)
-#define __NR_SVR4_lxstat (__NR_SVR4 + 124)
-#define __NR_SVR4_fxstat (__NR_SVR4 + 125)
-#define __NR_SVR4_xmknod (__NR_SVR4 + 126)
-#define __NR_SVR4_clocal (__NR_SVR4 + 127)
-#define __NR_SVR4_setrlimit (__NR_SVR4 + 128)
-#define __NR_SVR4_getrlimit (__NR_SVR4 + 129)
-#define __NR_SVR4_lchown (__NR_SVR4 + 130)
-#define __NR_SVR4_memcntl (__NR_SVR4 + 131)
-#define __NR_SVR4_getpmsg (__NR_SVR4 + 132)
-#define __NR_SVR4_putpmsg (__NR_SVR4 + 133)
-#define __NR_SVR4_rename (__NR_SVR4 + 134)
-#define __NR_SVR4_nuname (__NR_SVR4 + 135)
-#define __NR_SVR4_setegid (__NR_SVR4 + 136)
-#define __NR_SVR4_sysconf (__NR_SVR4 + 137)
-#define __NR_SVR4_adjtime (__NR_SVR4 + 138)
-#define __NR_SVR4_sysinfo (__NR_SVR4 + 139)
-#define __NR_SVR4_reserved8 (__NR_SVR4 + 140)
-#define __NR_SVR4_seteuid (__NR_SVR4 + 141)
-#define __NR_SVR4_PYRAMID_statis (__NR_SVR4 + 142)
-#define __NR_SVR4_PYRAMID_tuning (__NR_SVR4 + 143)
-#define __NR_SVR4_PYRAMID_forcerr (__NR_SVR4 + 144)
-#define __NR_SVR4_PYRAMID_mpcntl (__NR_SVR4 + 145)
-#define __NR_SVR4_reserved9 (__NR_SVR4 + 146)
-#define __NR_SVR4_reserved10 (__NR_SVR4 + 147)
-#define __NR_SVR4_reserved11 (__NR_SVR4 + 148)
-#define __NR_SVR4_reserved12 (__NR_SVR4 + 149)
-#define __NR_SVR4_reserved13 (__NR_SVR4 + 150)
-#define __NR_SVR4_reserved14 (__NR_SVR4 + 151)
-#define __NR_SVR4_reserved15 (__NR_SVR4 + 152)
-#define __NR_SVR4_reserved16 (__NR_SVR4 + 153)
-#define __NR_SVR4_reserved17 (__NR_SVR4 + 154)
-#define __NR_SVR4_reserved18 (__NR_SVR4 + 155)
-#define __NR_SVR4_reserved19 (__NR_SVR4 + 156)
-#define __NR_SVR4_reserved20 (__NR_SVR4 + 157)
-#define __NR_SVR4_reserved21 (__NR_SVR4 + 158)
-#define __NR_SVR4_reserved22 (__NR_SVR4 + 159)
-#define __NR_SVR4_reserved23 (__NR_SVR4 + 160)
-#define __NR_SVR4_reserved24 (__NR_SVR4 + 161)
-#define __NR_SVR4_reserved25 (__NR_SVR4 + 162)
-#define __NR_SVR4_reserved26 (__NR_SVR4 + 163)
-#define __NR_SVR4_reserved27 (__NR_SVR4 + 164)
-#define __NR_SVR4_reserved28 (__NR_SVR4 + 165)
-#define __NR_SVR4_reserved29 (__NR_SVR4 + 166)
-#define __NR_SVR4_reserved30 (__NR_SVR4 + 167)
-#define __NR_SVR4_reserved31 (__NR_SVR4 + 168)
-#define __NR_SVR4_reserved32 (__NR_SVR4 + 169)
-#define __NR_SVR4_reserved33 (__NR_SVR4 + 170)
-#define __NR_SVR4_reserved34 (__NR_SVR4 + 171)
-#define __NR_SVR4_reserved35 (__NR_SVR4 + 172)
-#define __NR_SVR4_reserved36 (__NR_SVR4 + 173)
-#define __NR_SVR4_reserved37 (__NR_SVR4 + 174)
-#define __NR_SVR4_reserved38 (__NR_SVR4 + 175)
-#define __NR_SVR4_reserved39 (__NR_SVR4 + 176)
-#define __NR_SVR4_reserved40 (__NR_SVR4 + 177)
-#define __NR_SVR4_reserved41 (__NR_SVR4 + 178)
-#define __NR_SVR4_reserved42 (__NR_SVR4 + 179)
-#define __NR_SVR4_reserved43 (__NR_SVR4 + 180)
-#define __NR_SVR4_reserved44 (__NR_SVR4 + 181)
-#define __NR_SVR4_reserved45 (__NR_SVR4 + 182)
-#define __NR_SVR4_reserved46 (__NR_SVR4 + 183)
-#define __NR_SVR4_reserved47 (__NR_SVR4 + 184)
-#define __NR_SVR4_reserved48 (__NR_SVR4 + 185)
-#define __NR_SVR4_reserved49 (__NR_SVR4 + 186)
-#define __NR_SVR4_reserved50 (__NR_SVR4 + 187)
-#define __NR_SVR4_reserved51 (__NR_SVR4 + 188)
-#define __NR_SVR4_reserved52 (__NR_SVR4 + 189)
-#define __NR_SVR4_reserved53 (__NR_SVR4 + 190)
-#define __NR_SVR4_reserved54 (__NR_SVR4 + 191)
-#define __NR_SVR4_reserved55 (__NR_SVR4 + 192)
-#define __NR_SVR4_reserved56 (__NR_SVR4 + 193)
-#define __NR_SVR4_reserved57 (__NR_SVR4 + 194)
-#define __NR_SVR4_reserved58 (__NR_SVR4 + 195)
-#define __NR_SVR4_reserved59 (__NR_SVR4 + 196)
-#define __NR_SVR4_reserved60 (__NR_SVR4 + 197)
-#define __NR_SVR4_reserved61 (__NR_SVR4 + 198)
-#define __NR_SVR4_reserved62 (__NR_SVR4 + 199)
-#define __NR_SVR4_reserved63 (__NR_SVR4 + 200)
-#define __NR_SVR4_aread (__NR_SVR4 + 201)
-#define __NR_SVR4_awrite (__NR_SVR4 + 202)
-#define __NR_SVR4_listio (__NR_SVR4 + 203)
-#define __NR_SVR4_mips_acancel (__NR_SVR4 + 204)
-#define __NR_SVR4_astatus (__NR_SVR4 + 205)
-#define __NR_SVR4_await (__NR_SVR4 + 206)
-#define __NR_SVR4_areadv (__NR_SVR4 + 207)
-#define __NR_SVR4_awritev (__NR_SVR4 + 208)
-#define __NR_SVR4_MIPS_reserved1 (__NR_SVR4 + 209)
-#define __NR_SVR4_MIPS_reserved2 (__NR_SVR4 + 210)
-#define __NR_SVR4_MIPS_reserved3 (__NR_SVR4 + 211)
-#define __NR_SVR4_MIPS_reserved4 (__NR_SVR4 + 212)
-#define __NR_SVR4_MIPS_reserved5 (__NR_SVR4 + 213)
-#define __NR_SVR4_MIPS_reserved6 (__NR_SVR4 + 214)
-#define __NR_SVR4_MIPS_reserved7 (__NR_SVR4 + 215)
-#define __NR_SVR4_MIPS_reserved8 (__NR_SVR4 + 216)
-#define __NR_SVR4_MIPS_reserved9 (__NR_SVR4 + 217)
-#define __NR_SVR4_MIPS_reserved10 (__NR_SVR4 + 218)
-#define __NR_SVR4_MIPS_reserved11 (__NR_SVR4 + 219)
-#define __NR_SVR4_MIPS_reserved12 (__NR_SVR4 + 220)
-#define __NR_SVR4_CDC_reserved1 (__NR_SVR4 + 221)
-#define __NR_SVR4_CDC_reserved2 (__NR_SVR4 + 222)
-#define __NR_SVR4_CDC_reserved3 (__NR_SVR4 + 223)
-#define __NR_SVR4_CDC_reserved4 (__NR_SVR4 + 224)
-#define __NR_SVR4_CDC_reserved5 (__NR_SVR4 + 225)
-#define __NR_SVR4_CDC_reserved6 (__NR_SVR4 + 226)
-#define __NR_SVR4_CDC_reserved7 (__NR_SVR4 + 227)
-#define __NR_SVR4_CDC_reserved8 (__NR_SVR4 + 228)
-#define __NR_SVR4_CDC_reserved9 (__NR_SVR4 + 229)
-#define __NR_SVR4_CDC_reserved10 (__NR_SVR4 + 230)
-#define __NR_SVR4_CDC_reserved11 (__NR_SVR4 + 231)
-#define __NR_SVR4_CDC_reserved12 (__NR_SVR4 + 232)
-#define __NR_SVR4_CDC_reserved13 (__NR_SVR4 + 233)
-#define __NR_SVR4_CDC_reserved14 (__NR_SVR4 + 234)
-#define __NR_SVR4_CDC_reserved15 (__NR_SVR4 + 235)
-#define __NR_SVR4_CDC_reserved16 (__NR_SVR4 + 236)
-#define __NR_SVR4_CDC_reserved17 (__NR_SVR4 + 237)
-#define __NR_SVR4_CDC_reserved18 (__NR_SVR4 + 238)
-#define __NR_SVR4_CDC_reserved19 (__NR_SVR4 + 239)
-#define __NR_SVR4_CDC_reserved20 (__NR_SVR4 + 240)
-
-/*
- * SYS V syscalls are in the range from 1000 to 1999
- */
-#define __NR_SYSV 1000
-#define __NR_SYSV_syscall (__NR_SYSV + 0)
-#define __NR_SYSV_exit (__NR_SYSV + 1)
-#define __NR_SYSV_fork (__NR_SYSV + 2)
-#define __NR_SYSV_read (__NR_SYSV + 3)
-#define __NR_SYSV_write (__NR_SYSV + 4)
-#define __NR_SYSV_open (__NR_SYSV + 5)
-#define __NR_SYSV_close (__NR_SYSV + 6)
-#define __NR_SYSV_wait (__NR_SYSV + 7)
-#define __NR_SYSV_creat (__NR_SYSV + 8)
-#define __NR_SYSV_link (__NR_SYSV + 9)
-#define __NR_SYSV_unlink (__NR_SYSV + 10)
-#define __NR_SYSV_execv (__NR_SYSV + 11)
-#define __NR_SYSV_chdir (__NR_SYSV + 12)
-#define __NR_SYSV_time (__NR_SYSV + 13)
-#define __NR_SYSV_mknod (__NR_SYSV + 14)
-#define __NR_SYSV_chmod (__NR_SYSV + 15)
-#define __NR_SYSV_chown (__NR_SYSV + 16)
-#define __NR_SYSV_brk (__NR_SYSV + 17)
-#define __NR_SYSV_stat (__NR_SYSV + 18)
-#define __NR_SYSV_lseek (__NR_SYSV + 19)
-#define __NR_SYSV_getpid (__NR_SYSV + 20)
-#define __NR_SYSV_mount (__NR_SYSV + 21)
-#define __NR_SYSV_umount (__NR_SYSV + 22)
-#define __NR_SYSV_setuid (__NR_SYSV + 23)
-#define __NR_SYSV_getuid (__NR_SYSV + 24)
-#define __NR_SYSV_stime (__NR_SYSV + 25)
-#define __NR_SYSV_ptrace (__NR_SYSV + 26)
-#define __NR_SYSV_alarm (__NR_SYSV + 27)
-#define __NR_SYSV_fstat (__NR_SYSV + 28)
-#define __NR_SYSV_pause (__NR_SYSV + 29)
-#define __NR_SYSV_utime (__NR_SYSV + 30)
-#define __NR_SYSV_stty (__NR_SYSV + 31)
-#define __NR_SYSV_gtty (__NR_SYSV + 32)
-#define __NR_SYSV_access (__NR_SYSV + 33)
-#define __NR_SYSV_nice (__NR_SYSV + 34)
-#define __NR_SYSV_statfs (__NR_SYSV + 35)
-#define __NR_SYSV_sync (__NR_SYSV + 36)
-#define __NR_SYSV_kill (__NR_SYSV + 37)
-#define __NR_SYSV_fstatfs (__NR_SYSV + 38)
-#define __NR_SYSV_setpgrp (__NR_SYSV + 39)
-#define __NR_SYSV_syssgi (__NR_SYSV + 40)
-#define __NR_SYSV_dup (__NR_SYSV + 41)
-#define __NR_SYSV_pipe (__NR_SYSV + 42)
-#define __NR_SYSV_times (__NR_SYSV + 43)
-#define __NR_SYSV_profil (__NR_SYSV + 44)
-#define __NR_SYSV_plock (__NR_SYSV + 45)
-#define __NR_SYSV_setgid (__NR_SYSV + 46)
-#define __NR_SYSV_getgid (__NR_SYSV + 47)
-#define __NR_SYSV_sig (__NR_SYSV + 48)
-#define __NR_SYSV_msgsys (__NR_SYSV + 49)
-#define __NR_SYSV_sysmips (__NR_SYSV + 50)
-#define __NR_SYSV_acct (__NR_SYSV + 51)
-#define __NR_SYSV_shmsys (__NR_SYSV + 52)
-#define __NR_SYSV_semsys (__NR_SYSV + 53)
-#define __NR_SYSV_ioctl (__NR_SYSV + 54)
-#define __NR_SYSV_uadmin (__NR_SYSV + 55)
-#define __NR_SYSV_sysmp (__NR_SYSV + 56)
-#define __NR_SYSV_utssys (__NR_SYSV + 57)
-#define __NR_SYSV_USG_reserved1 (__NR_SYSV + 58)
-#define __NR_SYSV_execve (__NR_SYSV + 59)
-#define __NR_SYSV_umask (__NR_SYSV + 60)
-#define __NR_SYSV_chroot (__NR_SYSV + 61)
-#define __NR_SYSV_fcntl (__NR_SYSV + 62)
-#define __NR_SYSV_ulimit (__NR_SYSV + 63)
-#define __NR_SYSV_SAFARI4_reserved1 (__NR_SYSV + 64)
-#define __NR_SYSV_SAFARI4_reserved2 (__NR_SYSV + 65)
-#define __NR_SYSV_SAFARI4_reserved3 (__NR_SYSV + 66)
-#define __NR_SYSV_SAFARI4_reserved4 (__NR_SYSV + 67)
-#define __NR_SYSV_SAFARI4_reserved5 (__NR_SYSV + 68)
-#define __NR_SYSV_SAFARI4_reserved6 (__NR_SYSV + 69)
-#define __NR_SYSV_advfs (__NR_SYSV + 70)
-#define __NR_SYSV_unadvfs (__NR_SYSV + 71)
-#define __NR_SYSV_rmount (__NR_SYSV + 72)
-#define __NR_SYSV_rumount (__NR_SYSV + 73)
-#define __NR_SYSV_rfstart (__NR_SYSV + 74)
-#define __NR_SYSV_getrlimit64 (__NR_SYSV + 75)
-#define __NR_SYSV_setrlimit64 (__NR_SYSV + 76)
-#define __NR_SYSV_nanosleep (__NR_SYSV + 77)
-#define __NR_SYSV_lseek64 (__NR_SYSV + 78)
-#define __NR_SYSV_rmdir (__NR_SYSV + 79)
-#define __NR_SYSV_mkdir (__NR_SYSV + 80)
-#define __NR_SYSV_getdents (__NR_SYSV + 81)
-#define __NR_SYSV_sginap (__NR_SYSV + 82)
-#define __NR_SYSV_sgikopt (__NR_SYSV + 83)
-#define __NR_SYSV_sysfs (__NR_SYSV + 84)
-#define __NR_SYSV_getmsg (__NR_SYSV + 85)
-#define __NR_SYSV_putmsg (__NR_SYSV + 86)
-#define __NR_SYSV_poll (__NR_SYSV + 87)
-#define __NR_SYSV_sigreturn (__NR_SYSV + 88)
-#define __NR_SYSV_accept (__NR_SYSV + 89)
-#define __NR_SYSV_bind (__NR_SYSV + 90)
-#define __NR_SYSV_connect (__NR_SYSV + 91)
-#define __NR_SYSV_gethostid (__NR_SYSV + 92)
-#define __NR_SYSV_getpeername (__NR_SYSV + 93)
-#define __NR_SYSV_getsockname (__NR_SYSV + 94)
-#define __NR_SYSV_getsockopt (__NR_SYSV + 95)
-#define __NR_SYSV_listen (__NR_SYSV + 96)
-#define __NR_SYSV_recv (__NR_SYSV + 97)
-#define __NR_SYSV_recvfrom (__NR_SYSV + 98)
-#define __NR_SYSV_recvmsg (__NR_SYSV + 99)
-#define __NR_SYSV_select (__NR_SYSV + 100)
-#define __NR_SYSV_send (__NR_SYSV + 101)
-#define __NR_SYSV_sendmsg (__NR_SYSV + 102)
-#define __NR_SYSV_sendto (__NR_SYSV + 103)
-#define __NR_SYSV_sethostid (__NR_SYSV + 104)
-#define __NR_SYSV_setsockopt (__NR_SYSV + 105)
-#define __NR_SYSV_shutdown (__NR_SYSV + 106)
-#define __NR_SYSV_socket (__NR_SYSV + 107)
-#define __NR_SYSV_gethostname (__NR_SYSV + 108)
-#define __NR_SYSV_sethostname (__NR_SYSV + 109)
-#define __NR_SYSV_getdomainname (__NR_SYSV + 110)
-#define __NR_SYSV_setdomainname (__NR_SYSV + 111)
-#define __NR_SYSV_truncate (__NR_SYSV + 112)
-#define __NR_SYSV_ftruncate (__NR_SYSV + 113)
-#define __NR_SYSV_rename (__NR_SYSV + 114)
-#define __NR_SYSV_symlink (__NR_SYSV + 115)
-#define __NR_SYSV_readlink (__NR_SYSV + 116)
-#define __NR_SYSV_lstat (__NR_SYSV + 117)
-#define __NR_SYSV_nfsmount (__NR_SYSV + 118)
-#define __NR_SYSV_nfssvc (__NR_SYSV + 119)
-#define __NR_SYSV_getfh (__NR_SYSV + 120)
-#define __NR_SYSV_async_daemon (__NR_SYSV + 121)
-#define __NR_SYSV_exportfs (__NR_SYSV + 122)
-#define __NR_SYSV_setregid (__NR_SYSV + 123)
-#define __NR_SYSV_setreuid (__NR_SYSV + 124)
-#define __NR_SYSV_getitimer (__NR_SYSV + 125)
-#define __NR_SYSV_setitimer (__NR_SYSV + 126)
-#define __NR_SYSV_adjtime (__NR_SYSV + 127)
-#define __NR_SYSV_BSD_getime (__NR_SYSV + 128)
-#define __NR_SYSV_sproc (__NR_SYSV + 129)
-#define __NR_SYSV_prctl (__NR_SYSV + 130)
-#define __NR_SYSV_procblk (__NR_SYSV + 131)
-#define __NR_SYSV_sprocsp (__NR_SYSV + 132)
-#define __NR_SYSV_sgigsc (__NR_SYSV + 133)
-#define __NR_SYSV_mmap (__NR_SYSV + 134)
-#define __NR_SYSV_munmap (__NR_SYSV + 135)
-#define __NR_SYSV_mprotect (__NR_SYSV + 136)
-#define __NR_SYSV_msync (__NR_SYSV + 137)
-#define __NR_SYSV_madvise (__NR_SYSV + 138)
-#define __NR_SYSV_pagelock (__NR_SYSV + 139)
-#define __NR_SYSV_getpagesize (__NR_SYSV + 140)
-#define __NR_SYSV_quotactl (__NR_SYSV + 141)
-#define __NR_SYSV_libdetach (__NR_SYSV + 142)
-#define __NR_SYSV_BSDgetpgrp (__NR_SYSV + 143)
-#define __NR_SYSV_BSDsetpgrp (__NR_SYSV + 144)
-#define __NR_SYSV_vhangup (__NR_SYSV + 145)
-#define __NR_SYSV_fsync (__NR_SYSV + 146)
-#define __NR_SYSV_fchdir (__NR_SYSV + 147)
-#define __NR_SYSV_getrlimit (__NR_SYSV + 148)
-#define __NR_SYSV_setrlimit (__NR_SYSV + 149)
-#define __NR_SYSV_cacheflush (__NR_SYSV + 150)
-#define __NR_SYSV_cachectl (__NR_SYSV + 151)
-#define __NR_SYSV_fchown (__NR_SYSV + 152)
-#define __NR_SYSV_fchmod (__NR_SYSV + 153)
-#define __NR_SYSV_wait3 (__NR_SYSV + 154)
-#define __NR_SYSV_socketpair (__NR_SYSV + 155)
-#define __NR_SYSV_sysinfo (__NR_SYSV + 156)
-#define __NR_SYSV_nuname (__NR_SYSV + 157)
-#define __NR_SYSV_xstat (__NR_SYSV + 158)
-#define __NR_SYSV_lxstat (__NR_SYSV + 159)
-#define __NR_SYSV_fxstat (__NR_SYSV + 160)
-#define __NR_SYSV_xmknod (__NR_SYSV + 161)
-#define __NR_SYSV_ksigaction (__NR_SYSV + 162)
-#define __NR_SYSV_sigpending (__NR_SYSV + 163)
-#define __NR_SYSV_sigprocmask (__NR_SYSV + 164)
-#define __NR_SYSV_sigsuspend (__NR_SYSV + 165)
-#define __NR_SYSV_sigpoll (__NR_SYSV + 166)
-#define __NR_SYSV_swapctl (__NR_SYSV + 167)
-#define __NR_SYSV_getcontext (__NR_SYSV + 168)
-#define __NR_SYSV_setcontext (__NR_SYSV + 169)
-#define __NR_SYSV_waitsys (__NR_SYSV + 170)
-#define __NR_SYSV_sigstack (__NR_SYSV + 171)
-#define __NR_SYSV_sigaltstack (__NR_SYSV + 172)
-#define __NR_SYSV_sigsendset (__NR_SYSV + 173)
-#define __NR_SYSV_statvfs (__NR_SYSV + 174)
-#define __NR_SYSV_fstatvfs (__NR_SYSV + 175)
-#define __NR_SYSV_getpmsg (__NR_SYSV + 176)
-#define __NR_SYSV_putpmsg (__NR_SYSV + 177)
-#define __NR_SYSV_lchown (__NR_SYSV + 178)
-#define __NR_SYSV_priocntl (__NR_SYSV + 179)
-#define __NR_SYSV_ksigqueue (__NR_SYSV + 180)
-#define __NR_SYSV_readv (__NR_SYSV + 181)
-#define __NR_SYSV_writev (__NR_SYSV + 182)
-#define __NR_SYSV_truncate64 (__NR_SYSV + 183)
-#define __NR_SYSV_ftruncate64 (__NR_SYSV + 184)
-#define __NR_SYSV_mmap64 (__NR_SYSV + 185)
-#define __NR_SYSV_dmi (__NR_SYSV + 186)
-#define __NR_SYSV_pread (__NR_SYSV + 187)
-#define __NR_SYSV_pwrite (__NR_SYSV + 188)
+#ifndef _ASM_UNISTD_H
+#define _ASM_UNISTD_H
-/*
- * BSD 4.3 syscalls are in the range from 2000 to 2999
- */
-#define __NR_BSD43 2000
-#define __NR_BSD43_syscall (__NR_BSD43 + 0)
-#define __NR_BSD43_exit (__NR_BSD43 + 1)
-#define __NR_BSD43_fork (__NR_BSD43 + 2)
-#define __NR_BSD43_read (__NR_BSD43 + 3)
-#define __NR_BSD43_write (__NR_BSD43 + 4)
-#define __NR_BSD43_open (__NR_BSD43 + 5)
-#define __NR_BSD43_close (__NR_BSD43 + 6)
-#define __NR_BSD43_wait (__NR_BSD43 + 7)
-#define __NR_BSD43_creat (__NR_BSD43 + 8)
-#define __NR_BSD43_link (__NR_BSD43 + 9)
-#define __NR_BSD43_unlink (__NR_BSD43 + 10)
-#define __NR_BSD43_exec (__NR_BSD43 + 11)
-#define __NR_BSD43_chdir (__NR_BSD43 + 12)
-#define __NR_BSD43_time (__NR_BSD43 + 13)
-#define __NR_BSD43_mknod (__NR_BSD43 + 14)
-#define __NR_BSD43_chmod (__NR_BSD43 + 15)
-#define __NR_BSD43_chown (__NR_BSD43 + 16)
-#define __NR_BSD43_sbreak (__NR_BSD43 + 17)
-#define __NR_BSD43_oldstat (__NR_BSD43 + 18)
-#define __NR_BSD43_lseek (__NR_BSD43 + 19)
-#define __NR_BSD43_getpid (__NR_BSD43 + 20)
-#define __NR_BSD43_oldmount (__NR_BSD43 + 21)
-#define __NR_BSD43_umount (__NR_BSD43 + 22)
-#define __NR_BSD43_setuid (__NR_BSD43 + 23)
-#define __NR_BSD43_getuid (__NR_BSD43 + 24)
-#define __NR_BSD43_stime (__NR_BSD43 + 25)
-#define __NR_BSD43_ptrace (__NR_BSD43 + 26)
-#define __NR_BSD43_alarm (__NR_BSD43 + 27)
-#define __NR_BSD43_oldfstat (__NR_BSD43 + 28)
-#define __NR_BSD43_pause (__NR_BSD43 + 29)
-#define __NR_BSD43_utime (__NR_BSD43 + 30)
-#define __NR_BSD43_stty (__NR_BSD43 + 31)
-#define __NR_BSD43_gtty (__NR_BSD43 + 32)
-#define __NR_BSD43_access (__NR_BSD43 + 33)
-#define __NR_BSD43_nice (__NR_BSD43 + 34)
-#define __NR_BSD43_ftime (__NR_BSD43 + 35)
-#define __NR_BSD43_sync (__NR_BSD43 + 36)
-#define __NR_BSD43_kill (__NR_BSD43 + 37)
-#define __NR_BSD43_stat (__NR_BSD43 + 38)
-#define __NR_BSD43_oldsetpgrp (__NR_BSD43 + 39)
-#define __NR_BSD43_lstat (__NR_BSD43 + 40)
-#define __NR_BSD43_dup (__NR_BSD43 + 41)
-#define __NR_BSD43_pipe (__NR_BSD43 + 42)
-#define __NR_BSD43_times (__NR_BSD43 + 43)
-#define __NR_BSD43_profil (__NR_BSD43 + 44)
-#define __NR_BSD43_msgsys (__NR_BSD43 + 45)
-#define __NR_BSD43_setgid (__NR_BSD43 + 46)
-#define __NR_BSD43_getgid (__NR_BSD43 + 47)
-#define __NR_BSD43_ssig (__NR_BSD43 + 48)
-#define __NR_BSD43_reserved1 (__NR_BSD43 + 49)
-#define __NR_BSD43_reserved2 (__NR_BSD43 + 50)
-#define __NR_BSD43_sysacct (__NR_BSD43 + 51)
-#define __NR_BSD43_phys (__NR_BSD43 + 52)
-#define __NR_BSD43_lock (__NR_BSD43 + 53)
-#define __NR_BSD43_ioctl (__NR_BSD43 + 54)
-#define __NR_BSD43_reboot (__NR_BSD43 + 55)
-#define __NR_BSD43_mpxchan (__NR_BSD43 + 56)
-#define __NR_BSD43_symlink (__NR_BSD43 + 57)
-#define __NR_BSD43_readlink (__NR_BSD43 + 58)
-#define __NR_BSD43_execve (__NR_BSD43 + 59)
-#define __NR_BSD43_umask (__NR_BSD43 + 60)
-#define __NR_BSD43_chroot (__NR_BSD43 + 61)
-#define __NR_BSD43_fstat (__NR_BSD43 + 62)
-#define __NR_BSD43_reserved3 (__NR_BSD43 + 63)
-#define __NR_BSD43_getpagesize (__NR_BSD43 + 64)
-#define __NR_BSD43_mremap (__NR_BSD43 + 65)
-#define __NR_BSD43_vfork (__NR_BSD43 + 66)
-#define __NR_BSD43_vread (__NR_BSD43 + 67)
-#define __NR_BSD43_vwrite (__NR_BSD43 + 68)
-#define __NR_BSD43_sbrk (__NR_BSD43 + 69)
-#define __NR_BSD43_sstk (__NR_BSD43 + 70)
-#define __NR_BSD43_mmap (__NR_BSD43 + 71)
-#define __NR_BSD43_vadvise (__NR_BSD43 + 72)
-#define __NR_BSD43_munmap (__NR_BSD43 + 73)
-#define __NR_BSD43_mprotect (__NR_BSD43 + 74)
-#define __NR_BSD43_madvise (__NR_BSD43 + 75)
-#define __NR_BSD43_vhangup (__NR_BSD43 + 76)
-#define __NR_BSD43_vlimit (__NR_BSD43 + 77)
-#define __NR_BSD43_mincore (__NR_BSD43 + 78)
-#define __NR_BSD43_getgroups (__NR_BSD43 + 79)
-#define __NR_BSD43_setgroups (__NR_BSD43 + 80)
-#define __NR_BSD43_getpgrp (__NR_BSD43 + 81)
-#define __NR_BSD43_setpgrp (__NR_BSD43 + 82)
-#define __NR_BSD43_setitimer (__NR_BSD43 + 83)
-#define __NR_BSD43_wait3 (__NR_BSD43 + 84)
-#define __NR_BSD43_swapon (__NR_BSD43 + 85)
-#define __NR_BSD43_getitimer (__NR_BSD43 + 86)
-#define __NR_BSD43_gethostname (__NR_BSD43 + 87)
-#define __NR_BSD43_sethostname (__NR_BSD43 + 88)
-#define __NR_BSD43_getdtablesize (__NR_BSD43 + 89)
-#define __NR_BSD43_dup2 (__NR_BSD43 + 90)
-#define __NR_BSD43_getdopt (__NR_BSD43 + 91)
-#define __NR_BSD43_fcntl (__NR_BSD43 + 92)
-#define __NR_BSD43_select (__NR_BSD43 + 93)
-#define __NR_BSD43_setdopt (__NR_BSD43 + 94)
-#define __NR_BSD43_fsync (__NR_BSD43 + 95)
-#define __NR_BSD43_setpriority (__NR_BSD43 + 96)
-#define __NR_BSD43_socket (__NR_BSD43 + 97)
-#define __NR_BSD43_connect (__NR_BSD43 + 98)
-#define __NR_BSD43_oldaccept (__NR_BSD43 + 99)
-#define __NR_BSD43_getpriority (__NR_BSD43 + 100)
-#define __NR_BSD43_send (__NR_BSD43 + 101)
-#define __NR_BSD43_recv (__NR_BSD43 + 102)
-#define __NR_BSD43_sigreturn (__NR_BSD43 + 103)
-#define __NR_BSD43_bind (__NR_BSD43 + 104)
-#define __NR_BSD43_setsockopt (__NR_BSD43 + 105)
-#define __NR_BSD43_listen (__NR_BSD43 + 106)
-#define __NR_BSD43_vtimes (__NR_BSD43 + 107)
-#define __NR_BSD43_sigvec (__NR_BSD43 + 108)
-#define __NR_BSD43_sigblock (__NR_BSD43 + 109)
-#define __NR_BSD43_sigsetmask (__NR_BSD43 + 110)
-#define __NR_BSD43_sigpause (__NR_BSD43 + 111)
-#define __NR_BSD43_sigstack (__NR_BSD43 + 112)
-#define __NR_BSD43_oldrecvmsg (__NR_BSD43 + 113)
-#define __NR_BSD43_oldsendmsg (__NR_BSD43 + 114)
-#define __NR_BSD43_vtrace (__NR_BSD43 + 115)
-#define __NR_BSD43_gettimeofday (__NR_BSD43 + 116)
-#define __NR_BSD43_getrusage (__NR_BSD43 + 117)
-#define __NR_BSD43_getsockopt (__NR_BSD43 + 118)
-#define __NR_BSD43_reserved4 (__NR_BSD43 + 119)
-#define __NR_BSD43_readv (__NR_BSD43 + 120)
-#define __NR_BSD43_writev (__NR_BSD43 + 121)
-#define __NR_BSD43_settimeofday (__NR_BSD43 + 122)
-#define __NR_BSD43_fchown (__NR_BSD43 + 123)
-#define __NR_BSD43_fchmod (__NR_BSD43 + 124)
-#define __NR_BSD43_oldrecvfrom (__NR_BSD43 + 125)
-#define __NR_BSD43_setreuid (__NR_BSD43 + 126)
-#define __NR_BSD43_setregid (__NR_BSD43 + 127)
-#define __NR_BSD43_rename (__NR_BSD43 + 128)
-#define __NR_BSD43_truncate (__NR_BSD43 + 129)
-#define __NR_BSD43_ftruncate (__NR_BSD43 + 130)
-#define __NR_BSD43_flock (__NR_BSD43 + 131)
-#define __NR_BSD43_semsys (__NR_BSD43 + 132)
-#define __NR_BSD43_sendto (__NR_BSD43 + 133)
-#define __NR_BSD43_shutdown (__NR_BSD43 + 134)
-#define __NR_BSD43_socketpair (__NR_BSD43 + 135)
-#define __NR_BSD43_mkdir (__NR_BSD43 + 136)
-#define __NR_BSD43_rmdir (__NR_BSD43 + 137)
-#define __NR_BSD43_utimes (__NR_BSD43 + 138)
-#define __NR_BSD43_sigcleanup (__NR_BSD43 + 139)
-#define __NR_BSD43_adjtime (__NR_BSD43 + 140)
-#define __NR_BSD43_oldgetpeername (__NR_BSD43 + 141)
-#define __NR_BSD43_gethostid (__NR_BSD43 + 142)
-#define __NR_BSD43_sethostid (__NR_BSD43 + 143)
-#define __NR_BSD43_getrlimit (__NR_BSD43 + 144)
-#define __NR_BSD43_setrlimit (__NR_BSD43 + 145)
-#define __NR_BSD43_killpg (__NR_BSD43 + 146)
-#define __NR_BSD43_shmsys (__NR_BSD43 + 147)
-#define __NR_BSD43_quota (__NR_BSD43 + 148)
-#define __NR_BSD43_qquota (__NR_BSD43 + 149)
-#define __NR_BSD43_oldgetsockname (__NR_BSD43 + 150)
-#define __NR_BSD43_sysmips (__NR_BSD43 + 151)
-#define __NR_BSD43_cacheflush (__NR_BSD43 + 152)
-#define __NR_BSD43_cachectl (__NR_BSD43 + 153)
-#define __NR_BSD43_debug (__NR_BSD43 + 154)
-#define __NR_BSD43_reserved5 (__NR_BSD43 + 155)
-#define __NR_BSD43_reserved6 (__NR_BSD43 + 156)
-#define __NR_BSD43_nfs_mount (__NR_BSD43 + 157)
-#define __NR_BSD43_nfs_svc (__NR_BSD43 + 158)
-#define __NR_BSD43_getdirentries (__NR_BSD43 + 159)
-#define __NR_BSD43_statfs (__NR_BSD43 + 160)
-#define __NR_BSD43_fstatfs (__NR_BSD43 + 161)
-#define __NR_BSD43_unmount (__NR_BSD43 + 162)
-#define __NR_BSD43_async_daemon (__NR_BSD43 + 163)
-#define __NR_BSD43_nfs_getfh (__NR_BSD43 + 164)
-#define __NR_BSD43_getdomainname (__NR_BSD43 + 165)
-#define __NR_BSD43_setdomainname (__NR_BSD43 + 166)
-#define __NR_BSD43_pcfs_mount (__NR_BSD43 + 167)
-#define __NR_BSD43_quotactl (__NR_BSD43 + 168)
-#define __NR_BSD43_oldexportfs (__NR_BSD43 + 169)
-#define __NR_BSD43_smount (__NR_BSD43 + 170)
-#define __NR_BSD43_mipshwconf (__NR_BSD43 + 171)
-#define __NR_BSD43_exportfs (__NR_BSD43 + 172)
-#define __NR_BSD43_nfsfh_open (__NR_BSD43 + 173)
-#define __NR_BSD43_libattach (__NR_BSD43 + 174)
-#define __NR_BSD43_libdetach (__NR_BSD43 + 175)
-#define __NR_BSD43_accept (__NR_BSD43 + 176)
-#define __NR_BSD43_reserved7 (__NR_BSD43 + 177)
-#define __NR_BSD43_reserved8 (__NR_BSD43 + 178)
-#define __NR_BSD43_recvmsg (__NR_BSD43 + 179)
-#define __NR_BSD43_recvfrom (__NR_BSD43 + 180)
-#define __NR_BSD43_sendmsg (__NR_BSD43 + 181)
-#define __NR_BSD43_getpeername (__NR_BSD43 + 182)
-#define __NR_BSD43_getsockname (__NR_BSD43 + 183)
-#define __NR_BSD43_aread (__NR_BSD43 + 184)
-#define __NR_BSD43_awrite (__NR_BSD43 + 185)
-#define __NR_BSD43_listio (__NR_BSD43 + 186)
-#define __NR_BSD43_acancel (__NR_BSD43 + 187)
-#define __NR_BSD43_astatus (__NR_BSD43 + 188)
-#define __NR_BSD43_await (__NR_BSD43 + 189)
-#define __NR_BSD43_areadv (__NR_BSD43 + 190)
-#define __NR_BSD43_awritev (__NR_BSD43 + 191)
-
-/*
- * POSIX syscalls are in the range from 3000 to 3999
- */
-#define __NR_POSIX 3000
-#define __NR_POSIX_syscall (__NR_POSIX + 0)
-#define __NR_POSIX_exit (__NR_POSIX + 1)
-#define __NR_POSIX_fork (__NR_POSIX + 2)
-#define __NR_POSIX_read (__NR_POSIX + 3)
-#define __NR_POSIX_write (__NR_POSIX + 4)
-#define __NR_POSIX_open (__NR_POSIX + 5)
-#define __NR_POSIX_close (__NR_POSIX + 6)
-#define __NR_POSIX_wait (__NR_POSIX + 7)
-#define __NR_POSIX_creat (__NR_POSIX + 8)
-#define __NR_POSIX_link (__NR_POSIX + 9)
-#define __NR_POSIX_unlink (__NR_POSIX + 10)
-#define __NR_POSIX_exec (__NR_POSIX + 11)
-#define __NR_POSIX_chdir (__NR_POSIX + 12)
-#define __NR_POSIX_gtime (__NR_POSIX + 13)
-#define __NR_POSIX_mknod (__NR_POSIX + 14)
-#define __NR_POSIX_chmod (__NR_POSIX + 15)
-#define __NR_POSIX_chown (__NR_POSIX + 16)
-#define __NR_POSIX_sbreak (__NR_POSIX + 17)
-#define __NR_POSIX_stat (__NR_POSIX + 18)
-#define __NR_POSIX_lseek (__NR_POSIX + 19)
-#define __NR_POSIX_getpid (__NR_POSIX + 20)
-#define __NR_POSIX_mount (__NR_POSIX + 21)
-#define __NR_POSIX_umount (__NR_POSIX + 22)
-#define __NR_POSIX_setuid (__NR_POSIX + 23)
-#define __NR_POSIX_getuid (__NR_POSIX + 24)
-#define __NR_POSIX_stime (__NR_POSIX + 25)
-#define __NR_POSIX_ptrace (__NR_POSIX + 26)
-#define __NR_POSIX_alarm (__NR_POSIX + 27)
-#define __NR_POSIX_fstat (__NR_POSIX + 28)
-#define __NR_POSIX_pause (__NR_POSIX + 29)
-#define __NR_POSIX_utime (__NR_POSIX + 30)
-#define __NR_POSIX_stty (__NR_POSIX + 31)
-#define __NR_POSIX_gtty (__NR_POSIX + 32)
-#define __NR_POSIX_access (__NR_POSIX + 33)
-#define __NR_POSIX_nice (__NR_POSIX + 34)
-#define __NR_POSIX_statfs (__NR_POSIX + 35)
-#define __NR_POSIX_sync (__NR_POSIX + 36)
-#define __NR_POSIX_kill (__NR_POSIX + 37)
-#define __NR_POSIX_fstatfs (__NR_POSIX + 38)
-#define __NR_POSIX_getpgrp (__NR_POSIX + 39)
-#define __NR_POSIX_syssgi (__NR_POSIX + 40)
-#define __NR_POSIX_dup (__NR_POSIX + 41)
-#define __NR_POSIX_pipe (__NR_POSIX + 42)
-#define __NR_POSIX_times (__NR_POSIX + 43)
-#define __NR_POSIX_profil (__NR_POSIX + 44)
-#define __NR_POSIX_lock (__NR_POSIX + 45)
-#define __NR_POSIX_setgid (__NR_POSIX + 46)
-#define __NR_POSIX_getgid (__NR_POSIX + 47)
-#define __NR_POSIX_sig (__NR_POSIX + 48)
-#define __NR_POSIX_msgsys (__NR_POSIX + 49)
-#define __NR_POSIX_sysmips (__NR_POSIX + 50)
-#define __NR_POSIX_sysacct (__NR_POSIX + 51)
-#define __NR_POSIX_shmsys (__NR_POSIX + 52)
-#define __NR_POSIX_semsys (__NR_POSIX + 53)
-#define __NR_POSIX_ioctl (__NR_POSIX + 54)
-#define __NR_POSIX_uadmin (__NR_POSIX + 55)
-#define __NR_POSIX_exch (__NR_POSIX + 56)
-#define __NR_POSIX_utssys (__NR_POSIX + 57)
-#define __NR_POSIX_USG_reserved1 (__NR_POSIX + 58)
-#define __NR_POSIX_exece (__NR_POSIX + 59)
-#define __NR_POSIX_umask (__NR_POSIX + 60)
-#define __NR_POSIX_chroot (__NR_POSIX + 61)
-#define __NR_POSIX_fcntl (__NR_POSIX + 62)
-#define __NR_POSIX_ulimit (__NR_POSIX + 63)
-#define __NR_POSIX_SAFARI4_reserved1 (__NR_POSIX + 64)
-#define __NR_POSIX_SAFARI4_reserved2 (__NR_POSIX + 65)
-#define __NR_POSIX_SAFARI4_reserved3 (__NR_POSIX + 66)
-#define __NR_POSIX_SAFARI4_reserved4 (__NR_POSIX + 67)
-#define __NR_POSIX_SAFARI4_reserved5 (__NR_POSIX + 68)
-#define __NR_POSIX_SAFARI4_reserved6 (__NR_POSIX + 69)
-#define __NR_POSIX_advfs (__NR_POSIX + 70)
-#define __NR_POSIX_unadvfs (__NR_POSIX + 71)
-#define __NR_POSIX_rmount (__NR_POSIX + 72)
-#define __NR_POSIX_rumount (__NR_POSIX + 73)
-#define __NR_POSIX_rfstart (__NR_POSIX + 74)
-#define __NR_POSIX_reserved1 (__NR_POSIX + 75)
-#define __NR_POSIX_rdebug (__NR_POSIX + 76)
-#define __NR_POSIX_rfstop (__NR_POSIX + 77)
-#define __NR_POSIX_rfsys (__NR_POSIX + 78)
-#define __NR_POSIX_rmdir (__NR_POSIX + 79)
-#define __NR_POSIX_mkdir (__NR_POSIX + 80)
-#define __NR_POSIX_getdents (__NR_POSIX + 81)
-#define __NR_POSIX_sginap (__NR_POSIX + 82)
-#define __NR_POSIX_sgikopt (__NR_POSIX + 83)
-#define __NR_POSIX_sysfs (__NR_POSIX + 84)
-#define __NR_POSIX_getmsg (__NR_POSIX + 85)
-#define __NR_POSIX_putmsg (__NR_POSIX + 86)
-#define __NR_POSIX_poll (__NR_POSIX + 87)
-#define __NR_POSIX_sigreturn (__NR_POSIX + 88)
-#define __NR_POSIX_accept (__NR_POSIX + 89)
-#define __NR_POSIX_bind (__NR_POSIX + 90)
-#define __NR_POSIX_connect (__NR_POSIX + 91)
-#define __NR_POSIX_gethostid (__NR_POSIX + 92)
-#define __NR_POSIX_getpeername (__NR_POSIX + 93)
-#define __NR_POSIX_getsockname (__NR_POSIX + 94)
-#define __NR_POSIX_getsockopt (__NR_POSIX + 95)
-#define __NR_POSIX_listen (__NR_POSIX + 96)
-#define __NR_POSIX_recv (__NR_POSIX + 97)
-#define __NR_POSIX_recvfrom (__NR_POSIX + 98)
-#define __NR_POSIX_recvmsg (__NR_POSIX + 99)
-#define __NR_POSIX_select (__NR_POSIX + 100)
-#define __NR_POSIX_send (__NR_POSIX + 101)
-#define __NR_POSIX_sendmsg (__NR_POSIX + 102)
-#define __NR_POSIX_sendto (__NR_POSIX + 103)
-#define __NR_POSIX_sethostid (__NR_POSIX + 104)
-#define __NR_POSIX_setsockopt (__NR_POSIX + 105)
-#define __NR_POSIX_shutdown (__NR_POSIX + 106)
-#define __NR_POSIX_socket (__NR_POSIX + 107)
-#define __NR_POSIX_gethostname (__NR_POSIX + 108)
-#define __NR_POSIX_sethostname (__NR_POSIX + 109)
-#define __NR_POSIX_getdomainname (__NR_POSIX + 110)
-#define __NR_POSIX_setdomainname (__NR_POSIX + 111)
-#define __NR_POSIX_truncate (__NR_POSIX + 112)
-#define __NR_POSIX_ftruncate (__NR_POSIX + 113)
-#define __NR_POSIX_rename (__NR_POSIX + 114)
-#define __NR_POSIX_symlink (__NR_POSIX + 115)
-#define __NR_POSIX_readlink (__NR_POSIX + 116)
-#define __NR_POSIX_lstat (__NR_POSIX + 117)
-#define __NR_POSIX_nfs_mount (__NR_POSIX + 118)
-#define __NR_POSIX_nfs_svc (__NR_POSIX + 119)
-#define __NR_POSIX_nfs_getfh (__NR_POSIX + 120)
-#define __NR_POSIX_async_daemon (__NR_POSIX + 121)
-#define __NR_POSIX_exportfs (__NR_POSIX + 122)
-#define __NR_POSIX_SGI_setregid (__NR_POSIX + 123)
-#define __NR_POSIX_SGI_setreuid (__NR_POSIX + 124)
-#define __NR_POSIX_getitimer (__NR_POSIX + 125)
-#define __NR_POSIX_setitimer (__NR_POSIX + 126)
-#define __NR_POSIX_adjtime (__NR_POSIX + 127)
-#define __NR_POSIX_SGI_bsdgettime (__NR_POSIX + 128)
-#define __NR_POSIX_SGI_sproc (__NR_POSIX + 129)
-#define __NR_POSIX_SGI_prctl (__NR_POSIX + 130)
-#define __NR_POSIX_SGI_blkproc (__NR_POSIX + 131)
-#define __NR_POSIX_SGI_reserved1 (__NR_POSIX + 132)
-#define __NR_POSIX_SGI_sgigsc (__NR_POSIX + 133)
-#define __NR_POSIX_SGI_mmap (__NR_POSIX + 134)
-#define __NR_POSIX_SGI_munmap (__NR_POSIX + 135)
-#define __NR_POSIX_SGI_mprotect (__NR_POSIX + 136)
-#define __NR_POSIX_SGI_msync (__NR_POSIX + 137)
-#define __NR_POSIX_SGI_madvise (__NR_POSIX + 138)
-#define __NR_POSIX_SGI_mpin (__NR_POSIX + 139)
-#define __NR_POSIX_SGI_getpagesize (__NR_POSIX + 140)
-#define __NR_POSIX_SGI_libattach (__NR_POSIX + 141)
-#define __NR_POSIX_SGI_libdetach (__NR_POSIX + 142)
-#define __NR_POSIX_SGI_getpgrp (__NR_POSIX + 143)
-#define __NR_POSIX_SGI_setpgrp (__NR_POSIX + 144)
-#define __NR_POSIX_SGI_reserved2 (__NR_POSIX + 145)
-#define __NR_POSIX_SGI_reserved3 (__NR_POSIX + 146)
-#define __NR_POSIX_SGI_reserved4 (__NR_POSIX + 147)
-#define __NR_POSIX_SGI_reserved5 (__NR_POSIX + 148)
-#define __NR_POSIX_SGI_reserved6 (__NR_POSIX + 149)
-#define __NR_POSIX_cacheflush (__NR_POSIX + 150)
-#define __NR_POSIX_cachectl (__NR_POSIX + 151)
-#define __NR_POSIX_fchown (__NR_POSIX + 152)
-#define __NR_POSIX_fchmod (__NR_POSIX + 153)
-#define __NR_POSIX_wait3 (__NR_POSIX + 154)
-#define __NR_POSIX_mmap (__NR_POSIX + 155)
-#define __NR_POSIX_munmap (__NR_POSIX + 156)
-#define __NR_POSIX_madvise (__NR_POSIX + 157)
-#define __NR_POSIX_BSD_getpagesize (__NR_POSIX + 158)
-#define __NR_POSIX_setreuid (__NR_POSIX + 159)
-#define __NR_POSIX_setregid (__NR_POSIX + 160)
-#define __NR_POSIX_setpgid (__NR_POSIX + 161)
-#define __NR_POSIX_getgroups (__NR_POSIX + 162)
-#define __NR_POSIX_setgroups (__NR_POSIX + 163)
-#define __NR_POSIX_gettimeofday (__NR_POSIX + 164)
-#define __NR_POSIX_getrusage (__NR_POSIX + 165)
-#define __NR_POSIX_getrlimit (__NR_POSIX + 166)
-#define __NR_POSIX_setrlimit (__NR_POSIX + 167)
-#define __NR_POSIX_waitpid (__NR_POSIX + 168)
-#define __NR_POSIX_dup2 (__NR_POSIX + 169)
-#define __NR_POSIX_reserved2 (__NR_POSIX + 170)
-#define __NR_POSIX_reserved3 (__NR_POSIX + 171)
-#define __NR_POSIX_reserved4 (__NR_POSIX + 172)
-#define __NR_POSIX_reserved5 (__NR_POSIX + 173)
-#define __NR_POSIX_reserved6 (__NR_POSIX + 174)
-#define __NR_POSIX_reserved7 (__NR_POSIX + 175)
-#define __NR_POSIX_reserved8 (__NR_POSIX + 176)
-#define __NR_POSIX_reserved9 (__NR_POSIX + 177)
-#define __NR_POSIX_reserved10 (__NR_POSIX + 178)
-#define __NR_POSIX_reserved11 (__NR_POSIX + 179)
-#define __NR_POSIX_reserved12 (__NR_POSIX + 180)
-#define __NR_POSIX_reserved13 (__NR_POSIX + 181)
-#define __NR_POSIX_reserved14 (__NR_POSIX + 182)
-#define __NR_POSIX_reserved15 (__NR_POSIX + 183)
-#define __NR_POSIX_reserved16 (__NR_POSIX + 184)
-#define __NR_POSIX_reserved17 (__NR_POSIX + 185)
-#define __NR_POSIX_reserved18 (__NR_POSIX + 186)
-#define __NR_POSIX_reserved19 (__NR_POSIX + 187)
-#define __NR_POSIX_reserved20 (__NR_POSIX + 188)
-#define __NR_POSIX_reserved21 (__NR_POSIX + 189)
-#define __NR_POSIX_reserved22 (__NR_POSIX + 190)
-#define __NR_POSIX_reserved23 (__NR_POSIX + 191)
-#define __NR_POSIX_reserved24 (__NR_POSIX + 192)
-#define __NR_POSIX_reserved25 (__NR_POSIX + 193)
-#define __NR_POSIX_reserved26 (__NR_POSIX + 194)
-#define __NR_POSIX_reserved27 (__NR_POSIX + 195)
-#define __NR_POSIX_reserved28 (__NR_POSIX + 196)
-#define __NR_POSIX_reserved29 (__NR_POSIX + 197)
-#define __NR_POSIX_reserved30 (__NR_POSIX + 198)
-#define __NR_POSIX_reserved31 (__NR_POSIX + 199)
-#define __NR_POSIX_reserved32 (__NR_POSIX + 200)
-#define __NR_POSIX_reserved33 (__NR_POSIX + 201)
-#define __NR_POSIX_reserved34 (__NR_POSIX + 202)
-#define __NR_POSIX_reserved35 (__NR_POSIX + 203)
-#define __NR_POSIX_reserved36 (__NR_POSIX + 204)
-#define __NR_POSIX_reserved37 (__NR_POSIX + 205)
-#define __NR_POSIX_reserved38 (__NR_POSIX + 206)
-#define __NR_POSIX_reserved39 (__NR_POSIX + 207)
-#define __NR_POSIX_reserved40 (__NR_POSIX + 208)
-#define __NR_POSIX_reserved41 (__NR_POSIX + 209)
-#define __NR_POSIX_reserved42 (__NR_POSIX + 210)
-#define __NR_POSIX_reserved43 (__NR_POSIX + 211)
-#define __NR_POSIX_reserved44 (__NR_POSIX + 212)
-#define __NR_POSIX_reserved45 (__NR_POSIX + 213)
-#define __NR_POSIX_reserved46 (__NR_POSIX + 214)
-#define __NR_POSIX_reserved47 (__NR_POSIX + 215)
-#define __NR_POSIX_reserved48 (__NR_POSIX + 216)
-#define __NR_POSIX_reserved49 (__NR_POSIX + 217)
-#define __NR_POSIX_reserved50 (__NR_POSIX + 218)
-#define __NR_POSIX_reserved51 (__NR_POSIX + 219)
-#define __NR_POSIX_reserved52 (__NR_POSIX + 220)
-#define __NR_POSIX_reserved53 (__NR_POSIX + 221)
-#define __NR_POSIX_reserved54 (__NR_POSIX + 222)
-#define __NR_POSIX_reserved55 (__NR_POSIX + 223)
-#define __NR_POSIX_reserved56 (__NR_POSIX + 224)
-#define __NR_POSIX_reserved57 (__NR_POSIX + 225)
-#define __NR_POSIX_reserved58 (__NR_POSIX + 226)
-#define __NR_POSIX_reserved59 (__NR_POSIX + 227)
-#define __NR_POSIX_reserved60 (__NR_POSIX + 228)
-#define __NR_POSIX_reserved61 (__NR_POSIX + 229)
-#define __NR_POSIX_reserved62 (__NR_POSIX + 230)
-#define __NR_POSIX_reserved63 (__NR_POSIX + 231)
-#define __NR_POSIX_reserved64 (__NR_POSIX + 232)
-#define __NR_POSIX_reserved65 (__NR_POSIX + 233)
-#define __NR_POSIX_reserved66 (__NR_POSIX + 234)
-#define __NR_POSIX_reserved67 (__NR_POSIX + 235)
-#define __NR_POSIX_reserved68 (__NR_POSIX + 236)
-#define __NR_POSIX_reserved69 (__NR_POSIX + 237)
-#define __NR_POSIX_reserved70 (__NR_POSIX + 238)
-#define __NR_POSIX_reserved71 (__NR_POSIX + 239)
-#define __NR_POSIX_reserved72 (__NR_POSIX + 240)
-#define __NR_POSIX_reserved73 (__NR_POSIX + 241)
-#define __NR_POSIX_reserved74 (__NR_POSIX + 242)
-#define __NR_POSIX_reserved75 (__NR_POSIX + 243)
-#define __NR_POSIX_reserved76 (__NR_POSIX + 244)
-#define __NR_POSIX_reserved77 (__NR_POSIX + 245)
-#define __NR_POSIX_reserved78 (__NR_POSIX + 246)
-#define __NR_POSIX_reserved79 (__NR_POSIX + 247)
-#define __NR_POSIX_reserved80 (__NR_POSIX + 248)
-#define __NR_POSIX_reserved81 (__NR_POSIX + 249)
-#define __NR_POSIX_reserved82 (__NR_POSIX + 250)
-#define __NR_POSIX_reserved83 (__NR_POSIX + 251)
-#define __NR_POSIX_reserved84 (__NR_POSIX + 252)
-#define __NR_POSIX_reserved85 (__NR_POSIX + 253)
-#define __NR_POSIX_reserved86 (__NR_POSIX + 254)
-#define __NR_POSIX_reserved87 (__NR_POSIX + 255)
-#define __NR_POSIX_reserved88 (__NR_POSIX + 256)
-#define __NR_POSIX_reserved89 (__NR_POSIX + 257)
-#define __NR_POSIX_reserved90 (__NR_POSIX + 258)
-#define __NR_POSIX_reserved91 (__NR_POSIX + 259)
-#define __NR_POSIX_netboot (__NR_POSIX + 260)
-#define __NR_POSIX_netunboot (__NR_POSIX + 261)
-#define __NR_POSIX_rdump (__NR_POSIX + 262)
-#define __NR_POSIX_setsid (__NR_POSIX + 263)
-#define __NR_POSIX_getmaxsig (__NR_POSIX + 264)
-#define __NR_POSIX_sigpending (__NR_POSIX + 265)
-#define __NR_POSIX_sigprocmask (__NR_POSIX + 266)
-#define __NR_POSIX_sigsuspend (__NR_POSIX + 267)
-#define __NR_POSIX_sigaction (__NR_POSIX + 268)
-#define __NR_POSIX_MIPS_reserved1 (__NR_POSIX + 269)
-#define __NR_POSIX_MIPS_reserved2 (__NR_POSIX + 270)
-#define __NR_POSIX_MIPS_reserved3 (__NR_POSIX + 271)
-#define __NR_POSIX_MIPS_reserved4 (__NR_POSIX + 272)
-#define __NR_POSIX_MIPS_reserved5 (__NR_POSIX + 273)
-#define __NR_POSIX_MIPS_reserved6 (__NR_POSIX + 274)
-#define __NR_POSIX_MIPS_reserved7 (__NR_POSIX + 275)
-#define __NR_POSIX_MIPS_reserved8 (__NR_POSIX + 276)
-#define __NR_POSIX_MIPS_reserved9 (__NR_POSIX + 277)
-#define __NR_POSIX_MIPS_reserved10 (__NR_POSIX + 278)
-#define __NR_POSIX_MIPS_reserved11 (__NR_POSIX + 279)
-#define __NR_POSIX_TANDEM_reserved1 (__NR_POSIX + 280)
-#define __NR_POSIX_TANDEM_reserved2 (__NR_POSIX + 281)
-#define __NR_POSIX_TANDEM_reserved3 (__NR_POSIX + 282)
-#define __NR_POSIX_TANDEM_reserved4 (__NR_POSIX + 283)
-#define __NR_POSIX_TANDEM_reserved5 (__NR_POSIX + 284)
-#define __NR_POSIX_TANDEM_reserved6 (__NR_POSIX + 285)
-#define __NR_POSIX_TANDEM_reserved7 (__NR_POSIX + 286)
-#define __NR_POSIX_TANDEM_reserved8 (__NR_POSIX + 287)
-#define __NR_POSIX_TANDEM_reserved9 (__NR_POSIX + 288)
-#define __NR_POSIX_TANDEM_reserved10 (__NR_POSIX + 289)
-#define __NR_POSIX_TANDEM_reserved11 (__NR_POSIX + 290)
-#define __NR_POSIX_TANDEM_reserved12 (__NR_POSIX + 291)
-#define __NR_POSIX_TANDEM_reserved13 (__NR_POSIX + 292)
-#define __NR_POSIX_TANDEM_reserved14 (__NR_POSIX + 293)
-#define __NR_POSIX_TANDEM_reserved15 (__NR_POSIX + 294)
-#define __NR_POSIX_TANDEM_reserved16 (__NR_POSIX + 295)
-#define __NR_POSIX_TANDEM_reserved17 (__NR_POSIX + 296)
-#define __NR_POSIX_TANDEM_reserved18 (__NR_POSIX + 297)
-#define __NR_POSIX_TANDEM_reserved19 (__NR_POSIX + 298)
-#define __NR_POSIX_TANDEM_reserved20 (__NR_POSIX + 299)
-#define __NR_POSIX_SGI_reserved7 (__NR_POSIX + 300)
-#define __NR_POSIX_SGI_reserved8 (__NR_POSIX + 301)
-#define __NR_POSIX_SGI_reserved9 (__NR_POSIX + 302)
-#define __NR_POSIX_SGI_reserved10 (__NR_POSIX + 303)
-#define __NR_POSIX_SGI_reserved11 (__NR_POSIX + 304)
-#define __NR_POSIX_SGI_reserved12 (__NR_POSIX + 305)
-#define __NR_POSIX_SGI_reserved13 (__NR_POSIX + 306)
-#define __NR_POSIX_SGI_reserved14 (__NR_POSIX + 307)
-#define __NR_POSIX_SGI_reserved15 (__NR_POSIX + 308)
-#define __NR_POSIX_SGI_reserved16 (__NR_POSIX + 309)
-#define __NR_POSIX_SGI_reserved17 (__NR_POSIX + 310)
-#define __NR_POSIX_SGI_reserved18 (__NR_POSIX + 311)
-#define __NR_POSIX_SGI_reserved19 (__NR_POSIX + 312)
-#define __NR_POSIX_SGI_reserved20 (__NR_POSIX + 313)
-#define __NR_POSIX_SGI_reserved21 (__NR_POSIX + 314)
-#define __NR_POSIX_SGI_reserved22 (__NR_POSIX + 315)
-#define __NR_POSIX_SGI_reserved23 (__NR_POSIX + 316)
-#define __NR_POSIX_SGI_reserved24 (__NR_POSIX + 317)
-#define __NR_POSIX_SGI_reserved25 (__NR_POSIX + 318)
-#define __NR_POSIX_SGI_reserved26 (__NR_POSIX + 319)
-
-/*
- * Linux syscalls are in the range from 4000 to 4999
- * Hopefully these syscall numbers are unused ... If not everyone using
- * statically linked binaries is pretty upsh*t. You've been warned.
- */
#define __NR_Linux 4000
#define __NR_syscall (__NR_Linux + 0)
#define __NR_exit (__NR_Linux + 1)
@@ -1045,7 +71,7 @@
#define __NR_mpx (__NR_Linux + 56)
#define __NR_setpgid (__NR_Linux + 57)
#define __NR_ulimit (__NR_Linux + 58)
-#define __NR_oldolduname (__NR_Linux + 59)
+#define __NR_unused59 (__NR_Linux + 59)
#define __NR_umask (__NR_Linux + 60)
#define __NR_chroot (__NR_Linux + 61)
#define __NR_ustat (__NR_Linux + 62)
@@ -1095,7 +121,7 @@
#define __NR_stat (__NR_Linux + 106)
#define __NR_lstat (__NR_Linux + 107)
#define __NR_fstat (__NR_Linux + 108)
-#define __NR_olduname (__NR_Linux + 109)
+#define __NR_unused109 (__NR_Linux + 109)
#define __NR_iopl (__NR_Linux + 110)
#define __NR_vhangup (__NR_Linux + 111)
#define __NR_idle (__NR_Linux + 112)
@@ -1206,11 +232,12 @@
#define __NR_mincore (__NR_Linux + 217)
#define __NR_madvise (__NR_Linux + 218)
#define __NR_getdents64 (__NR_Linux + 219)
+#define __NR_fcntl64 (__NR_Linux + 220)
/*
* Offset of the last Linux flavoured syscall
*/
-#define __NR_Linux_syscalls 219
+#define __NR_Linux_syscalls 220
#ifndef _LANGUAGE_ASSEMBLY
@@ -1218,13 +245,15 @@
#define _syscall0(type,name) \
type name(void) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("li\t$2,%2\n\t" \
- "syscall" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name) \
- : "$8","$9","$10","$11","$12","$13","$14","$15","$24"); \
+ : "$2","$7","$8","$9","$10","$11","$12","$13","$14","$15", \
+ "$24"); \
if (__err == 0) \
return (type) __res; \
errno = __res; \
@@ -1238,14 +267,15 @@ return -1; \
#define _syscall1(type,name,atype,a) \
type name(atype a) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"li\t$2,%2\n\t" \
- "syscall" \
- : "=r" (__res), "=r" (__err) \
- : "i" (__NR_##name),"r" ((long)(a)) \
- : "$4","$8","$9","$10","$11","$12","$13","$14","$15","$24"); \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7" \
+ : "=r" (__res), "=r" (__err) \
+ : "i" (__NR_##name),"r" ((long)(a)) \
+ : "$2","$4","$7","$8","$9","$10","$11","$12","$13","$14","$15","$24"); \
if (__err == 0) \
return (type) __res; \
errno = __res; \
@@ -1255,17 +285,18 @@ return -1; \
#define _syscall2(type,name,atype,a,btype,b) \
type name(atype a,btype b) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"move\t$5,%4\n\t" \
"li\t$2,%2\n\t" \
- "syscall" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name),"r" ((long)(a)), \
"r" ((long)(b)) \
- : "$4","$5","$8","$9","$10","$11","$12","$13","$14","$15", \
- "$24"); \
+ : "$2","$4","$5","$7","$8","$9","$10","$11","$12","$13", \
+ "$14","$15", "$24"); \
if (__err == 0) \
return (type) __res; \
errno = __res; \
@@ -1275,19 +306,20 @@ return -1; \
#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
type name (atype a, btype b, ctype c) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"move\t$5,%4\n\t" \
"move\t$6,%5\n\t" \
"li\t$2,%2\n\t" \
- "syscall" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name),"r" ((long)(a)), \
"r" ((long)(b)), \
"r" ((long)(c)) \
- : "$4","$5","$6","$8","$9","$10","$11","$12","$13","$14", \
- "$15","$24"); \
+ : "$2","$4","$5","$6","$7","$8","$9","$10","$11","$12", \
+ "$13","$14","$15","$24"); \
if (__err == 0) \
return (type) __res; \
errno = __res; \
@@ -1297,21 +329,22 @@ return -1; \
#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
type name (atype a, btype b, ctype c, dtype d) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"move\t$5,%4\n\t" \
"move\t$6,%5\n\t" \
"move\t$7,%6\n\t" \
"li\t$2,%2\n\t" \
- "syscall" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name),"r" ((long)(a)), \
"r" ((long)(b)), \
"r" ((long)(c)), \
"r" ((long)(d)) \
- : "$4","$5","$6","$8","$9","$10","$11","$12","$13","$14", \
- "$15","$24"); \
+ : "$2","$4","$5","$6","$7","$8","$9","$10","$11","$12", \
+ "$13","$14","$15","$24"); \
if (__err == 0) \
return (type) __res; \
errno = __res; \
@@ -1321,8 +354,7 @@ return -1; \
#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
type name (atype a,btype b,ctype c,dtype d,etype e) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"move\t$5,%4\n\t" \
"move\t$6,%5\n\t" \
@@ -1331,7 +363,9 @@ __asm__ volatile ("move\t$4,%3\n\t" \
"subu\t$29,24\n\t" \
"sw\t$2,16($29)\n\t" \
"li\t$2,%2\n\t" \
- "syscall\n\t" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7\n\t" \
"addiu\t$29,24" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name),"r" ((long)(a)), \
@@ -1350,8 +384,7 @@ return -1; \
#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"move\t$5,%4\n\t" \
"move\t$6,%5\n\t" \
@@ -1362,7 +395,9 @@ __asm__ volatile ("move\t$4,%3\n\t" \
"sw\t$2,16($29)\n\t" \
"sw\t$3,20($29)\n\t" \
"li\t$2,%2\n\t" \
- "syscall\n\t" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7\n\t" \
"addiu\t$29,24" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name),"r" ((long)(a)), \
@@ -1382,8 +417,7 @@ return -1; \
#define _syscall7(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f,gtype,g) \
type name (atype a,btype b,ctype c,dtype d,etype e,ftype f,gtype g) \
{ \
-register long __res __asm__ ("$2"); \
-register long __err __asm__ ("$7"); \
+long __res, __err; \
__asm__ volatile ("move\t$4,%3\n\t" \
"move\t$5,%4\n\t" \
"move\t$6,%5\n\t" \
@@ -1396,7 +430,9 @@ __asm__ volatile ("move\t$4,%3\n\t" \
"sw\t$3,20($29)\n\t" \
"sw\t$2,24($29)\n\t" \
"li\t$2,%2\n\t" \
- "syscall\n\t" \
+ "syscall\n\t" \
+ "move\t%0, $2\n\t" \
+ "move\t%1, $7\n\t" \
"addiu\t$29,32" \
: "=r" (__res), "=r" (__err) \
: "i" (__NR_##name),"r" ((long)(a)), \
@@ -1450,4 +486,4 @@ static inline pid_t wait(int * wait_stat)
#endif /* !defined (__KERNEL_SYSCALLS__) */
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
-#endif /* __ASM_MIPS_UNISTD_H */
+#endif /* _ASM_UNISTD_H */
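The _syscallN macros above now copy $2 (result) and $7 (error flag) into ordinary C locals right after the syscall instruction, instead of binding register variables to those registers, and they name $2/$7 plus the argument registers in the clobber lists. A rough sketch of what _syscall1(int, close, int, fd) expands to under this scheme, reconstructed from the macro body in the patch (close and __NR_close are only the customary example; errno is assumed to be declared by the including file):

int close(int fd)
{
	long __res, __err;
	__asm__ volatile ("move\t$4,%3\n\t"
			  "li\t$2,%2\n\t"
			  "syscall\n\t"
			  "move\t%0, $2\n\t"
			  "move\t%1, $7"
			  : "=r" (__res), "=r" (__err)
			  : "i" (__NR_close), "r" ((long)(fd))
			  : "$2","$4","$7","$8","$9","$10","$11","$12",
			    "$13","$14","$15","$24");
	if (__err == 0)			/* $7 == 0 means the call succeeded */
		return (int) __res;	/* the result is returned in $2 */
	errno = __res;			/* on failure $2 holds the error code */
	return -1;
}

Copying the values out with explicit moves and clobbering $2/$7 presumably avoids relying on GCC to keep the old register variables live across the asm statement.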
diff --git a/include/asm-mips/watch.h b/include/asm-mips/watch.h
index abfc622efee9..7945e0b2fef3 100644
--- a/include/asm-mips/watch.h
+++ b/include/asm-mips/watch.h
@@ -1,10 +1,9 @@
-/* $Id: watch.h,v 1.3 1998/08/19 21:58:15 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998, 2000, 2001 by Ralf Baechle
*/
#ifndef __ASM_WATCH_H
#define __ASM_WATCH_H
@@ -35,4 +34,4 @@ extern asmlinkage void __watch_reenable(void);
if (watch_available) \
__watch_reenable()
-#endif __ASM_WATCH_H
+#endif /* __ASM_WATCH_H */
diff --git a/include/asm-mips/wbflush.h b/include/asm-mips/wbflush.h
index 31a3679090b1..0d9c9a8683e0 100644
--- a/include/asm-mips/wbflush.h
+++ b/include/asm-mips/wbflush.h
@@ -27,7 +27,7 @@ extern void (*__wbflush) (void);
* we don't need no stinkin' wbflush
*/
-#define wbflush()
+#define wbflush() do { } while(0)
#endif
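The empty wbflush() above is wrapped in do { } while (0) so that, on machines without a write buffer, the macro still expands to a single statement that demands its trailing semicolon (and keeps compilers from warning about an empty if body), parsing exactly like the real call on machines that need one. A minimal, hypothetical caller (the names here are illustrative, not from the tree):

static inline void post_command(volatile unsigned int *status_reg)
{
	*status_reg = 1;	/* start the device */
	wbflush();		/* real flush, or the do { } while (0) no-op */
}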
diff --git a/include/asm-ppc/bootinfo.h b/include/asm-ppc/bootinfo.h
index ce7650611398..2e0841f966f2 100644
--- a/include/asm-ppc/bootinfo.h
+++ b/include/asm-ppc/bootinfo.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.bootinfo.h 1.7 05/23/01 00:38:42 cort
+ * BK Id: SCCS/s.bootinfo.h 1.9 06/13/01 15:28:43 paulus
*/
/*
* Non-machine dependent bootinfo structure. Basic idea
@@ -34,27 +34,6 @@ struct bi_record {
#endif /* CONFIG_APUS */
-/*
- * prom_init() is called very early on, before the kernel text
- * and data have been mapped to KERNELBASE. At this point the code
- * is running at whatever address it has been loaded at, so
- * references to extern and static variables must be relocated
- * explicitly. The procedure reloc_offset() returns the address
- * we're currently running at minus the address we were linked at.
- * (Note that strings count as static variables.)
- *
- * Because OF may have mapped I/O devices into the area starting at
- * KERNELBASE, particularly on CHRP machines, we can't safely call
- * OF once the kernel has been mapped to KERNELBASE. Therefore all
- * OF calls should be done within prom_init(), and prom_init()
- * and all routines called within it must be careful to relocate
- * references as necessary.
- */
-#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
-#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
-#define RELOC(x) (*PTRRELOC(&(x)))
#endif /* _PPC_BOOTINFO_H */
#endif /* __KERNEL__ */
-
-
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 13460cb60575..c7d11722aa20 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.highmem.h 1.7 05/17/01 18:14:24 cort
+ * BK Id: SCCS/s.highmem.h 1.10 06/28/01 15:50:17 paulus
*/
/*
* highmem.h: virtual kernel memory mappings for high memory
@@ -94,7 +94,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
BUG();
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
- flush_hash_page(0, vaddr);
+ flush_tlb_page(0, vaddr);
return (void*) vaddr;
}
@@ -116,7 +116,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
* this pte without first remap it
*/
pte_clear(kmap_pte+idx);
- flush_hash_page(0, vaddr);
+ flush_tlb_page(0, vaddr);
#endif
}
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index 422757477089..c3a0a44412d6 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.machdep.h 1.11 05/17/01 18:14:25 cort
+ * BK Id: SCCS/s.machdep.h 1.14 06/28/01 16:13:50 paulus
*/
#ifdef __KERNEL__
#ifndef _PPC_MACHDEP_H
@@ -67,26 +67,19 @@ struct machdep_calls {
* optional PCI "hooks"
*/
- /* Called after scanning the bus, before allocating
- * resources
- */
+ /* Called after scanning the bus, before allocating resources */
void (*pcibios_fixup)(void);
- /* Called for each PCI bus in the system
- * when it's probed
- */
+ /* Called for each PCI bus in the system when it's probed */
void (*pcibios_fixup_bus)(struct pci_bus *);
-
- /* Called when pci_enable_device() is called (initial=0) or
- * when a device with no assigned resource is found (initial=1).
- * Returns 0 to allow assignement/enabling of the device
- */
+
+ /* Called when pci_enable_device() is called (initial=0) or
+ * when a device with no assigned resource is found (initial=1).
+ * Returns 0 to allow assignment/enabling of the device. */
int (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
- /* Called at then very end of pcibios_init()
- */
+	/* Called at the very end of pcibios_init() */
void (*pcibios_after_init)(void);
-
/* this is for modules, since _machine can be a define -- Cort */
int ppc_machine;
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index bdc358194888..d89f9d16c96c 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.mmu.h 1.7 05/17/01 18:14:25 cort
+ * BK Id: SCCS/s.mmu.h 1.10 06/28/01 15:50:17 paulus
*/
/*
* PowerPC memory management structures
@@ -115,34 +115,6 @@ typedef struct _P601_BAT {
P601_BATL batl; /* Lower register */
} P601_BAT;
-/*
- * Simulated two-level MMU. This structure is used by the kernel
- * to keep track of MMU mappings and is used to update/maintain
- * the hardware HASH table which is really a cache of mappings.
- *
- * The simulated structures mimic the hardware available on other
- * platforms, notably the 80x86 and 680x0.
- */
-
-typedef struct _pte {
- unsigned long page_num:20;
- unsigned long flags:12; /* Page flags (some unused bits) */
-} pte;
-
-#define PD_SHIFT (10+12) /* Page directory */
-#define PD_MASK 0x02FF
-#define PT_SHIFT (12) /* Page Table */
-#define PT_MASK 0x02FF
-#define PG_SHIFT (12) /* Page Entry */
-
-
-/* MMU context */
-
-typedef struct _MMU_context {
- SEGREG segs[16]; /* Segment registers */
- pte **pmap; /* Two-level page-map structure */
-} MMU_context;
-
extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
extern void _tlbia(void); /* invalidate all TLB entries */
@@ -167,22 +139,6 @@ extern void _tlbia(void); /* invalidate all TLB entries */
#define BPP_RX 0x01 /* Read only */
#define BPP_RW 0x02 /* Read/write */
-/* Used to set up SDR1 register */
-#define HASH_TABLE_SIZE_64K 0x00010000
-#define HASH_TABLE_SIZE_128K 0x00020000
-#define HASH_TABLE_SIZE_256K 0x00040000
-#define HASH_TABLE_SIZE_512K 0x00080000
-#define HASH_TABLE_SIZE_1M 0x00100000
-#define HASH_TABLE_SIZE_2M 0x00200000
-#define HASH_TABLE_SIZE_4M 0x00400000
-#define HASH_TABLE_MASK_64K 0x000
-#define HASH_TABLE_MASK_128K 0x001
-#define HASH_TABLE_MASK_256K 0x003
-#define HASH_TABLE_MASK_512K 0x007
-#define HASH_TABLE_MASK_1M 0x00F
-#define HASH_TABLE_MASK_2M 0x01F
-#define HASH_TABLE_MASK_4M 0x03F
-
/* Control/status registers for the MPC8xx.
* A write operation to these registers causes serialized access.
* During software tablewalk, the registers used perform mask/shift-add
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index da57f426538f..ce8f81fd4708 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -1,18 +1,40 @@
/*
- * BK Id: SCCS/s.mmu_context.h 1.9 05/17/01 18:14:25 cort
+ * BK Id: SCCS/s.mmu_context.h 1.12 06/28/01 15:50:17 paulus
*/
-#include <linux/config.h>
-
#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H
-/* the way contexts are handled on the ppc they are vsid's and
- don't need any special treatment right now.
- perhaps I can defer flushing the tlb by keeping a list of
- zombie vsid/context's and handling that through destroy_context
- later -- Cort
+#include <linux/config.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mmu.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed correspondingly.
+ */
+#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ & 0xffffff)
+/*
The MPC8xx has only 16 contexts. We rotate through them on each
task switch. A better way would be to keep track of tasks that
own contexts, and implement an LRU usage. That way very active
@@ -32,38 +54,22 @@
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
+
#ifdef CONFIG_8xx
#define NO_CONTEXT 16
#define LAST_CONTEXT 15
-#define BASE_CONTEXT (-1)
-#define MUNGE_CONTEXT(n) (n)
-#define flush_hash_segments(X, Y) do { } while (0)
#elif CONFIG_4xx
#define NO_CONTEXT 256
#define LAST_CONTEXT 255
-#define BASE_CONTEXT (0)
-#define MUNGE_CONTEXT(n) (n)
-#define flush_hash_segments(X, Y) do { } while (0)
#else
/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT 0
-#define BASE_CONTEXT (0)
-#define LAST_CONTEXT 0xfffff
-
-/*
- * Allocating context numbers this way tends to spread out
- * the entries in the hash table better than a simple linear
- * allocation.
- */
-#define MUNGE_CONTEXT(n) (((n) * 897) & LAST_CONTEXT)
+#define NO_CONTEXT ((mm_context_t) -1)
+#define LAST_CONTEXT 32767
#endif
-extern atomic_t next_mmu_context;
-extern void mmu_context_overflow(void);
-
/*
* Set the current MMU context.
* On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
@@ -73,19 +79,58 @@ extern void mmu_context_overflow(void);
* and once I implement a real TLB context manager this will disappear.
* The PGD is ignored on other processors. - Dan
*/
-extern void set_context(int context, void *pgd);
+extern void set_context(mm_context_t context);
/*
- * Get a new mmu context for task tsk if necessary.
+ * Bitmap of contexts in use.
+ * The size of this bitmap is LAST_CONTEXT + 1 bits.
*/
-#define get_mmu_context(mm) \
-do { \
- if (mm->context == NO_CONTEXT) { \
- if (atomic_read(&next_mmu_context) == LAST_CONTEXT) \
- mmu_context_overflow(); \
- mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));\
- } \
-} while (0)
+extern unsigned long context_map[(LAST_CONTEXT+1) / (8*sizeof(unsigned long))];
+
+/*
+ * This caches the next context number that we expect to be free.
+ * Its use is an optimization only; we can't rely on this context
+ * number to be free, but it usually will be.
+ */
+extern mm_context_t next_mmu_context;
+
+/*
+ * If we don't have sufficient contexts to give one to every task
+ * that could be in the system, we need to be able to steal contexts.
+ * These variables support that.
+ */
+#if LAST_CONTEXT < 30000
+#define FEW_CONTEXTS 1
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+#endif
+
+/*
+ * Get a new mmu context for the address space described by `mm'.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+ mm_context_t ctx;
+
+ if (mm->context != NO_CONTEXT)
+ return;
+#ifdef FEW_CONTEXTS
+ while (atomic_dec_if_positive(&nr_free_contexts) < 0)
+ steal_context();
+#endif
+ ctx = next_mmu_context;
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context = ctx;
+#ifdef FEW_CONTEXTS
+ context_mm[ctx] = mm;
+#endif
+}
/*
* Set up the context for a new address space.
@@ -95,14 +140,23 @@ do { \
/*
* We're finished using the context for an address space.
*/
-#define destroy_context(mm) do { } while (0)
+static inline void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context != NO_CONTEXT) {
+ clear_bit(mm->context, context_map);
+ mm->context = NO_CONTEXT;
+#ifdef FEW_CONTEXTS
+ atomic_inc(&nr_free_contexts);
+#endif
+ }
+}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk, int cpu)
{
tsk->thread.pgdir = next->pgd;
get_mmu_context(next);
- set_context(next->context, next->pgd);
+ set_context(next->context);
}
/*
@@ -113,16 +167,8 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
{
current->thread.pgdir = mm->pgd;
get_mmu_context(mm);
- set_context(mm->context, mm->pgd);
+ set_context(mm->context);
}
-/*
- * compute the vsid from the context and segment
- * segments > 7 are kernel segments and their
- * vsid is the segment -- Cort
- */
-#define VSID_FROM_CONTEXT(segment,context) \
- ((segment < 8) ? ((segment) | (context)<<4) : (segment))
-
-#endif
+#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */
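To see what the CTX_TO_VSID() skew above does, a hand-worked example (values computed from the macro itself, purely illustrative): 897 * 16 = 0x3810, so context 1 maps its sixteen segments to VSIDs 0x003810, 0x003921, 0x003a32, ... each 0x111 apart, while context 2 starts at 0x007020, which spreads consecutive contexts and segments across the hash table as the comment intends. A tiny sketch that reproduces one of those values:

/* Illustrative only; duplicates the macro from the patch. */
#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
			      & 0xffffff)

unsigned long example_vsid(void)
{
	return CTX_TO_VSID(1, 0x10000000);	/* == 0x003921 */
}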
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 686ad149d71b..94f898f6da18 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.pgtable.h 1.9 05/17/01 18:14:25 cort
+ * BK Id: SCCS/s.pgtable.h 1.12 06/28/01 15:50:17 paulus
*/
#ifdef __KERNEL__
#ifndef _PPC_PGTABLE_H
@@ -20,8 +20,8 @@ extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
-static inline void flush_hash_page(unsigned context, unsigned long va)
- { }
+#define update_mmu_cache(vma, addr, pte) do { } while (0)
+
#elif defined(CONFIG_8xx)
#define __tlbia() asm volatile ("tlbia" : : )
@@ -35,9 +35,9 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
static inline void local_flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{ __tlbia(); }
-static inline void flush_hash_page(unsigned context, unsigned long va)
- { }
-#else
+#define update_mmu_cache(vma, addr, pte) do { } while (0)
+
+#else /* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
@@ -45,6 +45,15 @@ extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to put a corresponding HPTE into the hash table
+ * ahead of time, instead of waiting for the inevitable extra
+ * hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif
#define flush_tlb_all local_flush_tlb_all
@@ -52,16 +61,20 @@ extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
+/*
+ * This is called in munmap when we have freed up some page-table
+ * pages. We don't need to do anything here, there's nothing special
+ * about our page-table pages. -- paulus
+ */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end)
{
- /* PPC has hw page tables. */
}
/*
* No cache flushing is required when address mappings are
* changed, because the caches on PowerPCs are physically
- * addressed.
+ * addressed. -- paulus
* Also, when SMP we use the coherency (M) bit of the
* BATs and PTEs. -- Cort
*/
@@ -88,12 +101,12 @@ extern unsigned long ioremap_bot, ioremap_base;
* the virtual to physical address mapping.
*
* We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much like
- * that used by the i386, for the sake of the Linux memory management code.
- * Low-level assembler code in head.S (procedure hash_page) is responsible
- * for extracting ptes from the tree and putting them into the hash table
- * when necessary, and updating the accessed and modified bits in the
- * page table tree.
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hashtable.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
*/
/*
@@ -189,12 +202,11 @@ extern unsigned long ioremap_bot, ioremap_base;
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_EXEC 0x020 /* software: i-cache coherency required */
#define _PAGE_PRESENT 0x040 /* software: PTE contains a translation */
#define _PAGE_DIRTY 0x100 /* C: page changed */
#define _PAGE_RW 0x200 /* Writes permitted */
#define _PAGE_ACCESSED 0x400 /* R: page referenced */
-#define _PAGE_HWWRITE 0x800 /* software: _PAGE_RW & _PAGE_DIRTY */
-#define _PAGE_SHARED 0
#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
@@ -205,48 +217,62 @@ extern unsigned long ioremap_bot, ioremap_base;
/* These five software bits must be masked out when the entry is loaded
* into the TLB.
*/
-#define _PAGE_DIRTY 0x0008 /* software: page changed */
+#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
#define _PAGE_GUARDED 0x0010 /* software: guarded access */
#define _PAGE_WRITETHRU 0x0020 /* software: use writethrough cache */
#define _PAGE_RW 0x0040 /* software: user write access allowed */
#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
-#define _PAGE_HWWRITE 0x0100 /* C: page changed (write protect) */
-#define _PAGE_USER 0x0800 /* One of the PP bits, the other must be 0 */
+#define _PAGE_DIRTY 0x0100 /* C: page changed (write protect) */
+#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_USER 0x002 /* matches one of the PP bits */
-#define _PAGE_RW 0x004 /* software: user write access allowed */
-#define _PAGE_GUARDED 0x008
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */
+#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#endif
+
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE 0
+#endif
+#ifndef _PAGE_SHARED
#define _PAGE_SHARED 0
#endif
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#ifdef CONFIG_SMP
-#define _PAGE_BASE _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT
-#else
+/*
+ * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
+ * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
+ * to have it in the Linux PTE, and in fact the bit could be reused for
+ * another purpose. -- paulus.
+ */
#define _PAGE_BASE _PAGE_PRESENT | _PAGE_ACCESSED
-#endif
-#define _PAGE_WRENABLE _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE
+#define _PAGE_WRENABLE _PAGE_RW | _PAGE_DIRTY
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_KERNEL _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED
+#define _PAGE_IO _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
- _PAGE_SHARED)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
-#define PAGE_KERNEL_CI __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
- _PAGE_NO_CACHE )
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
+#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)
/*
* The PowerPC can only do execute protection on a segment (256MB) basis,
@@ -255,22 +281,22 @@ extern unsigned long ioremap_bot, ioremap_base;
* This is the closest we can get..
*/
#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
+#define __P001 PAGE_READONLY_X
#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
+#define __P011 PAGE_COPY_X
#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
+#define __P111 PAGE_COPY_X
#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
+#define __S001 PAGE_READONLY_X
#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
#ifndef __ASSEMBLY__
/*
@@ -280,33 +306,11 @@ extern unsigned long ioremap_bot, ioremap_base;
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern pte_t __bad_page(void);
-extern pte_t * __bad_pagetable(void);
-
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
#endif /* __ASSEMBLY__ */
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 2
-
-#define pte_none(pte) (!pte_val(pte))
+#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HASHPTE) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(ptep) do { pte_val(*(ptep)) = 0; } while (0)
+#define pte_clear(ptep) do { set_pte((ptep), __pte(0)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) != 0)
@@ -316,8 +320,7 @@ extern pte_t * __bad_pagetable(void);
/*
* Permanent address of a page.
*/
-#define page_address(page) ((page)->virtual)
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+#define page_address(page) ((page)->virtual)
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#ifndef __ASSEMBLY__
@@ -340,7 +343,7 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
*/
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
@@ -349,42 +352,26 @@ static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACH
static inline pte_t pte_rdprotect(pte_t pte) {
pte_val(pte) &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+ pte_val(pte) &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_exprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) {
pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) {
pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
- pte_val(pte) |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)
-{
- pte_val(pte) |= _PAGE_RW;
- if (pte_val(pte) & _PAGE_DIRTY)
- pte_val(pte) |= _PAGE_HWWRITE;
- return pte;
-}
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- pte_val(pte) |= _PAGE_DIRTY;
- if (pte_val(pte) & _PAGE_RW)
- pte_val(pte) |= _PAGE_HWWRITE;
- return pte;
-}
+ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -421,11 +408,11 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
{
unsigned long old, tmp;
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- andc %1,%0,%4 \n\
- or %1,%1,%5 \n\
- stwcx. %1,0,%3 \n\
+ __asm__ __volatile__("\
+1: lwarx %0,0,%3\n\
+ andc %1,%0,%4\n\
+ or %1,%1,%5\n\
+ stwcx. %1,0,%3\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" (clr), "r" (set), "m" (*p)
@@ -433,6 +420,12 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
return old;
}
+/*
+ * Writing a new value into the PTE doesn't disturb the state of the
+ * _PAGE_HASHPTE bit, on those machines which use an MMU hash table.
+ */
+extern void set_pte(pte_t *ptep, pte_t pte);
+
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
@@ -440,36 +433,25 @@ static inline int ptep_test_and_clear_young(pte_t *ptep)
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
- return (pte_update(ptep, _PAGE_DIRTY | _PAGE_HWWRITE, 0)
- & _PAGE_DIRTY) != 0;
+ return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
- return __pte(pte_update(ptep, ~0UL, 0));
+ return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
static inline void ptep_set_wrprotect(pte_t *ptep)
{
- pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0);
+ pte_update(ptep, _PAGE_RW, 0);
}
static inline void ptep_mkdirty(pte_t *ptep)
{
- /*
- * N.B. this doesn't set the _PAGE_HWWRITE bit in the case
- * where _PAGE_RW is set and _PAGE_DIRTY was clear. This
- * doesn't matter; all it will mean is that if the next call
- * to hash_page for this page is for a read, it will put a
- * readonly HPTE into the hash table rather than a R/W HPTE.
- * A call to hash_page for a write to this page will set
- * _PAGE_HWWRITE and put a R/W HPTE into the hash table.
- * -- paulus.
- */
pte_update(ptep, 0, _PAGE_DIRTY);
}
-#define pte_same(A,B) (pte_val(A) == pte_val(B))
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
#define pmd_page(pmd) (pmd_val(pmd))
@@ -496,25 +478,25 @@ extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);
/*
- * Page tables may have changed. We don't need to do anything here
- * as entries are faulted into the hash table by the low-level
- * data/instruction access exception handlers.
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
*/
-#define update_mmu_cache(vma, addr, pte) do { } while (0)
+extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
/*
- * When flushing the tlb entry for a page, we also need to flush the
- * hash table entry. flush_hash_page is assembler (for speed) in head.S.
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
+ * (if used). -- paulus
*/
-extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
-extern void flush_hash_page(unsigned context, unsigned long va);
-
-/* Encode and de-code a swap entry */
-#define SWP_TYPE(entry) (((entry).val >> 1) & 0x3f)
-#define SWP_OFFSET(entry) ((entry).val >> 8)
-#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define swp_entry_to_pte(x) ((pte_t) { (x).val })
+#define SWP_TYPE(entry) ((entry).val & 0x3f)
+#define SWP_OFFSET(entry) ((entry).val >> 6)
+#define SWP_ENTRY(type, offset) ((swp_entry_t) { (type) | ((offset) << 6) })
+#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
/* CONFIG_APUS */
/* For virtual address to physical address conversion */
@@ -545,7 +527,6 @@ extern void kernel_set_cachemode (unsigned long address, unsigned long size,
unsigned int cmode);
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
#define io_remap_page_range remap_page_range
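The reworked swap-entry encoding at the end of the pgtable.h hunk can be checked with a small round trip (the type/offset values are arbitrary examples, not from the patch): SWP_ENTRY(5, 0x1234) yields val = 5 | (0x1234 << 6) = 0x48d05, swp_entry_to_pte() shifts that left by two to 0x123414, so PTE bits 0 and 1 (_PAGE_PRESENT and, on 6xx, _PAGE_HASHPTE) stay clear, which is exactly the constraint the comment states. A hedged sketch:

static int swap_encoding_roundtrip(void)
{
	swp_entry_t entry = SWP_ENTRY(5, 0x1234);	/* entry.val == 0x00048d05 */
	pte_t pte = swp_entry_to_pte(entry);		/* pte_val(pte) == 0x00123414 */

	/* type and offset survive the trip, and the reserved low bits are clear */
	return SWP_TYPE(pte_to_swp_entry(pte)) == 5 &&
	       SWP_OFFSET(pte_to_swp_entry(pte)) == 0x1234 &&
	       (pte_val(pte) & (_PAGE_PRESENT | _PAGE_HASHPTE)) == 0;
}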
diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h
index 3bd0b2a7e345..4c18e818c3a3 100644
--- a/include/asm-ppc/processor.h
+++ b/include/asm-ppc/processor.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.processor.h 1.19 05/18/01 08:18:10 patch
+ * BK Id: SCCS/s.processor.h 1.24 06/15/01 13:56:56 paulus
*/
#ifdef __KERNEL__
#ifndef __ASM_PPC_PROCESSOR_H
@@ -445,6 +445,7 @@
#define PVR_403GC 0x00200200
#define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000
+#define PVR_STB03XXX 0x40310000
#define PVR_601 0x00010000
#define PVR_602 0x00050000
#define PVR_603 0x00030000
@@ -459,6 +460,8 @@
#define PVR_750 PVR_740
#define PVR_740P 0x10080000
#define PVR_750P PVR_740P
+#define PVR_7400 0x000C0000
+#define PVR_7410 0x800C0000
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
@@ -469,7 +472,6 @@
#define PVR_823 PVR_821
#define PVR_850 PVR_821
#define PVR_860 PVR_821
-#define PVR_7400 0x000C0000
#define PVR_8240 0x00810100
#define PVR_8260 PVR_8240
diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h
index 11f57aadb4d7..43e3f600341a 100644
--- a/include/asm-ppc/prom.h
+++ b/include/asm-ppc/prom.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.prom.h 1.11 05/18/01 08:18:10 patch
+ * BK Id: SCCS/s.prom.h 1.14 06/13/01 15:28:43 paulus
*/
/*
* Definitions for talking to the Open Firmware PROM on
@@ -45,6 +45,9 @@ struct property {
struct property *next;
};
+/*
+ * Note: don't change this structure for now or you'll break BootX !
+ */
struct device_node {
char *name;
char *type;
@@ -60,10 +63,6 @@ struct device_node {
struct device_node *sibling;
struct device_node *next; /* next device of same type */
struct device_node *allnext; /* next in list of all nodes */
-#if 0 /* Don't change this structure for now or you'll break BootX ! */
- int n_addr_cells;
- int n_size_cells;
-#endif
};
struct prom_args;
@@ -102,5 +101,23 @@ extern void map_bootx_text(void);
extern void bootx_update_display(unsigned long phys, int width, int height,
int depth, int pitch);
+/*
+ * When we call back to the Open Firmware client interface, we usually
+ * have to do that before the kernel is relocated to its final location
+ * (this is because we can't use OF after we have overwritten the
+ * exception vectors with our exception handlers). These macros assist
+ * in performing the address calculations that we need to do to access
+ * data when the kernel is running at an address that is different from
+ * the address that the kernel is linked at. The reloc_offset() function
+ * returns the difference between these two addresses and the macros
+ * simplify the process of adding or subtracting this offset to/from
+ * pointer values. See arch/ppc/kernel/prom.c for how these are used.
+ */
+extern unsigned long reloc_offset(void);
+
+#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
+#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
+#define RELOC(x) (*PTRRELOC(&(x)))
+
#endif /* _PPC_PROM_H */
#endif /* __KERNEL__ */
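The relocation helpers added above are for the window before the kernel has been copied to its linked address: reloc_offset() returns how far the running image is from where it was linked, and the macros add or subtract that distance when touching static data. A hedged sketch of the usual pattern (the variable and function names are illustrative; note the macros expect a local named offset, as in arch/ppc/kernel/prom.c):

static int boot_messages;			/* illustrative static variable */

void count_boot_message(void)
{
	unsigned long offset = reloc_offset();

	/* RELOC() accesses the variable at its current, unrelocated
	 * address, so this is safe before the kernel has been moved. */
	RELOC(boot_messages) += 1;
}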
diff --git a/include/asm-ppc/time.h b/include/asm-ppc/time.h
index 2c9f1026882e..2a774bc50428 100644
--- a/include/asm-ppc/time.h
+++ b/include/asm-ppc/time.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.time.h 1.10 05/17/01 18:14:26 cort
+ * BK Id: SCCS/s.time.h 1.13 06/27/01 14:49:58 trini
*/
/*
* Common time prototypes and such for all ppc machines.
@@ -24,6 +24,8 @@ extern unsigned long disarm_decr[NR_CPUS];
extern void to_tm(int tim, struct rtc_time * tm);
extern time_t last_rtc_update;
+extern void set_dec_cpu6(unsigned int val);
+
int via_calibrate_decr(void);
/* Accessor functions for the decrementer register.
diff --git a/include/asm-sh/hitachi_se.h b/include/asm-sh/hitachi_se.h
index 05b701e1f1c6..8aafb71a47e1 100644
--- a/include/asm-sh/hitachi_se.h
+++ b/include/asm-sh/hitachi_se.h
@@ -38,7 +38,7 @@
#define PA_LED 0xb0c00000 /* LED */
#define PA_BCR 0xb1400000 /* FPGA */
-#define PA_MRSHPC 0xb83fffe0 /* MR-SHPC-01 PCMCIA controler */
+#define PA_MRSHPC 0xb83fffe0 /* MR-SHPC-01 PCMCIA controller */
#define PA_MRSHPC_MW1 0xb8400000 /* MR-SHPC-01 memory window base */
#define PA_MRSHPC_MW2 0xb8500000 /* MR-SHPC-01 attribute window base */
#define PA_MRSHPC_IO 0xb8600000 /* MR-SHPC-01 I/O window base */
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index ac828721c7e0..ab7958844f44 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -1,7 +1,7 @@
/* hardirq.h: 32-bit Sparc hard IRQ support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-2000 Anton Blanchard (anton@linuxcare.com)
+ * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
*/
#ifndef __SPARC_HARDIRQ_H
diff --git a/include/asm-sparc/vaddrs.h b/include/asm-sparc/vaddrs.h
index 704f79003d9f..e2f6fbe91fc8 100644
--- a/include/asm-sparc/vaddrs.h
+++ b/include/asm-sparc/vaddrs.h
@@ -9,7 +9,7 @@
* which important things will be mapped.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
+ * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
*/
#define SRMMU_MAXMEM 0x0c000000
diff --git a/include/asm-sparc64/starfire.h b/include/asm-sparc64/starfire.h
index d59bda0c3a9d..a738fb363a40 100644
--- a/include/asm-sparc64/starfire.h
+++ b/include/asm-sparc64/starfire.h
@@ -1,7 +1,7 @@
/* $Id: starfire.h,v 1.1 2000/09/21 06:18:53 anton Exp $
* starfire.h: Group all starfire specific code together.
*
- * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
+ * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC64_STARFIRE_H
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 221be1c68bca..63619c2eb6f6 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -45,7 +45,7 @@ enum chipset_type {
INTEL_BX,
INTEL_GX,
INTEL_I810,
- INTEL_I815,
+ INTEL_I815,
INTEL_I840,
INTEL_I850,
VIA_GENERIC,
@@ -65,7 +65,10 @@ enum chipset_type {
ALI_M1641,
ALI_M1647,
ALI_M1651,
- ALI_GENERIC
+ ALI_GENERIC,
+ SVWRKS_HE,
+ SVWRKS_LE,
+ SVWRKS_GENERIC
};
typedef struct _agp_version {
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index ef8931cd7b5f..2b14cb90f604 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -22,6 +22,13 @@
max cell rate: 353207.547 cells/sec */
#define ATM_25_PCR ((25600000/8-8000)/54)
/* 25 Mbps ATM cell rate (59111) */
+#define ATM_OC12_PCR (622080000/1080*1040/8/53)
+ /* OC12 link rate: 622080000 bps
+ SONET overhead: /1080*1040
+ bits per cell: /8/53
+ max cell rate: 1412830.188 cells/sec */
+#define ATM_DS3_PCR (8000*12)
+ /* DS3: 12 cells in a 125 usec time slot */
#define ATM_PDU_OVHD 0 /* number of bytes to charge against buffer
quota per PDU */
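
The integer arithmetic behind the two new peak cell rate constants can be checked directly; this small standalone program (not part of the patch) reproduces the values quoted in the comments.

	#include <stdio.h>

	int main(void)
	{
		/* 622080000/1080 = 576000; *1040 = 599040000; /8 = 74880000;
		 * /53 = 1412830 cells/sec (integer part of 1412830.188). */
		printf("ATM_OC12_PCR = %d\n", 622080000/1080*1040/8/53);
		/* 12 cells per 125 usec slot, 8000 slots per second. */
		printf("ATM_DS3_PCR  = %d\n", 8000*12);		/* 96000 */
		return 0;
	}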
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index bb57005b1e07..18772903d770 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -1,4 +1,4 @@
-/* $Id: isdn.h,v 1.111.6.6 2001/05/17 21:15:34 kai Exp $
+/* $Id: isdn.h,v 1.111.6.7 2001/06/30 19:47:51 kai Exp $
* Main header for the Linux ISDN subsystem (linklevel).
*
@@ -27,6 +27,15 @@
#include <linux/ioctl.h>
+#ifdef CONFIG_COBALT_MICRO_SERVER
+/* Save memory */
+#define ISDN_MAX_DRIVERS 2
+#define ISDN_MAX_CHANNELS 8
+#else
+#define ISDN_MAX_DRIVERS 32
+#define ISDN_MAX_CHANNELS 64
+#endif
+
/* New ioctl-codes */
#define IIOCNETAIF _IO('I',1)
#define IIOCNETDIF _IO('I',2)
@@ -181,14 +190,6 @@ typedef struct {
* the correspondent code in isdn.c
*/
-#ifdef CONFIG_COBALT_MICRO_SERVER
-/* Save memory */
-#define ISDN_MAX_DRIVERS 2
-#define ISDN_MAX_CHANNELS 8
-#else
-#define ISDN_MAX_DRIVERS 32
-#define ISDN_MAX_CHANNELS 64
-#endif
#define ISDN_MINOR_B 0
#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1)
#define ISDN_MINOR_CTRL 64
diff --git a/include/linux/isdn/tpam.h b/include/linux/isdn/tpam.h
new file mode 100644
index 000000000000..9f65bea49d11
--- /dev/null
+++ b/include/linux/isdn/tpam.h
@@ -0,0 +1,56 @@
+/* $Id: tpam.h,v 1.1.2.1 2001/06/08 08:23:46 kai Exp $
+ *
+ * Turbo PAM ISDN driver for Linux. (Kernel Driver)
+ *
+ * Copyright 2001 Stelian Pop <stelian.pop@fr.alcove.com>, Alcôve
+ *
+ * For all support questions please contact: <support@auvertech.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _TPAM_H_
+#define _TPAM_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/* IOCTL commands */
+#define TPAM_CMD_DSPLOAD 0x0001
+#define TPAM_CMD_DSPSAVE 0x0002
+#define TPAM_CMD_DSPRUN 0x0003
+#define TPAM_CMD_LOOPMODEON 0x0004
+#define TPAM_CMD_LOOPMODEOFF 0x0005
+
+/* addresses of debug information zones on board */
+#define TPAM_TRAPAUDIT_REGISTER 0x005493e4
+#define TPAM_NCOAUDIT_REGISTER 0x00500000
+#define TPAM_MSGAUDIT_REGISTER 0x008E30F0
+
+/* length of debug information zones on board */
+#define TPAM_TRAPAUDIT_LENGTH 10000
+#define TPAM_NCOAUDIT_LENGTH 300000
+#define TPAM_NCOAUDIT_COUNT 30
+#define TPAM_MSGAUDIT_LENGTH 60000
+
+/* IOCTL load/save parameter */
+typedef struct tpam_dsp_ioctl {
+ __u32 address; /* address to load/save data */
+ __u32 data_len; /* size of data to be loaded/saved */
+ __u8 data[0]; /* data */
+} tpam_dsp_ioctl;
+
+#endif /* _TPAM_H_ */
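
The data[0] member makes tpam_dsp_ioctl a variable-length request: the fixed header is followed by data_len payload bytes. A hedged userspace sketch of building such a request follows; tpam_build_request() is a hypothetical helper, and since this header does not specify which ioctl request number or device node the driver expects, that part is left out.

	#include <stdlib.h>
	#include <string.h>
	#include <linux/isdn/tpam.h>	/* assumed visible here for tpam_dsp_ioctl */

	tpam_dsp_ioctl *tpam_build_request(__u32 address, const void *buf, __u32 len)
	{
		tpam_dsp_ioctl *req = malloc(sizeof(*req) + len);

		if (!req)
			return NULL;
		req->address  = address;	/* board address to load/save at */
		req->data_len = len;		/* payload size in bytes */
		memcpy(req->data, buf, len);	/* payload follows the fixed header */
		return req;
	}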
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
index f6239bd176d9..cca81b6b50a5 100644
--- a/include/linux/isdnif.h
+++ b/include/linux/isdnif.h
@@ -1,4 +1,4 @@
-/* $Id: isdnif.h,v 1.37.6.2 2001/05/17 21:15:34 kai Exp $
+/* $Id: isdnif.h,v 1.37.6.4 2001/06/09 15:14:19 kai Exp $
* Linux ISDN subsystem
*
@@ -433,6 +433,7 @@ typedef struct {
#ifdef CONFIG_ISDN_TTY_FAX
T30_s *fax; /* Pointer to ttys fax struct */
#endif
+ ulong userdata; /* User Data */
} parm;
} isdn_ctrl;
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index f183f57a5e87..ed5fbc59d247 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -81,7 +81,7 @@ void nfsd_racache_shutdown(void);
int nfsd_lookup(struct svc_rqst *, struct svc_fh *,
const char *, int, struct svc_fh *);
int nfsd_setattr(struct svc_rqst *, struct svc_fh *,
- struct iattr *);
+ struct iattr *, int, time_t);
int nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
int type, dev_t rdev, struct svc_fh *res);
@@ -143,6 +143,7 @@ void nfsd_lockd_unexport(struct svc_client *);
#define nfserr_noent __constant_htonl(NFSERR_NOENT)
#define nfserr_io __constant_htonl(NFSERR_IO)
#define nfserr_nxio __constant_htonl(NFSERR_NXIO)
+#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN)
#define nfserr_acces __constant_htonl(NFSERR_ACCES)
#define nfserr_exist __constant_htonl(NFSERR_EXIST)
#define nfserr_xdev __constant_htonl(NFSERR_XDEV)
@@ -160,9 +161,10 @@ void nfsd_lockd_unexport(struct svc_client *);
#define nfserr_dquot __constant_htonl(NFSERR_DQUOT)
#define nfserr_stale __constant_htonl(NFSERR_STALE)
#define nfserr_remote __constant_htonl(NFSERR_REMOTE)
+#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH)
#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE)
-#define nfserr_notsync __constant_htonl(NFSERR_NOTSYNC)
-#define nfserr_badcookie __constant_htonl(NFSERR_BADCOOKIE)
+#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC)
+#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE)
#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP)
#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL)
#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT)
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index dc2dae68d1c7..2149cff1db25 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -199,7 +199,7 @@ inline static char * SVCFH_fmt(struct svc_fh *fhp)
* Function prototypes
*/
u32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
-int fh_compose(struct svc_fh *, struct svc_export *, struct dentry *);
+int fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
int fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3b4ae41a9f14..9fc5ccf43a5a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -641,6 +641,8 @@ static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pci_module_init(struct pci_driver *drv) { return -ENODEV; }
+static inline int pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) { return -EIO; }
+static inline int pci_set_power_state(struct pci_dev *dev, int state) { return 0; }
static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;}
static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
static inline void pci_unregister_driver(struct pci_driver *drv) { }
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index bb42ad226df2..9575ef38dee0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -211,6 +211,7 @@
#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050
#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051
#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052
+#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452
#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053
#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054
#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055
@@ -326,6 +327,7 @@
#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
#define PCI_DEVICE_ID_IBM_MPIC 0x0046
#define PCI_DEVICE_ID_IBM_3780IDSP 0x007d
+#define PCI_DEVICE_ID_IBM_CHUKAR 0x0096
#define PCI_DEVICE_ID_IBM_405GP 0x0156
#define PCI_DEVICE_ID_IBM_MPIC_2 0xffff
@@ -345,6 +347,9 @@
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006
+#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007
+#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E
+#define PCI_DEVICE_ID_AMD_FE_GATE_700F 0x700F
#define PCI_DEVICE_ID_AMD_COBRA_7400 0x7400
#define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401
#define PCI_DEVICE_ID_AMD_COBRA_7403 0x7403
@@ -353,6 +358,10 @@
#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409
#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B
#define PCI_DEVICE_ID_AMD_VIPER_740C 0x740C
+#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410
+#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411
+#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413
+#define PCI_DEVICE_ID_AMD_VIPER_7414 0x7414
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
@@ -403,6 +412,7 @@
#define PCI_VENDOR_ID_NEC 0x1033
#define PCI_DEVICE_ID_NEC_PCX2 0x0046
#define PCI_DEVICE_ID_NEC_NILE4 0x005a
+#define PCI_DEVICE_ID_NEC_VRC5476 0x009b
#define PCI_VENDOR_ID_FD 0x1036
#define PCI_DEVICE_ID_FD_36C70 0x0000
@@ -417,15 +427,20 @@
#define PCI_DEVICE_ID_SI_501 0x0406
#define PCI_DEVICE_ID_SI_496 0x0496
#define PCI_DEVICE_ID_SI_300 0x0300
+#define PCI_DEVICE_ID_SI_315H 0x0310
+#define PCI_DEVICE_ID_SI_315 0x0315
#define PCI_DEVICE_ID_SI_530 0x0530
#define PCI_DEVICE_ID_SI_540 0x0540
+#define PCI_DEVICE_ID_SI_550 0x0550
#define PCI_DEVICE_ID_SI_540_VGA 0x5300
+#define PCI_DEVICE_ID_SI_550_VGA 0x5315
#define PCI_DEVICE_ID_SI_601 0x0601
#define PCI_DEVICE_ID_SI_620 0x0620
#define PCI_DEVICE_ID_SI_630 0x0630
-#define PCI_DEVICE_ID_SI_730 0x0730
+#define PCI_DEVICE_ID_SI_730 0x0730
#define PCI_DEVICE_ID_SI_630_VGA 0x6300
#define PCI_DEVICE_ID_SI_730_VGA 0x7300
+#define PCI_DEVICE_ID_SI_900 0x0900
#define PCI_DEVICE_ID_SI_5107 0x5107
#define PCI_DEVICE_ID_SI_5300 0x5300
#define PCI_DEVICE_ID_SI_5511 0x5511
@@ -439,6 +454,7 @@
#define PCI_DEVICE_ID_SI_6306 0x6306
#define PCI_DEVICE_ID_SI_6326 0x6326
#define PCI_DEVICE_ID_SI_7001 0x7001
+#define PCI_DEVICE_ID_SI_7016 0x7016
#define PCI_VENDOR_ID_HP 0x103c
#define PCI_DEVICE_ID_HP_DONNER_GFX 0x1008
@@ -508,6 +524,8 @@
#define PCI_DEVICE_ID_TI_1251A 0xac1d
#define PCI_DEVICE_ID_TI_1211 0xac1e
#define PCI_DEVICE_ID_TI_1251B 0xac1f
+#define PCI_DEVICE_ID_TI_4410 0xac41
+#define PCI_DEVICE_ID_TI_4451 0xac42
#define PCI_DEVICE_ID_TI_1420 0xac51
#define PCI_VENDOR_ID_SONY 0x104d
@@ -544,6 +562,7 @@
#define PCI_DEVICE_ID_PROMISE_20267 0x4d30
#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
+#define PCI_DEVICE_ID_PROMISE_20268 0x4d68
#define PCI_DEVICE_ID_PROMISE_5300 0x5300
#define PCI_VENDOR_ID_N9 0x105d
@@ -800,6 +819,8 @@
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_8849 0x8849
+#define PCI_DEVICE_ID_IMS_TT128 0x9128
+#define PCI_DEVICE_ID_IMS_TT3D 0x9135
#define PCI_VENDOR_ID_TEKRAM2 0x10e1
#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
@@ -825,6 +846,9 @@
#define PCI_DEVICE_ID_REALTEK_8129 0x8129
#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+#define PCI_VENDOR_ID_XILINX 0x10ee
+#define PCI_DEVICE_ID_TURBOPAM 0x4020
+
#define PCI_VENDOR_ID_TRUEVISION 0x10fa
#define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c
@@ -1003,15 +1027,18 @@
#define PCI_DEVICE_ID_RENDITION_VERITE 0x0001
#define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000
-#define PCI_VENDOR_ID_SERVERWORKS 0x1166
-#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
-#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
-#define PCI_DEVICE_ID_SERVERWORKS_CIOB30 0x0010
-#define PCI_DEVICE_ID_SERVERWORKS_CMIC_HE 0x0011
-#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
-#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
+#define PCI_VENDOR_ID_SERVERWORKS 0x1166
+#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
+#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
+#define PCI_DEVICE_ID_SERVERWORKS_CIOB30 0x0010
+#define PCI_DEVICE_ID_SERVERWORKS_CMIC_HE 0x0011
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
#define PCI_DEVICE_ID_SERVERWORKS_OSB4USB 0x0220
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5USB PCI_DEVICE_ID_SERVERWORKS_OSB4USB
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5ISA 0x0230
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
@@ -1189,6 +1216,9 @@
#define PCI_VENDOR_ID_ROCKWELL 0x127A
+#define PCI_VENDOR_ID_ITE 0x1283
+#define PCI_DEVICE_ID_ITE_IT8172G 0x8172
+
/* formerly Platform Tech */
#define PCI_VENDOR_ID_ESS_OLD 0x1285
#define PCI_DEVICE_ID_ESS_ESS0100 0x0100
@@ -1299,6 +1329,9 @@
#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
#define PCI_SUBDEVICE_ID_HYPERCOPE_PLEXUS 0x0109
+#define PCI_VENDOR_ID_KAWASAKI 0x136b
+#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
+
#define PCI_VENDOR_ID_LMC 0x1376
#define PCI_DEVICE_ID_LMC_HSSI 0x0003
#define PCI_DEVICE_ID_LMC_DS3 0x0004
@@ -1372,6 +1405,8 @@
#define PCI_DEVICE_ID_AIRONET_4500 0x4800 // drivers/net/aironet4500_card.c
#define PCI_VENDOR_ID_TITAN 0x14D2
+#define PCI_DEVICE_ID_TITAN_110L 0x8011
+#define PCI_DEVICE_ID_TITAN_210L 0x8021
#define PCI_DEVICE_ID_TITAN_100 0xA001
#define PCI_DEVICE_ID_TITAN_200 0xA005
#define PCI_DEVICE_ID_TITAN_400 0xA003
@@ -1592,6 +1627,10 @@
#define PCI_VENDOR_ID_HOLTEK 0x9412
#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+#define PCI_VENDOR_ID_NETMOS 0x9710
+#define PCI_DEVICE_ID_NETMOS_9735 0x9735
+#define PCI_DEVICE_ID_NETMOS_9835 0x9835
+
#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h
index 4cff4260c939..00d314fb918e 100644
--- a/include/linux/sysv_fs.h
+++ b/include/linux/sysv_fs.h
@@ -7,7 +7,6 @@
/* This code assumes
- - a little endian processor like 386,
- sizeof(short) = 2, sizeof(int) = 4, sizeof(long) = 4,
- alignof(short) = 2, alignof(long) = 4.
*/
@@ -26,27 +25,17 @@
/* Layout on disk */
/* ============== */
-
-/* The block size is sb->sv_block_size which may be smaller than BLOCK_SIZE. */
-
-/* zones (= data allocation units) are blocks */
-
-/* On Coherent FS, 32 bit quantities are stored using (I quote the Coherent
- manual) a "canonical byte ordering". This is the PDP-11 byte ordering:
- x = 2^24 * byte3 + 2^16 * byte2 + 2^8 * byte1 + byte0 is stored
- as { byte2, byte3, byte0, byte1 }. We need conversions.
-*/
-
-typedef u32 coh_ulong;
-
-static inline coh_ulong to_coh_ulong (u32 x)
-{
- return ((x & 0xffff) << 16) | ((x & 0xffff0000) >> 16);
-}
-
-static inline u32 from_coh_ulong (coh_ulong x)
+static inline u32 PDP_swab(u32 x)
{
+#ifdef __LITTLE_ENDIAN
return ((x & 0xffff) << 16) | ((x & 0xffff0000) >> 16);
+#else
+#ifdef __BIG_ENDIAN
+ return ((x & 0xff00ff) << 8) | ((x & 0xff00ff00) >> 8);
+#else
+#error BYTESEX
+#endif
+#endif
}
/* inode numbers are 16 bit */
@@ -103,12 +92,6 @@ struct xenix_super_block {
};
-/* Xenix free list block on disk */
-struct xenix_freelist_chunk {
- u16 fl_nfree; /* number of free blocks in fl_free, <= XENIX_NICFREE] */
- u32 fl_free[XENIX_NICFREE] __packed2__;
-};
-
/* SystemV FS comes in two variants:
* sysv2: System V Release 2 (e.g. Microport), structure elements aligned(2).
* sysv4: System V Release 4 (e.g. Consensys), structure elements aligned(4).
@@ -148,12 +131,6 @@ struct sysv4_super_block {
2 for 1024 byte blocks */
};
-/* SystemV4 free list block on disk */
-struct sysv4_freelist_chunk {
- u16 fl_nfree; /* number of free blocks in fl_free, <= SYSV_NICFREE] */
- u32 fl_free[SYSV_NICFREE];
-};
-
/* SystemV2 super-block data on disk */
struct sysv2_super_block {
u16 s_isize; /* index of first data zone */
@@ -182,10 +159,31 @@ struct sysv2_super_block {
2 for 1024 byte blocks */
};
-/* SystemV2 free list block on disk */
-struct sysv2_freelist_chunk {
- u16 fl_nfree; /* number of free blocks in fl_free, <= SYSV_NICFREE] */
- u32 fl_free[SYSV_NICFREE] __packed2__;
+/* V7 super-block data on disk */
+#define V7_NICINOD 100 /* number of inode cache entries */
+#define V7_NICFREE 50 /* number of free block list chunk entries */
+struct v7_super_block {
+ u16 s_isize; /* index of first data zone */
+ u32 s_fsize __packed2__; /* total number of zones of this fs */
+ /* the start of the free block list: */
+ u16 s_nfree; /* number of free blocks in s_free, <= V7_NICFREE */
+ u32 s_free[V7_NICFREE]; /* first free block list chunk */
+ /* the cache of free inodes: */
+ u16 s_ninode; /* number of free inodes in s_inode, <= V7_NICINOD */
+ sysv_ino_t s_inode[V7_NICINOD]; /* some free inodes */
+ /* locks, not used by Linux or V7: */
+ char s_flock; /* lock during free block list manipulation */
+ char s_ilock; /* lock during inode cache manipulation */
+ char s_fmod; /* super-block modified flag */
+ char s_ronly; /* flag whether fs is mounted read-only */
+ u32 s_time __packed2__; /* time of last super block update */
+ /* the following fields are not maintained by V7: */
+ u32 s_tfree __packed2__; /* total number of free zones */
+ u16 s_tinode; /* total number of free inodes */
+ u16 s_m; /* interleave factor */
+ u16 s_n; /* interleave factor */
+ char s_fname[6]; /* file system name */
+ char s_fpack[6]; /* file system pack name */
};
/* Coherent super-block data on disk */
@@ -193,10 +191,10 @@ struct sysv2_freelist_chunk {
#define COH_NICFREE 64 /* number of free block list chunk entries */
struct coh_super_block {
u16 s_isize; /* index of first data zone */
- coh_ulong s_fsize __packed2__; /* total number of zones of this fs */
+ u32 s_fsize __packed2__; /* total number of zones of this fs */
/* the start of the free block list: */
u16 s_nfree; /* number of free blocks in s_free, <= COH_NICFREE */
- coh_ulong s_free[COH_NICFREE] __packed2__; /* first free block list chunk */
+ u32 s_free[COH_NICFREE] __packed2__; /* first free block list chunk */
/* the cache of free inodes: */
u16 s_ninode; /* number of free inodes in s_inode, <= COH_NICINOD */
sysv_ino_t s_inode[COH_NICINOD]; /* some free inodes */
@@ -205,8 +203,8 @@ struct coh_super_block {
char s_ilock; /* lock during inode cache manipulation */
char s_fmod; /* super-block modified flag */
char s_ronly; /* flag whether fs is mounted read-only */
- coh_ulong s_time __packed2__; /* time of last super block update */
- coh_ulong s_tfree __packed2__; /* total number of free zones */
+ u32 s_time __packed2__; /* time of last super block update */
+ u32 s_tfree __packed2__; /* total number of free zones */
u16 s_tinode; /* total number of free inodes */
u16 s_interleave_m; /* interleave factor */
u16 s_interleave_n;
@@ -215,13 +213,6 @@ struct coh_super_block {
u32 s_unique; /* zero, not used */
};
-/* Coherent free list block on disk */
-struct coh_freelist_chunk {
- u16 fl_nfree; /* number of free blocks in fl_free, <= COH_NICFREE] */
- u32 fl_free[COH_NICFREE] __packed2__;
-};
-
-
/* SystemV/Coherent inode data on disk */
struct sysv_inode {
@@ -237,8 +228,6 @@ struct sysv_inode {
* then 1 triple indirection block.
* Then maybe a "file generation number" ??
*/
- /* devices */
- dev_t i_rdev;
/* named pipes on Coherent */
struct {
char p_addp[30];
@@ -300,9 +289,12 @@ extern inline unsigned short to_coh_imode(mode_t mode)
}
/* Admissible values for i_nlink: 0.._LINK_MAX */
-#define XENIX_LINK_MAX 126 /* ?? */
-#define SYSV_LINK_MAX 126 /* 127? 251? */
-#define COH_LINK_MAX 10000 /* max number of hard links to an inode */
+enum {
+ XENIX_LINK_MAX = 126, /* ?? */
+ SYSV_LINK_MAX = 126, /* 127? 251? */
+ V7_LINK_MAX = 126, /* ?? */
+ COH_LINK_MAX = 10000,
+};
/* The number of inodes per block is
sb->sv_inodes_per_block = block_size / sizeof(struct sysv_inode) */
@@ -325,12 +317,16 @@ struct sysv_dir_entry {
/* Operations */
/* ========== */
-
/* identify the FS in memory */
-#define FSTYPE_XENIX 1
-#define FSTYPE_SYSV4 2
-#define FSTYPE_SYSV2 3
-#define FSTYPE_COH 4
+enum {
+ FSTYPE_NONE = 0,
+ FSTYPE_XENIX,
+ FSTYPE_SYSV4,
+ FSTYPE_SYSV2,
+ FSTYPE_COH,
+ FSTYPE_V7,
+ FSTYPE_END,
+};
#define SYSV_MAGIC_BASE 0x012FF7B3
@@ -341,55 +337,112 @@ struct sysv_dir_entry {
#ifdef __KERNEL__
-/* sv_get_hash_table(sb,dev,block) is equivalent to get_hash_table(dev,block,block_size) */
-static inline struct buffer_head *
-sv_get_hash_table (struct super_block *sb, kdev_t dev, unsigned int block)
-{
- return get_hash_table (dev, block + sb->sv_block_base, sb->sv_block_size);
-}
-
-/* sv_getblk(sb,dev,block) is equivalent to getblk(dev,block,block_size) */
-static inline struct buffer_head *
-sv_getblk (struct super_block *sb, kdev_t dev, unsigned int block)
-{
- return getblk (dev, block + sb->sv_block_base, sb->sv_block_size);
-}
-
-/* sv_bread(sb,dev,block) is equivalent to bread(dev,block,block_size) */
-static inline struct buffer_head *
-sv_bread (struct super_block *sb, kdev_t dev, unsigned int block)
-{
- return bread (dev, block + sb->sv_block_base, sb->sv_block_size);
-}
-
+enum {
+ BYTESEX_LE,
+ BYTESEX_PDP,
+ BYTESEX_BE,
+};
/*
* Function prototypes
*/
-extern struct inode * sysv_new_inode(const struct inode * dir);
-extern void sysv_free_inode(struct inode * inode);
-extern unsigned long sysv_count_free_inodes(struct super_block *sb);
-extern int sysv_new_block(struct super_block * sb);
-extern void sysv_free_block(struct super_block * sb, unsigned int block);
-extern unsigned long sysv_count_free_blocks(struct super_block *sb);
-
-extern struct buffer_head * sysv_file_bread(struct inode *, int, int);
+extern struct inode * sysv_new_inode(const struct inode *, mode_t);
+extern void sysv_free_inode(struct inode *);
+extern unsigned long sysv_count_free_inodes(struct super_block *);
+extern u32 sysv_new_block(struct super_block *);
+extern void sysv_free_block(struct super_block *, u32);
+extern unsigned long sysv_count_free_blocks(struct super_block *);
extern void sysv_truncate(struct inode *);
+
extern void sysv_write_inode(struct inode *, int);
extern int sysv_sync_inode(struct inode *);
extern int sysv_sync_file(struct file *, struct dentry *, int);
extern int sysv_notify_change(struct dentry *, struct iattr *);
+extern void sysv_set_inode(struct inode *, dev_t);
+
+extern struct sysv_dir_entry *sysv_find_entry(struct dentry*, struct page**);
+extern int sysv_add_link(struct dentry*, struct inode*);
+extern int sysv_delete_entry(struct sysv_dir_entry*, struct page*);
+extern int sysv_make_empty(struct inode*, struct inode*);
+extern int sysv_empty_dir(struct inode*);
+extern void sysv_set_link(struct sysv_dir_entry*, struct page*, struct inode*);
+extern struct sysv_dir_entry *sysv_dotdot(struct inode*, struct page**);
+extern ino_t sysv_inode_by_name(struct dentry*);
extern struct inode_operations sysv_file_inode_operations;
-extern struct inode_operations sysv_symlink_inode_operations;
extern struct inode_operations sysv_dir_inode_operations;
extern struct file_operations sysv_file_operations;
extern struct file_operations sysv_dir_operations;
extern struct address_space_operations sysv_aops;
+extern struct super_operations sysv_sops;
+extern struct dentry_operations sysv_dentry_operations;
+
+extern struct sysv_inode *sysv_raw_inode(struct super_block *, unsigned, struct buffer_head **);
+
+static inline void dirty_sb(struct super_block *sb)
+{
+ mark_buffer_dirty(sb->sv_bh1);
+ if (sb->sv_bh1 != sb->sv_bh2)
+ mark_buffer_dirty(sb->sv_bh2);
+ sb->s_dirt = 1;
+}
+
+static inline u32 fs32_to_cpu(struct super_block *sb, u32 n)
+{
+ if (sb->sv_bytesex == BYTESEX_PDP)
+ return PDP_swab(n);
+ else if (sb->sv_bytesex == BYTESEX_LE)
+ return le32_to_cpu(n);
+ else
+ return be32_to_cpu(n);
+}
+
+static inline u32 cpu_to_fs32(struct super_block *sb, u32 n)
+{
+ if (sb->sv_bytesex == BYTESEX_PDP)
+ return PDP_swab(n);
+ else if (sb->sv_bytesex == BYTESEX_LE)
+ return cpu_to_le32(n);
+ else
+ return cpu_to_be32(n);
+}
+
+static inline u32 fs32_add(struct super_block *sb, u32 *n, int d)
+{
+ if (sb->sv_bytesex == BYTESEX_PDP)
+ return *n = PDP_swab(PDP_swab(*n)+d);
+ else if (sb->sv_bytesex == BYTESEX_LE)
+ return *n = cpu_to_le32(le32_to_cpu(*n)+d);
+ else
+ return *n = cpu_to_be32(be32_to_cpu(*n)+d);
+}
+
+static inline u16 fs16_to_cpu(struct super_block *sb, u16 n)
+{
+ if (sb->sv_bytesex != BYTESEX_BE)
+ return le16_to_cpu(n);
+ else
+ return be16_to_cpu(n);
+}
+
+static inline u16 cpu_to_fs16(struct super_block *sb, u16 n)
+{
+ if (sb->sv_bytesex != BYTESEX_BE)
+ return cpu_to_le16(n);
+ else
+ return cpu_to_be16(n);
+}
+
+static inline u16 fs16_add(struct super_block *sb, u16 *n, int d)
+{
+ if (sb->sv_bytesex != BYTESEX_BE)
+ return *n = cpu_to_le16(le16_to_cpu(*n)+d);
+ else
+ return *n = cpu_to_be16(be16_to_cpu(*n)+d);
+}
#endif /* __KERNEL__ */
#endif
-
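
The new byte-order layer replaces the old Coherent-only conversion: PDP_swab() handles the PDP-11 "middle-endian" layout, and the fs32/fs16 helpers dispatch on sb->sv_bytesex at run time. A standalone sanity check of the little-endian variant of the swap (not part of the patch) is sketched here, using the value 0x01020304, which is stored on disk as the byte sequence 02 01 04 03.

	#include <assert.h>
	#include <stdint.h>

	static uint32_t pdp_swab_le(uint32_t x)
	{
		/* little-endian case: exchange the two 16-bit halves */
		return ((x & 0xffff) << 16) | ((x & 0xffff0000) >> 16);
	}

	int main(void)
	{
		/* A little-endian CPU reads the raw bytes 02 01 04 03 as
		 * 0x03040102; one half-word swap recovers 0x01020304.
		 * In the kernel header, fs32_to_cpu()/cpu_to_fs32() choose
		 * between this swap, le32_to_cpu() and be32_to_cpu() based
		 * on sb->sv_bytesex (BYTESEX_PDP/LE/BE). */
		assert(pdp_swab_le(0x03040102) == 0x01020304);
		return 0;
	}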
diff --git a/include/linux/sysv_fs_i.h b/include/linux/sysv_fs_i.h
index 990b3543fdcd..08eff4449aff 100644
--- a/include/linux/sysv_fs_i.h
+++ b/include/linux/sysv_fs_i.h
@@ -2,7 +2,7 @@
#define _SYSV_FS_I
/*
- * SystemV/Coherent FS inode data in memory
+ * SystemV/V7/Coherent FS inode data in memory
*/
struct sysv_inode_info {
u32 i_data[10+1+1+1]; /* zone numbers: max. 10 data blocks,
diff --git a/include/linux/sysv_fs_sb.h b/include/linux/sysv_fs_sb.h
index df886f651c02..fe324c96df84 100644
--- a/include/linux/sysv_fs_sb.h
+++ b/include/linux/sysv_fs_sb.h
@@ -2,8 +2,8 @@
#define _SYSV_FS_SB
/*
- * SystemV/Coherent super-block data in memory
- * The SystemV/Coherent superblock contains dynamic data (it gets modified
+ * SystemV/V7/Coherent super-block data in memory
+ * The SystemV/V7/Coherent superblock contains dynamic data (it gets modified
* while the system is running). This is in contrast to the Minix and Berkeley
* filesystems (where the superblock is never modified). This affects the
* sync() operation: we must keep the superblock in a disk buffer and use this
@@ -12,12 +12,7 @@
struct sysv_sb_info {
int s_type; /* file system type: FSTYPE_{XENIX|SYSV|COH} */
- unsigned int s_block_size; /* zone size, = 512 or = 1024 */
- unsigned int s_block_size_1; /* block_size - 1 */
- unsigned int s_block_size_bits; /* log2(block_size) */
- unsigned int s_block_size_inc_bits; /* log2(block_size/BLOCK_SIZE) if >0 */
- unsigned int s_block_size_dec_bits; /* log2(BLOCK_SIZE/block_size) if >0 */
- char s_convert; /* flag whether byte ordering requires conversion */
+ char s_bytesex; /* bytesex (le/be/pdp) */
char s_kludge_symlinks; /* flag whether symlinks have a kludgey mode */
char s_truncate; /* if 1: names > SYSV_NAMELEN chars are truncated */
/* if 0: they are disallowed (ENAMETOOLONG) */
@@ -26,19 +21,8 @@ struct sysv_sb_info {
unsigned int s_inodes_per_block_1; /* inodes_per_block - 1 */
unsigned int s_inodes_per_block_bits; /* log2(inodes_per_block) */
unsigned int s_ind_per_block; /* number of indirections per block */
- unsigned int s_ind_per_block_1; /* ind_per_block - 1 */
unsigned int s_ind_per_block_bits; /* log2(ind_per_block) */
unsigned int s_ind_per_block_2; /* ind_per_block ^ 2 */
- unsigned int s_ind_per_block_2_1; /* ind_per_block ^ 2 - 1 */
- unsigned int s_ind_per_block_2_bits; /* log2(ind_per_block^2) */
- unsigned int s_ind_per_block_3; /* ind_per_block ^ 3 */
- unsigned int s_ind_per_block_block_size_1; /* ind_per_block*block_size - 1 */
- unsigned int s_ind_per_block_block_size_bits; /* log2(ind_per_block*block_size) */
- unsigned int s_ind_per_block_2_block_size_1; /* ind_per_block^2 * block_size - 1 */
- unsigned int s_ind_per_block_2_block_size_bits; /* log2(ind_per_block^2 * block_size) */
- unsigned int s_ind0_size; /* 10 * block_size */
- unsigned int s_ind1_size; /* (10 + ipb) * block_size */
- unsigned int s_ind2_size; /* (10 + ipb + ipb^2) * block_size */
unsigned int s_toobig_block; /* 10 + ipb + ipb^2 + ipb^3 */
unsigned int s_block_base; /* physical block number of block 0 */
unsigned short s_fic_size; /* free inode cache size, NICINOD */
@@ -53,9 +37,9 @@ struct sysv_sb_info {
u16 *s_sb_fic_count; /* pointer to s_sbd->s_ninode */
u16 *s_sb_fic_inodes; /* pointer to s_sbd->s_inode */
u16 *s_sb_total_free_inodes; /* pointer to s_sbd->s_tinode */
- u16 *s_sb_flc_count; /* pointer to s_sbd->s_nfree */
- u32 *s_sb_flc_blocks; /* pointer to s_sbd->s_free */
- u32 *s_sb_total_free_blocks;/* pointer to s_sbd->s_tfree */
+ u16 *s_bcache_count; /* pointer to s_sbd->s_nfree */
+ u32 *s_bcache; /* pointer to s_sbd->s_free */
+ u32 *s_free_blocks; /* pointer to s_sbd->s_tfree */
u32 *s_sb_time; /* pointer to s_sbd->s_time */
u32 *s_sb_state; /* pointer to s_sbd->s_state, only FSTYPE_SYSV */
/* We keep those superblock entities that don't change here;
@@ -65,17 +49,13 @@ struct sysv_sb_info {
u32 s_ninodes; /* total number of inodes */
u32 s_ndatazones; /* total number of data zones */
u32 s_nzones; /* same as s_sbd->s_fsize */
+ u16 s_namelen; /* max length of dir entry */
};
-/* The fields s_ind_per_block_2_1, s_toobig_block are currently unused. */
+/* The field s_toobig_block is currently unused. */
/* sv_ == u.sysv_sb.s_ */
#define sv_type u.sysv_sb.s_type
-#define sv_block_size u.sysv_sb.s_block_size
-#define sv_block_size_1 u.sysv_sb.s_block_size_1
-#define sv_block_size_bits u.sysv_sb.s_block_size_bits
-#define sv_block_size_inc_bits u.sysv_sb.s_block_size_inc_bits
-#define sv_block_size_dec_bits u.sysv_sb.s_block_size_dec_bits
-#define sv_convert u.sysv_sb.s_convert
+#define sv_bytesex u.sysv_sb.s_bytesex
#define sv_kludge_symlinks u.sysv_sb.s_kludge_symlinks
#define sv_truncate u.sysv_sb.s_truncate
#define sv_link_max u.sysv_sb.s_link_max
@@ -83,19 +63,8 @@ struct sysv_sb_info {
#define sv_inodes_per_block_1 u.sysv_sb.s_inodes_per_block_1
#define sv_inodes_per_block_bits u.sysv_sb.s_inodes_per_block_bits
#define sv_ind_per_block u.sysv_sb.s_ind_per_block
-#define sv_ind_per_block_1 u.sysv_sb.s_ind_per_block_1
#define sv_ind_per_block_bits u.sysv_sb.s_ind_per_block_bits
#define sv_ind_per_block_2 u.sysv_sb.s_ind_per_block_2
-#define sv_ind_per_block_2_1 u.sysv_sb.s_ind_per_block_2_1
-#define sv_ind_per_block_2_bits u.sysv_sb.s_ind_per_block_2_bits
-#define sv_ind_per_block_3 u.sysv_sb.s_ind_per_block_3
-#define sv_ind_per_block_block_size_1 u.sysv_sb.s_ind_per_block_block_size_1
-#define sv_ind_per_block_block_size_bits u.sysv_sb.s_ind_per_block_block_size_bits
-#define sv_ind_per_block_2_block_size_1 u.sysv_sb.s_ind_per_block_2_block_size_1
-#define sv_ind_per_block_2_block_size_bits u.sysv_sb.s_ind_per_block_2_block_size_bits
-#define sv_ind0_size u.sysv_sb.s_ind0_size
-#define sv_ind1_size u.sysv_sb.s_ind1_size
-#define sv_ind2_size u.sysv_sb.s_ind2_size
#define sv_toobig_block u.sysv_sb.s_toobig_block
#define sv_block_base u.sysv_sb.s_block_base
#define sv_fic_size u.sysv_sb.s_fic_size
@@ -107,9 +76,9 @@ struct sysv_sb_info {
#define sv_sb_fic_count u.sysv_sb.s_sb_fic_count
#define sv_sb_fic_inodes u.sysv_sb.s_sb_fic_inodes
#define sv_sb_total_free_inodes u.sysv_sb.s_sb_total_free_inodes
-#define sv_sb_flc_count u.sysv_sb.s_sb_flc_count
-#define sv_sb_flc_blocks u.sysv_sb.s_sb_flc_blocks
-#define sv_sb_total_free_blocks u.sysv_sb.s_sb_total_free_blocks
+#define sv_bcache_count u.sysv_sb.s_bcache_count
+#define sv_bcache u.sysv_sb.s_bcache
+#define sv_free_blocks u.sysv_sb.s_free_blocks
#define sv_sb_time u.sysv_sb.s_sb_time
#define sv_sb_state u.sysv_sb.s_sb_state
#define sv_firstinodezone u.sysv_sb.s_firstinodezone
@@ -117,6 +86,6 @@ struct sysv_sb_info {
#define sv_ninodes u.sysv_sb.s_ninodes
#define sv_ndatazones u.sysv_sb.s_ndatazones
#define sv_nzones u.sysv_sb.s_nzones
+#define sv_namelen u.sysv_sb.s_namelen
#endif
-
diff --git a/include/linux/udf_fs.h b/include/linux/udf_fs.h
index 4edb0a337318..736d617f0a68 100644
--- a/include/linux/udf_fs.h
+++ b/include/linux/udf_fs.h
@@ -37,8 +37,8 @@
#define UDF_PREALLOCATE
#define UDF_DEFAULT_PREALLOC_BLOCKS 8
-#define UDFFS_DATE "2001/06/06"
-#define UDFFS_VERSION "0.9.4"
+#define UDFFS_DATE "2001/06/13"
+#define UDFFS_VERSION "0.9.4.1"
#if !defined(UDFFS_RW)
diff --git a/include/linux/udf_fs_i.h b/include/linux/udf_fs_i.h
index 84c481cd390b..3d48fced7cc1 100644
--- a/include/linux/udf_fs_i.h
+++ b/include/linux/udf_fs_i.h
@@ -30,9 +30,10 @@ typedef struct
struct udf_inode_info
{
- long i_uatime;
long i_umtime;
long i_uctime;
+ long i_crtime;
+ long i_ucrtime;
/* Physical address of inode */
lb_addr i_location;
__u64 i_unique;
diff --git a/include/linux/udf_udf.h b/include/linux/udf_udf.h
index 4fa4b3df754e..6b9df04be3e1 100644
--- a/include/linux/udf_udf.h
+++ b/include/linux/udf_udf.h
@@ -162,6 +162,10 @@ struct VirtualAllocationTable20 {
Uint32 vatEntry[0];
};
+/* ----------- 2.01 ------------- */
+/* UDF 2.01 6.11 */
+#define FILE_TYPE_REALTIME 0xf9U
+
/* Sparing maps, see UDF 1.5 2.2.11 */
typedef struct {
Uint32 origLocation;