Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/bitops.h  4
-rw-r--r--  include/asm-arm/arch-ebsa110/io.h  54
-rw-r--r--  include/asm-arm/arch-ebsa110/uncompress.h  4
-rw-r--r--  include/asm-arm/arch-ixp4xx/dma.h  26
-rw-r--r--  include/asm-arm/arch-ixp4xx/memory.h  34
-rw-r--r--  include/asm-arm/arch-rpc/uncompress.h  7
-rw-r--r--  include/asm-arm/arch-sa1100/dma.h  17
-rw-r--r--  include/asm-arm/arch-sa1100/memory.h  24
-rw-r--r--  include/asm-arm/arch-shark/dma.h  12
-rw-r--r--  include/asm-arm/arch-shark/memory.h  21
-rw-r--r--  include/asm-arm/dma.h  5
-rw-r--r--  include/asm-arm/hardware/clock.h  2
-rw-r--r--  include/asm-arm/mach/arch.h  6
-rw-r--r--  include/asm-arm/memory.h  14
-rw-r--r--  include/asm-arm/processor.h  1
-rw-r--r--  include/asm-arm/scatterlist.h  3
-rw-r--r--  include/asm-i386/setup.h  3
-rw-r--r--  include/asm-ia64/iosapic.h  21
-rw-r--r--  include/asm-ia64/smp.h  4
-rw-r--r--  include/asm-ia64/sn/bte.h  49
-rw-r--r--  include/asm-ia64/sn/pda.h  2
-rw-r--r--  include/asm-ia64/sn/sn_cpuid.h  1
-rw-r--r--  include/asm-ppc64/iSeries/HvTypes.h  4
-rw-r--r--  include/asm-ppc64/vio.h  8
-rw-r--r--  include/asm-sh64/a.out.h  37
-rw-r--r--  include/asm-sh64/atomic.h  126
-rw-r--r--  include/asm-sh64/bitops.h  518
-rw-r--r--  include/asm-sh64/bug.h  7
-rw-r--r--  include/asm-sh64/bugs.h  38
-rw-r--r--  include/asm-sh64/byteorder.h  49
-rw-r--r--  include/asm-sh64/cache.h  141
-rw-r--r--  include/asm-sh64/cacheflush.h  44
-rw-r--r--  include/asm-sh64/cayman.h  20
-rw-r--r--  include/asm-sh64/checksum.h  95
-rw-r--r--  include/asm-sh64/cpumask.h  6
-rw-r--r--  include/asm-sh64/current.h  28
-rw-r--r--  include/asm-sh64/delay.h  11
-rw-r--r--  include/asm-sh64/div64.h  6
-rw-r--r--  include/asm-sh64/dma-mapping.h  163
-rw-r--r--  include/asm-sh64/dma.h  41
-rw-r--r--  include/asm-sh64/elf.h  101
-rw-r--r--  include/asm-sh64/errno.h  6
-rw-r--r--  include/asm-sh64/fcntl.h  7
-rw-r--r--  include/asm-sh64/hardirq.h  7
-rw-r--r--  include/asm-sh64/hardware.h  45
-rw-r--r--  include/asm-sh64/hdreg.h  6
-rw-r--r--  include/asm-sh64/hw_irq.h  16
-rw-r--r--  include/asm-sh64/ide.h  30
-rw-r--r--  include/asm-sh64/io.h  217
-rw-r--r--  include/asm-sh64/ioctl.h  83
-rw-r--r--  include/asm-sh64/ioctls.h  111
-rw-r--r--  include/asm-sh64/ipc.h  6
-rw-r--r--  include/asm-sh64/ipcbuf.h  40
-rw-r--r--  include/asm-sh64/irq.h  148
-rw-r--r--  include/asm-sh64/keyboard.h  74
-rw-r--r--  include/asm-sh64/kmap_types.h  7
-rw-r--r--  include/asm-sh64/linkage.h  7
-rw-r--r--  include/asm-sh64/local.h  7
-rw-r--r--  include/asm-sh64/mc146818rtc.h  7
-rw-r--r--  include/asm-sh64/mman.h  6
-rw-r--r--  include/asm-sh64/mmu.h  7
-rw-r--r--  include/asm-sh64/mmu_context.h  209
-rw-r--r--  include/asm-sh64/module.h  12
-rw-r--r--  include/asm-sh64/msgbuf.h  42
-rw-r--r--  include/asm-sh64/namei.h  24
-rw-r--r--  include/asm-sh64/page.h  137
-rw-r--r--  include/asm-sh64/param.h  43
-rw-r--r--  include/asm-sh64/pci.h  110
-rw-r--r--  include/asm-sh64/percpu.h  6
-rw-r--r--  include/asm-sh64/pgalloc.h  202
-rw-r--r--  include/asm-sh64/pgtable.h  498
-rw-r--r--  include/asm-sh64/platform.h  69
-rw-r--r--  include/asm-sh64/poll.h  36
-rw-r--r--  include/asm-sh64/posix_types.h  131
-rw-r--r--  include/asm-sh64/processor.h  292
-rw-r--r--  include/asm-sh64/ptrace.h  36
-rw-r--r--  include/asm-sh64/registers.h  106
-rw-r--r--  include/asm-sh64/resource.h  47
-rw-r--r--  include/asm-sh64/scatterlist.h  23
-rw-r--r--  include/asm-sh64/sections.h  7
-rw-r--r--  include/asm-sh64/segment.h  6
-rw-r--r--  include/asm-sh64/semaphore-helper.h  101
-rw-r--r--  include/asm-sh64/semaphore.h  146
-rw-r--r--  include/asm-sh64/sembuf.h  36
-rw-r--r--  include/asm-sh64/serial.h  33
-rw-r--r--  include/asm-sh64/shmbuf.h  53
-rw-r--r--  include/asm-sh64/shmparam.h  20
-rw-r--r--  include/asm-sh64/sigcontext.h  30
-rw-r--r--  include/asm-sh64/siginfo.h  6
-rw-r--r--  include/asm-sh64/signal.h  185
-rw-r--r--  include/asm-sh64/smp.h  15
-rw-r--r--  include/asm-sh64/smplock.h  77
-rw-r--r--  include/asm-sh64/socket.h  6
-rw-r--r--  include/asm-sh64/sockios.h  24
-rw-r--r--  include/asm-sh64/softirq.h  30
-rw-r--r--  include/asm-sh64/spinlock.h  17
-rw-r--r--  include/asm-sh64/stat.h  88
-rw-r--r--  include/asm-sh64/statfs.h  6
-rw-r--r--  include/asm-sh64/string.h  21
-rw-r--r--  include/asm-sh64/system.h  194
-rw-r--r--  include/asm-sh64/termbits.h  6
-rw-r--r--  include/asm-sh64/termios.h  117
-rw-r--r--  include/asm-sh64/thread_info.h  82
-rw-r--r--  include/asm-sh64/timex.h  36
-rw-r--r--  include/asm-sh64/tlb.h  92
-rw-r--r--  include/asm-sh64/tlbflush.h  31
-rw-r--r--  include/asm-sh64/topology.h  6
-rw-r--r--  include/asm-sh64/types.h  76
-rw-r--r--  include/asm-sh64/uaccess.h  317
-rw-r--r--  include/asm-sh64/ucontext.h  23
-rw-r--r--  include/asm-sh64/unaligned.h  28
-rw-r--r--  include/asm-sh64/unistd.h  555
-rw-r--r--  include/asm-sh64/user.h  71
-rw-r--r--  include/linux/binfmts.h  8
-rw-r--r--  include/linux/blkdev.h  2
-rw-r--r--  include/linux/console.h  3
-rw-r--r--  include/linux/dma-mapping.h  23
-rw-r--r--  include/linux/edd.h  31
-rw-r--r--  include/linux/fb.h  2
-rw-r--r--  include/linux/fd.h  1
-rw-r--r--  include/linux/hpet.h  14
-rw-r--r--  include/linux/hugetlb.h  10
-rw-r--r--  include/linux/init.h  12
-rw-r--r--  include/linux/netlink.h  3
-rw-r--r--  include/linux/rtnetlink.h  3
-rw-r--r--  include/linux/sched.h  3
-rw-r--r--  include/net/pkt_sched.h  46
127 files changed, 7115 insertions, 204 deletions
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 4c0e5f417cf3..22ecbf95ffe1 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -418,9 +418,9 @@ find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
* Find next one bit in a bitmap reasonably efficiently.
*/
static inline unsigned long
-find_next_bit(void * addr, unsigned long size, unsigned long offset)
+find_next_bit(const void * addr, unsigned long size, unsigned long offset)
{
- unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
+ const unsigned long * p = ((const unsigned long *) addr) + (offset >> 6);
unsigned long result = offset & ~63UL;
unsigned long tmp;
diff --git a/include/asm-arm/arch-ebsa110/io.h b/include/asm-arm/arch-ebsa110/io.h
index 52030abe9cdd..e2c242ff0eb8 100644
--- a/include/asm-arm/arch-ebsa110/io.h
+++ b/include/asm-arm/arch-ebsa110/io.h
@@ -15,26 +15,44 @@
#define IO_SPACE_LIMIT 0xffff
-u8 __inb(int port);
-u16 __inw(int port);
-u32 __inl(int port);
+u8 __inb8(unsigned int port);
+void __outb8(u8 val, unsigned int port);
-#define inb(p) __inb(p)
-#define inw(p) __inw(p)
-#define inl(p) __inl(p)
+u8 __inb16(unsigned int port);
+void __outb16(u8 val, unsigned int port);
-void __outb(u8 val, int port);
-void __outw(u16 val, int port);
-void __outl(u32 val, int port);
+u16 __inw(unsigned int port);
+void __outw(u16 val, unsigned int port);
-#define outb(v,p) __outb(v,p)
-#define outw(v,p) __outw(v,p)
-#define outl(v,p) __outl(v,p)
+u32 __inl(unsigned int port);
+void __outl(u32 val, unsigned int port);
u8 __readb(void *addr);
u16 __readw(void *addr);
u32 __readl(void *addr);
+void __writeb(u8 val, void *addr);
+void __writew(u16 val, void *addr);
+void __writel(u32 val, void *addr);
+
+/*
+ * Argh, someone forgot the IOCS16 line. We therefore have to handle
+ * the byte steering by selecting the correct byte IO functions here.
+ */
+#ifdef ISA_SIXTEEN_BIT_PERIPHERAL
+#define inb(p) __inb16(p)
+#define outb(v,p) __outb16(v,p)
+#else
+#define inb(p) __inb8(p)
+#define outb(v,p) __outb8(v,p)
+#endif
+
+#define inw(p) __inw(p)
+#define outw(v,p) __outw(v,p)
+
+#define inl(p) __inl(p)
+#define outl(v,p) __outl(v,p)
+
#define readb(b) __readb(b)
#define readw(b) __readw(b)
#define readl(b) __readl(b)
@@ -42,10 +60,6 @@ u32 __readl(void *addr);
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
-void __writeb(u8 val, void *addr);
-void __writew(u16 val, void *addr);
-void __writel(u32 val, void *addr);
-
#define writeb(v,b) __writeb(v,b)
#define writew(v,b) __writew(v,b)
#define writel(v,b) __writel(v,b)
@@ -53,4 +67,12 @@ void __writel(u32 val, void *addr);
#define __arch_ioremap(cookie,sz,c,a) ((void *)(cookie))
#define __arch_iounmap(cookie) do { } while (0)
+extern void insb(unsigned int port, void *buf, int sz);
+extern void insw(unsigned int port, void *buf, int sz);
+extern void insl(unsigned int port, void *buf, int sz);
+
+extern void outsb(unsigned int port, const void *buf, int sz);
+extern void outsw(unsigned int port, const void *buf, int sz);
+extern void outsl(unsigned int port, const void *buf, int sz);
+
#endif
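
As a usage illustration of the byte-steering switch above, a minimal sketch; the board file and function are hypothetical, but ISA_SIXTEEN_BIT_PERIPHERAL, inb() and outb() are the macros defined in this header:

/* Hypothetical EBSA110 board/driver file: defining
 * ISA_SIXTEEN_BIT_PERIPHERAL before including <asm/io.h> routes
 * byte accesses through the 16-bit steering helpers. */
#define ISA_SIXTEEN_BIT_PERIPHERAL
#include <asm/io.h>

static u8 read_isa_status(unsigned int port)
{
	return inb(port);	/* expands to __inb16(port) here */
}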
diff --git a/include/asm-arm/arch-ebsa110/uncompress.h b/include/asm-arm/arch-ebsa110/uncompress.h
index 8396e9a3dc0b..1d7d841efc6e 100644
--- a/include/asm-arm/arch-ebsa110/uncompress.h
+++ b/include/asm-arm/arch-ebsa110/uncompress.h
@@ -13,6 +13,7 @@
*/
static void puts(const char *s)
{
+ unsigned long tmp1, tmp2;
__asm__ __volatile__(
"ldrb %0, [%2], #1\n"
" teq %0, #0\n"
@@ -32,7 +33,8 @@ static void puts(const char *s)
" and %1, %1, #0x60\n"
" teq %1, #0x60\n"
" bne 3b"
- : : "r" (0), "r" (0), "r" (s), "r" (0xf0000be0) : "cc");
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (s), "r" (0xf0000be0) : "cc");
}
/*
diff --git a/include/asm-arm/arch-ixp4xx/dma.h b/include/asm-arm/arch-ixp4xx/dma.h
index 686eacaf013c..312065dc0e7a 100644
--- a/include/asm-arm/arch-ixp4xx/dma.h
+++ b/include/asm-arm/arch-ixp4xx/dma.h
@@ -23,30 +23,4 @@
/* No DMA */
#define MAX_DMA_CHANNELS 0
-/*
- * Only first 64MB of memory can be accessed via PCI.
- * We use GFP_DMA to allocate safe buffers to do map/unmap.
- * This is really ugly and we need a better way of specifying
- * DMA-capable regions of memory.
- */
-static inline void __arch_adjust_zones(int node, unsigned long *zone_size,
- unsigned long *zhole_size)
-{
- unsigned int sz = SZ_64M >> PAGE_SHIFT;
-
- /*
- * Only adjust if > 64M on current system
- */
- if (node || (zone_size[0] <= sz))
- return;
-
- zone_size[1] = zone_size[0] - sz;
- zone_size[0] = sz;
- zhole_size[1] = zhole_size[0];
- zhole_size[0] = 0;
-}
-
-#define arch_adjust_zones(node, size, holes) \
- __arch_adjust_zones(node, size, holes)
-
#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-ixp4xx/memory.h b/include/asm-arm/arch-ixp4xx/memory.h
index 3f6da112712e..d348548b592b 100644
--- a/include/asm-arm/arch-ixp4xx/memory.h
+++ b/include/asm-arm/arch-ixp4xx/memory.h
@@ -7,11 +7,45 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
+#include <asm/sizes.h>
+
/*
* Physical DRAM offset.
*/
#define PHYS_OFFSET (0x00000000UL)
+#ifndef __ASSEMBLY__
+
+/*
+ * Only first 64MB of memory can be accessed via PCI.
+ * We use GFP_DMA to allocate safe buffers to do map/unmap.
+ * This is really ugly and we need a better way of specifying
+ * DMA-capable regions of memory.
+ */
+static inline void __arch_adjust_zones(int node, unsigned long *zone_size,
+ unsigned long *zhole_size)
+{
+ unsigned int sz = SZ_64M >> PAGE_SHIFT;
+
+ /*
+ * Only adjust if > 64M on current system
+ */
+ if (node || (zone_size[0] <= sz))
+ return;
+
+ zone_size[1] = zone_size[0] - sz;
+ zone_size[0] = sz;
+ zhole_size[1] = zhole_size[0];
+ zhole_size[0] = 0;
+}
+
+#define arch_adjust_zones(node, size, holes) \
+ __arch_adjust_zones(node, size, holes)
+
+#define ISA_DMA_THRESHOLD (SZ_64M - 1)
+
+#endif
+
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
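
To make the zone adjustment above concrete, a worked example with assumed numbers (128 MB of RAM, 4 KB pages):

/* Assumed: 128 MB of RAM, PAGE_SHIFT == 12.
 *   sz           = SZ_64M >> PAGE_SHIFT = 16384 pages
 *   zone_size[0] = 32768 pages on entry
 * After __arch_adjust_zones():
 *   zone_size[0] = 16384  (ZONE_DMA: the PCI-reachable first 64 MB)
 *   zone_size[1] = 16384  (ZONE_NORMAL: the remainder)
 */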
diff --git a/include/asm-arm/arch-rpc/uncompress.h b/include/asm-arm/arch-rpc/uncompress.h
index 4a3036dad1f2..845db94983bd 100644
--- a/include/asm-arm/arch-rpc/uncompress.h
+++ b/include/asm-arm/arch-rpc/uncompress.h
@@ -56,7 +56,12 @@ static const unsigned long palette_4[16] = {
#define palette_setpixel(p) *(unsigned long *)(IO_START+0x00400000) = 0x10000000|((p) & 255)
#define palette_write(v) *(unsigned long *)(IO_START+0x00400000) = 0x00000000|((v) & 0x00ffffff)
-static struct param_struct *params = (struct param_struct *)PARAMS_PHYS;
+/*
+ * params_phys is a linker-defined symbol - see
+ * arch/arm/boot/compressed/Makefile
+ */
+extern struct param_struct params_phys;
+#define params (&params_phys)
#ifndef STANDALONE_DEBUG
/*
diff --git a/include/asm-arm/arch-sa1100/dma.h b/include/asm-arm/arch-sa1100/dma.h
index 8927a3d26172..3d60ed9f8c34 100644
--- a/include/asm-arm/arch-sa1100/dma.h
+++ b/include/asm-arm/arch-sa1100/dma.h
@@ -129,21 +129,4 @@ extern void sa1100_reset_dma(dma_regs_t *regs);
#define sa1100_clear_dma(regs) ((regs)->ClrDCSR = DCSR_IE|DCSR_RUN|DCSR_STRTA|DCSR_STRTB)
-
-#ifdef CONFIG_SA1111
-static inline void
-__arch_adjust_zones(int node, unsigned long *size, unsigned long *holes)
-{
- unsigned int sz = 256;
-
- if (node != 0)
- sz = 0;
-
- size[1] = size[0] - sz;
- size[0] = sz;
-}
-
-#define arch_adjust_zones(node,size,holes) __arch_adjust_zones(node,size,holes)
-#endif
-
#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-sa1100/memory.h b/include/asm-arm/arch-sa1100/memory.h
index 38794846b3a3..32d3d5bde34d 100644
--- a/include/asm-arm/arch-sa1100/memory.h
+++ b/include/asm-arm/arch-sa1100/memory.h
@@ -8,12 +8,36 @@
#define __ASM_ARCH_MEMORY_H
#include <linux/config.h>
+#include <asm/sizes.h>
/*
* Physical DRAM offset is 0xc0000000 on the SA1100
*/
#define PHYS_OFFSET (0xc0000000UL)
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SA1111
+static inline void
+__arch_adjust_zones(int node, unsigned long *size, unsigned long *holes)
+{
+ unsigned int sz = SZ_1M >> PAGE_SHIFT;
+
+ if (node != 0)
+ sz = 0;
+
+ size[1] = size[0] - sz;
+ size[0] = sz;
+}
+
+#define arch_adjust_zones(node, size, holes) \
+ __arch_adjust_zones(node, size, holes)
+
+#define ISA_DMA_THRESHOLD (PHYS_OFFSET + SZ_1M - 1)
+
+#endif
+#endif
+
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
diff --git a/include/asm-arm/arch-shark/dma.h b/include/asm-arm/arch-shark/dma.h
index 5c1562f145be..fc985d5e62af 100644
--- a/include/asm-arm/arch-shark/dma.h
+++ b/include/asm-arm/arch-shark/dma.h
@@ -14,17 +14,5 @@
#define MAX_DMA_CHANNELS 8
#define DMA_ISA_CASCADE 4
-static inline void __arch_adjust_zones(int node, unsigned long *zone_size, unsigned long *zhole_size)
-{
- if (node != 0) return;
- /* Only the first 4 MB (=1024 Pages) are usable for DMA */
- zone_size[1] = zone_size[0] - 1024;
- zone_size[0] = 1024;
- zhole_size[1] = zhole_size[0];
- zhole_size[0] = 0;
-}
-
-#define arch_adjust_zones(node,size,holes) __arch_adjust_zones(node,size,holes)
-
#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-shark/memory.h b/include/asm-arm/arch-shark/memory.h
index 7a06f07f6418..8ff956d25463 100644
--- a/include/asm-arm/arch-shark/memory.h
+++ b/include/asm-arm/arch-shark/memory.h
@@ -10,11 +10,32 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
+#include <asm/sizes.h>
+
/*
* Physical DRAM offset.
*/
#define PHYS_OFFSET (0x08000000UL)
+#ifndef __ASSEMBLY__
+
+static inline void __arch_adjust_zones(int node, unsigned long *zone_size, unsigned long *zhole_size)
+{
+ if (node != 0) return;
+ /* Only the first 4 MB (=1024 Pages) are usable for DMA */
+ zone_size[1] = zone_size[0] - 1024;
+ zone_size[0] = 1024;
+ zhole_size[1] = zhole_size[0];
+ zhole_size[0] = 0;
+}
+
+#define arch_adjust_zones(node, size, holes) \
+ __arch_adjust_zones(node, size, holes)
+
+#define ISA_DMA_THRESHOLD (PHYS_OFFSET + SZ_4M - 1)
+
+#endif
+
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
diff --git a/include/asm-arm/dma.h b/include/asm-arm/dma.h
index 0dee33c1aad8..ef41df43a584 100644
--- a/include/asm-arm/dma.h
+++ b/include/asm-arm/dma.h
@@ -6,7 +6,6 @@ typedef unsigned int dmach_t;
#include <linux/config.h>
#include <linux/spinlock.h>
#include <asm/system.h>
-#include <asm/memory.h>
#include <asm/scatterlist.h>
#include <asm/arch/dma.h>
@@ -133,8 +132,4 @@ extern int isa_dma_bridge_buggy;
#define isa_dma_bridge_buggy (0)
#endif
-#ifndef arch_adjust_zones
-#define arch_adjust_zones(node,size,holes)
-#endif
-
#endif /* _ARM_DMA_H */
diff --git a/include/asm-arm/hardware/clock.h b/include/asm-arm/hardware/clock.h
index 2fbf6078b313..9dfc0624b611 100644
--- a/include/asm-arm/hardware/clock.h
+++ b/include/asm-arm/hardware/clock.h
@@ -108,7 +108,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate);
* Returns success (0) or negative errno.
*/
int clk_set_parent(struct clk *clk, struct clk *parent);
-
+
/**
* clk_get_parent - get the parent clock source for this clock
* @clk: clock source
diff --git a/include/asm-arm/mach/arch.h b/include/asm-arm/mach/arch.h
index 37ae74796331..5c37b709fe9f 100644
--- a/include/asm-arm/mach/arch.h
+++ b/include/asm-arm/mach/arch.h
@@ -8,12 +8,6 @@
* published by the Free Software Foundation.
*/
-/*
- * The size of struct machine_desc
- * (for assembler code)
- */
-#define SIZEOF_MACHINE_DESC 52
-
#ifndef __ASSEMBLY__
struct tag;
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 91dee4da45ce..41f117fb5e9f 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -60,6 +60,20 @@
#ifndef __ASSEMBLY__
/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+#ifndef ISA_DMA_THRESHOLD
+#define ISA_DMA_THRESHOLD (0xffffffffULL)
+#endif
+
+#ifndef arch_adjust_zones
+#define arch_adjust_zones(node,size,holes) do { } while (0)
+#endif
+
+/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
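
An illustration of how the new default interacts with the per-platform overrides added earlier in this commit (values taken from the shark definitions above):

/* shark: ISA_DMA_THRESHOLD = PHYS_OFFSET + SZ_4M - 1
 *                          = 0x08000000 + 0x00400000 - 1 = 0x083fffff
 * Platforms that define nothing inherit the unrestricted default,
 * 0xffffffffULL, i.e. GFP_DMA places no extra limit there.
 */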
diff --git a/include/asm-arm/processor.h b/include/asm-arm/processor.h
index 3890d6038355..e2de28eec30a 100644
--- a/include/asm-arm/processor.h
+++ b/include/asm-arm/processor.h
@@ -25,7 +25,6 @@
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/procinfo.h>
-#include <asm/arch/memory.h>
#include <asm/types.h>
#define KERNEL_STACK_SIZE PAGE_SIZE
diff --git a/include/asm-arm/scatterlist.h b/include/asm-arm/scatterlist.h
index d9c056c7784e..83b876fb04cc 100644
--- a/include/asm-arm/scatterlist.h
+++ b/include/asm-arm/scatterlist.h
@@ -1,6 +1,7 @@
#ifndef _ASMARM_SCATTERLIST_H
#define _ASMARM_SCATTERLIST_H
+#include <asm/memory.h>
#include <asm/types.h>
struct scatterlist {
@@ -21,6 +22,4 @@ struct scatterlist {
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
-#define ISA_DMA_THRESHOLD (0xffffffff)
-
#endif /* _ASMARM_SCATTERLIST_H */
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index e17ba1cd8bc6..59f4a1ad3a49 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -56,8 +56,9 @@ extern unsigned char boot_params[PARAM_SIZE];
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
-#define DISK80_SIGNATURE (*(unsigned int*) (PARAM+DISK80_SIG_BUFFER))
#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index 1174e9774012..3fac17e98113 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -1,13 +1,11 @@
#ifndef __ASM_IA64_IOSAPIC_H
#define __ASM_IA64_IOSAPIC_H
-#define IOSAPIC_DEFAULT_ADDR 0xFEC00000
-
#define IOSAPIC_REG_SELECT 0x0
#define IOSAPIC_WINDOW 0x10
#define IOSAPIC_EOI 0x40
-#define IOSAPIC_VERSION 0x1
+#define IOSAPIC_VERSION 0x1
/*
* Redirection table entry
@@ -55,6 +53,23 @@
#define NR_IOSAPICS 256
+static inline unsigned int iosapic_read(char *iosapic, unsigned int reg)
+{
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ return readl(iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void iosapic_write(char *iosapic, unsigned int reg, u32 val)
+{
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ writel(val, iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void iosapic_eoi(char *iosapic, u32 vector)
+{
+ writel(vector, iosapic + IOSAPIC_EOI);
+}
+
extern void __init iosapic_system_init (int pcat_compat);
extern void __init iosapic_init (unsigned long address,
unsigned int gsi_base);
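
A minimal sketch of the new accessors in use; the wrapper function is hypothetical, while iosapic_read() and IOSAPIC_VERSION come from this header:

#include <asm/iosapic.h>

/* Read the IOSAPIC version register via the select/window pair. */
static unsigned int iosapic_get_version(char *iosapic)
{
	return iosapic_read(iosapic, IOSAPIC_VERSION);
}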
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index f12f939e3c00..513c704d1306 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -123,5 +123,9 @@ extern void smp_send_reschedule (int cpu);
extern void lock_ipi_calllock(void);
extern void unlock_ipi_calllock(void);
+#else
+
+#define cpu_logical_id(cpuid) 0
+
#endif /* CONFIG_SMP */
#endif /* _ASM_IA64_SMP_H */
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
index 538385a21fb7..1b643d1e0820 100644
--- a/include/asm-ia64/sn/bte.h
+++ b/include/asm-ia64/sn/bte.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -48,35 +48,31 @@
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE (0x4000)
+/* Use the BTE on the node with the destination memory */
+#define BTE_USE_DEST (BTE_WACQUIRE << 1)
+/* Use any available BTE interface on any node for the transfer */
+#define BTE_USE_ANY (BTE_USE_DEST << 1)
/* macro to force the IBCT0 value valid */
#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
-
-/*
- * Handle locking of the bte interfaces.
- *
- * All transfers spinlock the interface before setting up the SHUB
- * registers. Sync transfers hold the lock until all processing is
- * complete. Async transfers release the lock as soon as the transfer
- * is initiated.
- *
- * To determine if an interface is available, we must check both the
- * busy bit and the spinlock for that interface.
- */
-#define BTE_LOCK_IF_AVAIL(_x) (\
- (*pda->cpu_bte_if[_x]->most_rcnt_na & (IBLS_BUSY | IBLS_ERROR)) && \
- (!(spin_trylock(&(pda->cpu_bte_if[_x]->spinlock)))) \
- )
+#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
/*
* Some macros to simplify reading.
* Start with macros to locate the BTE control registers.
*/
-#define BTEREG_LNSTAT_ADDR ((u64 *)(bte->bte_base_addr))
-#define BTEREG_SRC_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_SRC))
-#define BTEREG_DEST_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_DEST))
-#define BTEREG_CTRL_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_CTRL))
-#define BTEREG_NOTIF_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_NOTIFY))
+#define BTE_LNSTAT_LOAD(_bte) \
+ HUB_L(_bte->bte_base_addr)
+#define BTE_LNSTAT_STORE(_bte, _x) \
+ HUB_S(_bte->bte_base_addr, (_x))
+#define BTE_SRC_STORE(_bte, _x) \
+ HUB_S(_bte->bte_base_addr + (BTEOFF_SRC/8), (_x))
+#define BTE_DEST_STORE(_bte, _x) \
+ HUB_S(_bte->bte_base_addr + (BTEOFF_DEST/8), (_x))
+#define BTE_CTRL_STORE(_bte, _x) \
+ HUB_S(_bte->bte_base_addr + (BTEOFF_CTRL/8), (_x))
+#define BTE_NOTIF_STORE(_bte, _x) \
+ HUB_S(_bte->bte_base_addr + (BTEOFF_NOTIFY/8), (_x))
/* Possible results from bte_copy and bte_unaligned_copy */
@@ -110,16 +106,15 @@ typedef enum {
* to work with a BTE.
*/
struct bteinfo_s {
- u64 volatile notify ____cacheline_aligned;
- char *bte_base_addr ____cacheline_aligned;
+ volatile u64 notify ____cacheline_aligned;
+ u64 *bte_base_addr ____cacheline_aligned;
spinlock_t spinlock;
cnodeid_t bte_cnode; /* cnode */
int bte_error_count; /* Number of errors encountered */
int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
int cleanup_active; /* Interface is locked for cleanup */
volatile bte_result_t bh_error; /* error while processing */
- u64 volatile *most_rcnt_na;
- void *scratch_buf; /* Node local scratch buffer */
+ volatile u64 *most_rcnt_na;
};
@@ -130,6 +125,8 @@ extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
extern void bte_error_handler(unsigned long);
+#define bte_zero(dest, len, mode, notification) \
+ bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
/*
* The following is the preferred way of calling bte_unaligned_copy
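
A hedged sketch of the new bte_zero() helper in use; the wrapper is hypothetical, and passing NULL as the notification is assumed to let bte_copy() supply its own notify word:

#include <asm/sn/bte.h>

/* Zero a destination region via the BTE, blocking until done. */
static bte_result_t zero_region(u64 dest_phys, u64 len)
{
	/* BTE_WACQUIRE: wait until a BTE interface becomes free. */
	return bte_zero(dest_phys, len, BTE_WACQUIRE, NULL);
}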
diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
index 20e9b5775435..fa472c3f983e 100644
--- a/include/asm-ia64/sn/pda.h
+++ b/include/asm-ia64/sn/pda.h
@@ -49,8 +49,6 @@ typedef struct pda_s {
volatile unsigned long *pio_shub_war_cam_addr;
volatile unsigned long *mem_write_status_addr;
- struct bteinfo_s *cpu_bte_if[BTES_PER_NODE]; /* cpu interface order */
-
unsigned long sn_soft_irr[4];
unsigned long sn_in_service_ivecs[4];
short cnodeid_to_nasid_table[MAX_NUMNODES];
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h
index 0c7cce652368..6f1128f1e895 100644
--- a/include/asm-ia64/sn/sn_cpuid.h
+++ b/include/asm-ia64/sn/sn_cpuid.h
@@ -84,7 +84,6 @@
*/
#ifndef CONFIG_SMP
-#define cpu_logical_id(cpu) 0
#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#endif
diff --git a/include/asm-ppc64/iSeries/HvTypes.h b/include/asm-ppc64/iSeries/HvTypes.h
index bdaaa2c5dd59..3ec49c1aec32 100644
--- a/include/asm-ppc64/iSeries/HvTypes.h
+++ b/include/asm-ppc64/iSeries/HvTypes.h
@@ -65,6 +65,10 @@ typedef u8 HvAgentId; // Hypervisor DevFn
#define HVMAXARCHITECTEDLPS 32
+#define HVMAXARCHITECTEDVIRTUALLANS 16
+#define HVMAXARCHITECTEDVIRTUALDISKS 32
+#define HVMAXARCHITECTEDVIRTUALCDROMS 8
+#define HVMAXARCHITECTEDVIRTUALTAPES 8
#define HVCHUNKSIZE 256 * 1024
#define HVPAGESIZE 4 * 1024
#define HVLPMINMEGSPRIMARY 256
diff --git a/include/asm-ppc64/vio.h b/include/asm-ppc64/vio.h
index 36fc9f9d1f1c..3fd465c9263d 100644
--- a/include/asm-ppc64/vio.h
+++ b/include/asm-ppc64/vio.h
@@ -14,6 +14,7 @@
#ifndef _ASM_VIO_H
#define _ASM_VIO_H
+#include <linux/config.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
@@ -44,7 +45,10 @@ struct iommu_table;
int vio_register_driver(struct vio_driver *drv);
void vio_unregister_driver(struct vio_driver *drv);
-struct vio_dev * __devinit vio_register_device(struct device_node *node_vdev);
+#ifdef CONFIG_PPC_PSERIES
+struct vio_dev * __devinit vio_register_device_node(
+ struct device_node *node_vdev);
+#endif
void __devinit vio_unregister_device(struct vio_dev *dev);
struct vio_dev *vio_find_node(struct device_node *vnode);
@@ -108,6 +112,8 @@ static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
*/
struct vio_dev {
struct iommu_table *iommu_table; /* vio_map_* uses this */
+ char *name;
+ char *type;
uint32_t unit_address;
unsigned int irq;
diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h
new file mode 100644
index 000000000000..e1995e86b663
--- /dev/null
+++ b/include/asm-sh64/a.out.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_SH64_A_OUT_H
+#define __ASM_SH64_A_OUT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/a.out.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+struct exec
+{
+ unsigned long a_info; /* Use macros N_MAGIC, etc for access */
+ unsigned a_text; /* length of text, in bytes */
+ unsigned a_data; /* length of data, in bytes */
+ unsigned a_bss; /* length of uninitialized data area for file, in bytes */
+ unsigned a_syms; /* length of symbol table data in file, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* length of relocation info for text, in bytes */
+ unsigned a_drsize; /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
+#ifdef __KERNEL__
+
+#define STACK_TOP TASK_SIZE
+
+#endif
+
+#endif /* __ASM_SH64_A_OUT_H */
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
new file mode 100644
index 000000000000..8c3872d3e65f
--- /dev/null
+++ b/include/asm-sh64/atomic.h
@@ -0,0 +1,126 @@
+#ifndef __ASM_SH64_ATOMIC_H
+#define __ASM_SH64_ATOMIC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/atomic.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) ((v)->counter = (i))
+
+#include <asm/system.h>
+
+/*
+ * These operations are not implemented with atomic instructions:
+ * they disable interrupts around plain memory accesses, which is
+ * sufficient on this uniprocessor port.
+ */
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v += i;
+ local_irq_restore(flags);
+}
+
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v -= i;
+ local_irq_restore(flags);
+}
+
+static __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp += i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+static __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp -= i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec(v) atomic_sub(1,(v))
+
+static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v &= ~mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v |= mask;
+ local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on SH */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#endif /* __ASM_SH64_ATOMIC_H */
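
A usage sketch (hypothetical caller) of the API above; note that everything reduces to plain loads and stores under local_irq_save(), so this is interrupt-safe on the uniprocessor sh64 port but not SMP-safe:

#include <asm/atomic.h>

static atomic_t refcount = ATOMIC_INIT(1);

static void take_ref(void)
{
	atomic_inc(&refcount);
}

static int drop_ref(void)	/* returns nonzero when the count hits zero */
{
	return atomic_dec_and_test(&refcount);
}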
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
new file mode 100644
index 000000000000..86420c9d94ad
--- /dev/null
+++ b/include/asm-sh64/bitops.h
@@ -0,0 +1,518 @@
+#ifndef __ASM_SH64_BITOPS_H
+#define __ASM_SH64_BITOPS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/bitops.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ */
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/system.h>
+/* For __swab32 */
+#include <asm/byteorder.h>
+
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ int mask;
+ volatile unsigned int *a = addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a |= mask;
+ local_irq_restore(flags);
+}
+
+static inline void __set_bit(int nr, void *addr)
+{
+ int mask;
+ unsigned int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a |= mask;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+static __inline__ void clear_bit(int nr, void * addr)
+{
+ int mask;
+ unsigned int *a = addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a &= ~mask;
+ local_irq_restore(flags);
+}
+
+static inline void __clear_bit(int nr, void *addr)
+{
+ int mask;
+ unsigned int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a &= ~mask;
+}
+
+static __inline__ void change_bit(int nr, volatile void * addr)
+{
+ int mask;
+ volatile unsigned int *a = addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a ^= mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __change_bit(int nr, volatile void * addr)
+{
+ int mask;
+ volatile unsigned int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a ^= mask;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+
+ return retval;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+
+ return retval;
+}
+
+static __inline__ int test_bit(int nr, const volatile void *addr)
+{
+ return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
+}
+
+static __inline__ unsigned long ffz(unsigned long word)
+{
+ unsigned long result, __d2, __d3;
+
+ __asm__("gettr tr0, %2\n\t"
+ "pta $+32, tr0\n\t"
+ "andi %1, 1, %3\n\t"
+ "beq %3, r63, tr0\n\t"
+ "pta $+4, tr0\n"
+ "0:\n\t"
+ "shlri.l %1, 1, %1\n\t"
+ "addi %0, 1, %0\n\t"
+ "andi %1, 1, %3\n\t"
+ "beqi %3, 1, tr0\n"
+ "1:\n\t"
+ "ptabs %2, tr0\n\t"
+ : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
+ : "0" (0L), "1" (word));
+
+ return result;
+}
+
+/**
+ * __ffs - find first bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ int r = 0;
+
+ if (!word)
+ return 0;
+ if (!(word & 0xffff)) {
+ word >>= 16;
+ r += 16;
+ }
+ if (!(word & 0xff)) {
+ word >>= 8;
+ r += 8;
+ }
+ if (!(word & 0xf)) {
+ word >>= 4;
+ r += 4;
+ }
+ if (!(word & 3)) {
+ word >>= 2;
+ r += 2;
+ }
+ if (!(word & 1)) {
+ word >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static inline unsigned long find_next_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+ unsigned int result = offset & ~31UL;
+ unsigned int tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *p++;
+ tmp &= ~0UL << offset;
+ if (size < 32)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size >= 32) {
+ if ((tmp = *p++) != 0)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= ~0UL >> (32 - size);
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
+
+static inline int find_next_zero_bit(void *addr, int size, int offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (32-offset);
+ if (size < 32)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size & ~31UL) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+found_middle:
+ return result + ffz(tmp);
+}
+
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is set.
+ */
+
+static inline int sched_find_first_bit(unsigned long *b)
+{
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+#define ffs(x) generic_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#ifdef __LITTLE_ENDIAN__
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
+#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
+#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
+#define ext2_find_next_zero_bit(addr, size, offset) \
+ find_next_zero_bit((addr), (size), (offset))
+#else
+static __inline__ int ext2_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ unsigned long flags;
+ volatile unsigned char *ADDR = (unsigned char *) addr;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ local_irq_save(flags);
+ retval = (mask & *ADDR) != 0;
+ *ADDR |= mask;
+ local_irq_restore(flags);
+ return retval;
+}
+
+static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ unsigned long flags;
+ volatile unsigned char *ADDR = (unsigned char *) addr;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ local_irq_save(flags);
+ retval = (mask & *ADDR) != 0;
+ *ADDR &= ~mask;
+ local_irq_restore(flags);
+ return retval;
+}
+
+static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
+{
+ int mask;
+ const volatile unsigned char *ADDR = (const unsigned char *) addr;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ return ((mask & *ADDR) != 0);
+}
+
+#define ext2_find_first_zero_bit(addr, size) \
+ ext2_find_next_zero_bit((addr), (size), 0)
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if(offset) {
+ /* We hold the little endian value in tmp, but then the
+ * shift is illegal. So we could keep a big endian value
+ * in tmp, like this:
+ *
+ * tmp = __swab32(*(p++));
+ * tmp |= ~0UL >> (32-offset);
+ *
+ * but this would decrease performance, so we change the
+ * shift:
+ */
+ tmp = *(p++);
+ tmp |= __swab32(~0UL >> (32-offset));
+ if(size < 32)
+ goto found_first;
+ if(~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while(size & ~31UL) {
+ if(~(tmp = *(p++)))
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if(!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ /* tmp is little endian, so we would have to swab the shift,
+ * see above. But then we have to swab tmp below for ffz, so
+ * we might as well do this here.
+ */
+ return result + ffz(__swab32(tmp) | (~0UL << size));
+found_middle:
+ return result + ffz(__swab32(tmp));
+}
+#endif
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_set_bit((nr), (addr)); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_clear_bit((nr), (addr)); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+/* Bitmap functions for the minix filesystem. */
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#define ffs(x) generic_ffs(x)
+#define fls(x) generic_fls(x)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_BITOPS_H */
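
A small sketch of the bit-searching interface defined above; the walker function and message are hypothetical:

#include <linux/kernel.h>	/* printk() */
#include <asm/bitops.h>

static void walk_set_bits(unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		printk("bit %lu set\n", bit);
}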
diff --git a/include/asm-sh64/bug.h b/include/asm-sh64/bug.h
new file mode 100644
index 000000000000..9a81b7232f14
--- /dev/null
+++ b/include/asm-sh64/bug.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_BUG_H
+#define __ASM_SH64_BUG_H
+
+#include <asm-sh/bug.h>
+
+#endif /* __ASM_SH64_BUG_H */
+
diff --git a/include/asm-sh64/bugs.h b/include/asm-sh64/bugs.h
new file mode 100644
index 000000000000..05554aaea672
--- /dev/null
+++ b/include/asm-sh64/bugs.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_SH64_BUGS_H
+#define __ASM_SH64_BUGS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/bugs.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+/*
+ * I don't know of any Super-H bugs yet.
+ */
+
+#include <asm/processor.h>
+
+static void __init check_bugs(void)
+{
+ extern char *get_cpu_subtype(void);
+ extern unsigned long loops_per_jiffy;
+
+ cpu_data->loops_per_jiffy = loops_per_jiffy;
+
+ printk("CPU: %s\n", get_cpu_subtype());
+}
+#endif /* __ASM_SH64_BUGS_H */
diff --git a/include/asm-sh64/byteorder.h b/include/asm-sh64/byteorder.h
new file mode 100644
index 000000000000..f602ebe334eb
--- /dev/null
+++ b/include/asm-sh64/byteorder.h
@@ -0,0 +1,49 @@
+#ifndef __ASM_SH64_BYTEORDER_H
+#define __ASM_SH64_BYTEORDER_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/byteorder.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <asm/types.h>
+
+static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
+{
+ __asm__("byterev %0, %0\n\t"
+ "shari %0, 32, %0"
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+
+static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
+{
+ __asm__("byterev %0, %0\n\t"
+ "shari %0, 48, %0"
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+# define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
+#include <linux/byteorder/big_endian.h>
+#endif
+
+#endif /* __ASM_SH64_BYTEORDER_H */
diff --git a/include/asm-sh64/cache.h b/include/asm-sh64/cache.h
new file mode 100644
index 000000000000..f54e85e8a470
--- /dev/null
+++ b/include/asm-sh64/cache.h
@@ -0,0 +1,141 @@
+#ifndef __ASM_SH64_CACHE_H
+#define __ASM_SH64_CACHE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/cache.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ */
+#include <asm/cacheflush.h>
+
+#define L1_CACHE_SHIFT 5
+/* bytes per L1 cache line */
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1))
+#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK)
+#define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10)
+/* Largest L1 which this arch supports */
+#define L1_CACHE_SHIFT_MAX 5
+
+#ifdef MODULE
+#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
+#else
+#define __cacheline_aligned \
+ __attribute__((__aligned__(L1_CACHE_BYTES), \
+ __section__(".data.cacheline_aligned")))
+#endif
+
+/*
+ * Control Registers.
+ */
+#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
+#define ICCR_REG0 0 /* Register 0 offset */
+#define ICCR_REG1 1 /* Register 1 offset */
+#define ICCR0 ICCR_BASE+ICCR_REG0
+#define ICCR1 ICCR_BASE+ICCR_REG1
+
+#define ICCR0_OFF 0x0 /* Set ICACHE off */
+#define ICCR0_ON 0x1 /* Set ICACHE on */
+#define ICCR0_ICI 0x2 /* Invalidate all in IC */
+
+#define ICCR1_NOLOCK 0x0 /* Set No Locking */
+
+#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
+#define OCCR_REG0 0 /* Register 0 offset */
+#define OCCR_REG1 1 /* Register 1 offset */
+#define OCCR0 OCCR_BASE+OCCR_REG0
+#define OCCR1 OCCR_BASE+OCCR_REG1
+
+#define OCCR0_OFF 0x0 /* Set OCACHE off */
+#define OCCR0_ON 0x1 /* Set OCACHE on */
+#define OCCR0_OCI 0x2 /* Invalidate all in OC */
+#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
+#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
+
+#define OCCR1_NOLOCK 0x0 /* Set No Locking */
+
+
+/*
+ * SH-5
+ * A bit of description here, for neff=32.
+ *
+ * |<--- tag (19 bits) --->|
+ * +-----------------------------+-----------------+------+----------+------+
+ * | | | ways |set index |offset|
+ * +-----------------------------+-----------------+------+----------+------+
+ * ^ 2 bits 8 bits 5 bits
+ * +- Bit 31
+ *
+ * Cacheline size is based on offset: 5 bits = 32 bytes per line
+ * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
+ * have a broader space for registers. These are outlined by
+ * CACHE_?C_*_STEP below.
+ *
+ */
+
+/* Valid and Dirty bits */
+#define SH_CACHE_VALID (1LL<<0)
+#define SH_CACHE_UPDATED (1LL<<57)
+
+/* Cache flags */
+#define SH_CACHE_MODE_WT (1LL<<0)
+#define SH_CACHE_MODE_WB (1LL<<1)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Cache information structure.
+ *
+ * Defined for both I and D cache, per-processor.
+ */
+struct cache_info {
+ unsigned int ways;
+ unsigned int sets;
+ unsigned int linesz;
+
+ unsigned int way_shift;
+ unsigned int entry_shift;
+ unsigned int set_shift;
+ unsigned int way_step_shift;
+ unsigned int asid_shift;
+
+ unsigned int way_ofs;
+
+ unsigned int asid_mask;
+ unsigned int idx_mask;
+ unsigned int epn_mask;
+
+ unsigned long flags;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/* Instruction cache */
+#define CACHE_IC_ADDRESS_ARRAY 0x01000000
+
+/* Operand Cache */
+#define CACHE_OC_ADDRESS_ARRAY 0x01800000
+
+/* These declarations relate to cache 'synonyms' in the operand cache. A
+ 'synonym' occurs where effective address bits overlap between those used for
+ indexing the cache sets and those passed to the MMU for translation. In the
+ case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
+
+#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
+#define CACHE_OC_SYN_SHIFT 12
+/* Mask to select synonym bit(s) */
+#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
+
+
+/*
+ * Instruction cache can't be invalidated based on physical addresses.
+ * No Instruction Cache defines required, then.
+ */
+
+#endif /* __ASM_SH64_CACHE_H */
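
A quick check of the geometry encoded in the layout comment above (neff=32 case):

/* offset    5 bits -> 32-byte lines
 * set index 8 bits -> 256 sets
 * ways      2 bits -> 4 ways
 * capacity: 4 ways * 256 sets * 32 bytes = 32 KB
 * One synonym bit (bit 12) under 4 KB pages means a physical line
 * may live at either of two operand-cache indices.
 */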
diff --git a/include/asm-sh64/cacheflush.h b/include/asm-sh64/cacheflush.h
new file mode 100644
index 000000000000..6dcc87213976
--- /dev/null
+++ b/include/asm-sh64/cacheflush.h
@@ -0,0 +1,44 @@
+#ifndef __ASM_SH64_CACHEFLUSH_H
+#define __ASM_SH64_CACHEFLUSH_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+struct vm_area_struct;
+struct page;
+struct mm_struct;
+
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_dcache_page(struct page *pg);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr,
+ int len);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+#define flush_icache_page(vma, page) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_CACHEFLUSH_H */
+
diff --git a/include/asm-sh64/cayman.h b/include/asm-sh64/cayman.h
new file mode 100644
index 000000000000..7b6b96844842
--- /dev/null
+++ b/include/asm-sh64/cayman.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/cayman.h
+ *
+ * Cayman definitions
+ *
+ * Global definitions for the SH5 Cayman board
+ *
+ * Copyright (C) 2002 Stuart Menefy
+ */
+
+
+/* Setup for the SMSC FDC37C935 / LAN91C100FD */
+#define SMSC_IRQ IRQ_IRL1
+
+/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
+#define PCI2_IRQ IRQ_IRL3
diff --git a/include/asm-sh64/checksum.h b/include/asm-sh64/checksum.h
new file mode 100644
index 000000000000..aa3911a99490
--- /dev/null
+++ b/include/asm-sh64/checksum.h
@@ -0,0 +1,95 @@
+#ifndef __ASM_SH64_CHECKSUM_H
+#define __ASM_SH64_CHECKSUM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/checksum.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <asm/registers.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage unsigned int csum_partial(const unsigned char *buff, int len,
+ unsigned int sum);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * verify_area().
+ */
+
+
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
+ unsigned int sum);
+
+unsigned int csum_partial_copy_from_user(const char *src, char *dst,
+ int len, int sum, int *err_ptr);
+
+/*
+ * These are the old (and unsafe) way of doing checksums; a warning message will be
+ * printed if they are used and an exception occurs.
+ *
+ * these functions should go away after some time.
+ */
+
+#define csum_partial_copy_fromuser csum_partial_copy
+
+unsigned int csum_partial_copy(const char *src, char *dst, int len,
+ unsigned int sum);
+
+static inline unsigned short csum_fold(unsigned int sum)
+{
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return ~(sum);
+}
+
+unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+
+unsigned long csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+ unsigned short len, unsigned short proto,
+ unsigned int sum);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#endif /* __ASM_SH64_CHECKSUM_H */
+
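
A worked example of csum_fold() above, with an assumed input value:

/* sum = 0x0002ffff
 * pass 1: (sum & 0xffff) + (sum >> 16) = 0xffff + 0x0002 = 0x00010001
 * pass 2: 0x0001 + 0x0001 = 0x00000002
 * return ~sum, truncated to 16 bits   = 0xfffd
 */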
diff --git a/include/asm-sh64/cpumask.h b/include/asm-sh64/cpumask.h
new file mode 100644
index 000000000000..b7b105dbedaf
--- /dev/null
+++ b/include/asm-sh64/cpumask.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_CPUMASK_H
+#define __ASM_SH64_CPUMASK_H
+
+#include <asm-generic/cpumask.h>
+
+#endif /* __ASM_SH64_CPUMASK_H */
diff --git a/include/asm-sh64/current.h b/include/asm-sh64/current.h
new file mode 100644
index 000000000000..261224339d6f
--- /dev/null
+++ b/include/asm-sh64/current.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_SH64_CURRENT_H
+#define __ASM_SH64_CURRENT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/current.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static __inline__ struct task_struct * get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#endif /* __ASM_SH64_CURRENT_H */
+
diff --git a/include/asm-sh64/delay.h b/include/asm-sh64/delay.h
new file mode 100644
index 000000000000..6ae31301a16a
--- /dev/null
+++ b/include/asm-sh64/delay.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_SH64_DELAY_H
+#define __ASM_SH64_DELAY_H
+
+extern void __delay(int loops);
+extern void __udelay(unsigned long long usecs, unsigned long lpj);
+extern void __ndelay(unsigned long long nsecs, unsigned long lpj);
+extern void udelay(unsigned long usecs);
+extern void ndelay(unsigned long nsecs);
+
+#endif /* __ASM_SH64_DELAY_H */
+
diff --git a/include/asm-sh64/div64.h b/include/asm-sh64/div64.h
new file mode 100644
index 000000000000..f75869565e2e
--- /dev/null
+++ b/include/asm-sh64/div64.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_DIV64_H
+#define __ASM_SH64_DIV64_H
+
+#include <asm-generic/div64.h>
+
+#endif /* __ASM_SH64_DIV64_H */
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
new file mode 100644
index 000000000000..3a6424986b4b
--- /dev/null
+++ b/include/asm-sh64/dma-mapping.h
@@ -0,0 +1,163 @@
+#ifndef __ASM_SH_DMA_MAPPING_H
+#define __ASM_SH_DMA_MAPPING_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+struct pci_dev;
+extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+extern void consistent_free(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+#define dma_supported(dev, mask) (1)
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag)
+{
+ return consistent_alloc(NULL, size, dma_handle);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ consistent_free(NULL, size, vaddr, dma_handle);
+}
+
+static inline void dma_cache_sync(void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_cache_wback_inv((unsigned long)vaddr, size);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev,
+ void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+ if (dev->bus == &pci_bus_type)
+ return virt_to_bus(ptr);
+#endif
+ dma_cache_sync(ptr, size, dir);
+
+ return virt_to_bus(ptr);
+}
+
+#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
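+
+/*
+ * Minimal usage sketch (illustrative, with hypothetical "dev", "buf" and
+ * "len"): map the buffer before the transfer and unmap it afterwards.
+ *
+ *	dma_addr_t handle;
+ *
+ *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *	... point the hardware at "handle" and wait for completion ...
+ *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
+ */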
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nents; i++) {
+#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+ dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ sg[i].length, dir);
+#endif
+ sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+ }
+
+ return nents;
+}
+
+#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_map_single(dev, page_address(page) + offset, size, dir);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_unmap_single(dev, dma_address, size, dir);
+}
+
+static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+ if (dev->bus == &pci_bus_type)
+ return;
+#endif
+ dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+}
+
+static inline void dma_sync_single_range(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+ if (dev->bus == &pci_bus_type)
+ return;
+#endif
+ dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+}
+
+static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++) {
+#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
+ dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ sg[i].length, dir);
+#endif
+ sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+ }
+}
+
+/*
+ * The *_for_cpu and *_for_device variants simply forward to the common
+ * helpers above (the "alias" attribute cannot be applied to static
+ * inline functions).
+ */
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction dir)
+{
+	dma_sync_single(dev, dma_handle, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle, size_t size,
+					      enum dma_data_direction dir)
+{
+	dma_sync_single(dev, dma_handle, size, dir);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction dir)
+{
+	dma_sync_sg(dev, sg, nelems, dir);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction dir)
+{
+	dma_sync_sg(dev, sg, nelems, dir);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+ /*
+	 * Each processor family defines its own L1_CACHE_SHIFT, and
+	 * L1_CACHE_BYTES is derived from it, so this is always safe.
+ */
+ return L1_CACHE_BYTES;
+}
+
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+
+#endif /* __ASM_SH64_DMA_MAPPING_H */
+
diff --git a/include/asm-sh64/dma.h b/include/asm-sh64/dma.h
new file mode 100644
index 000000000000..e701f39470a2
--- /dev/null
+++ b/include/asm-sh64/dma.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_SH64_DMA_H
+#define __ASM_SH64_DMA_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/dma.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#define MAX_DMA_CHANNELS 4
+
+/*
+ * SH5 can DMA in any memory area.
+ *
+ * This static definition is dodgy because it should limit
+ * the highest DMA-able address based on the physical memory
+ * actually available. That limiting is in fact performed at
+ * run time, when the memory allowed into ZONE_DMA is defined.
+ */
+#define MAX_DMA_ADDRESS ~(NPHYS_MASK)
+
+#define DMA_MODE_READ 0
+#define DMA_MODE_WRITE 1
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __ASM_SH64_DMA_H */
diff --git a/include/asm-sh64/elf.h b/include/asm-sh64/elf.h
new file mode 100644
index 000000000000..bc483669d2f3
--- /dev/null
+++ b/include/asm-sh64/elf.h
@@ -0,0 +1,101 @@
+#ifndef __ASM_SH64_ELF_H
+#define __ASM_SH64_ELF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/elf.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/byteorder.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_fpu_struct elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ( (x)->e_machine == EM_SH )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#ifdef __LITTLE_ENDIAN__
+#define ELF_DATA ELFDATA2LSB
+#else
+#define ELF_DATA ELFDATA2MSB
+#endif
+#define ELF_ARCH EM_SH
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+
+#define ELF_CORE_COPY_REGS(_dest,_regs) \
+ memcpy((char *) &_dest, (char *) _regs, \
+ sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment, we have only optimizations for the Intel generations,
+ but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
+ _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
+ _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
+ _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
+ _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
+ _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
+ _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
+ _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
+ _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
+ _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
+ _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
+ _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
+ _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
+ _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
+ _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
+ _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
+ _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
+ _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
+ _r->sr = SR_FD | SR_MMU; } while (0)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
+#endif
+
+#endif /* __ASM_SH64_ELF_H */
diff --git a/include/asm-sh64/errno.h b/include/asm-sh64/errno.h
new file mode 100644
index 000000000000..57b46d4bdd41
--- /dev/null
+++ b/include/asm-sh64/errno.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_ERRNO_H
+#define __ASM_SH64_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif /* __ASM_SH64_ERRNO_H */
diff --git a/include/asm-sh64/fcntl.h b/include/asm-sh64/fcntl.h
new file mode 100644
index 000000000000..ffcc36c64fa5
--- /dev/null
+++ b/include/asm-sh64/fcntl.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_FCNTL_H
+#define __ASM_SH64_FCNTL_H
+
+#include <asm-sh/fcntl.h>
+
+#endif /* __ASM_SH64_FCNTL_H */
+
diff --git a/include/asm-sh64/hardirq.h b/include/asm-sh64/hardirq.h
new file mode 100644
index 000000000000..75bb083e65f5
--- /dev/null
+++ b/include/asm-sh64/hardirq.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_HARDIRQ_H
+#define __ASM_SH64_HARDIRQ_H
+
+#include <asm-sh/hardirq.h>
+
+#endif /* __ASM_SH64_HARDIRQ_H */
+
diff --git a/include/asm-sh64/hardware.h b/include/asm-sh64/hardware.h
new file mode 100644
index 000000000000..a2e6112621d1
--- /dev/null
+++ b/include/asm-sh64/hardware.h
@@ -0,0 +1,45 @@
+#ifndef __ASM_SH64_HARDWARE_H
+#define __ASM_SH64_HARDWARE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/hardware.h
+ *
+ * Copyright (C) 2002 Stuart Menefy
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * Definitions of the locations of registers in the physical address space.
+ */
+
+#define PHYS_PERIPHERAL_BLOCK 0x09000000
+#define PHYS_DMAC_BLOCK 0x0e000000
+#define PHYS_PCI_BLOCK 0x60000000
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <asm/io.h>
+
+struct vcr_info {
+ u8 perr_flags; /* P-port Error flags */
+ u8 merr_flags; /* Module Error flags */
+ u16 mod_vers; /* Module Version */
+ u16 mod_id; /* Module ID */
+ u8 bot_mb; /* Bottom Memory block */
+ u8 top_mb; /* Top Memory block */
+};
+
+static inline struct vcr_info sh64_get_vcr_info(unsigned long base)
+{
+ unsigned long long tmp;
+
+ tmp = sh64_in64(base);
+
+ return *((struct vcr_info *)&tmp);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_HARDWARE_H */
diff --git a/include/asm-sh64/hdreg.h b/include/asm-sh64/hdreg.h
new file mode 100644
index 000000000000..52d983635a27
--- /dev/null
+++ b/include/asm-sh64/hdreg.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_HDREG_H
+#define __ASM_SH64_HDREG_H
+
+#include <asm-generic/hdreg.h>
+
+#endif /* __ASM_SH64_HDREG_H */
diff --git a/include/asm-sh64/hw_irq.h b/include/asm-sh64/hw_irq.h
new file mode 100644
index 000000000000..ae718d1f2d6c
--- /dev/null
+++ b/include/asm-sh64/hw_irq.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_SH64_HW_IRQ_H
+#define __ASM_SH64_HW_IRQ_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/hw_irq.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+static __inline__ void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { /* Nothing to do */ }
+
+#endif /* __ASM_SH64_HW_IRQ_H */
diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h
new file mode 100644
index 000000000000..900315ac4054
--- /dev/null
+++ b/include/asm-sh64/ide.h
@@ -0,0 +1,30 @@
+/*
+ * linux/include/asm-sh64/ide.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * sh64 version by Richard Curnow & Paul Mundt
+ */
+
+/*
+ * This file contains the sh64 architecture specific IDE code.
+ */
+
+#ifndef __ASM_SH64_IDE_H
+#define __ASM_SH64_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
+#endif
+
+#define ide_default_io_ctl(base) (0)
+
+#include <asm-generic/ide_iops.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_IDE_H */
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
new file mode 100644
index 000000000000..8e99f5ba3c11
--- /dev/null
+++ b/include/asm-sh64/io.h
@@ -0,0 +1,217 @@
+#ifndef __ASM_SH64_IO_H
+#define __ASM_SH64_IO_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/io.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+
+/*
+ * Convention:
+ * read{b,w,l}/write{b,w,l} are for PCI,
+ * while in{b,w,l}/out{b,w,l} are for ISA
+ * These may (and on some platforms will) be platform-specific functions.
+ *
+ * In addition, we have
+ *     ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O,
+ * which are processor-specific. The address should be the result of
+ * onchip_remap().
+ */
+
+#include <asm/cache.h>
+#include <asm/system.h>
+#include <asm/page.h>
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define page_to_bus page_to_phys
+
+/*
+ * Nothing overly special here.. instead of doing the same thing
+ * over and over again, we just define a set of sh64_in/out functions
+ * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
+ * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
+ */
+static inline unsigned char sh64_in8(unsigned long addr)
+{
+ return *(volatile unsigned char *)addr;
+}
+
+static inline unsigned short sh64_in16(unsigned long addr)
+{
+ return *(volatile unsigned short *)addr;
+}
+
+static inline unsigned long sh64_in32(unsigned long addr)
+{
+ return *(volatile unsigned long *)addr;
+}
+
+static inline unsigned long long sh64_in64(unsigned long addr)
+{
+ return *(volatile unsigned long long *)addr;
+}
+
+static inline void sh64_out8(unsigned char b, unsigned long addr)
+{
+ *(volatile unsigned char *)addr = b;
+ wmb();
+}
+
+static inline void sh64_out16(unsigned short b, unsigned long addr)
+{
+ *(volatile unsigned short *)addr = b;
+ wmb();
+}
+
+static inline void sh64_out32(unsigned long b, unsigned long addr)
+{
+ *(volatile unsigned long *)addr = b;
+ wmb();
+}
+
+static inline void sh64_out64(unsigned long long b, unsigned long addr)
+{
+ *(volatile unsigned long long *)addr = b;
+ wmb();
+}
+
+#define readb(addr) sh64_in8(addr)
+#define readw(addr) sh64_in16(addr)
+#define readl(addr) sh64_in32(addr)
+
+#define writeb(b, addr) sh64_out8(b, addr)
+#define writew(b, addr) sh64_out16(b, addr)
+#define writel(b, addr) sh64_out32(b, addr)
+
+#define ctrl_inb(addr) sh64_in8(addr)
+#define ctrl_inw(addr) sh64_in16(addr)
+#define ctrl_inl(addr) sh64_in32(addr)
+
+#define ctrl_outb(b, addr) sh64_out8(b, addr)
+#define ctrl_outw(b, addr) sh64_out16(b, addr)
+#define ctrl_outl(b, addr) sh64_out32(b, addr)
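+
+/*
+ * Usage sketch (illustrative; the offsets are hypothetical): on-chip
+ * registers are remapped first, then accessed through ctrl_in/ctrl_out:
+ *
+ *	unsigned long base = onchip_remap(PHYS_PERIPHERAL_BLOCK, 1024, "demo");
+ *	unsigned char status = ctrl_inb(base + 0x04);
+ *	ctrl_outb(0x01, base + 0x00);
+ */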
+
+unsigned long inb(unsigned long port);
+unsigned long inw(unsigned long port);
+unsigned long inl(unsigned long port);
+void outb(unsigned long value, unsigned long port);
+void outw(unsigned long value, unsigned long port);
+void outl(unsigned long value, unsigned long port);
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SH_CAYMAN
+extern unsigned long smsc_superio_virt;
+#endif
+#ifdef CONFIG_PCI
+extern unsigned long pciio_virt;
+#endif
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ * These are trivial on the 1:1 Linux/SuperH mapping.
+ */
+extern __inline__ unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+extern __inline__ void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+extern void * __ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long flags);
+
+extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size)
+{
+ return __ioremap(phys_addr, size, 1);
+}
+
+extern __inline__ void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ return __ioremap(phys_addr, size, 0);
+}
+
+extern void iounmap(void *addr);
+
+unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
+extern void onchip_unmap(unsigned long vaddr);
+
+static __inline__ int check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/*
+ * The caches on some architectures aren't dma-coherent and need to
+ * handle this in software. There are three types of operations that
+ * can be applied to dma buffers.
+ *
+ * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
+ *   writing the content of the caches back to memory, if necessary.
+ *   The function also invalidates the affected part of the caches as
+ *   necessary before DMA transfers from outside to memory.
+ * - dma_cache_inv(start, size) invalidates the affected parts of the
+ *   caches. Dirty lines of the caches may be written back or simply
+ *   be discarded. This operation is necessary before dma operations
+ *   from the device to memory.
+ * - dma_cache_wback(start, size) writes back any dirty lines but does
+ *   not invalidate the cache. This can be used before DMA reads from
+ *   memory. (See the ordering sketch after the helpers below.)
+ */
+
+static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
+{
+ unsigned long s = start & L1_CACHE_ALIGN_MASK;
+ unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+ for (; s <= e; s += L1_CACHE_BYTES)
+ asm volatile ("ocbp %0, 0" : : "r" (s));
+}
+
+static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
+{
+	/*
+	 * Note that the caller has to be careful with overzealous
+	 * invalidation should there be partial cache lines at the
+	 * extremities of the specified range.
+	 */
+ unsigned long s = start & L1_CACHE_ALIGN_MASK;
+ unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+ for (; s <= e; s += L1_CACHE_BYTES)
+ asm volatile ("ocbi %0, 0" : : "r" (s));
+}
+
+static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
+{
+ unsigned long s = start & L1_CACHE_ALIGN_MASK;
+ unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
+
+ for (; s <= e; s += L1_CACHE_BYTES)
+ asm volatile ("ocbwb %0, 0" : : "r" (s));
+}
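+
+/*
+ * Ordering sketch (illustrative), for a buffer at "start" of "size" bytes:
+ *
+ *	dma_cache_wback(start, size);	before the device reads the buffer
+ *	dma_cache_inv(start, size);	before the CPU reads data the device
+ *					has written into the buffer
+ */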
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH64_IO_H */
diff --git a/include/asm-sh64/ioctl.h b/include/asm-sh64/ioctl.h
new file mode 100644
index 000000000000..c089a6fb78e0
--- /dev/null
+++ b/include/asm-sh64/ioctl.h
@@ -0,0 +1,83 @@
+#ifndef __ASM_SH64_IOCTL_H
+#define __ASM_SH64_IOCTL_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ioctl.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ *
+ */
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB - 1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
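+
+/*
+ * Worked example (illustrative): with this encoding, FIONBIO from
+ * <asm/ioctls.h> is _IOW('f', 126, int), i.e.
+ *
+ *	dir  = _IOC_WRITE = 1  ->    1 << 30 = 0x40000000
+ *	size = sizeof(int) = 4 ->    4 << 16 = 0x00040000
+ *	type = 'f' = 0x66      -> 0x66 <<  8 = 0x00006600
+ *	nr   = 126             -> 0x7e <<  0 = 0x0000007e
+ *	                                       ==========
+ *	                                       0x4004667e
+ */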
+
+#endif /* __ASM_SH64_IOCTL_H */
diff --git a/include/asm-sh64/ioctls.h b/include/asm-sh64/ioctls.h
new file mode 100644
index 000000000000..e5d55629f206
--- /dev/null
+++ b/include/asm-sh64/ioctls.h
@@ -0,0 +1,111 @@
+#ifndef __ASM_SH64_IOCTLS_H
+#define __ASM_SH64_IOCTLS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ioctls.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL _IO('T', 12) /* 0x540C */
+#define TIOCNXCL _IO('T', 13) /* 0x540D */
+#define TIOCSCTTY _IO('T', 14) /* 0x540E */
+
+#define TIOCSTI _IOW('T', 18, char) /* 0x5412 */
+#define TIOCMGET _IOR('T', 21, unsigned int) /* 0x5415 */
+#define TIOCMBIS _IOW('T', 22, unsigned int) /* 0x5416 */
+#define TIOCMBIC _IOW('T', 23, unsigned int) /* 0x5417 */
+#define TIOCMSET _IOW('T', 24, unsigned int) /* 0x5418 */
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+
+#define TIOCGSOFTCAR _IOR('T', 25, unsigned int) /* 0x5419 */
+#define TIOCSSOFTCAR _IOW('T', 26, unsigned int) /* 0x541A */
+#define TIOCLINUX _IOW('T', 28, char) /* 0x541C */
+#define TIOCCONS _IO('T', 29) /* 0x541D */
+#define TIOCGSERIAL _IOR('T', 30, struct serial_struct) /* 0x541E */
+#define TIOCSSERIAL _IOW('T', 31, struct serial_struct) /* 0x541F */
+#define TIOCPKT _IOW('T', 32, int) /* 0x5420 */
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+
+
+#define TIOCNOTTY _IO('T', 34) /* 0x5422 */
+#define TIOCSETD _IOW('T', 35, int) /* 0x5423 */
+#define TIOCGETD _IOR('T', 36, int) /* 0x5424 */
+#define TCSBRKP _IOW('T', 37, int) /* 0x5425 */ /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* 0x5426 */ /* For debugging only */
+#define TIOCSBRK _IO('T', 39) /* 0x5427 */ /* BSD compatibility */
+#define TIOCCBRK _IO('T', 40) /* 0x5428 */ /* BSD compatibility */
+#define TIOCGSID _IOR('T', 41, pid_t) /* 0x5429 */ /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */
+#define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */
+#define TIOCSERSWILD _IOW('T', 85, int) /* 0x5455 */
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT _IOR('T', 88, struct async_struct) /* 0x5458 */ /* For debugging only */
+#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* 0x5459 */ /* Get line status register */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* 0x545A */ /* Get multiport config */
+#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* 0x545B */ /* Set multiport config */
+
+#define TIOCMIWAIT _IO('T', 92) /* 0x545C */ /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT _IOR('T', 93, struct async_icount) /* 0x545D */ /* read serial port inline interrupt counts */
+
+#endif /* __ASM_SH64_IOCTLS_H */
diff --git a/include/asm-sh64/ipc.h b/include/asm-sh64/ipc.h
new file mode 100644
index 000000000000..d8d9389bd3ce
--- /dev/null
+++ b/include/asm-sh64/ipc.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_IPC_H
+#define __ASM_SH64_IPC_H
+
+#include <asm-sh/ipc.h>
+
+#endif /* __ASM_SH64_IPC_H */
diff --git a/include/asm-sh64/ipcbuf.h b/include/asm-sh64/ipcbuf.h
new file mode 100644
index 000000000000..c441e35299c0
--- /dev/null
+++ b/include/asm-sh64/ipcbuf.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_SH64_IPCBUF_H__
+#define __ASM_SH64_IPCBUF_H__
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ipcbuf.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/*
+ * The ipc64_perm structure for the sh64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* __ASM_SH64_IPCBUF_H__ */
diff --git a/include/asm-sh64/irq.h b/include/asm-sh64/irq.h
new file mode 100644
index 000000000000..95056a0181d0
--- /dev/null
+++ b/include/asm-sh64/irq.h
@@ -0,0 +1,148 @@
+#ifndef __ASM_SH64_IRQ_H
+#define __ASM_SH64_IRQ_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/irq.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <linux/config.h>
+
+/*
+ * Encoded IRQs are not considered worth supporting.
+ * The main reason is that there's no per-encoded-interrupt
+ * enable/disable mechanism (as there was in SH3/4).
+ * An all-enabled/all-disabled scheme is only worthwhile if
+ * there's a cascaded interrupt controller to disable/enable/ack
+ * on. Until such a controller is available, there's no such
+ * support.
+ *
+ * Presumably encoded IRQs would use extra IRQs beyond the 64
+ * below; some logic would have to be added to cope with IRQ_IRL?
+ * in an exclusive way.
+ *
+ * Priorities are set at platform level; encoding is allowed only
+ * when IRQ_IRL0-3 are set to 0, otherwise it is not.
+ */
+
+/* Independent IRQs */
+#define IRQ_IRL0 0
+#define IRQ_IRL1 1
+#define IRQ_IRL2 2
+#define IRQ_IRL3 3
+
+#define IRQ_INTA 4
+#define IRQ_INTB 5
+#define IRQ_INTC 6
+#define IRQ_INTD 7
+
+#define IRQ_SERR 12
+#define IRQ_ERR 13
+#define IRQ_PWR3 14
+#define IRQ_PWR2 15
+#define IRQ_PWR1 16
+#define IRQ_PWR0 17
+
+#define IRQ_DMTE0 18
+#define IRQ_DMTE1 19
+#define IRQ_DMTE2 20
+#define IRQ_DMTE3 21
+#define IRQ_DAERR 22
+
+#define IRQ_TUNI0 32
+#define IRQ_TUNI1 33
+#define IRQ_TUNI2 34
+#define IRQ_TICPI2 35
+
+#define IRQ_ATI 36
+#define IRQ_PRI 37
+#define IRQ_CUI 38
+
+#define IRQ_ERI 39
+#define IRQ_RXI 40
+#define IRQ_BRI 41
+#define IRQ_TXI 42
+
+#define IRQ_ITI 63
+
+#define NR_INTC_IRQS 64
+
+#ifdef CONFIG_SH_CAYMAN
+#define NR_EXT_IRQS 32
+#define START_EXT_IRQS 64
+
+/* PCI bus 2 uses encoded external interrupts on the Cayman board */
+#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
+#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
+#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
+#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
+
+#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
+#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
+
+#else
+#define NR_EXT_IRQS 0
+#endif
+
+#define NR_IRQS (NR_INTC_IRQS+NR_EXT_IRQS)
+
+
+/* Default IRQs, fixed */
+#define TIMER_IRQ IRQ_TUNI0
+#define RTC_IRQ IRQ_CUI
+
+/* Default Priorities, Platform may choose differently */
+#define NO_PRIORITY 0 /* Disabled */
+#define TIMER_PRIORITY 2
+#define RTC_PRIORITY TIMER_PRIORITY
+#define SCIF_PRIORITY 3
+#define INTD_PRIORITY 3
+#define IRL3_PRIORITY 4
+#define INTC_PRIORITY 6
+#define IRL2_PRIORITY 7
+#define INTB_PRIORITY 9
+#define IRL1_PRIORITY 10
+#define INTA_PRIORITY 12
+#define IRL0_PRIORITY 13
+#define TOP_PRIORITY 15
+
+extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
+extern void enable_irq(unsigned int);
+
+extern int intc_evt_to_irq[(0xE20/0x20)+1];
+int intc_irq_describe(char* p, int irq);
+
+#define irq_canonicalize(irq) (irq)
+
+#ifdef CONFIG_SH_CAYMAN
+int cayman_irq_demux(int evt);
+int cayman_irq_describe(char* p, int irq);
+#define irq_demux(x) cayman_irq_demux(x)
+#define irq_describe(p, x) cayman_irq_describe(p, x)
+#else
+#define irq_demux(x) (intc_evt_to_irq[x])
+#define irq_describe(p, x) intc_irq_describe(p, x)
+#endif
+
+/*
+ * Function for "on chip support modules".
+ */
+
+/*
+ * SH-5 supports Priority based interrupts only.
+ * Interrupt priorities are defined at platform level.
+ */
+#define set_ipr_data(a, b, c, d)
+#define make_ipr_irq(a)
+#define make_imask_irq(a)
+
+#endif /* __ASM_SH64_IRQ_H */
diff --git a/include/asm-sh64/keyboard.h b/include/asm-sh64/keyboard.h
new file mode 100644
index 000000000000..cda75f6d1e0c
--- /dev/null
+++ b/include/asm-sh64/keyboard.h
@@ -0,0 +1,74 @@
+/*
+ * linux/include/asm-sh64/keyboard.h
+ *
+ * Copied from i386 version:
+ * Created 3 Nov 1996 by Geert Uytterhoeven
+ */
+
+/*
+ * This file contains the sh64 architecture specific keyboard definitions.
+ */
+
+#ifndef __ASM_SH64_KEYBOARD_H
+#define __ASM_SH64_KEYBOARD_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_SH_CAYMAN
+#define KEYBOARD_IRQ (START_EXT_IRQS + 2) /* SMSC SuperIO IRQ 1 */
+#endif
+#define DISABLE_KBD_DURING_INTERRUPTS 0
+
+extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
+extern int pckbd_getkeycode(unsigned int scancode);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+ char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern void pckbd_leds(unsigned char leds);
+extern void pckbd_init_hw(void);
+extern unsigned char pckbd_sysrq_xlate[128];
+
+#define kbd_setkeycode pckbd_setkeycode
+#define kbd_getkeycode pckbd_getkeycode
+#define kbd_translate pckbd_translate
+#define kbd_unexpected_up pckbd_unexpected_up
+#define kbd_leds pckbd_leds
+#define kbd_init_hw pckbd_init_hw
+#define kbd_sysrq_xlate pckbd_sysrq_xlate
+
+#define SYSRQ_KEY 0x54
+
+/* resource allocation */
+#define kbd_request_region()
+#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \
+ "keyboard", NULL)
+
+/* How to access the keyboard macros on this platform. */
+#define kbd_read_input() inb(KBD_DATA_REG)
+#define kbd_read_status() inb(KBD_STATUS_REG)
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+
+/* Some stoneage hardware needs delays after some operations. */
+#define kbd_pause() do { } while(0)
+
+/*
+ * Machine specific bits for the PS/2 driver
+ */
+
+#ifdef CONFIG_SH_CAYMAN
+#define AUX_IRQ (START_EXT_IRQS + 6) /* SMSC SuperIO IRQ12 */
+#endif
+
+#define aux_request_irq(hand, dev_id) \
+ request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id)
+
+#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH64_KEYBOARD_H */
+
diff --git a/include/asm-sh64/kmap_types.h b/include/asm-sh64/kmap_types.h
new file mode 100644
index 000000000000..2ae7c7587919
--- /dev/null
+++ b/include/asm-sh64/kmap_types.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_KMAP_TYPES_H
+#define __ASM_SH64_KMAP_TYPES_H
+
+#include <asm-sh/kmap_types.h>
+
+#endif /* __ASM_SH64_KMAP_TYPES_H */
+
diff --git a/include/asm-sh64/linkage.h b/include/asm-sh64/linkage.h
new file mode 100644
index 000000000000..1dd0e84a228d
--- /dev/null
+++ b/include/asm-sh64/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_LINKAGE_H
+#define __ASM_SH64_LINKAGE_H
+
+#include <asm-sh/linkage.h>
+
+#endif /* __ASM_SH64_LINKAGE_H */
+
diff --git a/include/asm-sh64/local.h b/include/asm-sh64/local.h
new file mode 100644
index 000000000000..d9bd95dd36e2
--- /dev/null
+++ b/include/asm-sh64/local.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_LOCAL_H
+#define __ASM_SH64_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* __ASM_SH64_LOCAL_H */
+
diff --git a/include/asm-sh64/mc146818rtc.h b/include/asm-sh64/mc146818rtc.h
new file mode 100644
index 000000000000..6cd3aec68dbe
--- /dev/null
+++ b/include/asm-sh64/mc146818rtc.h
@@ -0,0 +1,7 @@
+/*
+ * linux/include/asm-sh64/mc146818rtc.h
+ *
+*/
+
+/* For now, an empty place-holder to get IDE to compile. */
+
diff --git a/include/asm-sh64/mman.h b/include/asm-sh64/mman.h
new file mode 100644
index 000000000000..a9be6d885c3e
--- /dev/null
+++ b/include/asm-sh64/mman.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_MMAN_H
+#define __ASM_SH64_MMAN_H
+
+#include <asm-sh/mman.h>
+
+#endif /* __ASM_SH64_MMAN_H */
diff --git a/include/asm-sh64/mmu.h b/include/asm-sh64/mmu.h
new file mode 100644
index 000000000000..ccd36d26615a
--- /dev/null
+++ b/include/asm-sh64/mmu.h
@@ -0,0 +1,7 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+#endif
diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h
new file mode 100644
index 000000000000..f062e1513272
--- /dev/null
+++ b/include/asm-sh64/mmu_context.h
@@ -0,0 +1,209 @@
+#ifndef __ASM_SH64_MMU_CONTEXT_H
+#define __ASM_SH64_MMU_CONTEXT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/mmu_context.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * ASID handling idea taken from MIPS implementation.
+ *
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Cache of MMU context last used.
+ *
+ * The MMU "context" consists of two things:
+ * (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
+ * (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
+ */
+extern unsigned long mmu_context_cache;
+
+#include <linux/config.h>
+#include <asm/page.h>
+
+
+/* Current mm's pgd */
+extern pgd_t *mmu_pdtp_cache;
+
+#define SR_ASID_MASK 0xffffffffff00ffffULL
+#define SR_ASID_SHIFT 16
+
+#define MMU_CONTEXT_ASID_MASK 0x000000ff
+#define MMU_CONTEXT_VERSION_MASK 0xffffff00
+#define MMU_CONTEXT_FIRST_VERSION 0x00000100
+#define NO_CONTEXT 0
+
+/* ASID is 8-bit value, so it can't be 0x100 */
+#define MMU_NO_ASID 0x100
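+
+/*
+ * Worked example: a context value of 0x00000305 decodes as TLB version
+ * 0x00000300 (mmu_context_cache & MMU_CONTEXT_VERSION_MASK) and
+ * ASID 0x05 (mmu_context_cache & MMU_CONTEXT_ASID_MASK).
+ */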
+
+
+/*
+ * Virtual Page Number mask
+ */
+#define MMU_VPN_MASK 0xfffff000
+
+extern __inline__ void
+get_new_mmu_context(struct mm_struct *mm)
+{
+ extern void flush_tlb_all(void);
+ extern void flush_cache_all(void);
+
+ unsigned long mc = ++mmu_context_cache;
+
+ if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+		/* We have exhausted the ASIDs of this version.
+		   Flush all TLB entries and start a new cycle. */
+		flush_tlb_all();
+		/* We have to flush all caches as ASIDs are
+		   used in the caches. */
+		flush_cache_all();
+		/* Fix the version if needed.
+		   Note that we avoid version #0/asid #0 to distinguish NO_CONTEXT. */
+ if (!mc)
+ mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+ }
+ mm->context = mc;
+}
+
+/*
+ * Get MMU context if needed.
+ */
+static __inline__ void
+get_mmu_context(struct mm_struct *mm)
+{
+ if (mm) {
+ unsigned long mc = mmu_context_cache;
+		/* Check whether we have an old-version context.
+		   If so, we need to get a new context with the new version. */
+ if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
+ get_new_mmu_context(mm);
+ }
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+
+ return 0;
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ extern void flush_tlb_mm(struct mm_struct *mm);
+
+ /* Well, at least free TLB entries */
+ flush_tlb_mm(mm);
+}
+
+#endif /* __ASSEMBLY__ */
+
+/* Common defines */
+#define TLB_STEP 0x00000010
+#define TLB_PTEH 0x00000000
+#define TLB_PTEL 0x00000008
+
+/* PTEH defines */
+#define PTEH_ASID_SHIFT 2
+#define PTEH_VALID 0x0000000000000001
+#define PTEH_SHARED 0x0000000000000002
+#define PTEH_MATCH_ASID 0x00000000000003ff
+
+#ifndef __ASSEMBLY__
+/* This has to be a common function because the information about the
+ * next location to fill is shared. */
+extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
+
+/* Profiling counter. */
+#ifdef CONFIG_SH64_PROC_TLB
+extern unsigned long long calls_to_do_fast_page_fault;
+#endif
+
+static inline unsigned long get_asid(void)
+{
+ unsigned long long sr;
+
+ asm volatile ("getcon " __SR ", %0\n\t"
+ : "=r" (sr));
+
+ sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
+ return (unsigned long) sr;
+}
+
+/* Set ASID into SR */
+static inline void set_asid(unsigned long asid)
+{
+ unsigned long long sr, pc;
+
+ asm volatile ("getcon " __SR ", %0" : "=r" (sr));
+
+ sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
+
+ /*
+	 * It is possible that this function may be inlined, so to avoid
+	 * the assembler reporting duplicate symbols we make use of the gas
+	 * trick of generating symbols using numeric local labels and
+	 * forward references.
+ */
+ asm volatile ("movi 1, %1\n\t"
+ "shlli %1, 28, %1\n\t"
+ "or %0, %1, %1\n\t"
+ "putcon %1, " __SR "\n\t"
+ "putcon %0, " __SSR "\n\t"
+ "movi 1f, %1\n\t"
+ "ori %1, 1 , %1\n\t"
+ "putcon %1, " __SPC "\n\t"
+ "rte\n"
+ "1:\n\t"
+ : "=r" (sr), "=r" (pc) : "0" (sr));
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static __inline__ void activate_context(struct mm_struct *mm)
+{
+ get_mmu_context(mm);
+ set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+}
+
+
+static __inline__ void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (prev != next) {
+ mmu_pdtp_cache = next->pgd;
+ activate_context(next);
+ }
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+#define activate_mm(prev, next) \
+ switch_mm((prev),(next),NULL)
+
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_MMU_CONTEXT_H */
diff --git a/include/asm-sh64/module.h b/include/asm-sh64/module.h
new file mode 100644
index 000000000000..bf382bccf3e8
--- /dev/null
+++ b/include/asm-sh64/module.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_SH64_MODULE_H
+#define __ASM_SH64_MODULE_H
+/*
+ * This file contains the SH architecture specific module code.
+ */
+
+#define module_map(x) vmalloc(x)
+#define module_unmap(x) vfree(x)
+#define module_arch_init(x) (0)
+#define arch_init_modules(x) do { } while (0)
+
+#endif /* __ASM_SH64_MODULE_H */
diff --git a/include/asm-sh64/msgbuf.h b/include/asm-sh64/msgbuf.h
new file mode 100644
index 000000000000..cf0494ce0ba8
--- /dev/null
+++ b/include/asm-sh64/msgbuf.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_SH64_MSGBUF_H
+#define __ASM_SH64_MSGBUF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/msgbuf.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/*
+ * The msqid64_ds structure for the sh64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* __ASM_SH64_MSGBUF_H */
diff --git a/include/asm-sh64/namei.h b/include/asm-sh64/namei.h
new file mode 100644
index 000000000000..99d759a805ce
--- /dev/null
+++ b/include/asm-sh64/namei.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_SH64_NAMEI_H
+#define __ASM_SH64_NAMEI_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/namei.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * Included from linux/fs/namei.c
+ *
+ */
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* __ASM_SH64_NAMEI_H */
diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h
new file mode 100644
index 000000000000..e1f7f5a41210
--- /dev/null
+++ b/include/asm-sh64/page.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_SH64_PAGE_H
+#define __ASM_SH64_PAGE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/page.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * benedict.gaster@superh.com 19th, 24th July 2002.
+ *
+ * Modified to take account of enabling for D-CACHE support.
+ *
+ */
+
+#include <linux/config.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE 4096
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PTE_MASK PAGE_MASK
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT 16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#define HPAGE_SHIFT 20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define HPAGE_SHIFT 29
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE-1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
+#endif
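+
+/*
+ * Worked example: with CONFIG_HUGETLB_PAGE_SIZE_1MB, HPAGE_SHIFT is 20,
+ * so HPAGE_SIZE is 1 << 20 bytes (1MB) and HUGETLB_PAGE_ORDER is
+ * 20 - 12 = 8, i.e. one huge page spans 256 base pages.
+ */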
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+extern struct page *mem_map;
+extern void sh64_page_clear(void *page);
+extern void sh64_page_copy(void *from, void *to);
+
+#define clear_page(page) sh64_page_clear(page)
+#define copy_page(to,from) sh64_page_copy(from, to)
+
+#if defined(CONFIG_DCACHE_DISABLED)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#else
+
+extern void clear_user_page(void *to, unsigned long address, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
+
+#endif /* defined(CONFIG_DCACHE_DISABLED) */
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * Kconfig defined.
+ */
+#define __MEMORY_START (CONFIG_MEMORY_START)
+#define PAGE_OFFSET (CONFIG_CACHED_MEMORY_OFFSET)
+
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define MAP_NR(addr) ((__pa(addr)-__MEMORY_START) >> PAGE_SHIFT)
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+
+#define phys_to_page(phys) (mem_map + (((phys) - __MEMORY_START) >> PAGE_SHIFT))
+#define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + __MEMORY_START)
+
+/* PFN start number, because of __MEMORY_START */
+#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
+
+#define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START)
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifndef __ASSEMBLY__
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
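+
+/*
+ * Worked examples (PAGE_SHIFT == 12): get_order(1) == 0,
+ * get_order(4096) == 0, get_order(4097) == 1, get_order(8192) == 1,
+ * get_order(16384) == 2.
+ */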
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_PAGE_H */
diff --git a/include/asm-sh64/param.h b/include/asm-sh64/param.h
new file mode 100644
index 000000000000..d18cc87c1a80
--- /dev/null
+++ b/include/asm-sh64/param.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/param.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+#ifndef __ASM_SH64_PARAM_H
+#define __ASM_SH64_PARAM_H
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+# ifdef CONFIG_SH_WDT
+# define HZ 1000 /* Needed for high-res WOVF */
+# else
+# define HZ 100
+# endif
+# define USER_HZ 100 /* User interfaces are in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* __ASM_SH64_PARAM_H */
diff --git a/include/asm-sh64/pci.h b/include/asm-sh64/pci.h
new file mode 100644
index 000000000000..8cc14e139750
--- /dev/null
+++ b/include/asm-sh64/pci.h
@@ -0,0 +1,110 @@
+#ifndef __ASM_SH64_PCI_H
+#define __ASM_SH64_PCI_H
+
+#ifdef __KERNEL__
+
+#include <linux/dma-mapping.h>
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#define pcibios_assign_all_busses() 1
+
+/*
+ * These are currently the correct values for the STM Overdrive board.
+ * We need some way of setting this in a board-specific way; it will
+ * not be the same on other boards, I think.
+ */
+#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
+#define PCIBIOS_MIN_IO 0x2000
+#define PCIBIOS_MIN_MEM 0x40000000
+#endif
+
+extern void pcibios_set_master(struct pci_dev *dev);
+
+/*
+ * Set penalize isa irq function
+ */
+static inline void pcibios_penalize_isa_irq(int irq)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+/* Dynamic DMA mapping stuff.
+ * SuperH has everything mapped statically like x86.
+ */
+
+/* The PCI address space does equal the physical memory
+ * address space. The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+/* pci_unmap_{single,page} being a nop depends upon the
+ * configuration.
+ */
+#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+#else
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#endif
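+
+/*
+ * Usage sketch (illustrative; "ring", "pdev" and the field names are
+ * hypothetical): a driver embeds the unmap cookies in its own state so
+ * that they compile away on the coherent configuration:
+ *
+ *	struct ring_entry {
+ *		struct sk_buff *skb;
+ *		DECLARE_PCI_UNMAP_ADDR(mapping)
+ *		DECLARE_PCI_UNMAP_LEN(len)
+ *	};
+ *
+ *	pci_unmap_addr_set(ring, mapping, handle);
+ *	pci_unmap_len_set(ring, len, size);
+ *	...
+ *	pci_unmap_single(pdev, pci_unmap_addr(ring, mapping),
+ *			 pci_unmap_len(ring, len), PCI_DMA_FROMDEVICE);
+ */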
+
+/* Not supporting more than 32-bit PCI bus addresses now, but
+ * must satisfy references to this function. Change if needed.
+ */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->length)
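+
+/*
+ * Usage sketch (illustrative; "program_device" is a placeholder for the
+ * device-specific setup):
+ *
+ *	count = pci_map_sg(pdev, sg, nents, dir);
+ *	for (i = 0; i < count; i++)
+ *		program_device(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
+ */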
+
+/* Board-specific fixup routines. */
+extern void pcibios_fixup(void);
+extern void pcibios_fixup_irqs(void);
+
+#ifdef CONFIG_PCI_AUTO
+extern int pciauto_assign_resources(int busno, struct pci_channel *hose);
+#endif
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+/* generic DMA-mapping stuff */
+#include <asm-generic/pci-dma-compat.h>
+
+#endif /* __ASM_SH64_PCI_H */
+
diff --git a/include/asm-sh64/percpu.h b/include/asm-sh64/percpu.h
new file mode 100644
index 000000000000..a01d16cd0e8c
--- /dev/null
+++ b/include/asm-sh64/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_PERCPU
+#define __ASM_SH64_PERCPU
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_SH64_PERCPU */
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
new file mode 100644
index 000000000000..02723085d0d9
--- /dev/null
+++ b/include/asm-sh64/pgalloc.h
@@ -0,0 +1,202 @@
+#ifndef __ASM_SH64_PGALLOC_H
+#define __ASM_SH64_PGALLOC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/pgalloc.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ */
+
+#include <asm/processor.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist (current_cpu_data.pmd_quick)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+static inline void pgd_init(unsigned long page)
+{
+ unsigned long *pgd = (unsigned long *)page;
+ extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+ pgd[i] = (unsigned long)empty_bad_pte_table;
+}
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
+ pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
+ return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+
+ if (ret) {
+ memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ }
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ kfree((void *)pgd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct page *pte)
+{
+ __free_page(pte);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ if (pte)
+ clear_page(pte);
+
+ return pte;
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *pte;
+
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ if (pte)
+ clear_page(page_address(pte));
+
+ return pte;
+}
+
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
+/*
+ * In the 2-level case, allocating and freeing a pmd is trivial: the
+ * 1-entry pmd is folded inside the pgd, so it has no extra memory
+ * associated with it.
+ */
+
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+#define __pmd_free_tlb(tlb,pmd) do { } while (0)
+
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+
+static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pmd_t *pmd;
+ pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ if (pmd)
+ clear_page(pmd);
+ return pmd;
+}
+
+static __inline__ void pmd_free(pmd_t *pmd)
+{
+ free_page((unsigned long) pmd);
+}
+
+#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd)
+#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)
+
+#else
+#error "No defined page table size"
+#endif
+
+#define check_pgt_cache() do { } while (0)
+#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_alloc(mm) get_pgd_fast()
+
+extern int do_check_pgt_cache(int, int);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+}
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ struct page *pte)
+{
+ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
+}
+
+#endif /* __ASM_SH64_PGALLOC_H */
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
new file mode 100644
index 000000000000..1f333c1060a1
--- /dev/null
+++ b/include/asm-sh64/pgtable.h
@@ -0,0 +1,498 @@
+#ifndef __ASM_SH64_PGTABLE_H
+#define __ASM_SH64_PGTABLE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/pgtable.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the SuperH page table tree.
+ */
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <linux/threads.h>
+#include <linux/config.h>
+
+extern void paging_init(void);
+
+/* We provide our own get_unmapped_area to avoid cache synonym issue */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * Basically we have the same two-level page tables as the i386 (the
+ * logical three-level Linux page table layout, folded).
+ */
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * NEFF and NPHYS related defines.
+ * FIXME : These need to be model-dependent. For now this is OK, SH5-101 and SH5-103
+ * implement 32 bits effective and 32 bits physical. But future implementations may
+ * extend beyond this.
+ */
+#define NEFF 32
+#define NEFF_SIGN (1LL << (NEFF - 1))
+#define NEFF_MASK (-1LL << NEFF)
+
+#define NPHYS 32
+#define NPHYS_SIGN (1LL << (NPHYS - 1))
+#define NPHYS_MASK (-1LL << NPHYS)
+
+/* Typically 2-level is sufficient for up to 32 bits of virtual address space;
+   beyond that, 3-level is appropriate. */
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
+#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long))
+#define PTE_MAGNITUDE 3 /* sizeof(unsigned long long) magnit. */
+#define PTE_SHIFT PAGE_SHIFT
+#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE)
+
+/* top level: PGD. */
+#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
+#define PGD_BITS (NEFF - PGDIR_SHIFT)
+#define PTRS_PER_PGD (1<<PGD_BITS)
+
+/* middle level: PMD. This doesn't do anything for the 2-level case. */
+#define PTRS_PER_PMD (1)
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PMD_SHIFT PGDIR_SHIFT
+#define PMD_SIZE PGDIR_SIZE
+#define PMD_MASK PGDIR_MASK
+
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+/*
+ * three-level asymmetric paging structure: PGD is top level.
+ * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
+ */
+/* bottom level: PTE. It's 9 bits = 512 pointers */
+#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long))
+#define PTE_MAGNITUDE 3 /* sizeof(unsigned long long) magnit. */
+#define PTE_SHIFT PAGE_SHIFT
+#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE)
+
+/* middle level: PMD. It's 10 bits = 1024 pointers */
+#define PTRS_PER_PMD ((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
+#define PMD_MAGNITUDE 2 /* sizeof(unsigned long long *) magnit. */
+#define PMD_SHIFT (PTE_SHIFT + PTE_BITS)
+#define PMD_BITS (PAGE_SHIFT - PMD_MAGNITUDE)
+
+/* top level: PGD. It's 1 bit = 2 pointers */
+#define PGDIR_SHIFT (PMD_SHIFT + PMD_BITS)
+#define PGD_BITS (NEFF - PGDIR_SHIFT)
+#define PTRS_PER_PGD (1<<PGD_BITS)
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#else
+#error "No defined number of page table levels"
+#endif
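+
+/*
+ * Worked example, assuming 4KB pages (PAGE_SHIFT == 12) and NEFF == 32:
+ *
+ *   2-level: PTE_BITS = 12 - 3 = 9, so PTRS_PER_PTE = 512 and
+ *	PGDIR_SHIFT = 21; each PGD entry maps 2MB and
+ *	PTRS_PER_PGD = 1 << (32 - 21) = 2048.
+ *
+ *   3-level: PMD_SHIFT = 21 and PMD_BITS = 12 - 2 = 10, so each PMD
+ *	page holds 1024 pointers and PGDIR_SHIFT = 31, leaving
+ *	PGD_BITS = 1, i.e. the 2-pointer top level noted above.
+ */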
+
+/*
+ * Error outputs.
+ */
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Table setting routines. Used within arch/mm only.
+ */
+#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
+{
+ unsigned long long x = ((unsigned long long) pteval.pte);
+ unsigned long long *xp = (unsigned long long *) pteptr;
+ /*
+ * Sign-extend based on NPHYS.
+ */
+ *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
+}
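+
+/*
+ * Example of the sign-extension above: with NPHYS == 32, a PTE value of
+ * 0x80000000 has NPHYS_SIGN (bit 31) set, so 0xffffffff80000000 is what
+ * gets stored (0x80000000 | NPHYS_MASK); a value of 0x00001000 is
+ * stored unchanged.
+ */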
+
+static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
+{
+ pmd_val(*pmdp) = (unsigned long) ptep;
+}
+
+/*
+ * PGD defines. Top level.
+ */
+
+/* To find an entry in a generic PGD. */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define __pgd_offset(address) pgd_index(address)
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* To find an entry in a kernel PGD. */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * PGD level access routines.
+ *
+ * Note 1:
+ * There's no need to use physical addresses, since the tree walk is
+ * performed entirely in software, up to the PTE translation.
+ *
+ * Note 2:
+ * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
+ * clear (_PGD_EMPTY), or present. When present, the lower 3 nibbles
+ * contain _KERNPG_TABLE; being a kernel virtual pointer, bit 31 must
+ * also be 1. Assuming an arbitrary clear value with bit 31 set to 0
+ * and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any other value
+ * is a bad pgd that must be reported via printk().
+ *
+ */
+#define _PGD_EMPTY 0x0
+
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+#define pgd_present(pgd) ((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
+#define pgd_clear(xx) do { } while(0)
+
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+#define pgd_present(pgd_entry) (1)
+#define pgd_none(pgd_entry) (pgd_val((pgd_entry)) == _PGD_EMPTY)
+/* TODO: Think later about what a useful definition of 'bad' would be now. */
+#define pgd_bad(pgd_entry) (0)
+#define pgd_clear(pgd_entry_p) (set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))
+
+#endif
+
+
+#define pgd_page(pgd_entry) ((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))
+
+/*
+ * PMD defines. Middle level.
+ */
+
+/* PGD to PMD dereferencing */
+#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
+#define __pmd_offset(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_offset(dir, addr) \
+ ((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
+#endif
+
+/*
+ * PMD level access routines. Same notes as above.
+ */
+#define _PMD_EMPTY 0x0
+/* Either the PMD is empty or present, it's not paged out */
+#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
+#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
+#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
+#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+#define pmd_page_kernel(pmd_entry) \
+ ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
+
+#define pmd_page(pmd) \
+ (virt_to_page(pmd_val(pmd)))
+
+/* PMD to PTE dereferencing */
+#define pte_index(address) \
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_kernel(dir, addr) \
+ ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
+
+#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
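+
+/*
+ * Putting the levels together, a software walk of the tree looks like
+ * this (a minimal sketch, error handling omitted):
+ *
+ *	pgd_t *pgd = pgd_offset(mm, addr);
+ *	pmd_t *pmd = pmd_offset(pgd, addr);	(folds away on 2-level)
+ *	pte_t *pte = pte_offset_kernel(pmd, addr);
+ *
+ * Only the final PTE holds a physical address; the intermediate entries
+ * are kernel virtual pointers (see the PGD notes above).
+ */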
+
+/* Round it up ! */
+#define USER_PTRS_PER_PGD ((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#ifndef __ASSEMBLY__
+#define VMALLOC_END 0xff000000
+#define VMALLOC_START 0xf0000000
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+#define IOBASE_VADDR 0xff000000
+#define IOBASE_END 0xffffffff
+
+/*
+ * PTEL coherent flags.
+ * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
+ */
+/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
+ positions, to avoid expensive bit shuffling on every refill. The remaining
+ bits are used for s/w purposes and masked out on each refill.
+
+ Note, the PTE slots are used to hold data of type swp_entry_t when a page is
+ swapped out. Only the _PAGE_PRESENT flag is significant when the page is
+ swapped out, and it must be placed so that it doesn't overlap either the
+ type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
+ at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
+ scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
+ [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
+ into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
+#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
+#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
+#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
+#define _PAGE_PRESENT 0x004 /* software: page referenced */
+#define _PAGE_FILE 0x004 /* software: only when !present */
+#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
+#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
+#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
+#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
+#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
+#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
+#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
+#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
+#define _PAGE_ACCESSED 0x800 /* software: page referenced */
+
+/* Mask which drops software flags */
+#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL
+/* Flags default: 4KB, Read, Not write, Not execute, Not user */
+#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x0000000000000040LL
+
+/*
+ * HugeTLB support
+ */
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE (_PAGE_SIZE0)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#define _PAGE_SZHUGE (_PAGE_SIZE1)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
+#endif
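+
+/*
+ * From the selections above, the SZ bits encode the page size roughly
+ * as follows (the all-clear case presumably selecting the base page):
+ *
+ *	SZ1 SZ0
+ *	 0   0	base (4KB) page
+ *	 0   1	64KB huge page
+ *	 1   0	1MB huge page
+ *	 1   1	512MB huge page
+ */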
+
+/*
+ * Default flags for a Kernel page.
+ * This is fundamentally also SHARED because the main use of this define
+ * (other than for PGD/PMD entries) is for the VMALLOC pool which is
+ * contextless.
+ *
+ * _PAGE_EXECUTE is required for modules
+ *
+ */
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE | \
+ _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SHARED)
+
+/* Default flags for a User page */
+#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
+
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
+ _PAGE_SHARED)
+/* We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
+ * protection mode for the stack. */
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
+ _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
+ _PAGE_ACCESSED | _PAGE_USER)
+#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
+
+
+/*
+ * In ST50 we have full permissions (Read/Write/Execute/Shared).
+ * Just match'em all. These are for mmap(), therefore all at least
+ * User/Cachable/Present/Accessed. No point in making Fault on Write.
+ */
+#define __MMAP_COMMON (_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
+ /* sxwr */
+#define __P000 __pgprot(__MMAP_COMMON)
+#define __P001 __pgprot(__MMAP_COMMON | _PAGE_READ)
+#define __P010 __pgprot(__MMAP_COMMON)
+#define __P011 __pgprot(__MMAP_COMMON | _PAGE_READ)
+#define __P100 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
+#define __P101 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
+#define __P110 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
+#define __P111 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
+
+#define __S000 __pgprot(__MMAP_COMMON | _PAGE_SHARED)
+#define __S001 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
+#define __S010 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
+#define __S011 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
+#define __S100 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
+#define __S101 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
+#define __S110 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
+#define __S111 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)
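+
+/*
+ * Note the pattern above: none of the private (__P) entries carry
+ * _PAGE_WRITE, even for PROT_WRITE mappings, so the first store to a
+ * private page faults and can be resolved as copy-on-write; the shared
+ * (__S) entries include _PAGE_WRITE directly.
+ */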
+
+/* Make it a device mapping for maximum safety (e.g. for mapping device
+ registers into user-space via /dev/map). */
+#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
+#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
+
+/*
+ * Handling allocation failures during page table setup.
+ */
+extern void __handle_bad_pmd_kernel(pmd_t * pmd);
+#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)
+
+/*
+ * PTE level access routines.
+ *
+ * Note1:
+ * It's the tree walk leaf. This is physical address to be stored.
+ *
+ * Note 2:
+ * Regarding the choice of _PTE_EMPTY:
+
+ We must choose a bit pattern that cannot be valid, whether or not the page
+ is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
+ out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
+ left for us to select. If we force bit[7]==0 when swapped out, we could use
+ the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
+ we force bit[7]==1 when swapped out, we can use all zeroes to indicate
+ empty. This is convenient, because the page tables get cleared to zero
+ when they are allocated.
+
+ */
+#define _PTE_EMPTY 0x0
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(xp) (set_pte(xp, __pte(_PTE_EMPTY)))
+#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page
+ * addresses:
+ */
+
+/*
+ * Given a PTE, return the index of the mem_map[] entry corresponding
+ * to the page frame the PTE refers to: take the absolute physical
+ * address, make it relative to __MEMORY_START, and translate it to
+ * an index.
+ */
+#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
+ __MEMORY_START) >> PAGE_SHIFT)
+
+/*
+ * Given a PTE, return the "struct page *".
+ */
+#define pte_page(x) (mem_map + pte_pagenr(x))
+
+/*
+ * Return number of (down rounded) MB corresponding to x pages.
+ */
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXECUTE; }
+static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
+
+extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+
+extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry.
+ *
+ * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
+ */
+#define mk_pte(page,pgprot) \
+({ \
+ pte_t __pte; \
+ \
+ set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
+ __MEMORY_START | pgprot_val((pgprot)))); \
+ __pte; \
+})
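+
+/*
+ * For example, mem_map[0] describes the page frame at physical address
+ * __MEMORY_START, so mk_pte(mem_map + n, prot) yields a PTE holding the
+ * physical address __MEMORY_START + (n << PAGE_SHIFT), OR'd with the
+ * protection bits (and sign-extended by set_pte() as described above).
+ */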
+
+/*
+ * This takes an (absolute) physical page address that is used
+ * by the remapping functions
+ */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
+
+#define page_pte_prot(page, prot) mk_pte(page, prot)
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+typedef pte_t *pte_addr_t;
+#define pgtable_cache_init() do { } while (0)
+
+extern void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte);
+
+/* Encode and decode a swap entry */
+#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
+#define __swp_offset(x) ((x).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
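+
+/*
+ * Worked example of the split type field: for type 0x2f, offset 5,
+ * __swp_entry() places type bits [1:0] at val[1:0] (0x3), type bits
+ * [5:2] at val[6:3] (0x58) and the offset at val[31:8] (0x500), giving
+ * val = 0x55b.  Bits [2] (_PAGE_PRESENT) and [7] stay clear, as the
+ * PTEL comment above requires.
+ */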
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS 29
+#define pte_to_pgoff(pte) (pte_val(pte))
+#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_page_range remap_page_range
+#endif /* !__ASSEMBLY__ */
+
+#define pte_pfn(x) (((unsigned long)((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASM_SH64_PGTABLE_H */
diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h
new file mode 100644
index 000000000000..7046a9014027
--- /dev/null
+++ b/include/asm-sh64/platform.h
@@ -0,0 +1,69 @@
+#ifndef __ASM_SH64_PLATFORM_H
+#define __ASM_SH64_PLATFORM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/platform.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * benedict.gaster@superh.com: 3rd May 2002
+ * Added support for ramdisk, removing statically linked romfs at the same time.
+ */
+
+#include <linux/ioport.h>
+#include <asm/irq.h>
+
+
+/*
+ * Platform definition structure.
+ */
+struct sh64_platform {
+ unsigned int readonly_rootfs;
+ unsigned int ramdisk_flags;
+ unsigned int initial_root_dev;
+ unsigned int loader_type;
+ unsigned int initrd_start;
+ unsigned int initrd_size;
+ unsigned int fpu_flags;
+ unsigned int io_res_count;
+ unsigned int kram_res_count;
+ unsigned int xram_res_count;
+ unsigned int rom_res_count;
+ struct resource *io_res_p;
+ struct resource *kram_res_p;
+ struct resource *xram_res_p;
+ struct resource *rom_res_p;
+};
+
+extern struct sh64_platform platform_parms;
+
+extern unsigned long long memory_start, memory_end;
+
+extern unsigned long long fpu_in_use;
+
+extern int platform_int_priority[NR_INTC_IRQS];
+
+#define FPU_FLAGS (platform_parms.fpu_flags)
+#define STANDARD_IO_RESOURCES (platform_parms.io_res_count)
+#define STANDARD_KRAM_RESOURCES (platform_parms.kram_res_count)
+#define STANDARD_XRAM_RESOURCES (platform_parms.xram_res_count)
+#define STANDARD_ROM_RESOURCES (platform_parms.rom_res_count)
+
+/*
+ * Kernel Memory description, Respectively:
+ * code = last but one memory descriptor
+ * data = last memory descriptor
+ */
+#define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2])
+#define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1])
+
+/* Be prepared for 64-bit sign extensions */
+#define PFN_UP(x) ((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff)
+#define PFN_DOWN(x) (((x) >> PAGE_SHIFT) & 0x000fffff)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
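+
+/*
+ * Example: if x has been sign-extended to 0xffffffff80000000, then
+ * x >> PAGE_SHIFT is 0xffffffff80000; the 0x000fffff mask keeps only
+ * the low 20 pfn bits, so PFN_DOWN(x) == 0x80000, just as for the
+ * plain 32-bit value 0x80000000.
+ */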
+
+#endif /* __ASM_SH64_PLATFORM_H */
diff --git a/include/asm-sh64/poll.h b/include/asm-sh64/poll.h
new file mode 100644
index 000000000000..a420d14eb704
--- /dev/null
+++ b/include/asm-sh64/poll.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_SH64_POLL_H
+#define __ASM_SH64_POLL_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/poll.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/* These are specified by iBCS2 */
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+/* The rest seem to be more-or-less nonstandard. Check them! */
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM 0x0100
+#define POLLWRBAND 0x0200
+#define POLLMSG 0x0400
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif /* __ASM_SH64_POLL_H */
diff --git a/include/asm-sh64/posix_types.h b/include/asm-sh64/posix_types.h
new file mode 100644
index 000000000000..0620317a6f0f
--- /dev/null
+++ b/include/asm-sh64/posix_types.h
@@ -0,0 +1,131 @@
+#ifndef __ASM_SH64_POSIX_TYPES_H
+#define __ASM_SH64_POSIX_TYPES_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/posix_types.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef long unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+typedef unsigned short __kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+ int val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+ int __val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
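+
+/*
+ * Example of the arithmetic above, assuming 32-bit longs (so
+ * __NFDBITS == 32): fd 35 lives in fds_bits[35 / 32] == fds_bits[1]
+ * at bit 35 % 32 == 3, so __FD_SET(35, p) does
+ * p->fds_bits[1] |= (1UL << 3).
+ */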
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
+{
+ unsigned long *__tmp = __p->fds_bits;
+ int __i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ __tmp[ 8] = 0; __tmp[ 9] = 0;
+ __tmp[10] = 0; __tmp[11] = 0;
+ __tmp[12] = 0; __tmp[13] = 0;
+ __tmp[14] = 0; __tmp[15] = 0;
+ return;
+
+ case 8:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ return;
+
+ case 4:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ return;
+ }
+ }
+ __i = __FDSET_LONGS;
+ while (__i) {
+ __i--;
+ *__tmp = 0;
+ __tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+
+#endif /* __ASM_SH64_POSIX_TYPES_H */
diff --git a/include/asm-sh64/processor.h b/include/asm-sh64/processor.h
new file mode 100644
index 000000000000..0f45ae686110
--- /dev/null
+++ b/include/asm-sh64/processor.h
@@ -0,0 +1,292 @@
+#ifndef __ASM_SH64_PROCESSOR_H
+#define __ASM_SH64_PROCESSOR_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/processor.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ */
+
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/cache.h>
+#include <asm/registers.h>
+#include <linux/threads.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ \
+void *pc; \
+unsigned long long __dummy = 0; \
+__asm__("gettr tr0, %1\n\t" \
+ "pta 4, tr0\n\t" \
+ "gettr tr0, %0\n\t" \
+ "ptabs %1, tr0\n\t" \
+ :"=r" (pc), "=r" (__dummy) \
+ : "1" (__dummy)); \
+pc; })
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ */
+enum cpu_type {
+ CPU_SH5_101,
+ CPU_SH5_103,
+ CPU_SH_NONE
+};
+
+/*
+ * TLB information structure
+ *
+ * Defined for both I and D tlb, per-processor.
+ */
+struct tlb_info {
+ unsigned long long next;
+ unsigned long long first;
+ unsigned long long last;
+
+ unsigned int entries;
+ unsigned int step;
+
+ unsigned long flags;
+};
+
+struct sh_cpuinfo {
+ enum cpu_type type;
+ unsigned long loops_per_jiffy;
+
+ char hard_math;
+
+ unsigned long *pgd_quick;
+ unsigned long *pmd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+ unsigned int cpu_clock, master_clock, bus_clock, module_clock;
+
+ /* Cache info */
+ struct cache_info icache;
+ struct cache_info dcache;
+
+ /* TLB info */
+ struct tlb_info itlb;
+ struct tlb_info dtlb;
+};
+
+extern struct sh_cpuinfo boot_cpu_data;
+
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+
+#endif
+
+/*
+ * User space process size: 2GB - 4k.
+ */
+#define TASK_SIZE 0x7ffff000UL
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+/*
+ * Bits of the SR register
+ *
+ * FD-bit:
+ * When set, the processor does not have the right to use the FPU, and
+ * an exception is raised when a floating-point operation is executed.
+ *
+ * IMASK-bit:
+ * Interrupt level mask
+ *
+ * STEP-bit:
+ * Single step bit
+ *
+ */
+#define SR_FD 0x00008000
+
+#if defined(CONFIG_SH64_SR_WATCH)
+#define SR_MMU 0x84000000
+#else
+#define SR_MMU 0x80000000
+#endif
+
+#define SR_IMASK 0x000000f0
+#define SR_SSTEP 0x08000000
+
+#ifndef __ASSEMBLY__
+
+/*
+ * FPU structure and data: requires 8-byte alignment, as we need to
+ * access it with fld.p and fst.p.
+ */
+
+struct sh_fpu_hard_struct {
+ unsigned long fp_regs[64];
+ unsigned int fpscr;
+ /* long status; * software status information */
+};
+
+#if 0
+/* Dummy fpu emulator */
+struct sh_fpu_soft_struct {
+ unsigned long long fp_regs[32];
+ unsigned int fpscr;
+ unsigned char lookahead;
+ unsigned long entry_pc;
+};
+#endif
+
+union sh_fpu_union {
+ struct sh_fpu_hard_struct hard;
+ /* 'hard' itself only produces 32 bit alignment, yet we need
+ to access it using 64 bit load/store as well. */
+ unsigned long long alignment_dummy;
+};
+
+struct thread_struct {
+ unsigned long sp;
+ unsigned long pc;
+ /* This stores the address of the pt_regs built during a context
+ switch, or of the register save area built for a kernel mode
+ exception. It is used for backtracing the stack of a sleeping task
+ or one that traps in kernel mode. */
+ struct pt_regs *kregs;
+ /* This stores the address of the pt_regs constructed on entry from
+ user mode. It is a fixed value over the lifetime of a process, or
+ NULL for a kernel thread. */
+ struct pt_regs *uregs;
+
+ unsigned long trap_no, error_code;
+ unsigned long address;
+ /* Hardware debugging registers may come here */
+
+ /* floating point info */
+ union sh_fpu_union fpu;
+};
+
+#define INIT_MMAP \
+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
+
+extern struct pt_regs fake_swapper_regs;
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + \
+ (long) &init_stack, \
+ .pc = 0, \
+ .kregs = &fake_swapper_regs, \
+ .uregs = NULL, \
+ .trap_no = 0, \
+ .error_code = 0, \
+ .address = 0, \
+ .fpu = { { { 0, } }, } \
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+#define SR_USER (SR_MMU | SR_FD)
+
+#define start_thread(regs, new_pc, new_sp) \
+ set_fs(USER_DS); \
+ regs->sr = SR_USER; /* User mode. */ \
+ regs->pc = new_pc - 4; /* Compensate syscall exit */ \
+ regs->pc |= 1; /* Set SHmedia ! */ \
+ regs->regs[18] = 0; \
+ regs->regs[15] = new_sp
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/*
+ * Bus types
+ */
+#define MCA_bus 0
+#define MCA_bus__is_a_macro /* for versions in ksyms.c */
+
+
+/* Copy and release all segment info associated with a VM */
+#define copy_segments(p, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
+#define prepare_to_copy(tsk) do { } while (0)
+/*
+ * FPU lazy state save handling.
+ */
+
+extern __inline__ void release_fpu(void)
+{
+ unsigned long long __dummy;
+
+ /* Set FD flag in SR */
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "or %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (SR_FD));
+}
+
+extern __inline__ void grab_fpu(void)
+{
+ unsigned long long __dummy;
+
+ /* Clear out FD flag in SR */
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (~SR_FD));
+}
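+
+/*
+ * These support lazy FPU context switching: the kernel can leave SR.FD
+ * set for a task, so its first floating-point instruction traps (see
+ * the SR_FD note above).  A plausible shape for that trap handler,
+ * assuming the usual lazy-switch scheme:
+ *
+ *	grab_fpu();
+ *	if (last_task_used_math)
+ *		fpsave(&last_task_used_math->thread.fpu.hard);
+ *	(restore current's FP state, or fpinit() it on first use)
+ *	last_task_used_math = current;
+ */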
+
+/* Round to nearest, no exceptions on inexact, overflow, underflow,
+ zero-divide, invalid. Configure option for whether to flush denorms to
+ zero, or except if a denorm is encountered. */
+#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
+#define FPSCR_INIT 0x00040000
+#else
+#define FPSCR_INIT 0x00000000
+#endif
+
+/* Save the current FP regs */
+void fpsave(struct sh_fpu_hard_struct *fpregs);
+
+/* Initialise the FP state of a task */
+void fpinit(struct sh_fpu_hard_struct *fpregs);
+
+extern struct task_struct *last_task_used_math;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+#define thread_saved_pc(tsk) (tsk->thread.pc)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.pc)
+#define KSTK_ESP(tsk) ((tsk)->thread.sp)
+
+#define cpu_relax() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SH64_PROCESSOR_H */
+
diff --git a/include/asm-sh64/ptrace.h b/include/asm-sh64/ptrace.h
new file mode 100644
index 000000000000..56f836e1ce78
--- /dev/null
+++ b/include/asm-sh64/ptrace.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_SH64_PTRACE_H
+#define __ASM_SH64_PTRACE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ptrace.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long long pc;
+ unsigned long long sr;
+ unsigned long long syscall_nr;
+ unsigned long long regs[63];
+ unsigned long long tregs[8];
+ unsigned long long pad[2];
+};
+
+#ifdef __KERNEL__
+#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
+#define instruction_pointer(regs) ((regs)->pc)
+extern void show_regs(struct pt_regs *);
+#endif
+
+#define PTRACE_O_TRACESYSGOOD 0x00000001
+
+#endif /* __ASM_SH64_PTRACE_H */
diff --git a/include/asm-sh64/registers.h b/include/asm-sh64/registers.h
new file mode 100644
index 000000000000..7eec666acf84
--- /dev/null
+++ b/include/asm-sh64/registers.h
@@ -0,0 +1,106 @@
+#ifndef __ASM_SH64_REGISTERS_H
+#define __ASM_SH64_REGISTERS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/registers.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 Richard Curnow
+ */
+
+#ifdef __ASSEMBLY__
+/* =====================================================================
+**
+** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
+** Assigns symbolic names to control & target registers.
+*/
+
+/*
+ * Define some useful aliases for control registers.
+ */
+#define SR cr0
+#define SSR cr1
+#define PSSR cr2
+ /* cr3 UNDEFINED */
+#define INTEVT cr4
+#define EXPEVT cr5
+#define PEXPEVT cr6
+#define TRA cr7
+#define SPC cr8
+#define PSPC cr9
+#define RESVEC cr10
+#define VBR cr11
+ /* cr12 UNDEFINED */
+#define TEA cr13
+ /* cr14-cr15 UNDEFINED */
+#define DCR cr16
+#define KCR0 cr17
+#define KCR1 cr18
+ /* cr19-cr31 UNDEFINED */
+ /* cr32-cr61 RESERVED */
+#define CTC cr62
+#define USR cr63
+
+/*
+ * ABI dependent registers (general purpose set)
+ */
+#define RET r2
+#define ARG1 r2
+#define ARG2 r3
+#define ARG3 r4
+#define ARG4 r5
+#define ARG5 r6
+#define ARG6 r7
+#define SP r15
+#define LINK r18
+#define ZERO r63
+
+/*
+ * Status register defines: used only by assembly sources (and
+ * syntax independent)
+ */
+#define SR_RESET_VAL 0x0000000050008000
+#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
+#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
+
+#if defined (CONFIG_SH64_SR_WATCH)
+#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
+#else
+#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
+#endif
+
+#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
+#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
+
+#else /* Not __ASSEMBLY__ syntax */
+
+/*
+** Stringify reg. name
+*/
+#define __str(x) #x
+
+/* Stringify control register names for use in inline assembly */
+#define __SR __str(SR)
+#define __SSR __str(SSR)
+#define __PSSR __str(PSSR)
+#define __INTEVT __str(INTEVT)
+#define __EXPEVT __str(EXPEVT)
+#define __PEXPEVT __str(PEXPEVT)
+#define __TRA __str(TRA)
+#define __SPC __str(SPC)
+#define __PSPC __str(PSPC)
+#define __RESVEC __str(RESVEC)
+#define __VBR __str(VBR)
+#define __TEA __str(TEA)
+#define __DCR __str(DCR)
+#define __KCR0 __str(KCR0)
+#define __KCR1 __str(KCR1)
+#define __CTC __str(CTC)
+#define __USR __str(USR)
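+
+/*
+ * These let C code splice register names into inline assembly as string
+ * literals, e.g.:
+ *
+ *	__asm__ __volatile__("getcon " __SR ", %0" : "=r" (sr));
+ *
+ * where __SR expands to "SR"; the aliases in Section 1 are visible
+ * only to assembly sources.
+ */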
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SH64_REGISTERS_H */
diff --git a/include/asm-sh64/resource.h b/include/asm-sh64/resource.h
new file mode 100644
index 000000000000..574b64488d5c
--- /dev/null
+++ b/include/asm-sh64/resource.h
@@ -0,0 +1,47 @@
+#ifndef __ASM_SH64_RESOURCE_H
+#define __ASM_SH64_RESOURCE_H
+
+/*
+ * Resource limits
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#define RLIMIT_AS 9 /* address space limit */
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
+
+#define RLIM_NLIMITS 11
+
+#ifdef __KERNEL__
+
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ */
+#define RLIM_INFINITY (~0UL)
+
+#define INIT_RLIMITS \
+{ \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { _STK_LIM, RLIM_INFINITY }, \
+ { 0, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { 0, 0 }, \
+ { INR_OPEN, INR_OPEN }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_RESOURCE_H */
diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h
new file mode 100644
index 000000000000..5d8fa32d2e9d
--- /dev/null
+++ b/include/asm-sh64/scatterlist.h
@@ -0,0 +1,23 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/scatterlist.h
+ *
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+#ifndef __ASM_SH64_SCATTERLIST_H
+#define __ASM_SH64_SCATTERLIST_H
+
+struct scatterlist {
+ struct page * page; /* Location for highmem page, if any */
+ unsigned int offset;/* for highmem, page offset */
+ dma_addr_t dma_address;
+ unsigned int length;
+};
+
+#define ISA_DMA_THRESHOLD (0xffffffff)
+
+#endif /* !__ASM_SH64_SCATTERLIST_H */
diff --git a/include/asm-sh64/sections.h b/include/asm-sh64/sections.h
new file mode 100644
index 000000000000..897f36bcdf85
--- /dev/null
+++ b/include/asm-sh64/sections.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_SH64_SECTIONS_H
+#define __ASM_SH64_SECTIONS_H
+
+#include <asm-sh/sections.h>
+
+#endif /* __ASM_SH64_SECTIONS_H */
+
diff --git a/include/asm-sh64/segment.h b/include/asm-sh64/segment.h
new file mode 100644
index 000000000000..92ac001fc483
--- /dev/null
+++ b/include/asm-sh64/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/* Only here because we have some old header files that expect it... */
+
+#endif /* _ASM_SEGMENT_H */
diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h
new file mode 100644
index 000000000000..fcfafe263e86
--- /dev/null
+++ b/include/asm-sh64/semaphore-helper.h
@@ -0,0 +1,101 @@
+#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
+#define __ASM_SH64_SEMAPHORE_HELPER_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/semaphore-helper.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+#include <asm/errno.h>
+
+/*
+ * SMP- and interrupt-safe semaphores helper functions.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999 Andrea Arcangeli
+ */
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * which we have. Let the rest of the losers suck eggs.
+ */
+static __inline__ void wake_one_more(struct semaphore * sem)
+{
+ atomic_inc((atomic_t *)&sem->sleepers);
+}
+
+static __inline__ int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ * 1 got the lock
+ * 0 go to sleep
+ * -EINTR interrupted
+ *
+ * We must undo the sem->count down_interruptible() decrement while we are
+ * protected by the spinlock, in order to make this atomic_inc() atomic with
+ * the atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
+ ret = 1;
+ } else if (signal_pending(tsk)) {
+ atomic_inc(&sem->count);
+ ret = -EINTR;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ * 1 failed to lock
+ * 0 got the lock
+ *
+ * We must undo the sem->count down_trylock() decrement while we are
+ * protected by the spinlock, in order to make this atomic_inc() atomic with
+ * the atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 1;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers <= 0)
+ atomic_inc(&sem->count);
+ else {
+ sem->sleepers--;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h
new file mode 100644
index 000000000000..3e97ead32d69
--- /dev/null
+++ b/include/asm-sh64/semaphore.h
@@ -0,0 +1,146 @@
+#ifndef __ASM_SH64_SEMAPHORE_H
+#define __ASM_SH64_SEMAPHORE_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/semaphore.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * SMP- and interrupt-safe semaphores.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * SuperH version by Niibe Yutaka
+ * (Currently no asm implementation but generic C code...)
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+struct semaphore {
+ atomic_t count;
+ int sleepers;
+ wait_queue_head_t wait;
+#ifdef WAITQUEUE_DEBUG
+ long __magic;
+#endif
+};
+
+#ifdef WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) \
+ , (int)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
+#define __SEMAPHORE_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void sema_init (struct semaphore *sem, int val)
+{
+/*
+ * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ *
+ * I'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
+ */
+ atomic_set(&sem->count, val);
+ sem->sleepers = 0;
+ init_waitqueue_head(&sem->wait);
+#ifdef WAITQUEUE_DEBUG
+ sem->__magic = (int)&sem->__magic;
+#endif
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+#if 0
+asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int __down_failed_interruptible(void /* params in registers */);
+asmlinkage int __down_failed_trylock(void /* params in registers */);
+asmlinkage void __up_wakeup(void /* special register calling convention */);
+#endif
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+extern spinlock_t semaphore_wake_lock;
+
+static inline void down(struct semaphore * sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_dec_return(&sem->count) < 0)
+ __down(sem);
+}
+
+static inline int down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_interruptible(sem);
+ return ret;
+}
+
+static inline int down_trylock(struct semaphore * sem)
+{
+ int ret = 0;
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_dec_return(&sem->count) < 0)
+ ret = __down_trylock(sem);
+ return ret;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ */
+static inline void up(struct semaphore * sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ if (atomic_inc_return(&sem->count) <= 0)
+ __up(sem);
+}
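+
+/*
+ * Example of the count discipline for a mutex (count initialised to 1):
+ * the first down() takes count 1 -> 0 and proceeds; a contending down()
+ * takes it 0 -> -1 and sleeps in __down(); up() then takes it -1 -> 0
+ * and, since the result is <= 0, calls __up() to wake the sleeper.  An
+ * uncontended up() takes 0 -> 1 and skips __up().
+ */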
+
+#endif /* __ASM_SH64_SEMAPHORE_H */
diff --git a/include/asm-sh64/sembuf.h b/include/asm-sh64/sembuf.h
new file mode 100644
index 000000000000..ec4d9f143577
--- /dev/null
+++ b/include/asm-sh64/sembuf.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_SH64_SEMBUF_H
+#define __ASM_SH64_SEMBUF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/sembuf.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/*
+ * The semid64_ds structure for the SH-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_SH64_SEMBUF_H */
diff --git a/include/asm-sh64/serial.h b/include/asm-sh64/serial.h
new file mode 100644
index 000000000000..8e39b4e90c76
--- /dev/null
+++ b/include/asm-sh64/serial.h
@@ -0,0 +1,33 @@
+/*
+ * include/asm-sh64/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ */
+
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#define RS_TABLE_SIZE 2
+
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+
+#define STD_SERIAL_PORT_DEFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
+ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS } /* ttyS1 */
+
+#define SERIAL_PORT_DFNS STD_SERIAL_PORT_DEFNS
+
+/* XXX: This should be moved into irq.h */
+#define irq_cannonicalize(x) (x)
+
+#endif /* _ASM_SERIAL_H */
diff --git a/include/asm-sh64/shmbuf.h b/include/asm-sh64/shmbuf.h
new file mode 100644
index 000000000000..022f3494dd64
--- /dev/null
+++ b/include/asm-sh64/shmbuf.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH64_SHMBUF_H
+#define __ASM_SH64_SHMBUF_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/shmbuf.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/*
+ * The shmid64_ds structure for the SH-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_SH64_SHMBUF_H */
diff --git a/include/asm-sh64/shmparam.h b/include/asm-sh64/shmparam.h
new file mode 100644
index 000000000000..d3a99a4dc0e3
--- /dev/null
+++ b/include/asm-sh64/shmparam.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SH64_SHMPARAM_H
+#define __ASM_SH64_SHMPARAM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/shmparam.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <asm/cache.h>
+
+/* attach addr a multiple of this */
+#define SHMLBA (cpu_data->dcache.sets * L1_CACHE_BYTES)
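+
+/*
+ * The idea is to make the attach granularity one cache "way", so that
+ * the same shared page attached at different addresses still indexes
+ * the same sets of the virtually-indexed dcache (avoiding aliases).
+ * E.g. a hypothetical 256-set, 32-byte-line dcache gives an 8KB SHMLBA.
+ */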
+
+#endif /* __ASM_SH64_SHMPARAM_H */
diff --git a/include/asm-sh64/sigcontext.h b/include/asm-sh64/sigcontext.h
new file mode 100644
index 000000000000..6293509d8cc1
--- /dev/null
+++ b/include/asm-sh64/sigcontext.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_SH64_SIGCONTEXT_H
+#define __ASM_SH64_SIGCONTEXT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/sigcontext.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+struct sigcontext {
+ unsigned long oldmask;
+
+ /* CPU registers */
+ unsigned long long sc_regs[63];
+ unsigned long long sc_tregs[8];
+ unsigned long long sc_pc;
+ unsigned long long sc_sr;
+
+ /* FPU registers */
+ unsigned long long sc_fpregs[32];
+ unsigned int sc_fpscr;
+ unsigned int sc_fpvalid;
+};
+
+#endif /* __ASM_SH64_SIGCONTEXT_H */
diff --git a/include/asm-sh64/siginfo.h b/include/asm-sh64/siginfo.h
new file mode 100644
index 000000000000..56ef1da534d7
--- /dev/null
+++ b/include/asm-sh64/siginfo.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_SIGINFO_H
+#define __ASM_SH64_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#endif /* __ASM_SH64_SIGINFO_H */
diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h
new file mode 100644
index 000000000000..77957e9b92d9
--- /dev/null
+++ b/include/asm-sh64/signal.h
@@ -0,0 +1,185 @@
+#ifndef __ASM_SH64_SIGNAL_H
+#define __ASM_SH64_SIGNAL_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/signal.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/processor.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ THREAD_SIZE
+
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#define SA_SHIRQ 0x04000000
+#endif
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+#ifdef __KERNEL__
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+#else
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+
+#define sigmask(sig) (1UL << ((sig) - 1))
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_SIGNAL_H */
diff --git a/include/asm-sh64/smp.h b/include/asm-sh64/smp.h
new file mode 100644
index 000000000000..4a4d0da39a84
--- /dev/null
+++ b/include/asm-sh64/smp.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_SH64_SMP_H
+#define __ASM_SH64_SMP_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/smp.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#endif /* __ASM_SH64_SMP_H */
diff --git a/include/asm-sh64/smplock.h b/include/asm-sh64/smplock.h
new file mode 100644
index 000000000000..ff244b89cb90
--- /dev/null
+++ b/include/asm-sh64/smplock.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_SH64_SMPLOCK_H
+#define __ASM_SH64_SMPLOCK_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/smplock.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <linux/config.h>
+
+#ifndef CONFIG_SMP
+
+#define lock_kernel() do { } while(0)
+#define unlock_kernel() do { } while(0)
+#define release_kernel_lock(task, cpu)		do { } while(0)
+#define reacquire_kernel_lock(task)		do { } while(0)
+
+#else
+
+#error "We do not support SMP on SH64 yet"
+/*
+ * Default SMP lock implementation
+ */
+
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
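+
+/*
+ * Illustrative sketch (not part of the original patch): lock_kernel()
+ * nests via ->lock_depth (which starts at -1), so a path that already
+ * holds the BKL can call into another BKL-taking function without
+ * deadlocking.
+ */
+#if 0
+	lock_kernel();		/* depth -1 -> 0: takes kernel_flag */
+	lock_kernel();		/* depth  0 -> 1: no-op, already held */
+	unlock_kernel();	/* depth  1 -> 0: still held */
+	unlock_kernel();	/* depth  0 -> -1: drops kernel_flag */
+#endif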
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_SH64_SMPLOCK_H */
diff --git a/include/asm-sh64/socket.h b/include/asm-sh64/socket.h
new file mode 100644
index 000000000000..1853f7246ab0
--- /dev/null
+++ b/include/asm-sh64/socket.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_SOCKET_H
+#define __ASM_SH64_SOCKET_H
+
+#include <asm-sh/socket.h>
+
+#endif /* __ASM_SH64_SOCKET_H */
diff --git a/include/asm-sh64/sockios.h b/include/asm-sh64/sockios.h
new file mode 100644
index 000000000000..1ae23ae82977
--- /dev/null
+++ b/include/asm-sh64/sockios.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_SH64_SOCKIOS_H
+#define __ASM_SH64_SOCKIOS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/sockios.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+/* Socket-level I/O control calls. */
+#define FIOGETOWN _IOR('f', 123, int)
+#define FIOSETOWN _IOW('f', 124, int)
+
+#define SIOCATMARK _IOR('s', 7, int)
+#define SIOCSPGRP _IOW('s', 8, pid_t)
+#define SIOCGPGRP _IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */
+#endif /* __ASM_SH64_SOCKIOS_H */
diff --git a/include/asm-sh64/softirq.h b/include/asm-sh64/softirq.h
new file mode 100644
index 000000000000..1c4229e1b9e5
--- /dev/null
+++ b/include/asm-sh64/softirq.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_SH64_SOFTIRQ_H
+#define __ASM_SH64_SOFTIRQ_H
+
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+
+#define local_bh_disable() \
+do { \
+ local_bh_count(smp_processor_id())++; \
+ barrier(); \
+} while (0)
+
+#define __local_bh_enable() \
+do { \
+ barrier(); \
+ local_bh_count(smp_processor_id())--; \
+} while (0)
+
+#define local_bh_enable() \
+do { \
+ barrier(); \
+ if (!--local_bh_count(smp_processor_id()) \
+ && softirq_pending(smp_processor_id())) { \
+ do_softirq(); \
+ } \
+} while (0)
+
+#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
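+
+/*
+ * Illustrative sketch (not part of the original patch): process-level
+ * code that shares data with a softirq disables bottom halves around
+ * the access.  'my_list' and 'entry' are hypothetical.
+ */
+#if 0
+	local_bh_disable();	/* softirqs stay off on this CPU */
+	list_add(&entry->node, &my_list);
+	local_bh_enable();	/* runs any softirqs that became pending */
+#endif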
+
+#endif /* __ASM_SH64_SOFTIRQ_H */
diff --git a/include/asm-sh64/spinlock.h b/include/asm-sh64/spinlock.h
new file mode 100644
index 000000000000..296b0c9b24a2
--- /dev/null
+++ b/include/asm-sh64/spinlock.h
@@ -0,0 +1,17 @@
+#ifndef __ASM_SH64_SPINLOCK_H
+#define __ASM_SH64_SPINLOCK_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/spinlock.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#error "No SMP on SH64"
+
+#endif /* __ASM_SH64_SPINLOCK_H */
diff --git a/include/asm-sh64/stat.h b/include/asm-sh64/stat.h
new file mode 100644
index 000000000000..86f551b1987e
--- /dev/null
+++ b/include/asm-sh64/stat.h
@@ -0,0 +1,88 @@
+#ifndef __ASM_SH64_STAT_H
+#define __ASM_SH64_STAT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/stat.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+struct stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+ unsigned short st_dev;
+ unsigned char __pad0[10];
+
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned short st_rdev;
+ unsigned char __pad3[10];
+
+ long long st_size;
+ unsigned long st_blksize;
+
+	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
+ unsigned long __pad4; /* future possible st_blocks high bits */
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
+
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* __ASM_SH64_STAT_H */
diff --git a/include/asm-sh64/statfs.h b/include/asm-sh64/statfs.h
new file mode 100644
index 000000000000..083fd79b2417
--- /dev/null
+++ b/include/asm-sh64/statfs.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_STATFS_H
+#define __ASM_SH64_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif /* __ASM_SH64_STATFS_H */
diff --git a/include/asm-sh64/string.h b/include/asm-sh64/string.h
new file mode 100644
index 000000000000..8a7357366ce8
--- /dev/null
+++ b/include/asm-sh64/string.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SH64_STRING_H
+#define __ASM_SH64_STRING_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/string.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * Mostly empty on purpose: only memcpy() is declared here. The full
+ * set of SH64 asm string routines is out of the current project scope.
+ *
+ */
+
+#define __HAVE_ARCH_MEMCPY
+
+extern void *memcpy(void *dest, const void *src, size_t count);
+
+#endif
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
new file mode 100644
index 000000000000..8b3a6f9e62fb
--- /dev/null
+++ b/include/asm-sh64/system.h
@@ -0,0 +1,194 @@
+#ifndef __ASM_SH64_SYSTEM_H
+#define __ASM_SH64_SYSTEM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/system.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/registers.h>
+#include <asm/processor.h>
+
+/*
+ * switch_to() should switch tasks to task nr n, first
+ */
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+extern struct task_struct *sh64_switch_to(struct task_struct *prev,
+ struct thread_struct *prev_thread,
+ struct task_struct *next,
+ struct thread_struct *next_thread);
+
+#define switch_to(prev,next,last) \
+ do {\
+ if (last_task_used_math != next) {\
+ struct pt_regs *regs = next->thread.uregs;\
+ if (regs) regs->sr |= SR_FD;\
+ }\
+ last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\
+ } while(0)
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr), 1))
+
+extern void __xchg_called_with_bad_pointer(void);
+
+#define mb() __asm__ __volatile__ ("synco": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("synco": : :"memory")
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#endif /* CONFIG_SMP */
+
+#define set_rmb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) set_rmb(var, value)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* Interrupt Control */
+#ifndef HARD_CLI
+#define SR_MASK_L 0x000000f0L
+#define SR_MASK_LL 0x00000000000000f0LL
+#else
+#define SR_MASK_L 0x10000000L
+#define SR_MASK_LL 0x0000000010000000LL
+#endif
+
+static __inline__ void local_irq_enable(void)
+{
+ /* cli/sti based on SR.BL */
+ unsigned long long __dummy0, __dummy1=~SR_MASK_LL;
+
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+}
+
+static __inline__ void local_irq_disable(void)
+{
+ /* cli/sti based on SR.BL */
+ unsigned long long __dummy0, __dummy1=SR_MASK_LL;
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "or %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+}
+
+#define local_save_flags(x) \
+(__extension__ ({ unsigned long long __dummy=SR_MASK_LL; \
+ __asm__ __volatile__( \
+ "getcon " __SR ", %0\n\t" \
+ "and %0, %1, %0" \
+ : "=&r" (x) \
+ : "r" (__dummy));}))
+
+#define local_irq_save(x) \
+(__extension__ ({ unsigned long long __d2=SR_MASK_LL, __d1; \
+ __asm__ __volatile__( \
+ "getcon " __SR ", %1\n\t" \
+ "or %1, r63, %0\n\t" \
+ "or %1, %2, %1\n\t" \
+ "putcon %1, " __SR "\n\t" \
+ "and %0, %2, %0" \
+ : "=&r" (x), "=&r" (__d1) \
+			: "r" (__d2));}))
+
+#define local_irq_restore(x) do { \
+ if ( ((x) & SR_MASK_L) == 0 ) /* dropping to 0 ? */ \
+ local_irq_enable(); /* yes...re-enable */ \
+} while (0)
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ (flags != 0); \
+})
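+
+/*
+ * Illustrative sketch (not part of the original patch): the usual
+ * save/restore pairing around a short critical section that touches
+ * data also used by an interrupt handler.
+ */
+#if 0
+	unsigned long flags;
+
+	local_irq_save(flags);		/* mask interrupts, remember state */
+	/* ... manipulate the shared data ... */
+	local_irq_restore(flags);	/* re-enable only if they were on */
+#endif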
+
+extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ local_irq_restore(flags);
+ return retval;
+}
+
+extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val & 0xff;
+ local_irq_restore(flags);
+ return retval;
+}
+
+static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 4:
+ return xchg_u32(ptr, x);
+ break;
+ case 1:
+ return xchg_u8(ptr, x);
+ break;
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
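+
+/*
+ * Illustrative sketch (not part of the original patch): xchg() as an
+ * atomic test-and-set, the classic way to build a simple trylock.
+ * 'busy' is a hypothetical flag word.
+ */
+#if 0
+static volatile int busy;
+
+static int my_trylock(void)
+{
+	/* xchg() returns the old value: 0 means we got the lock. */
+	return xchg(&busy, 1) == 0;
+}
+#endif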
+
+/* XXX
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+#ifdef CONFIG_SH_ALPHANUMERIC
+/* This is only used for debugging. */
+extern void print_seg(char *file,int line);
+#define PLS() print_seg(__FILE__,__LINE__)
+#else /* CONFIG_SH_ALPHANUMERIC */
+#define PLS()
+#endif /* CONFIG_SH_ALPHANUMERIC */
+
+#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)
+
+#endif /* __ASM_SH64_SYSTEM_H */
diff --git a/include/asm-sh64/termbits.h b/include/asm-sh64/termbits.h
new file mode 100644
index 000000000000..86bde5ec1414
--- /dev/null
+++ b/include/asm-sh64/termbits.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_TERMBITS_H
+#define __ASM_SH64_TERMBITS_H
+
+#include <asm-sh/termbits.h>
+
+#endif /* __ASM_SH64_TERMBITS_H */
diff --git a/include/asm-sh64/termios.h b/include/asm-sh64/termios.h
new file mode 100644
index 000000000000..4a9c7fb411bc
--- /dev/null
+++ b/include/asm-sh64/termios.h
@@ -0,0 +1,117 @@
+#ifndef __ASM_SH64_TERMIOS_H
+#define __ASM_SH64_TERMIOS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/termios.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+#define N_X25 6 /* X.25 async */
+#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964 9 /* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC 13 /* synchronous HDLC */
+#define N_SYNC_PPP 14
+#define N_HCI 15 /* Bluetooth HCI UART */
+
+#ifdef __KERNEL__
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
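+
+/*
+ * Illustrative sketch (not part of the original patch): a tty ioctl
+ * path converting a legacy TCSETA argument with the macro above.
+ * 'tty', 'cmd' and 'arg' are schematic.
+ */
+#if 0
+	if (cmd == TCSETA) {
+		if (user_termio_to_kernel_termios(tty->termios,
+						  (struct termio *) arg))
+			return -EFAULT;
+		/* ... apply the new settings to the hardware ... */
+	}
+#endif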
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_TERMIOS_H */
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
new file mode 100644
index 000000000000..70c27a409124
--- /dev/null
+++ b/include/asm-sh64/thread_info.h
@@ -0,0 +1,82 @@
+#ifndef __ASM_SH64_THREAD_INFO_H
+#define __ASM_SH64_THREAD_INFO_H
+
+/*
+ * SuperH 5 version
+ * Copyright (C) 2003 Paul Mundt
+ */
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/registers.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 flags; /* low level flags */
+ /* Put the 4 32-bit fields together to make asm offsetting easier. */
+ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
+ __u16 cpu;
+
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+
+ __u8 supervisor_stack[0];
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+
+ __asm__ __volatile__ ("getcon " __KCR0 ", %0\n\t" : "=r" (ti));
+
+ return ti;
+}
+
+/* thread information allocation */
+#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,2))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 2)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
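+
+/*
+ * Illustrative sketch (not part of the original patch): this is how
+ * 'current' is conventionally derived from the thread_info that KCR0
+ * points at (cf. asm/current.h).
+ */
+#if 0
+static inline struct task_struct *get_current(void)
+{
+	return current_thread_info()->task;
+}
+#define current get_current()
+#endif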
+
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x4000000
+
+/* thread information flags */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+
+#define THREAD_SIZE 16384
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_THREAD_INFO_H */
diff --git a/include/asm-sh64/timex.h b/include/asm-sh64/timex.h
new file mode 100644
index 000000000000..e07fd9a7cbd5
--- /dev/null
+++ b/include/asm-sh64/timex.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_SH64_TIMEX_H
+#define __ASM_SH64_TIMEX_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/timex.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * sh-5 architecture timex specifications
+ *
+ */
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+ (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+ << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+typedef unsigned long cycles_t;
+
+extern cycles_t cacheflush_time;
+
+static __inline__ cycles_t get_cycles (void)
+{
+ return 0;
+}
+
+#define vxtime_lock() do {} while (0)
+#define vxtime_unlock() do {} while (0)
+
+#endif /* __ASM_SH64_TIMEX_H */
diff --git a/include/asm-sh64/tlb.h b/include/asm-sh64/tlb.h
new file mode 100644
index 000000000000..4979408bd88c
--- /dev/null
+++ b/include/asm-sh64/tlb.h
@@ -0,0 +1,92 @@
+/*
+ * include/asm-sh64/tlb.h
+ *
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#ifndef __ASM_SH64_TLB_H
+#define __ASM_SH64_TLB_H
+
+/*
+ * Note! These are mostly unused, we just need the xTLB_LAST_VAR_UNRESTRICTED
+ * for head.S! Once this limitation is gone, we can clean the rest of this up.
+ */
+
+/* ITLB defines */
+#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
+#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
+
+/* DTLB defines */
+#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
+#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
+
+#ifndef __ASSEMBLY__
+
+/**
+ * for_each_dtlb_entry
+ *
+ * @tlb: TLB entry
+ *
+ * Iterate over free (non-wired) DTLB entries
+ */
+#define for_each_dtlb_entry(tlb) \
+ for (tlb = cpu_data->dtlb.first; \
+ tlb <= cpu_data->dtlb.last; \
+ tlb += cpu_data->dtlb.step)
+
+/**
+ * for_each_itlb_entry
+ *
+ * @tlb: TLB entry
+ *
+ * Iterate over free (non-wired) ITLB entries
+ */
+#define for_each_itlb_entry(tlb) \
+ for (tlb = cpu_data->itlb.first; \
+ tlb <= cpu_data->itlb.last; \
+ tlb += cpu_data->itlb.step)
+
+/**
+ * __flush_tlb_slot
+ *
+ * @slot: Address of TLB slot.
+ *
+ * Flushes TLB slot @slot.
+ */
+static inline void __flush_tlb_slot(unsigned long long slot)
+{
+ __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
+}
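+
+/*
+ * Illustrative sketch (not part of the original patch): walking every
+ * non-wired DTLB entry and invalidating it with __flush_tlb_slot().
+ */
+#if 0
+static void flush_all_dtlb_slots(void)
+{
+	unsigned long long tlb;
+
+	for_each_dtlb_entry(tlb)
+		__flush_tlb_slot(tlb);
+}
+#endif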
+
+/* arch/sh64/mm/tlb.c */
+extern int sh64_tlb_init(void);
+extern unsigned long long sh64_next_free_dtlb_entry(void);
+extern unsigned long long sh64_get_wired_dtlb_entry(void);
+extern int sh64_put_wired_dtlb_entry(unsigned long long entry);
+
+extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr);
+extern void sh64_teardown_tlb_slot(unsigned long long config_addr);
+
+#define tlb_start_vma(tlb, vma) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end)
+
+#define tlb_end_vma(tlb, vma) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+/*
+ * Flush whole TLBs for MM
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_SH64_TLB_H */
+
diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h
new file mode 100644
index 000000000000..15c0719eecc3
--- /dev/null
+++ b/include/asm-sh64/tlbflush.h
@@ -0,0 +1,31 @@
+#ifndef __ASM_SH64_TLBFLUSH_H
+#define __ASM_SH64_TLBFLUSH_H
+
+#include <asm/pgalloc.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ *
+ */
+
+extern void flush_tlb(void);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
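+
+/*
+ * Illustrative sketch (not part of the original patch): after changing
+ * a live user PTE, the stale translation for that page must be shot
+ * down.  'ptep', 'pte', 'vma' and 'address' are schematic.
+ */
+#if 0
+	set_pte(ptep, pte_wrprotect(pte));
+	flush_tlb_page(vma, address);
+#endif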
+
+#endif /* __ASM_SH64_TLBFLUSH_H */
+
diff --git a/include/asm-sh64/topology.h b/include/asm-sh64/topology.h
new file mode 100644
index 000000000000..34211787345f
--- /dev/null
+++ b/include/asm-sh64/topology.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH64_TOPOLOGY_H
+#define __ASM_SH64_TOPOLOGY_H
+
+#include <asm-generic/topology.h>
+
+#endif /* __ASM_SH64_TOPOLOGY_H */
diff --git a/include/asm-sh64/types.h b/include/asm-sh64/types.h
new file mode 100644
index 000000000000..41d4d2f82aa9
--- /dev/null
+++ b/include/asm-sh64/types.h
@@ -0,0 +1,76 @@
+#ifndef __ASM_SH64_TYPES_H
+#define __ASM_SH64_TYPES_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/types.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+typedef __signed__ char s8;
+typedef unsigned char u8;
+
+typedef __signed__ short s16;
+typedef unsigned short u16;
+
+typedef __signed__ int s32;
+typedef unsigned int u32;
+
+typedef __signed__ long long s64;
+typedef unsigned long long u64;
+
+/* DMA addresses come in generic and 64-bit flavours. */
+
+#ifdef CONFIG_HIGHMEM64G
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+typedef u64 dma64_addr_t;
+
+typedef unsigned int kmem_bufctl_t;
+
+#endif /* __ASSEMBLY__ */
+
+#define BITS_PER_LONG 32
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH64_TYPES_H */
diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh64/uaccess.h
new file mode 100644
index 000000000000..0207bae934d9
--- /dev/null
+++ b/include/asm-sh64/uaccess.h
@@ -0,0 +1,317 @@
+#ifndef __ASM_SH64_UACCESS_H
+#define __ASM_SH64_UACCESS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/uaccess.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * User space memory access functions
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Based on:
+ * MIPS implementation version 1.15 by
+ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
+ * and i386 version.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS MAKE_MM_SEG(0x80000000)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit=(x))
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
+
+/*
+ * Uhhuh, this needs 33-bit arithmetic. We have a carry..
+ *
+ * sum := addr + size; carry? --> flag = true;
+ * if (sum >= addr_limit) flag = true;
+ */
+#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
+
+extern inline int verify_area(int type, const void __user * addr, unsigned long size)
+{
+ return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
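+
+/*
+ * Illustrative sketch (not part of the original patch): validating a
+ * user buffer once before operating on it.  'ubuf' and 'len' are
+ * schematic.
+ */
+#if 0
+	if (!access_ok(VERIFY_WRITE, ubuf, len))
+		return -EFAULT;
+	/* ... __copy_to_user()/__put_user() may now be used on ubuf ... */
+#endif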
+
+/*
+ * Uh, these should become the main single-value transfer routines ...
+ * They automatically use the right size if we just have the right
+ * pointer type ...
+ *
+ * As MIPS uses the same address space for kernel and user data, we
+ * can just do these as direct assignments.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
+#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the user has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
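+
+/*
+ * Illustrative sketch (not part of the original patch): the unchecked
+ * variants amortise a single access_ok() over several transfers.
+ * 'uptr' is a schematic user pointer to a pair of ints.
+ */
+#if 0
+	int a, b;
+
+	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
+		return -EFAULT;
+	if (__get_user(a, uptr) || __get_user(b, uptr + 1))
+		return -EFAULT;
+#endif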
+
+/*
+ * The "xxx_ret" versions return constant specified in third argument, if
+ * something bad happens. These macros can be optimized for the
+ * case of just returning from the function xxx_ret is used.
+ */
+
+#define put_user_ret(x,ptr,ret) ({ \
+if (put_user(x,ptr)) return ret; })
+
+#define get_user_ret(x,ptr,ret) ({ \
+if (get_user(x,ptr)) return ret; })
+
+#define __put_user_ret(x,ptr,ret) ({ \
+if (__put_user(x,ptr)) return ret; })
+
+#define __get_user_ret(x,ptr,ret) ({ \
+if (__get_user(x,ptr)) return ret; })
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+#define __get_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ retval = __get_user_asm_b(x, ptr); \
+ break; \
+ case 2: \
+ retval = __get_user_asm_w(x, ptr); \
+ break; \
+ case 4: \
+ retval = __get_user_asm_l(x, ptr); \
+ break; \
+ case 8: \
+ retval = __get_user_asm_q(x, ptr); \
+ break; \
+ default: \
+ __get_user_unknown(); \
+ break; \
+ } \
+} while (0)
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ long __gu_addr = (long)(ptr); \
+ long __gu_err; \
+ __typeof(*(ptr)) __gu_val; \
+ __asm__ ("":"=r" (__gu_val)); \
+ __asm__ ("":"=r" (__gu_err)); \
+ __get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x,ptr,size) \
+({ \
+ long __gu_addr = (long)(ptr); \
+ long __gu_err = -EFAULT; \
+ __typeof(*(ptr)) __gu_val; \
+ __asm__ ("":"=r" (__gu_val)); \
+ __asm__ ("":"=r" (__gu_err)); \
+ if (__access_ok(__gu_addr, (size))) \
+ __get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_asm_b(void *, long);
+extern long __get_user_asm_w(void *, long);
+extern long __get_user_asm_l(void *, long);
+extern long __get_user_asm_q(void *, long);
+extern void __get_user_unknown(void);
+
+#define __put_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ retval = __put_user_asm_b(x, ptr); \
+ break; \
+ case 2: \
+ retval = __put_user_asm_w(x, ptr); \
+ break; \
+ case 4: \
+ retval = __put_user_asm_l(x, ptr); \
+ break; \
+ case 8: \
+ retval = __put_user_asm_q(x, ptr); \
+ break; \
+ default: \
+ __put_user_unknown(); \
+ } \
+} while (0)
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x,ptr,size) \
+({ \
+ long __pu_err = -EFAULT; \
+ long __pu_addr = (long)(ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ \
+ if (__access_ok(__pu_addr, (size))) \
+ __put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
+ __pu_err; \
+})
+
+extern long __put_user_asm_b(void *, long);
+extern long __put_user_asm_w(void *, long);
+extern long __put_user_asm_l(void *, long);
+extern long __put_user_asm_q(void *, long);
+extern void __put_user_unknown(void);
+
+
+/* Generic arbitrary sized copy. */
+/* Return the number of bytes NOT copied */
+/* XXX: should be such that: 4byte and the rest. */
+extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);
+
+#define copy_to_user(to,from,n) ({ \
+void *__copy_to = (void *) (to); \
+__kernel_size_t __copy_size = (__kernel_size_t) (n); \
+__kernel_size_t __copy_res; \
+if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
+__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
+} else __copy_res = __copy_size; \
+__copy_res; })
+
+#define copy_to_user_ret(to,from,n,retval) ({ \
+if (copy_to_user(to,from,n)) \
+ return retval; \
+})
+
+#define __copy_to_user(to,from,n) \
+ __copy_user((void *)(to), \
+ (void *)(from), n)
+
+#define __copy_to_user_ret(to,from,n,retval) ({ \
+if (__copy_to_user(to,from,n)) \
+ return retval; \
+})
+
+#define copy_from_user(to,from,n) ({ \
+void *__copy_to = (void *) (to); \
+void *__copy_from = (void *) (from); \
+__kernel_size_t __copy_size = (__kernel_size_t) (n); \
+__kernel_size_t __copy_res; \
+if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
+__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
+} else __copy_res = __copy_size; \
+__copy_res; })
+
+#define copy_from_user_ret(to,from,n,retval) ({ \
+if (copy_from_user(to,from,n)) \
+ return retval; \
+})
+
+#define __copy_from_user(to,from,n) \
+ __copy_user((void *)(to), \
+ (void *)(from), n)
+
+#define __copy_from_user_ret(to,from,n,retval) ({ \
+if (__copy_from_user(to,from,n)) \
+ return retval; \
+})
+
+/* XXX: Not sure it works well..
+ should be such that: 4byte clear and the rest. */
+extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
+
+#define clear_user(addr,n) ({ \
+void * __cl_addr = (addr); \
+unsigned long __cl_size = (n); \
+if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
+__cl_size = __clear_user(__cl_addr, __cl_size); \
+__cl_size; })
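+
+/*
+ * Illustrative sketch (not part of the original patch): both helpers
+ * return the number of bytes NOT transferred, so any non-zero result
+ * maps to -EFAULT.  'ubuf' and 'kbuf' are schematic.
+ */
+#if 0
+	if (copy_to_user(ubuf, kbuf, sizeof(*kbuf)))
+		return -EFAULT;
+	if (clear_user(ubuf, PAGE_SIZE))
+		return -EFAULT;
+#endif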
+
+extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);
+
+#define strncpy_from_user(dest,src,count) ({ \
+unsigned long __sfu_src = (unsigned long) (src); \
+int __sfu_count = (int) (count); \
+long __sfu_res = -EFAULT; \
+if(__access_ok(__sfu_src, __sfu_count)) { \
+__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
+} __sfu_res; })
+
+#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char *__s, long __n);
+
+extern __inline__ long strnlen_user(const char *s, long n)
+{
+ if (!__addr_ok(s))
+ return 0;
+ else
+ return __strnlen_user(s, n);
+}
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+#define ARCH_HAS_SEARCH_EXTABLE
+
+/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
+ kmalloc allocations to be 8-byte aligned. Without this, the alignment
+   becomes BYTES_PER_WORD, i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
+ sh64 at the moment). */
+#define ARCH_KMALLOC_MINALIGN 8
+
+/* Returns 0 if exception not found and fixup.unit otherwise. */
+extern unsigned long search_exception_table(unsigned long addr);
+extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
+
+#endif /* __ASM_SH64_UACCESS_H */
diff --git a/include/asm-sh64/ucontext.h b/include/asm-sh64/ucontext.h
new file mode 100644
index 000000000000..cf77a08551ca
--- /dev/null
+++ b/include/asm-sh64/ucontext.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_SH64_UCONTEXT_H
+#define __ASM_SH64_UCONTEXT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/ucontext.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* __ASM_SH64_UCONTEXT_H */
diff --git a/include/asm-sh64/unaligned.h b/include/asm-sh64/unaligned.h
new file mode 100644
index 000000000000..ad22487086c4
--- /dev/null
+++ b/include/asm-sh64/unaligned.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_SH64_UNALIGNED_H
+#define __ASM_SH64_UNALIGNED_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/unaligned.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <linux/string.h>
+
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+#define get_unaligned(ptr) \
+ ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
+
+#define put_unaligned(val, ptr) \
+ ({ __typeof__(*(ptr)) __tmp = (val); \
+ memmove((ptr), &__tmp, sizeof(*(ptr))); \
+ (void)0; })
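+
+/*
+ * Illustrative sketch (not part of the original patch): pulling a
+ * 32-bit field out of a byte-oriented buffer at an arbitrary offset,
+ * without risking an unaligned load fault.
+ */
+#if 0
+static __u32 read_len_field(const unsigned char *buf, int off)
+{
+	return get_unaligned((__u32 *)(buf + off));
+}
+#endif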
+
+#endif /* __ASM_SH64_UNALIGNED_H */
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
new file mode 100644
index 000000000000..c7d9a52983e4
--- /dev/null
+++ b/include/asm-sh64/unistd.h
@@ -0,0 +1,555 @@
+#ifndef __ASM_SH64_UNISTD_H
+#define __ASM_SH64_UNISTD_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/unistd.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2004 Sean McGoogan
+ *
+ * This file contains the system call numbers.
+ *
+ */
+
+#define __NR_setup 0 /* used only by init, to get system going */
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102 /* old implementation of socket systemcall */
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86old 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_vm86 166
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread 180
+#define __NR_pwrite 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_streams1 188 /* some people actually want it */
+#define __NR_streams2 189 /* some people actually want it */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+
+/* Non-multiplexed socket family */
+#define __NR_socket 220
+#define __NR_bind 221
+#define __NR_connect 222
+#define __NR_listen 223
+#define __NR_accept 224
+#define __NR_getsockname 225
+#define __NR_getpeername 226
+#define __NR_socketpair 227
+#define __NR_send 228
+#define __NR_sendto 229
+#define __NR_recv 230
+#define __NR_recvfrom 231
+#define __NR_shutdown 232
+#define __NR_setsockopt 233
+#define __NR_getsockopt 234
+#define __NR_sendmsg 235
+#define __NR_recvmsg 236
+
+/* Non-multiplexed IPC family */
+#define __NR_semop 237
+#define __NR_semget 238
+#define __NR_semctl 239
+#define __NR_msgsnd 240
+#define __NR_msgrcv 241
+#define __NR_msgget 242
+#define __NR_msgctl 243
+#if 0
+#define __NR_shmatcall 244
+#endif
+#define __NR_shmdt 245
+#define __NR_shmget 246
+#define __NR_shmctl 247
+
+#define __NR_getdents64 248
+#define __NR_fcntl64 249
+/* 250 and 251 are unused */
+#define __NR_gettid 252
+#define __NR_readahead 253
+#define __NR_setxattr 254
+#define __NR_lsetxattr 255
+#define __NR_fsetxattr 256
+#define __NR_getxattr 257
+#define __NR_lgetxattr 258
+#define __NR_fgetxattr		259
+#define __NR_listxattr 260
+#define __NR_llistxattr 261
+#define __NR_flistxattr 262
+#define __NR_removexattr 263
+#define __NR_lremovexattr 264
+#define __NR_fremovexattr 265
+#define __NR_tkill 266
+#define __NR_sendfile64 267
+#define __NR_futex 268
+#define __NR_sched_setaffinity 269
+#define __NR_sched_getaffinity 270
+#define __NR_set_thread_area 271
+#define __NR_get_thread_area 272
+#define __NR_io_setup 273
+#define __NR_io_destroy 274
+#define __NR_io_getevents 275
+#define __NR_io_submit 276
+#define __NR_io_cancel 277
+#define __NR_fadvise64 278
+#define __NR_exit_group 280
+
+#define __NR_lookup_dcookie 281
+#define __NR_epoll_create 282
+#define __NR_epoll_ctl 283
+#define __NR_epoll_wait 284
+#define __NR_remap_file_pages 285
+#define __NR_set_tid_address 286
+#define __NR_timer_create 287
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_statfs64 296
+#define __NR_fstatfs64 297
+#define __NR_tgkill 298
+#define __NR_utimes 299
+#define __NR_fadvise64_64 300
+#define __NR_vserver 301
+#define __NR_mbind 302
+#define __NR_get_mempolicy 303
+#define __NR_set_mempolicy 304
+#define __NR_mq_open 305
+#define __NR_mq_unlink (__NR_mq_open+1)
+#define __NR_mq_timedsend (__NR_mq_open+2)
+#define __NR_mq_timedreceive (__NR_mq_open+3)
+#define __NR_mq_notify (__NR_mq_open+4)
+#define __NR_mq_getsetattr (__NR_mq_open+5)
+
+#define NR_syscalls 311
+
+/* user-visible error numbers are in the range -1 to -125: see <asm-sh64/errno.h> */
+
+#define __syscall_return(type, res) \
+do { \
+ /* Note: when returning from kernel the return value is in r9 \
+ ** This prevents conflicts between return value and arg1 \
+ ** when dispatching signal handler, in other words makes \
+ ** life easier in the system call epilogue (see entry.S) \
+ */ \
+ register unsigned long __sr2 __asm__ ("r2") = res; \
+ if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+ errno = -(res); \
+ __sr2 = -1; \
+ } \
+ return (type) (__sr2); \
+} while (0)
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "()" \
+ : "=r" (__sc0) \
+ : "r" (__sc0) ); \
+__syscall_return(type,__sc0); \
+}
+
+ /*
+ * The apparent spurious "dummy" assembler comment is *needed*,
+ * as without it, the compiler treats the arg<n> variables
+ * as no longer live just before the asm. The compiler can
+ * then optimize the storage into any registers it wishes.
+ * The additional dummy statement forces the compiler to put
+ * the arguments into the correct registers before the TRAPA.
+ */
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2)" \
+ : "=r" (__sc0) \
+ : "r" (__sc0), "r" (__sc2)); \
+__asm__ __volatile__ ("!dummy %0 %1" \
+ : \
+ : "r" (__sc0), "r" (__sc2)); \
+__syscall_return(type,__sc0); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3)" \
+ : "=r" (__sc0) \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
+__asm__ __volatile__ ("!dummy %0 %1 %2" \
+ : \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
+__syscall_return(type,__sc0); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4)" \
+ : "=r" (__sc0) \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
+__asm__ __volatile__ ("!dummy %0 %1 %2 %3" \
+ : \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
+__syscall_return(type,__sc0); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
+register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5)" \
+ : "=r" (__sc0) \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
+__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4" \
+ : \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
+__syscall_return(type,__sc0); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
+register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
+register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6)" \
+ : "=r" (__sc0) \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
+ "r" (__sc6)); \
+__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5" \
+ : \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
+ "r" (__sc6)); \
+__syscall_return(type,__sc0); \
+}
+
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
+register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
+register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
+register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
+register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
+register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
+register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6; \
+__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)" \
+ : "=r" (__sc0) \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
+ "r" (__sc6), "r" (__sc7)); \
+__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5 %6" \
+ : \
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
+ "r" (__sc6), "r" (__sc7)); \
+__syscall_return(type,__sc0); \
+}
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#endif
+
+#ifdef __KERNEL_SYSCALLS__
+
+/* Copy from sh */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, but for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,pause)
+static inline _syscall1(int,setup,int,magic)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+static inline _syscall1(int,delete_module,const char *,name)
+
+static inline pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+#endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifndef cond_syscall
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+#endif
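Usage is one line per optional syscall; for example, when NFS daemon support is compiled out, the weak alias makes that slot fall back to sys_ni_syscall, which returns -ENOSYS (sketch):

    /* Sketch: in the file providing the syscall table stubs. */
    cond_syscall(sys_nfsservctl)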
+
+#endif /* __ASM_SH64_UNISTD_H */
diff --git a/include/asm-sh64/user.h b/include/asm-sh64/user.h
new file mode 100644
index 000000000000..8f32f39a8ca9
--- /dev/null
+++ b/include/asm-sh64/user.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH64_USER_H
+#define __ASM_SH64_USER_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/user.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd). The file contents are as follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused; we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integral number of pages.
+ */
+
+struct user_fpu_struct {
+ unsigned long long fp_regs[32];
+ unsigned int fpscr;
+};
+
+struct user {
+ struct pt_regs regs; /* entire machine state */
+ struct user_fpu_struct fpu; /* Math Co-processor registers */
+ int u_fpvalid; /* True if the math co-processor is being used */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ struct regs * u_ar0; /* help gdb find registers */
+ struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* __ASM_SH64_USER_H */
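Given the layout described above, the size of such a dump follows directly from the upage contents; a hedged sketch with a hypothetical helper (not part of the patch):

    /* Hypothetical: total size in bytes of a trad-core dump -
     * one upage, then the data pages, then the stack pages. */
    static unsigned long core_file_size(const struct user *u)
    {
            return (UPAGES + u->u_dsize + u->u_ssize) * NBPG;
    }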
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b59bacd48b03..d3f220b57b4a 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,13 +35,19 @@ struct linux_binprm{
char * interp; /* Name of the binary really executed. Most
of the time same as filename, but could be
different for binfmt_{misc,script} */
- unsigned long interp_flags;
+ unsigned interp_flags;
+ unsigned interp_data;
unsigned long loader, exec;
};
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
+/* fd of the binary should be passed to the interpreter */
+#define BINPRM_FLAGS_EXECFD_BIT 1
+#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+
+
/*
* This structure defines the functions that are used to load the binary formats that
* linux accepts.
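Judging from the flag's comment, a binfmt handler that wants the kernel to hand the interpreter an open descriptor for the binary would set the flag and stash the fd; a sketch of the intended use (assumed, not shown in this patch):

    /* Sketch: inside a binfmt load_binary() handler, where 'fd'
     * is an open descriptor for bprm->file. */
    bprm->interp_flags |= BINPRM_FLAGS_EXECFD;
    bprm->interp_data = fd;         /* consumed by the interpreter's loader */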
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 15cf6aa2fc34..0ac26dad8931 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -592,7 +592,7 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
-extern inline void __generic_unplug_device(request_queue_t *);
+extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
int blk_get_queue(request_queue_t *);
diff --git a/include/linux/console.h b/include/linux/console.h
index c8a8fe4f27b3..488678c037a1 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -104,6 +104,9 @@ extern void acquire_console_sem(void);
extern void release_console_sem(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
+extern struct tty_driver *console_device(int *);
+extern void console_stop(struct console *);
+extern void console_start(struct console *);
extern int is_console_locked(void);
/* Some debug stub to catch some of the obvious races in the VT code */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2c0f0b59368d..c1620bb06051 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -19,6 +19,29 @@ enum dma_data_direction {
#define dma_sync_single dma_sync_single_for_cpu
#define dma_sync_sg dma_sync_sg_for_cpu
+#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
+static inline u64 dma_get_required_mask(struct device *dev)
+{
+ extern unsigned long max_pfn; /* defined in bootmem.h but may
+ not be included */
+ u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+ u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+ u64 mask;
+
+ if (!high_totalram) {
+ /* convert to mask just covering totalram */
+ low_totalram = (1 << (fls(low_totalram) - 1));
+ low_totalram += low_totalram - 1;
+ mask = low_totalram;
+ } else {
+ high_totalram = (1 << (fls(high_totalram) - 1));
+ high_totalram += high_totalram - 1;
+ mask = (((u64)high_totalram) << 32) + 0xffffffff;
+ }
+ return mask & *dev->dma_mask;
+}
+#endif
+
#endif
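The helper rounds total RAM up to a power-of-two-minus-one mask and clips it by the device's own dma_mask: with 1.5 GB of RAM, for instance, low_totalram becomes 0x7fffffff and that is the result. A driver might use it to decide whether a 64-bit DMA mask is worth setting; a sketch (mydev_probe is hypothetical):

    /* Sketch: prefer 64-bit DMA only when the machine's memory
     * actually requires addresses above 4 GB. */
    static int mydev_probe(struct device *dev)
    {
            if (dma_get_required_mask(dev) > 0xffffffffULL &&
                dma_set_mask(dev, ~0ULL) == 0)
                    return 0;       /* 64-bit DMA */
            return dma_set_mask(dev, 0xffffffffULL);
    }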
diff --git a/include/linux/edd.h b/include/linux/edd.h
index b3b36e2833fe..5f93881106fa 100644
--- a/include/linux/edd.h
+++ b/include/linux/edd.h
@@ -1,6 +1,6 @@
/*
* linux/include/linux/edd.h
- * Copyright (C) 2002, 2003 Dell Inc.
+ * Copyright (C) 2002, 2003, 2004 Dell Inc.
* by Matt Domsch <Matt_Domsch@dell.com>
*
* structures and definitions for the int 13h, ax={41,48}h
@@ -9,8 +9,8 @@
* available at http://www.t13.org/docs2002/d1572r0.pdf. It is
* very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf
*
- * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch table
- * in the empty_zero_block that contains a list of BIOS-enumerated
+ * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch
+ * table in the boot_params that contains a list of BIOS-enumerated
* boot devices.
* In arch/{i386,x86_64}/kernel/setup.c, this information is
* transferred into the edd structure, and in drivers/firmware/edd.c, that
@@ -31,8 +31,8 @@
#define _LINUX_EDD_H
#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF
- in empty_zero_block - treat this as 1 byte */
-#define EDDBUF 0x600 /* addr of edd_info structs in empty_zero_block */
+ in boot_params - treat this as 1 byte */
+#define EDDBUF 0x600 /* addr of edd_info structs in boot_params */
#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */
#define EDDEXTSIZE 8 /* change these if you muck with the structures */
#define EDDPARMSIZE 74
@@ -42,9 +42,13 @@
#define EDDMAGIC1 0x55AA
#define EDDMAGIC2 0xAA55
-#define READ_SECTORS 0x02
-#define MBR_SIG_OFFSET 0x1B8
-#define DISK80_SIG_BUFFER 0x2cc
+
+#define READ_SECTORS 0x02 /* int13 AH=0x02 is READ_SECTORS command */
+#define EDD_MBR_SIG_OFFSET 0x1B8 /* offset of signature in the MBR */
+#define EDD_MBR_SIG_BUF 0x290 /* addr in boot params */
+#define EDD_MBR_SIG_MAX 16 /* max number of signatures to store */
+#define EDD_MBR_SIG_NR_BUF 0x1ea /* addr of number of MBR signatures at EDD_MBR_SIG_BUF
+ in boot_params - treat this as 1 byte */
#ifndef __ASSEMBLY__
#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0)
@@ -172,9 +176,14 @@ struct edd_info {
struct edd_device_params params;
} __attribute__ ((packed));
-extern struct edd_info edd[EDDMAXNR];
-extern unsigned char eddnr;
-extern unsigned int edd_disk80_sig;
+struct edd {
+ unsigned int mbr_signature[EDD_MBR_SIG_MAX];
+ struct edd_info edd_info[EDDMAXNR];
+ unsigned char mbr_signature_nr;
+ unsigned char edd_info_nr;
+};
+
+extern struct edd edd;
#endif /*!__ASSEMBLY__ */
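With the per-disk data gathered into one object, consumers are expected to iterate using the stored counts instead of the old eddnr/edd_disk80_sig globals; a sketch of the access pattern (illustrative):

    /* Sketch: walk BIOS-enumerated devices and MBR signatures. */
    int i;

    for (i = 0; i < edd.edd_info_nr && i < EDDMAXNR; i++)
            ;       /* edd.edd_info[i].device, .params, ... */
    for (i = 0; i < edd.mbr_signature_nr && i < EDD_MBR_SIG_MAX; i++)
            ;       /* edd.mbr_signature[i] is one MBR signature */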
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a25a0ae12656..4e5f196258c8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -593,7 +593,7 @@ struct fb_info {
#define fb_writeq sbus_writeq
#define fb_memset sbus_memset_io
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
diff --git a/include/linux/fd.h b/include/linux/fd.h
index 7ce5f01a4de2..b6bd41d2b460 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -2,6 +2,7 @@
#define _LINUX_FD_H
#include <linux/ioctl.h>
+#include <linux/compiler.h>
/* New file layout: Now the ioctl definitions immediately follow the
* definitions of the structures that they use */
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 6b7fb6208f8c..af4da7d302d8 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -54,12 +54,6 @@ struct hpet {
#define HPET_LEG_RT_CNF_MASK (2UL)
#define HPET_ENABLE_CNF_MASK (1UL)
-/*
- * HPET interrupt status register
- */
-
-#define HPET_ISR_CLEAR(HPET, TIMER) \
- (HPET)->hpet_isr |= (1UL << TIMER)
/*
* Timer configuration register
@@ -115,8 +109,6 @@ struct hpet_task {
void *ht_opaque;
};
-#define HD_STATE(HD, TIMER) (HD)->hd_state |= (1 << TIMER)
-
struct hpet_data {
unsigned long hd_address;
unsigned short hd_nirqs;
@@ -127,6 +119,12 @@ struct hpet_data {
#define HPET_DATA_PLATFORM 0x0001 /* platform call to hpet_alloc */
+static inline void hpet_reserve_timer(struct hpet_data *hd, int timer)
+{
+ hd->hd_state |= (1 << timer);
+ return;
+}
+
int hpet_alloc(struct hpet_data *);
int hpet_register(struct hpet_task *, int);
int hpet_unregister(struct hpet_task *);
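Platform code that dedicates a comparator (say, timer 0 for the clock tick) would mark it reserved before handing the block to hpet_alloc(); a sketch with hypothetical values:

    /* Sketch: keep comparator 0 out of the set hpet_alloc()
     * exposes to userspace ('base' and 'nirqs' are placeholders). */
    struct hpet_data hd = { .hd_address = base, .hd_nirqs = nirqs };

    hpet_reserve_timer(&hd, 0);
    hpet_alloc(&hd);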
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 17f9f8384288..5e578c036157 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,13 +35,6 @@ extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
-static inline void
-mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- if (is_vm_hugetlb_page(vma))
- mm->used_hugetlb = 1;
-}
-
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(addr, len) 0
#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
@@ -74,8 +67,7 @@ static inline unsigned long hugetlb_total_pages(void)
#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
-#define mark_mm_hugetlb(mm, vma) do { } while (0)
-#define follow_huge_pmd(mm, addr, pmd, write) 0
+#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define is_aligned_hugepage_range(addr, len) 0
#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
diff --git a/include/linux/init.h b/include/linux/init.h
index 64d7417c835e..7a9f69992516 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -113,12 +113,18 @@ struct obs_kernel_param {
int early;
};
-/* Only for really core code. See moduleparam.h for the normal way. */
+/*
+ * Only for really core code. See moduleparam.h for the normal way.
+ *
+ * Force the alignment so the compiler doesn't space elements of the
+ * obs_kernel_param "array" too far apart in .init.setup.
+ */
#define __setup_param(str, unique_id, fn, early) \
static char __setup_str_##unique_id[] __initdata = str; \
static struct obs_kernel_param __setup_##unique_id \
- __attribute_used__ \
- __attribute__((__section__(".init.setup"))) \
+ __attribute_used__ \
+ __attribute__((__section__(".init.setup"))) \
+ __attribute__((aligned((sizeof(long))))) \
= { __setup_str_##unique_id, fn, early }
#define __setup_null_param(str, unique_id) \
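The forced alignment matters because boot code walks .init.setup as a contiguous array of obs_kernel_param. Registration itself is unchanged; a sketch with a hypothetical option and handler:

    /* Sketch: register a boot option; the handler returns 1 to
     * mark the option consumed (final 0 = not an early param). */
    static int __init myopt_setup(char *str)
    {
            /* parse str ... */
            return 1;
    }
    __setup_param("myopt=", myopt, myopt_setup, 0);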
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 0602eaffbad2..16af86e4743a 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -73,7 +73,8 @@ struct nlmsghdr
#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
(struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
-#define NLMSG_OK(nlh,len) ((len) > 0 && (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
+#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
+ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len <= (len))
#define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len)))
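The reordered test ensures the full header lies inside the remaining buffer before nlmsg_len is trusted, and the (int) cast keeps the comparison signed once NLMSG_NEXT has decremented len. The canonical receive loop relies on this to terminate safely (sketch; buf/nbytes are placeholders):

    /* Sketch: walk a buffer of netlink messages. */
    struct nlmsghdr *nlh = (struct nlmsghdr *) buf;
    int remaining = nbytes;         /* bytes received */

    while (NLMSG_OK(nlh, remaining)) {
            void *payload = NLMSG_DATA(nlh);
            /* ... dispatch on nlh->nlmsg_type ... */
            nlh = NLMSG_NEXT(nlh, remaining);
    }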
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 366eae3b4fc3..4f949964bb9b 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -73,7 +73,8 @@ struct rtattr
#define RTA_ALIGNTO 4
#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
-#define RTA_OK(rta,len) ((len) > 0 && (rta)->rta_len >= sizeof(struct rtattr) && \
+#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
+ (rta)->rta_len >= sizeof(struct rtattr) && \
(rta)->rta_len <= (len))
#define RTA_NEXT(rta,attrlen) ((attrlen) -= RTA_ALIGN((rta)->rta_len), \
(struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
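The same hardening applies to attribute parsing; the usual walk over a message's attributes (sketch; rtm/nlh are placeholders):

    /* Sketch: iterate the route attributes of one message. */
    struct rtattr *rta = RTM_RTA(rtm);
    int attrlen = RTM_PAYLOAD(nlh);

    while (RTA_OK(rta, attrlen)) {
            /* ... RTA_DATA(rta), rta->rta_type ... */
            rta = RTA_NEXT(rta, attrlen);
    }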
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 630cc8f5dc6c..6eb3b3afa1a6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -215,9 +215,6 @@ struct mm_struct {
unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
unsigned dumpable:1;
-#ifdef CONFIG_HUGETLB_PAGE
- int used_hugetlb;
-#endif
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 221f1dac7853..99bc7af242a5 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -210,12 +210,8 @@ typedef long psched_tdiff_t;
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
-#define PSCHED_EXPORTLIST EXPORT_SYMBOL(psched_tod_diff);
-
#else /* PSCHED_CLOCK_SOURCE != PSCHED_GETTIMEOFDAY */
-#define PSCHED_EXPORTLIST PSCHED_EXPORTLIST_1 PSCHED_EXPORTLIST_2
-
typedef u64 psched_time_t;
typedef long psched_tdiff_t;
@@ -235,27 +231,7 @@ extern psched_time_t psched_time_base;
#define PSCHED_JSCALE 10
#endif
-#define PSCHED_EXPORTLIST_2
-
-#if BITS_PER_LONG <= 32
-
-#define PSCHED_WATCHER unsigned long
-
-extern PSCHED_WATCHER psched_time_mark;
-
-#define PSCHED_GET_TIME(stamp) ((stamp) = psched_time_base + (((unsigned long)(jiffies-psched_time_mark))<<PSCHED_JSCALE))
-
-#define PSCHED_EXPORTLIST_1 EXPORT_SYMBOL(psched_time_base); \
- EXPORT_SYMBOL(psched_time_mark);
-
-#else
-
-#define PSCHED_GET_TIME(stamp) ((stamp) = (jiffies<<PSCHED_JSCALE))
-
-#define PSCHED_EXPORTLIST_1
-
-#endif
-
+#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<<PSCHED_JSCALE))
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
@@ -264,9 +240,6 @@ extern PSCHED_WATCHER psched_time_mark;
extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
-#define PSCHED_EXPORTLIST_2 EXPORT_SYMBOL(psched_clock_per_hz); \
- EXPORT_SYMBOL(psched_clock_scale);
-
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
@@ -278,8 +251,6 @@ extern int psched_clock_scale;
(stamp) = __cur>>psched_clock_scale; \
})
-#define PSCHED_EXPORTLIST_1
-
#elif defined (__alpha__)
#define PSCHED_WATCHER u32
@@ -294,9 +265,6 @@ extern PSCHED_WATCHER psched_time_mark;
(stamp) = (psched_time_base + __res)>>psched_clock_scale; \
})
-#define PSCHED_EXPORTLIST_1 EXPORT_SYMBOL(psched_time_base); \
- EXPORT_SYMBOL(psched_time_mark);
-
#else
#error PSCHED_CLOCK_SOURCE=PSCHED_CPU is not supported on this arch.
@@ -327,13 +295,13 @@ extern PSCHED_WATCHER psched_time_mark;
extern int psched_tod_diff(int delta_sec, int bound);
-#define PSCHED_TDIFF_SAFE(tv1, tv2, bound, guard) \
+#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
({ \
int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
int __delta = (tv1).tv_usec - (tv2).tv_usec; \
switch (__delta_sec) { \
default: \
- __delta = psched_tod_diff(__delta_sec, bound); guard; break; \
+ __delta = psched_tod_diff(__delta_sec, bound); break; \
case 2: \
__delta += 1000000; \
case 1: \
@@ -374,12 +342,8 @@ extern int psched_tod_diff(int delta_sec, int bound);
#else
#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
-#define PSCHED_TDIFF_SAFE(tv1, tv2, bound, guard) \
-({ \
- long long __delta = (tv1) - (tv2); \
- if ( __delta > (long long)(bound)) { __delta = (bound); guard; } \
- __delta; \
-})
+#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
+ min_t(long long, (tv1) - (tv2), bound)
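min_t here is just the removed clamp with the guard hook dropped; restated, it is equivalent to the deleted body above:

    /* What PSCHED_TDIFF_SAFE(tv1, tv2, bound) now computes: */
    long long __delta = (tv1) - (tv2);
    if (__delta > (long long)(bound))
            __delta = (bound);
    /* __delta is the result */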
#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))