summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2004-04-25 04:21:52 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-04-25 04:21:52 -0700
commit5541b4278d607ecdf064226818a7dddcd9d31b92 (patch)
tree1a4fb287b09a5756c3b9948916a0db4e10e7b890 /include
parent7ce42ae14e32016ac8e83003db5811a33e3b16e3 (diff)
parentc2e26dd2a08599cd24ffd7629ab263b3ddf5d4e7 (diff)
Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk
into ppc970.osdl.org:/home/torvalds/v2.6/linux
Diffstat (limited to 'include')
-rw-r--r--include/asm-arm/atomic.h20
-rw-r--r--include/asm-arm/div64.h8
-rw-r--r--include/asm-arm/dma-mapping.h153
-rw-r--r--include/asm-arm/system.h9
-rw-r--r--include/asm-arm/uaccess.h9
5 files changed, 125 insertions, 74 deletions
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 595fdd7451b6..dda22e24ce76 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -44,7 +44,7 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}
-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp, tmp2;
@@ -59,7 +59,7 @@ static inline void atomic_add(int i, volatile atomic_t *v)
: "cc");
}
-static inline void atomic_sub(int i, volatile atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp, tmp2;
@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned long tmp;
int result;
@@ -95,7 +95,7 @@ static inline int atomic_dec_and_test(volatile atomic_t *v)
return result == 0;
}
-static inline int atomic_add_negative(int i, volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned long tmp;
int result;
@@ -138,7 +138,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#define atomic_set(v,i) (((v)->counter) = (i))
-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
unsigned long flags;
@@ -147,7 +147,7 @@ static inline void atomic_add(int i, volatile atomic_t *v)
local_irq_restore(flags);
}
-static inline void atomic_sub(int i, volatile atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long flags;
@@ -156,7 +156,7 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
local_irq_restore(flags);
}
-static inline void atomic_inc(volatile atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
{
unsigned long flags;
@@ -165,7 +165,7 @@ static inline void atomic_inc(volatile atomic_t *v)
local_irq_restore(flags);
}
-static inline void atomic_dec(volatile atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
{
unsigned long flags;
@@ -174,7 +174,7 @@ static inline void atomic_dec(volatile atomic_t *v)
local_irq_restore(flags);
}
-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned long flags;
int val;
@@ -187,7 +187,7 @@ static inline int atomic_dec_and_test(volatile atomic_t *v)
return val == 0;
}
-static inline int atomic_add_negative(int i, volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned long flags;
int val;
diff --git a/include/asm-arm/div64.h b/include/asm-arm/div64.h
index 4957da3df270..3682616804ca 100644
--- a/include/asm-arm/div64.h
+++ b/include/asm-arm/div64.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64
+#include <asm/system.h>
+
/*
* The semantics of do_div() are:
*
@@ -31,7 +33,11 @@
register unsigned long long __n asm("r0") = n; \
register unsigned long long __res asm("r2"); \
register unsigned int __rem asm(__xh); \
- asm("bl __do_div64" \
+ asm( __asmeq("%0", __xh) \
+ __asmeq("%1", "r2") \
+ __asmeq("%2", "r0") \
+ __asmeq("%3", "r4") \
+ "bl __do_div64" \
: "=r" (__rem), "=r" (__res) \
: "r" (__n), "r" (__base) \
: "ip", "lr", "cc"); \
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index c65d9e38ddc7..011c539c7449 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -17,29 +17,6 @@
extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
- * For SA-1111 these functions are "magic" and utilize bounce
- * bufferes as needed to work around SA-1111 DMA bugs.
- */
-dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
-void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
-int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
-void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
-void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
-
-#ifdef CONFIG_SA1111
-
-extern struct bus_type sa1111_bus_type;
-
-#define dmadev_is_sa1111(dev) ((dev)->bus == &sa1111_bus_type)
-
-#else
-#define dmadev_is_sa1111(dev) (0)
-#endif
-
-/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask
@@ -70,6 +47,14 @@ static inline int dma_is_consistent(dma_addr_t handle)
return 0;
}
+/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == ~0;
+}
+
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -118,6 +103,7 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
+
/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -132,16 +118,17 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
* can regain ownership by calling dma_unmap_single() or
* dma_sync_single_for_cpu().
*/
+#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
- if (dmadev_is_sa1111(dev))
- return sa1111_map_single(dev, cpu_addr, size, dir);
-
consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr);
}
+#else
+extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
+#endif
/**
* dma_map_page - map a portion of a page for streaming DMA
@@ -180,15 +167,16 @@ dma_map_page(struct device *dev, struct page *page,
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
+#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- if (dmadev_is_sa1111(dev))
- sa1111_unmap_single(dev, handle, size, dir);
-
/* nothing to do */
}
+#else
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+#endif
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -233,15 +221,13 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
* Device ownership issues as mentioned above for dma_map_single are
* the same here.
*/
+#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
- if (dmadev_is_sa1111(dev))
- return sa1111_map_sg(dev, sg, nents, dir);
-
for (i = 0; i < nents; i++, sg++) {
char *virt;
@@ -252,6 +238,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
+#else
+extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
/**
* dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
@@ -264,17 +253,18 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* Again, CPU read rules concerning calls here are the same as for
* dma_unmap_single() above.
*/
+#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
- if (dmadev_is_sa1111(dev)) {
- sa1111_unmap_sg(dev, sg, nents, dir);
- return;
- }
/* nothing to do */
}
+#else
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
+
/**
* dma_sync_single_for_cpu
@@ -293,15 +283,11 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
* must first the perform a dma_sync_for_device, and then the
* device again owns the buffer.
*/
+#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- if (dmadev_is_sa1111(dev)) {
- sa1111_dma_sync_single_for_cpu(dev, handle, size, dir);
- return;
- }
-
consistent_sync((void *)__bus_to_virt(handle), size, dir);
}
@@ -309,13 +295,13 @@ static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- if (dmadev_is_sa1111(dev)) {
- sa1111_dma_sync_single_for_device(dev, handle, size, dir);
- return;
- }
-
consistent_sync((void *)__bus_to_virt(handle), size, dir);
}
+#else
+extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+#endif
+
/**
* dma_sync_sg_for_cpu
@@ -330,17 +316,13 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
* The same as dma_sync_single_for_* but for a scatter-gather list,
* same rules and usage.
*/
+#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
- if (dmadev_is_sa1111(dev)) {
- sa1111_dma_sync_sg_for_cpu(dev, sg, nents, dir);
- return;
- }
-
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
@@ -353,24 +335,73 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
{
int i;
- if (dmadev_is_sa1111(dev)) {
- sa1111_dma_sync_sg_for_device(dev, sg, nents, dir);
- return;
- }
-
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
}
}
+#else
+extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
+#endif
+#ifdef CONFIG_DMABOUNCE
/*
- * DMA errors are defined by all-bits-set in the DMA address.
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subsystem
+ *
*/
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- return dma_addr == ~0;
-}
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requiring DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+#endif /* CONFIG_DMABOUNCE */
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 4e19a5719fe8..dc7ef45d3b4e 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -42,6 +42,15 @@
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for some inline assembly sequences. Apparently we can't trust
+ * the compiler from one version to another so a bit of paranoia won't hurt.
+ * This string is meant to be concatenated with the inline asm string and
+ * will cause compilation to stop on mismatch.
+ */
+#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index 3703b43c2f57..119745a6dd7c 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -15,6 +15,7 @@
#include <asm/errno.h>
#include <asm/arch/memory.h>
#include <asm/domain.h>
+#include <asm/system.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -107,7 +108,9 @@ extern int __get_user_8(void *);
extern int __get_user_bad(void);
#define __get_user_x(__r1,__p,__e,__s,__i...) \
- __asm__ __volatile__ ("bl __get_user_" #__s \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r1") \
+ "bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r1) \
: "0" (__p) \
: __i, "cc")
@@ -223,7 +226,9 @@ extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void);
#define __put_user_x(__r1,__p,__e,__s) \
- __asm__ __volatile__ ("bl __put_user_" #__s \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r1") \
+ "bl __put_user_" #__s \
: "=&r" (__e) \
: "0" (__p), "r" (__r1) \
: "ip", "lr", "cc")