author     Russell King <rmk@flint.arm.linux.org.uk>  2002-05-13 02:05:00 +0100
committer  Russell King <rmk@flint.arm.linux.org.uk>  2002-05-13 02:05:00 +0100
commit     34dc307ad5fd2af8ee41469f587c34885d554c83 (patch)
tree       98631acff0ab73fb963d0b0694277bf291412abb /include
parent     3bcf06b721e993e9b4a6edc74b80e1ba4708f813 (diff)
2.5.14 updates for the new memory management pfn() macros. We also fix ARM720T support: this CPU has a unified writethrough cache only, so we can't use the Harvard cache operations when copying pages, and we don't need to evict cache entries during copypage.
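As a reading aid, a minimal sketch of how the new pfn interface added in asm-arm/memory.h below is meant to be used on the flat-memory (non-discontigmem) path. The helper name example_kaddr_to_phys() is hypothetical and not part of this patch.

/*
 * Illustrative only: strings together the new macros from asm-arm/memory.h
 * (flat-memory case).  Not part of the patch.
 */
#include <asm/memory.h>
#include <asm/page.h>

static unsigned long example_kaddr_to_phys(void *kaddr)
{
	struct page *page;

	if (!virt_addr_valid((unsigned long)kaddr))
		return 0;				/* not direct-mapped RAM */

	page = virt_to_page(kaddr);			/* kaddr -> struct page */
	return page_to_pfn(page) << PAGE_SHIFT;		/* struct page -> PFN -> phys */
}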
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-clps711x/memory.h  |  57
-rw-r--r--  include/asm-arm/arch-sa1100/memory.h    |  41
-rw-r--r--  include/asm-arm/glue.h                  |  19
-rw-r--r--  include/asm-arm/io.h                    |  18
-rw-r--r--  include/asm-arm/memory.h                |  87
-rw-r--r--  include/asm-arm/page.h                  |  13
-rw-r--r--  include/asm-arm/pgtable.h               |  26
7 files changed, 127 insertions(+), 134 deletions(-)
diff --git a/include/asm-arm/arch-clps711x/memory.h b/include/asm-arm/arch-clps711x/memory.h
index 1e884d9e4167..dd1ae0acb55e 100644
--- a/include/asm-arm/arch-clps711x/memory.h
+++ b/include/asm-arm/arch-clps711x/memory.h
@@ -120,9 +120,10 @@
(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MAX_MEM_SHIFT)
/*
- * Given a physical address, convert it to a node id.
+ * Given a page frame number, convert it to a node id.
*/
-#define PHYS_TO_NID(addr) KVADDR_TO_NID(__phys_to_virt(addr))
+#define PFN_TO_NID(pfn) \
+ (((pfn) - PHYS_PFN_OFFSET) >> (NODE_MAX_MEM_SHIFT - PAGE_SHIFT))
/*
* Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
@@ -131,30 +132,15 @@
#define ADDR_TO_MAPBASE(kaddr) \
NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn))
+
/*
* Given a kaddr, LOCAL_MAP_NR finds the owning node of the memory
* and returns the index corresponding to the appropriate page in the
* node's mem_map.
*/
-#define LOCAL_MAP_NR(kaddr) \
- (((unsigned long)(kaddr)-LOCAL_BASE_ADDR((kaddr))) >> PAGE_SHIFT)
-
-/*
- * Given a kaddr, virt_to_page returns a pointer to the corresponding
- * mem_map entry.
- */
-#define virt_to_page(kaddr) \
- (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-
-/*
- * VALID_PAGE returns a non-zero value if given page pointer is valid.
- * This assumes all node's mem_maps are stored within the node they refer to.
- */
-#define VALID_PAGE(page) \
-({ unsigned int node = KVADDR_TO_NID(page); \
- ( (node < NR_NODES) && \
- ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size) ); \
-})
+#define LOCAL_MAP_NR(addr) \
+ (((unsigned long)(addr) & (NODE_MAX_MEM_SIZE - 1)) >> PAGE_SHIFT)
/*
* The PS7211 allows up to 256MB max per DRAM bank, but the EDB7211
@@ -167,40 +153,13 @@
#define NODE_MAX_MEM_SHIFT 24
#define NODE_MAX_MEM_SIZE (1<<NODE_MAX_MEM_SHIFT)
-/*
- * Given a mem_map_t, LOCAL_MAP_BASE finds the owning node for the
- * physical page and returns the kaddr for the mem_map of that node.
- */
-#define LOCAL_MAP_BASE(page) \
- NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(page)))
-
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) ((unsigned long)(kaddr) & ~(NODE_MAX_MEM_SIZE-1))
-
-/*
- * With discontigmem, the conceptual mem_map array starts from PAGE_OFFSET.
- * Given a kaddr, MAP_NR returns the appropriate global mem_map index so
- * it matches the corresponding node's local mem_map.
- */
-#define MAP_NR(kaddr) (LOCAL_MAP_NR((kaddr)) + \
- (((unsigned long)ADDR_TO_MAPBASE((kaddr)) - PAGE_OFFSET) / \
- sizeof(mem_map_t)))
-
#else
-#define PHYS_TO_NID(addr) (0)
+#define PFN_TO_NID(pfn) (0)
#endif /* CONFIG_DISCONTIGMEM */
#endif /* CONFIG_ARCH_EDB7211 */
-#ifndef PHYS_TO_NID
-#define PHYS_TO_NID(addr) (0)
-#endif
-
#endif
diff --git a/include/asm-arm/arch-sa1100/memory.h b/include/asm-arm/arch-sa1100/memory.h
index 192d44243330..1f7c463b32b1 100644
--- a/include/asm-arm/arch-sa1100/memory.h
+++ b/include/asm-arm/arch-sa1100/memory.h
@@ -79,49 +79,36 @@
/*
* Given a kernel address, find the home node of the underlying memory.
*/
-#define KVADDR_TO_NID(addr) \
- (((unsigned long)(addr) - 0xc0000000) >> 27)
+#define KVADDR_TO_NID(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> 27)
/*
- * Given a physical address, convert it to a node id.
+ * Given a page frame number, convert it to a node id.
*/
-#define PHYS_TO_NID(addr) KVADDR_TO_NID(__phys_to_virt(addr))
+#define PFN_TO_NID(pfn) (((pfn) - PHYS_PFN_OFFSET) >> (27 - PAGE_SHIFT))
/*
* Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
- * and returns the mem_map of that node.
+ * and return the mem_map of that node.
*/
-#define ADDR_TO_MAPBASE(kaddr) \
- NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define ADDR_TO_MAPBASE(kaddr) NODE_MEM_MAP(KVADDR_TO_NID(kaddr))
/*
- * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
- * and returns the index corresponding to the appropriate page in the
- * node's mem_map.
+ * Given a page frame number, find the owning node of the memory
+ * and return the mem_map of that node.
*/
-#define LOCAL_MAP_NR(kvaddr) \
- (((unsigned long)(kvaddr) & 0x07ffffff) >> PAGE_SHIFT)
+#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn))
/*
- * Given a kaddr, virt_to_page returns a pointer to the corresponding
- * mem_map entry.
- */
-#define virt_to_page(kaddr) \
- (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-
-/*
- * VALID_PAGE returns a non-zero value if given page pointer is valid.
- * This assumes all node's mem_maps are stored within the node they refer to.
+ * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
+ * and returns the index corresponding to the appropriate page in the
+ * node's mem_map.
*/
-#define VALID_PAGE(page) \
-({ unsigned int node = KVADDR_TO_NID(page); \
- ( (node < NR_NODES) && \
- ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size) ); \
-})
+#define LOCAL_MAP_NR(addr) \
+ (((unsigned long)(addr) & 0x07ffffff) >> PAGE_SHIFT)
#else
-#define PHYS_TO_NID(addr) (0)
+#define PFN_TO_NID(addr) (0)
#endif
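To make the shift arithmetic above concrete, a worked example (illustrative only, assuming the SA-1100's usual layout: PHYS_OFFSET = 0xc0000000, PAGE_SHIFT = 12, DRAM banks spaced 1 << 27 = 128MB apart, so PHYS_PFN_OFFSET = 0xc0000):

unsigned long pfn = 0xc8000000 >> 12;    /* a page in the second DRAM bank: pfn 0xc8000 */
int nid = (pfn - 0xc0000) >> (27 - 12);  /* PFN_TO_NID: 0x8000 >> 15 == 1, i.e. node 1 */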
diff --git a/include/asm-arm/glue.h b/include/asm-arm/glue.h
index 9cbfa2d6605c..d0c0c121aa28 100644
--- a/include/asm-arm/glue.h
+++ b/include/asm-arm/glue.h
@@ -158,7 +158,8 @@
*
* We have the following to choose from:
* v3 - ARMv3
- * v4 - ARMv4 without minicache
+ * v4wt - ARMv4 with writethrough cache, without minicache
+ * v4wb - ARMv4 with writeback cache, without minicache
* v4_mc - ARMv4 with minicache
* v5te_mc - ARMv5TE with minicache
*/
@@ -173,13 +174,21 @@
# endif
#endif
-#if defined(CONFIG_CPU_ARM720T) || defined(CONFIG_CPU_ARM920T) || \
- defined(CONFIG_CPU_ARM922T) || defined(CONFIG_CPU_ARM926T) || \
- defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_ARM1020)
+#if defined(CONFIG_CPU_ARM720T)
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER v4wt
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_SA110) || \
+ defined(CONFIG_CPU_ARM1020)
# ifdef _USER
# define MULTI_USER 1
# else
-# define _USER v4
+# define _USER v4wb
# endif
#endif
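For context, a hedged paraphrase of how the _USER token selected above takes effect; the macro and function names below follow the usual glue.h/copypage naming and are assumptions, not quoted from this patch:

/* Illustrative paraphrase - names are assumptions, not verbatim from glue.h. */
#define ____glue(name,fn)	name##fn
#define __glue(name,fn)		____glue(name,fn)

#define __cpu_copy_user_page	__glue(_USER,_copy_user_page)
/* With _USER defined as v4wt (the ARM720T case above), this resolves to a
 * writethrough-cache copypage implementation such as v4wt_copy_user_page(). */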
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index d12af77640aa..21ca5ae0b847 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -270,24 +270,6 @@ extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *vaddr, size_t size, int rw);
/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_DISCONTIGMEM
-#define page_to_phys(page) \
- ((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
- + page_zone(page)->zone_start_paddr)
-#else
-#define page_to_phys(page) \
- (PHYS_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#endif
-
-/*
- * We should really eliminate virt_to_bus() here - it's depreciated.
- */
-#define page_to_bus(page) \
- (virt_to_bus(page_address(page)))
-
-/*
* can the hardware map this into one segment or not, given no other
* constraints.
*/
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index c9f63415e223..865f1d686a30 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -1,22 +1,34 @@
/*
* linux/include/asm-arm/memory.h
*
- * Copyright (C) 2000 Russell King
+ * Copyright (C) 2000-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Note: this file should not be included by non-asm/.h files
- *
- * Modifications:
*/
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#include <linux/config.h>
#include <asm/arch/memory.h>
-static inline unsigned long virt_to_phys(volatile void *x)
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * These are *only* valid on the kernel direct mapped RAM memory.
+ */
+static inline unsigned long virt_to_phys(void *x)
{
return __virt_to_phys((unsigned long)(x));
}
@@ -26,10 +38,77 @@ static inline void *phys_to_virt(unsigned long x)
return (void *)(__phys_to_virt((unsigned long)(x)));
}
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+
/*
* Virtual <-> DMA view memory address translations
+ * Again, these are *only* valid on the kernel direct mapped RAM
+ * memory.  Use of these is *deprecated*.
*/
#define virt_to_bus(x) (__virt_to_bus((unsigned long)(x)))
#define bus_to_virt(x) ((void *)(__bus_to_virt((unsigned long)(x))))
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE(). It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ * page_to_pfn(page) convert a struct page * to a PFN number
+ * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
+ * pfn_valid(pfn) indicates whether a PFN number is valid
+ *
+ * virt_to_page(k) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k) indicates whether a virtual address is valid
+ */
+#ifndef CONFIG_DISCONTIGMEM
+
+#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET)
+#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET)
+#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < max_mapnr)
+
+#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#define virt_addr_valid(kaddr) ((kaddr) >= PAGE_OFFSET && (kaddr) < (unsigned long)high_memory)
+
+#else
+/*
+ * This is more complex. We have a set of mem_map arrays spread
+ * around in memory.
+ */
+#define page_to_pfn(page) \
+ (((page) - page_zone(page)->zone_mem_map) \
+ + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))
+
+#define pfn_to_page(pfn) \
+ (PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))
+
+#define pfn_valid(pfn) (PFN_TO_NID(pfn) < NR_NODES)
+
+#define virt_to_page(kaddr) \
+ (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
+
+#define virt_addr_valid(kaddr) (KVADDR_TO_NID(kaddr) < NR_NODES)
+
+/*
+ * Common discontigmem stuff.
+ * PHYS_TO_NID is used by the ARM kernel/setup.c
+ */
+#define PHYS_TO_NID(addr) PFN_TO_NID((addr) >> PAGE_SHIFT)
+
+#endif
+
+/*
+ * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
+ */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * We should really eliminate virt_to_bus() here - it's deprecated.
+ */
+#define page_to_bus(page) (virt_to_bus(page_address(page)))
+
#endif
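A minimal sketch (illustrative only; the helper name is hypothetical) of guarding a conversion from an arbitrary physical address with pfn_valid(), as the comment block in the hunk above requires:

static struct page *example_phys_to_page(unsigned long phys)
{
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (!pfn_valid(pfn))
		return NULL;		/* not system RAM */

	return pfn_to_page(pfn);	/* safe: pfn is known to be valid */
}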
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 56730ced5d2d..6f8afed589ac 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -125,18 +125,9 @@ static inline int get_order(unsigned long size)
return order;
}
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/arch/memory.h>
-
-#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#include <asm/memory.h>
-#ifndef CONFIG_DISCONTIGMEM
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT) - \
- (PHYS_OFFSET >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#endif
+#endif /* !__ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index f01a476df49a..dbbb85bd3995 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -11,7 +11,7 @@
#define _ASMARM_PGTABLE_H
#include <linux/config.h>
-#include <asm/arch/memory.h>
+#include <asm/memory.h>
#include <asm/arch/vmalloc.h>
/*
@@ -79,21 +79,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(ptep) set_pte((ptep), __pte(0))
-
-#ifndef CONFIG_DISCONTIGMEM
-#define pte_page(x) (mem_map + (pte_val((x)) >> PAGE_SHIFT) - \
- (PHYS_OFFSET >> PAGE_SHIFT))
-#else
-/*
- * I'm not happy with this - we needlessly convert a physical address
- * to a virtual one, and then immediately back to a physical address,
- * which, if __va and __pa are expensive causes twice the expense for
- * zero gain. --rmk
- */
-#define pte_page(x) (virt_to_page(__va(pte_val((x)))))
-#endif
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
@@ -107,12 +98,7 @@ extern struct page *empty_zero_page;
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{
- return __pte(physpage | pgprot_val(pgprot));
-}
-
-#define mk_pte(page,pgprot) mk_pte_phys(__pa(page_address(page)), pgprot)
+#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level