| author | Russell King <rmk@flint.arm.linux.org.uk> | 2003-09-03 20:19:25 +0100 |
|---|---|---|
| committer | Russell King <rmk@flint.arm.linux.org.uk> | 2003-09-03 20:19:25 +0100 |
| commit | ac7ebfb23521ef77b3af31d57faffabb2b6f98ca (patch) | |
| tree | f5c7e3fd4839c307b59fc1774a386befde9f9491 /include/asm-arm/pgalloc.h | |
| parent | de950cef51ac8d7bd67d305857c0bee7bb8a97e5 (diff) | |
[ARM] Remove more remnants of 26-bit ARM support.
This removes include/asm-arm/proc-armv entirely, merging the
contents into the relevant include files in include/asm-arm.
We also update various files in arch/arm which reference
definitions in the now non-existent directory.
Diffstat (limited to 'include/asm-arm/pgalloc.h')
| -rw-r--r-- | include/asm-arm/pgalloc.h | 103 |
1 file changed, 102 insertions, 1 deletion
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index 81a7eccf2991..23eb321f4af2 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -11,7 +11,8 @@
 #define _ASMARM_PGALLOC_H
 
 #include <asm/processor.h>
-#include <asm/proc/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 
 /*
  * Since we have only two-level page tables, these are trivial
@@ -28,4 +29,104 @@ extern void free_pgd_slow(pgd_t *pgd);
 
 #define check_pgt_cache()		do { } while (0)
 
+/*
+ * Allocate one PTE table.
+ *
+ * This actually allocates two hardware PTE tables, but we wrap this up
+ * into one table thus:
+ *
+ *  +------------+
+ *  |  h/w pt 0  |
+ *  +------------+
+ *  |  h/w pt 1  |
+ *  +------------+
+ *  | Linux pt 0 |
+ *  +------------+
+ *  | Linux pt 1 |
+ *  +------------+
+ */
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+{
+        pte_t *pte;
+
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        if (pte) {
+                clear_page(pte);
+                clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
+                pte += PTRS_PER_PTE;
+        }
+
+        return pte;
+}
+
+static inline struct page *
+pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+        struct page *pte;
+
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        if (pte) {
+                void *page = page_address(pte);
+                clear_page(page);
+                clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+        }
+
+        return pte;
+}
+
+/*
+ * Free one PTE table.
+ */
+static inline void pte_free_kernel(pte_t *pte)
+{
+        if (pte) {
+                pte -= PTRS_PER_PTE;
+                free_page((unsigned long)pte);
+        }
+}
+
+static inline void pte_free(struct page *pte)
+{
+        __free_page(pte);
+}
+
+/*
+ * Populate the pmdp entry with a pointer to the pte.  This pmd is part
+ * of the mm address space.
+ *
+ * Ensure that we always set both PMD entries.
+ */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+        unsigned long pte_ptr = (unsigned long)ptep;
+        unsigned long pmdval;
+
+        BUG_ON(mm != &init_mm);
+
+        /*
+         * The pmd must be loaded with the physical
+         * address of the PTE table
+         */
+        pte_ptr -= PTRS_PER_PTE * sizeof(void *);
+        pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
+        pmdp[0] = __pmd(pmdval);
+        pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+        flush_pmd_entry(pmdp);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
+{
+        unsigned long pmdval;
+
+        BUG_ON(mm == &init_mm);
+
+        pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
+        pmdp[0] = __pmd(pmdval);
+        pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+        flush_pmd_entry(pmdp);
+}
+
 #endif
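The pointer arithmetic in the new helpers hinges on the combined page layout drawn in the comment block: two 256-entry hardware PTE tables in the lower half of the page and the two Linux shadow tables in the upper half. The standalone sketch below is a rough illustration of the same offsets that pte_alloc_one_kernel() and pmd_populate_kernel() compute; the concrete constants (4 KB page, 4-byte PTE, 256 hardware entries per table, PTRS_PER_PTE of 512) are assumptions for illustration and do not appear literally in this patch.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Assumed values for classic two-level ARM paging (not taken from this
 * patch): 4 KB pages, 4-byte PTEs, 256-entry hardware tables, and a
 * Linux-level PTRS_PER_PTE of 512, so that two hardware tables plus two
 * Linux shadow tables fill exactly one page.
 */
#define PAGE_SIZE       4096u
#define PTE_SIZE        4u      /* stand-in for sizeof(pte_t) */
#define HW_PTRS         256u    /* entries per hardware table */
#define PTRS_PER_PTE    512u    /* Linux view: 2 * HW_PTRS */

int main(void)
{
        /* Stand-in for the page returned by __get_free_page(). */
        static uint8_t page[PAGE_SIZE];
        memset(page, 0, sizeof(page));          /* models clear_page() */

        /*
         * pte_alloc_one_kernel() returns the pointer advanced by
         * PTRS_PER_PTE entries, i.e. the start of the Linux tables
         * in the upper half of the page.
         */
        uint8_t *linux_pt = page + PTRS_PER_PTE * PTE_SIZE;
        assert(linux_pt == page + PAGE_SIZE / 2);

        /*
         * pmd_populate_kernel() undoes that offset to recover the start
         * of the hardware tables, then points the two PMD slots at the
         * two 256-entry hardware tables, 1 KB apart.
         */
        uint8_t *hw_pt0 = linux_pt - PTRS_PER_PTE * PTE_SIZE;
        uint8_t *hw_pt1 = hw_pt0 + HW_PTRS * PTE_SIZE;

        printf("h/w pt 0   at page offset %4zu\n", (size_t)(hw_pt0 - page));
        printf("h/w pt 1   at page offset %4zu\n", (size_t)(hw_pt1 - page));
        printf("Linux pt 0 at page offset %4zu\n", (size_t)(linux_pt - page));
        printf("Linux pt 1 at page offset %4zu\n",
               (size_t)(linux_pt + HW_PTRS * PTE_SIZE - page));
        return 0;
}
```

Run, this prints the four 1 KB regions at offsets 0, 1024, 2048 and 3072, which is why pmdp[1] is set to pmdval + 256 * sizeof(pte_t) and why pte_free_kernel() rewinds the pointer by PTRS_PER_PTE entries before calling free_page().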
