| author | David S. Miller <davem@nuts.davemloft.net> | 2005-02-28 23:33:45 -0800 |
|---|---|---|
| committer | David S. Miller <davem@nuts.davemloft.net> | 2005-02-28 23:33:45 -0800 |
| commit | 7011dbd28d20d120232c804487b1213d6dcc048b | (patch) |
| tree | 1b6183ed5411d59db2a90a0463e150660f4cd099 | /include |
| parent | cd1496b6f40e04e5595db7dcfb16eaed439cd352 | (diff) |
| parent | b5fa3247be8c283b90fce065d7237c99e58a1eef | (diff) |
Resolve conflicts.
Diffstat (limited to 'include')
33 files changed, 239 insertions, 252 deletions
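
The substance of this merge is the page-table accessor API change: callers of set_pte(ptep, pte) move to set_pte_at(mm, addr, ptep, pte); pte_clear(), ptep_get_and_clear() and ptep_set_wrprotect() gain (mm, addr) arguments; ptep_test_and_clear_young()/dirty() gain (vma, addr); and ptep_mkdirty() is removed. Below is a minimal stand-alone sketch of the forwarding pattern that most of the architectures in this diff use, with toy pte_t/mm_struct stand-ins and a demo main() rather than the real kernel definitions (GCC statement expressions, as in asm-generic/pgtable.h):

```c
/*
 * Sketch of the accessor change carried by this merge.
 * Types here are local stand-ins, not the kernel's.
 */
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
struct mm_struct { const char *name; };

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* Old-style architecture hook: knows nothing about the mapping. */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

/*
 * New-style hook.  Architectures that have no use for mm/addr
 * (most of the ones touched here) simply forward to set_pte().
 */
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)

/* pte_clear() grows the same two arguments. */
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

/* Generic ptep_get_and_clear(), as in asm-generic/pgtable.h after the merge. */
#define ptep_get_and_clear(__mm, __address, __ptep)		\
({								\
	pte_t __pte = *(__ptep);				\
	pte_clear((__mm), (__address), (__ptep));		\
	__pte;							\
})

int main(void)
{
	struct mm_struct mm = { "demo" };
	pte_t pte = __pte(0x1234);
	pte_t old = ptep_get_and_clear(&mm, 0x1000, &pte);

	printf("cleared pte for %s: old=%#lx new=%#lx\n",
	       mm.name, pte_val(old), pte_val(pte));
	return 0;
}
```

The one-line forwarding #define keeps architectures with no per-mm page-table state source-compatible with generic code that now always passes the mm and the virtual address.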
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h index a22e394a3d52..d27970940f03 100644 --- a/include/asm-alpha/pgtable.h +++ b/include/asm-alpha/pgtable.h @@ -22,6 +22,7 @@ * hook is made available. */ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* PMD_SHIFT determines the size of the area a second-level page table can map */ #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) @@ -235,7 +236,10 @@ extern inline unsigned long pgd_page(pgd_t pgd) extern inline int pte_none(pte_t pte) { return !pte_val(pte); } extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } -extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; } +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; } diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index 10d747ba0ce0..d5ed84d427dd 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -254,7 +254,7 @@ extern struct page *empty_zero_page; #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) #define pte_none(pte) (!pte_val(pte)) -#define pte_clear(ptep) set_pte((ptep), __pte(0)) +#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) #define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) #define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) @@ -263,6 +263,7 @@ extern struct page *empty_zero_page; #define pte_unmap_nested(pte) do { } while (0) #define set_pte(ptep, pte) cpu_set_pte(ptep,pte) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* * The following only work if pte_present() is true. diff --git a/include/asm-arm26/pgtable.h b/include/asm-arm26/pgtable.h index fdebe1533405..95d16abeb189 100644 --- a/include/asm-arm26/pgtable.h +++ b/include/asm-arm26/pgtable.h @@ -154,7 +154,8 @@ extern struct page *empty_zero_page; #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define set_pte(pte_ptr, pte) ((*(pte_ptr)) = (pte)) -#define pte_clear(ptep) set_pte((ptep), __pte(0)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) +#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) /* macros to ease the getting of pointers to stuff... */ #define pgd_offset(mm, addr) ((pgd_t *)(mm)->pgd + __pgd_index(addr)) diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h index fc052d4a5032..957dd92d108d 100644 --- a/include/asm-cris/pgtable.h +++ b/include/asm-cris/pgtable.h @@ -34,6 +34,8 @@ extern void paging_init(void); * hook is made available. */ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
@@ -101,7 +103,7 @@ extern unsigned long empty_zero_page; #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0) +#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) #define pmd_none(x) (!pmd_val(x)) /* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h index eb56c87217d8..9d44b7e4a210 100644 --- a/include/asm-frv/pgtable.h +++ b/include/asm-frv/pgtable.h @@ -173,6 +173,7 @@ do { \ *(pteptr) = (pteval); \ asm volatile("dcf %M0" :: "U"(*pteptr)); \ } while(0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval)) @@ -353,7 +354,7 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address) #undef TEST_VERIFY_AREA #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -390,39 +391,33 @@ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pt static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); asm volatile("dcf %M0" :: "U"(*ptep)); return i; } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); asm volatile("dcf %M0" :: "U"(*ptep)); return i; } -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long x = xchg(&ptep->pte, 0); asm volatile("dcf %M0" :: "U"(*ptep)); return __pte(x); } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { set_bit(_PAGE_BIT_WP, ptep); asm volatile("dcf %M0" :: "U"(*ptep)); } -static inline void ptep_mkdirty(pte_t *ptep) -{ - set_bit(_PAGE_BIT_DIRTY, ptep); - asm volatile("dcf %M0" :: "U"(*ptep)); -} - /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
@@ -512,7 +507,6 @@ static inline int pte_file(pte_t pte) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index cf791b073e76..51aa714405a2 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -16,7 +16,7 @@ #ifndef __HAVE_ARCH_SET_PTE_ATOMIC #define ptep_establish(__vma, __address, __ptep, __entry) \ do { \ - set_pte(__ptep, __entry); \ + set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ flush_tlb_page(__vma, __address); \ } while (0) #else /* __HAVE_ARCH_SET_PTE_ATOMIC */ @@ -37,26 +37,30 @@ do { \ */ #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ do { \ - set_pte(__ptep, __entry); \ + set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ flush_tlb_page(__vma, __address); \ } while (0) #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int ptep_test_and_clear_young(pte_t *ptep) -{ - pte_t pte = *ptep; - if (!pte_young(pte)) - return 0; - set_pte(ptep, pte_mkold(pte)); - return 1; -} +#define ptep_test_and_clear_young(__vma, __address, __ptep) \ +({ \ + pte_t __pte = *(__ptep); \ + int r = 1; \ + if (!pte_young(__pte)) \ + r = 0; \ + else \ + set_pte_at((__vma)->vm_mm, (__address), \ + (__ptep), pte_mkold(__pte)); \ + r; \ +}) #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young(__vma, __address, __ptep) \ ({ \ - int __young = ptep_test_and_clear_young(__ptep); \ + int __young; \ + __young = ptep_test_and_clear_young(__vma, __address, __ptep); \ if (__young) \ flush_tlb_page(__vma, __address); \ __young; \ @@ -64,20 +68,24 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY -static inline int ptep_test_and_clear_dirty(pte_t *ptep) -{ - pte_t pte = *ptep; - if (!pte_dirty(pte)) - return 0; - set_pte(ptep, pte_mkclean(pte)); - return 1; -} +#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \ +({ \ + pte_t __pte = *ptep; \ + int r = 1; \ + if (!pte_dirty(__pte)) \ + r = 0; \ + else \ + set_pte_at((__vma)->vm_mm, (__address), (__ptep), \ + pte_mkclean(__pte)); \ + r; \ +}) #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ ({ \ - int __dirty = ptep_test_and_clear_dirty(__ptep); \ + int __dirty; \ + __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \ if (__dirty) \ flush_tlb_page(__vma, __address); \ __dirty; \ @@ -85,36 +93,29 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep) #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(pte_t *ptep) -{ - pte_t pte = *ptep; - pte_clear(ptep); - return pte; -} +#define ptep_get_and_clear(__mm, __address, __ptep) \ +({ \ + pte_t __pte = *(__ptep); \ + pte_clear((__mm), (__address), (__ptep)); \ + __pte; \ +}) #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH #define ptep_clear_flush(__vma, __address, __ptep) \ ({ \ - pte_t __pte = ptep_get_and_clear(__ptep); \ + pte_t __pte; \ + __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \ flush_tlb_page(__vma, __address); \ __pte; \ }) #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(pte_t *ptep) -{ - pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -} -#endif - -#ifndef __HAVE_ARCH_PTEP_MKDIRTY -static inline void 
ptep_mkdirty(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte = *ptep; - set_pte(ptep, pte_mkdirty(old_pte)); + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h index 58026f6c987a..fa07bd6c7529 100644 --- a/include/asm-i386/pgtable-2level.h +++ b/include/asm-i386/pgtable-2level.h @@ -14,10 +14,11 @@ * hook is made available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) -#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0)) +#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0)) #define pte_same(a, b) ((a).pte_low == (b).pte_low) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!(x).pte_low) diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h index 0b8a0da75678..d609f9c2c1f0 100644 --- a/include/asm-i386/pgtable-3level.h +++ b/include/asm-i386/pgtable-3level.h @@ -56,6 +56,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte) smp_wmb(); ptep->pte_low = pte.pte_low; } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + #define __HAVE_ARCH_SET_PTE_ATOMIC #define set_pte_atomic(pteptr,pteval) \ set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) @@ -88,7 +90,7 @@ static inline void pud_clear (pud_t * pud) { } #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ pmd_index(address)) -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t res; diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index f34be5140a23..da4f252b7b84 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -201,7 +201,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -243,22 +243,24 @@ static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return p # include <asm/pgtable-2level.h> #endif -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); } -static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, &ptep->pte_low); } -static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); } +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + clear_bit(_PAGE_BIT_RW, &ptep->pte_low); +} /* * Macro to mark a page protection value as "uncacheable". 
On processors which do not support @@ -407,7 +409,6 @@ extern void noexec_setup(const char *str); #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index 594f73f8b574..b74428d554b2 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -202,6 +202,7 @@ ia64_phys_addr_valid (unsigned long addr) * the PTE in a page table. Nothing special needs to be on IA-64. */ #define set_pte(ptep, pteval) (*(ptep) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define RGN_SIZE (1UL << 61) #define RGN_KERNEL 7 @@ -243,7 +244,7 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE)) -#define pte_clear(pte) (pte_val(*(pte)) = 0UL) +#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL) /* pte_page() returns the "struct page *" corresponding to the PTE: */ #define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET)) @@ -345,7 +346,7 @@ pgd_offset (struct mm_struct *mm, unsigned long address) /* atomic versions of the some PTE manipulations: */ static inline int -ptep_test_and_clear_young (pte_t *ptep) +ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_young(*ptep)) @@ -355,13 +356,13 @@ ptep_test_and_clear_young (pte_t *ptep) pte_t pte = *ptep; if (!pte_young(pte)) return 0; - set_pte(ptep, pte_mkold(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; #endif } static inline int -ptep_test_and_clear_dirty (pte_t *ptep) +ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_dirty(*ptep)) @@ -371,25 +372,25 @@ ptep_test_and_clear_dirty (pte_t *ptep) pte_t pte = *ptep; if (!pte_dirty(pte)) return 0; - set_pte(ptep, pte_mkclean(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte)); return 1; #endif } static inline pte_t -ptep_get_and_clear (pte_t *ptep) +ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP return __pte(xchg((long *) ptep, 0)); #else pte_t pte = *ptep; - pte_clear(ptep); + pte_clear(mm, addr, ptep); return pte; #endif } static inline void -ptep_set_wrprotect (pte_t *ptep) +ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP unsigned long new, old; @@ -400,18 +401,7 @@ ptep_set_wrprotect (pte_t *ptep) } while (cmpxchg((unsigned long *) ptep, old, new) != old); #else pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -#endif -} - -static inline void -ptep_mkdirty (pte_t *ptep) -{ -#ifdef CONFIG_SMP - set_bit(_PAGE_D_BIT, ptep); -#else - pte_t old_pte = *ptep; - set_pte(ptep, pte_mkdirty(old_pte)); + set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); #endif } @@ -558,7 +548,6 @@ do { \ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #define __HAVE_ARCH_PGD_OFFSET_GATE #include <asm-generic/pgtable.h> diff --git a/include/asm-m32r/pgtable-2level.h b/include/asm-m32r/pgtable-2level.h index 8af66284b436..861727c20e8f 100644 --- a/include/asm-m32r/pgtable-2level.h +++ b/include/asm-m32r/pgtable-2level.h @@ -44,6 +44,7 @@ 
static inline int pgd_present(pgd_t pgd) { return 1; } * hook is made available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) /* * (pmds are folded into pgds so this doesnt get actually called, @@ -60,7 +61,7 @@ static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address) return (pmd_t *) dir; } -#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0)) +#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) #define pte_same(a, b) (pte_val(a) == pte_val(b)) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!pte_val(x)) diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h index d2b67741e381..1710d91d4e56 100644 --- a/include/asm-m32r/pgtable.h +++ b/include/asm-m32r/pgtable.h @@ -176,7 +176,7 @@ extern unsigned long empty_zero_page[1024]; /* page table for 0-4MB for everybody */ #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -282,26 +282,21 @@ static inline pte_t pte_mkwrite(pte_t pte) return pte; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { clear_bit(_PAGE_BIT_WRITE, ptep); } -static inline void ptep_mkdirty(pte_t *ptep) -{ - set_bit(_PAGE_BIT_DIRTY, ptep); -} - /* * Macro and implementation to make a page protection as uncachable. 
*/ @@ -390,7 +385,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h index f9391dfbb710..1628723458f5 100644 --- a/include/asm-m68k/motorola_pgtable.h +++ b/include/asm-m68k/motorola_pgtable.h @@ -129,7 +129,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(ptep) ({ pte_val(*(ptep)) = 0; }) +#define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) #define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT)) #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h index 05b3d68cb261..186585d45d8c 100644 --- a/include/asm-m68k/pgtable.h +++ b/include/asm-m68k/pgtable.h @@ -26,6 +26,7 @@ do{ \ *(pteptr) = (pteval); \ } while(0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* PMD_SHIFT determines the size of the area a second-level page table can map */ diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h index e5485e830bd7..e974bb072047 100644 --- a/include/asm-m68k/sun3_pgtable.h +++ b/include/asm-m68k/sun3_pgtable.h @@ -123,7 +123,10 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) static inline int pte_none (pte_t pte) { return !pte_val (pte); } static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; } -static inline void pte_clear (pte_t *ptep) { pte_val (*ptep) = 0; } +static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_val (*ptep) = 0; +} #define pte_pfn(pte) (pte_val(pte) & SUN3_PAGE_PGNUM_MASK) #define pfn_pte(pfn, pgprot) \ diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index 0461ee1d3349..61983c3fc9c3 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -100,14 +100,15 @@ static inline void set_pte(pte_t *ptep, pte_t pte) buddy->pte_low |= _PAGE_GLOBAL; } } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) -static inline void pte_clear(pte_t *ptep) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); + set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); else - set_pte(ptep, __pte(0)); + set_pte_at(mm, addr, ptep, __pte(0)); } #else /* @@ -130,16 +131,17 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) } #endif } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) -static inline void pte_clear(pte_t *ptep) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); + set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); else #endif - set_pte(ptep, __pte(0)); + set_pte_at(mm, addr, ptep, __pte(0)); } #endif diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h index ccfc070fd055..149c17aea2fe 100644 --- a/include/asm-parisc/pgtable.h +++ 
b/include/asm-parisc/pgtable.h @@ -39,6 +39,7 @@ do{ \ *(pteptr) = (pteval); \ } while(0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #endif /* !__ASSEMBLY__ */ @@ -263,7 +264,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0) +#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) @@ -431,7 +432,7 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_young(*ptep)) @@ -441,12 +442,12 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) pte_t pte = *ptep; if (!pte_young(pte)) return 0; - set_pte(ptep, pte_mkold(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; #endif } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_dirty(*ptep)) @@ -456,14 +457,14 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep) pte_t pte = *ptep; if (!pte_dirty(pte)) return 0; - set_pte(ptep, pte_mkclean(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte)); return 1; #endif } extern spinlock_t pa_dbit_lock; -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; pte_t pte; @@ -472,13 +473,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep) pte = old_pte = *ptep; pte_val(pte) &= ~_PAGE_PRESENT; pte_val(pte) |= _PAGE_FLUSH; - set_pte(ptep,pte); + set_pte_at(mm,addr,ptep,pte); spin_unlock(&pa_dbit_lock); return old_pte; } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP unsigned long new, old; @@ -489,17 +490,7 @@ static inline void ptep_set_wrprotect(pte_t *ptep) } while (cmpxchg((unsigned long *) ptep, old, new) != old); #else pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -#endif -} - -static inline void ptep_mkdirty(pte_t *ptep) -{ -#ifdef CONFIG_SMP - set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep)); -#else - pte_t old_pte = *ptep; - set_pte(ptep, pte_mkdirty(old_pte)); + set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); #endif } @@ -518,7 +509,6 @@ static inline void ptep_mkdirty(pte_t *ptep) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h index 7cda32eb1237..5f1814443e26 100644 --- a/include/asm-ppc/highmem.h +++ b/include/asm-ppc/highmem.h @@ -90,7 +90,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) #ifdef HIGHMEM_DEBUG BUG_ON(!pte_none(*(kmap_pte+idx))); #endif - set_pte(kmap_pte+idx, mk_pte(page, kmap_prot)); + set_pte_at(&init_mm, vaddr, 
kmap_pte+idx, mk_pte(page, kmap_prot)); flush_tlb_page(NULL, vaddr); return (void*) vaddr; @@ -114,7 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) * force other mappings to Oops if they'll try to access * this pte without first remap it */ - pte_clear(kmap_pte+idx); + pte_clear(&init_mm, vaddr, kmap_pte+idx); flush_tlb_page(NULL, vaddr); #endif dec_preempt_count(); diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 642c86b74121..0ee34debaf5a 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -448,7 +448,7 @@ extern unsigned long empty_zero_page[1024]; #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) -#define pte_clear(ptep) do { set_pte((ptep), __pte(0)); } while (0) +#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) @@ -512,6 +512,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) } /* + * When flushing the tlb entry for a page, we also need to flush the hash + * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. + */ +extern int flush_hash_pages(unsigned context, unsigned long va, + unsigned long pmdval, int count); + +/* Add an HPTE to the hash table */ +extern void add_hash_page(unsigned context, unsigned long va, + unsigned long pmdval); + +/* * Atomic PTE updates. * * pte_update clears and sets bit atomically, and returns @@ -542,7 +553,8 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, * On machines which use an MMU hash table we avoid changing the * _PAGE_HASHPTE bit. */ -static inline void set_pte(pte_t *ptep, pte_t pte) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { #if _PAGE_HASHPTE != 0 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); @@ -551,43 +563,47 @@ static inline void set_pte(pte_t *ptep, pte_t pte) #endif } -extern void flush_hash_one_pte(pte_t *ptep); - /* * 2.6 calles this without flushing the TLB entry, this is wrong * for our hash-based implementation, we fix that up here */ -static inline int ptep_test_and_clear_young(pte_t *ptep) +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) { unsigned long old; old = pte_update(ptep, _PAGE_ACCESSED, 0); #if _PAGE_HASHPTE != 0 - if (old & _PAGE_HASHPTE) - flush_hash_one_pte(ptep); + if (old & _PAGE_HASHPTE) { + unsigned long ptephys = __pa(ptep) & PAGE_MASK; + flush_hash_pages(context, addr, ptephys, 1); + } #endif return (old & _PAGE_ACCESSED) != 0; } +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ + __ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep) -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) { return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; } -static inline pte_t ptep_get_and_clear(pte_t *ptep) +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); } -static inline void ptep_set_wrprotect(pte_t *ptep) +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void 
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); } -static inline void ptep_mkdirty(pte_t *ptep) -{ - pte_update(ptep, 0, _PAGE_DIRTY); -} - #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) { @@ -607,6 +623,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) +#define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) /* @@ -659,17 +676,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init(void); /* - * When flushing the tlb entry for a page, we also need to flush the hash - * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. - */ -extern int flush_hash_pages(unsigned context, unsigned long va, - unsigned long pmdval, int count); - -/* Add an HPTE to the hash table */ -extern void add_hash_page(unsigned context, unsigned long va, - unsigned long pmdval); - -/* * Encode and decode a swap entry. * Note that the bits we use in a PTE for representing a swap entry * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the @@ -741,15 +747,9 @@ static inline int io_remap_page_range(struct vm_area_struct *vma, extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep); -#endif /* !__ASSEMBLY__ */ - -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY -#define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> +#endif /* !__ASSEMBLY__ */ + #endif /* _PPC_PGTABLE_H */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h index dd717c2bb644..f9c0366287a3 100644 --- a/include/asm-ppc64/pgalloc.h +++ b/include/asm-ppc64/pgalloc.h @@ -48,42 +48,26 @@ pmd_free(pmd_t *pmd) #define pmd_populate(mm, pmd, pte_page) \ pmd_populate_kernel(mm, pmd, page_address(pte_page)) -static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - pte_t *pte; - pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); - if (pte) { - struct page *ptepage = virt_to_page(pte); - ptepage->mapping = (void *) mm; - ptepage->index = address & PMD_MASK; - } - return pte; + return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); } -static inline struct page * -pte_alloc_one(struct mm_struct *mm, unsigned long address) +static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) { - pte_t *pte; - pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); - if (pte) { - struct page *ptepage = virt_to_page(pte); - ptepage->mapping = (void *) mm; - ptepage->index = address & PMD_MASK; - return ptepage; - } + pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT); + if (pte) + return virt_to_page(pte); return NULL; } static inline void pte_free_kernel(pte_t *pte) { - virt_to_page(pte)->mapping = NULL; kmem_cache_free(zero_cache, pte); } static inline void pte_free(struct page *ptepage) { - ptepage->mapping = NULL; kmem_cache_free(zero_cache, page_address(ptepage)); } diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h index 0f6990ad957d..5466e1d3e4a4 100644 --- 
a/include/asm-ppc64/pgtable.h +++ b/include/asm-ppc64/pgtable.h @@ -315,9 +315,10 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr) * batch, doesn't actually triggers the hash flush immediately, * you need to call flush_tlb_pending() to do that. */ -extern void hpte_update(pte_t *ptep, unsigned long pte, int wrprot); +extern void hpte_update(struct mm_struct *mm, unsigned long addr, unsigned long pte, + int wrprot); -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; @@ -325,18 +326,25 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) return 0; old = pte_update(ptep, _PAGE_ACCESSED); if (old & _PAGE_HASHPTE) { - hpte_update(ptep, old, 0); + hpte_update(mm, addr, old, 0); flush_tlb_pending(); } return (old & _PAGE_ACCESSED) != 0; } +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) /* * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the * moment we always flush but we need to fix hpte_update and test if the * optimisation is worth it. */ -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; @@ -344,11 +352,19 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep) return 0; old = pte_update(ptep, _PAGE_DIRTY); if (old & _PAGE_HASHPTE) - hpte_update(ptep, old, 0); + hpte_update(mm, addr, old, 0); return (old & _PAGE_DIRTY) != 0; } +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY +#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) -static inline void ptep_set_wrprotect(pte_t *ptep) +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; @@ -356,7 +372,7 @@ static inline void ptep_set_wrprotect(pte_t *ptep) return; old = pte_update(ptep, _PAGE_RW); if (old & _PAGE_HASHPTE) - hpte_update(ptep, old, 0); + hpte_update(mm, addr, old, 0); } /* @@ -370,42 +386,46 @@ static inline void ptep_set_wrprotect(pte_t *ptep) #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young(__vma, __address, __ptep) \ ({ \ - int __young = ptep_test_and_clear_young(__ptep); \ + int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ + __ptep); \ __young; \ }) #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ ({ \ - int __dirty = ptep_test_and_clear_dirty(__ptep); \ + int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ + __ptep); \ flush_tlb_page(__vma, __address); \ __dirty; \ }) -static inline pte_t ptep_get_and_clear(pte_t *ptep) +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old = pte_update(ptep, ~0UL); if (old & _PAGE_HASHPTE) - hpte_update(ptep, old, 0); + hpte_update(mm, addr, old, 0); return __pte(old); } -static inline void pte_clear(pte_t * ptep) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) { unsigned long old = pte_update(ptep, ~0UL); if (old & _PAGE_HASHPTE) - hpte_update(ptep, old, 0); + 
hpte_update(mm, addr, old, 0); } /* * set_pte stores a linux PTE into the linux page table. */ -static inline void set_pte(pte_t *ptep, pte_t pte) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { if (pte_present(*ptep)) { - pte_clear(ptep); + pte_clear(mm, addr, ptep); flush_tlb_pending(); } *ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS; @@ -443,6 +463,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) +#define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) extern unsigned long ioremap_bot, ioremap_base; @@ -550,14 +571,8 @@ static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) return pt; } -#endif /* __ASSEMBLY__ */ - -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY -#define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> +#endif /* __ASSEMBLY__ */ + #endif /* _PPC64_PGTABLE_H */ diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h index 90308137c8e6..91effd7c5c9f 100644 --- a/include/asm-s390/pgalloc.h +++ b/include/asm-s390/pgalloc.h @@ -130,8 +130,10 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); if (pte != NULL) { - for (i=0; i < PTRS_PER_PTE; i++) - pte_clear(pte+i); + for (i=0; i < PTRS_PER_PTE; i++) { + pte_clear(mm, vmaddr, pte+i); + vmaddr += PAGE_SIZE; + } } return pte; } diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 85f043c6d5cf..b918318f86a2 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -322,6 +322,7 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval) { *pteptr = pteval; } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* * pgd/pmd/pte query functions @@ -457,7 +458,7 @@ extern inline void pmd_clear(pmd_t * pmdp) #endif /* __s390x__ */ -extern inline void pte_clear(pte_t *ptep) +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID_EMPTY; } @@ -521,7 +522,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) return pte; } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return 0; } @@ -531,10 +532,10 @@ ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* No need to flush TLB; bits are in storage key */ - return ptep_test_and_clear_young(ptep); + return ptep_test_and_clear_young(vma, address, ptep); } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return 0; } @@ -544,13 +545,13 @@ ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* No need to flush TLB; bits are in storage key */ - return ptep_test_and_clear_dirty(ptep); + return ptep_test_and_clear_dirty(vma, address, ptep); } -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; - pte_clear(ptep); + pte_clear(mm, addr, ptep); return pte; } @@ -573,19 +574,14 @@ 
ptep_clear_flush(struct vm_area_struct *vma, : "=m" (*ptep) : "m" (*ptep), "a" (ptep), "a" (address) ); #endif /* __s390x__ */ - pte_clear(ptep); + pte_val(*ptep) = _PAGE_INVALID_EMPTY; return pte; } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -} - -static inline void ptep_mkdirty(pte_t *ptep) -{ - pte_mkdirty(*ptep); + set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); } static inline void @@ -802,7 +798,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h index b7d4561d8244..b0528aa3cb1f 100644 --- a/include/asm-sh/pgtable-2level.h +++ b/include/asm-sh/pgtable-2level.h @@ -41,6 +41,8 @@ static inline void pgd_clear (pgd_t * pgdp) { } * hook is made available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index 8853eb2485a5..e3592103f4d1 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -164,7 +164,7 @@ extern unsigned long empty_zero_page[1024]; #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -290,7 +290,7 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR -extern pte_t ptep_get_and_clear(pte_t *ptep); +extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); #endif #include <asm-generic/pgtable.h> diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h index 4b3f31ac62df..2e63fd5a64b9 100644 --- a/include/asm-sh64/pgtable.h +++ b/include/asm-sh64/pgtable.h @@ -136,6 +136,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval) */ *(xp) = (x & NPHYS_SIGN) ? 
(x | NPHYS_MASK) : x; } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) { @@ -383,7 +384,7 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd); */ #define _PTE_EMPTY 0x0 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) (set_pte(xp, __pte(_PTE_EMPTY))) +#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY))) #define pte_none(x) (pte_val(x) == _PTE_EMPTY) /* diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index e62df8cbd99d..f45b1801be98 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -157,7 +157,7 @@ extern __inline__ int pte_none(pte_t pte) } #define pte_present(pte) BTFIXUP_CALL(pte_present)(pte) -#define pte_clear(pte) BTFIXUP_CALL(pte_clear)(pte) +#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte) BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t) BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) @@ -339,6 +339,7 @@ extern unsigned int pg_iobits; BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t) #define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) struct seq_file; BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *) diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index e1b053882ae9..7ba8df8c4921 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h @@ -191,25 +191,17 @@ extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - pte_t *pte = __pte_alloc_one_kernel(mm, address); - if (pte) { - struct page *page = virt_to_page(pte); - page->mapping = (void *) mm; - page->index = address & PMD_MASK; - } - return pte; + return __pte_alloc_one_kernel(mm, address); } static inline struct page * pte_alloc_one(struct mm_struct *mm, unsigned long addr) { pte_t *pte = __pte_alloc_one_kernel(mm, addr); - if (pte) { - struct page *page = virt_to_page(pte); - page->mapping = (void *) mm; - page->index = addr & PMD_MASK; - return page; - } + + if (pte) + return virt_to_page(pte); + return NULL; } @@ -246,13 +238,11 @@ static __inline__ void free_pte_slow(pte_t *pte) static inline void pte_free_kernel(pte_t *pte) { - virt_to_page(pte)->mapping = NULL; free_pte_fast(pte); } static inline void pte_free(struct page *ptepage) { - ptepage->mapping = NULL; free_pte_fast(page_address(ptepage)); } diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 83adf8070928..f6bc1dbdb367 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -15,6 +15,7 @@ #include <asm-generic/pgtable-nopud.h> #include <linux/config.h> +#include <linux/compiler.h> #include <asm/spitfire.h> #include <asm/asi.h> #include <asm/system.h> @@ -333,18 +334,23 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) #define pte_unmap_nested(pte) do { } while (0) /* Actual page table PTE updates. */ -extern void tlb_batch_add(pte_t *ptep, pte_t orig); +extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig); -static inline void set_pte(pte_t *ptep, pte_t pte) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { pte_t orig = *ptep; *ptep = pte; - if (pte_present(orig)) - tlb_batch_add(ptep, orig); + + /* It is more efficient to let flush_tlb_kernel_range() + * handle init_mm tlb flushes. 
+ */ + if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) + tlb_batch_add(mm, addr, ptep, orig); } -#define pte_clear(ptep) set_pte((ptep), __pte(0UL)) +#define pte_clear(mm,addr,ptep) \ + set_pte_at((mm), (addr), (ptep), __pte(0UL)) extern pgd_t swapper_pg_dir[1]; diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h index f6263d11f205..e5353c095f80 100644 --- a/include/asm-um/pgtable-2level.h +++ b/include/asm-um/pgtable-2level.h @@ -59,6 +59,7 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) *pteptr = pte_mknewpage(pteval); if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h index acebb593cfd6..faf051c2c418 100644 --- a/include/asm-um/pgtable-3level.h +++ b/include/asm-um/pgtable-3level.h @@ -84,6 +84,7 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) *pteptr = pte_mknewpage(*pteptr); if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval)) diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index e0a0a95639a7..71f9c0c78c0c 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h @@ -142,7 +142,7 @@ extern pte_t * __bad_pagetable(void); #define PAGE_PTR(address) \ ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) -#define pte_clear(xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) #define pmd_none(x) (!(pmd_val(x) & ~_PAGE_NEWPAGE)) #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 1e2cc99aebd8..262544f187ee 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -73,6 +73,7 @@ static inline void set_pte(pte_t *dst, pte_t val) { pte_val(*dst) = pte_val(val); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) static inline void set_pmd(pmd_t *dst, pmd_t val) { @@ -102,7 +103,7 @@ extern inline void pgd_clear (pgd_t * pgd) #define pud_page(pud) \ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) -#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0)) +#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) #define pte_same(a, b) ((a).pte == (b).pte) #define PMD_SIZE (1UL << PMD_SHIFT) @@ -224,7 +225,7 @@ static inline unsigned long pud_bad(pud_t pud) #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? 
*/ @@ -263,22 +264,24 @@ extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); } -static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, ptep); } -static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, ptep); } +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + clear_bit(_PAGE_BIT_RW, ptep); +} /* * Macro to mark a page protection value as "uncacheable". @@ -419,7 +422,6 @@ extern int kern_addr_valid(unsigned long addr); #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> |
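
Where the extra arguments are actually consumed, the hunks above show why they were added: ppc64 passes mm/addr through to hpte_update(), and sparc64 keys its deferred TLB batching on them. The following is a compilable stand-alone approximation of the sparc64 set_pte_at() hunk, with toy types, a stub tlb_batch_add(), and the likely() annotation omitted:

```c
/*
 * Approximation of the sparc64-style set_pte_at() shown above.
 * Types and the tlb_batch_add() stub are illustrative stand-ins.
 */
#include <stdio.h>

#define _PAGE_VALID	0x1UL

typedef struct { unsigned long pte; } pte_t;
struct mm_struct { int context; };

static struct mm_struct init_mm = { 0 };

/* Stand-in for the real batching hook: queue the old translation
 * for a deferred TLB/hash flush. */
static void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
			  pte_t *ptep, pte_t orig)
{
	printf("batch flush: ctx=%d va=%#lx old=%#lx\n",
	       mm->context, vaddr, orig.pte);
	(void)ptep;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	pte_t orig = *ptep;

	*ptep = pte;
	/*
	 * Kernel mappings are left to flush_tlb_kernel_range(), so
	 * only user mms with a previously valid PTE get batched.
	 */
	if (mm != &init_mm && (orig.pte & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig);
}

int main(void)
{
	struct mm_struct mm = { 42 };
	pte_t pte = { _PAGE_VALID | 0x2000 };

	set_pte_at(&mm, 0x1000UL, &pte, (pte_t){ 0 });
	return 0;
}
```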
