diff options
| author | David S. Miller <davem@nuts.davemloft.net> | 2005-02-22 23:42:56 -0800 |
|---|---|---|
| committer | David S. Miller <davem@nuts.davemloft.net> | 2005-02-22 23:42:56 -0800 |
| commit | ae3d0a847f4b38812241e4a5dc3371965c752a8c (patch) | |
| tree | 41ba29cfb3f0085b0f82a9387c6e492274678a37 | |
| parent | 75be0272cb9b49ee7cb753b008844b89c2269585 (diff) | |
[MM]: Add set_pte_at() which takes 'mm' and 'addr' args.
I'm taking a slightly different approach this time around so things
are easier to integrate. Here is the first patch which builds the
infrastructure. Basically:
1) Add set_pte_at() which is set_pte() with 'mm' and 'addr' arguments
added. All generic code uses set_pte_at().
Most platforms simply get this define:
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
I chose this method over simply changing all set_pte() call sites
because many platforms implement this in assembler and it would
take forever to preserve the build and stabilize things if modifying
that was necessary.
Soon, with platform maintainers' help, we can kill off set_pte() entirely.
To be honest, there are only a handful of set_pte() call sites in the
arch specific code.
Actually, in this patch ppc64 is completely set_pte() free and does not
define it.
2) pte_clear() gets 'mm' and 'addr' arguments now.
This had a cascading effect on many ptep_test_and_*() routines. Specifically:
a) ptep_test_and_clear_{young,dirty}() now take 'vma' and 'address' args.
b) ptep_get_and_clear now takes 'mm' and 'address' args.
c) ptep_mkdirty was deleted, unused by any code.
d) ptep_set_wrprotect now takes 'mm' and 'address' args.
I've tested this patch as follows:
1) compile and run tested on sparc64/SMP
2) compile tested on:
a) ppc64/SMP
b) i386 both with and without PAE enabled
Signed-off-by: David S. Miller <davem@davemloft.net>
59 files changed, 253 insertions, 250 deletions
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c index 3c2bde8e357b..36c7961bc1bd 100644 --- a/arch/arm/mm/consistent.c +++ b/arch/arm/mm/consistent.c @@ -322,7 +322,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine); void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) { struct vm_region *c; - unsigned long flags; + unsigned long flags, addr; pte_t *ptep; size = PAGE_ALIGN(size); @@ -341,11 +341,13 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr } ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + addr = c->vm_start; do { - pte_t pte = ptep_get_and_clear(ptep); + pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); unsigned long pfn; ptep++; + addr += PAGE_SIZE; if (!pte_none(pte) && pte_present(pte)) { pfn = pte_pfn(pte); diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c index 221698610121..fc4c4cad4e98 100644 --- a/arch/i386/mm/highmem.c +++ b/arch/i386/mm/highmem.c @@ -66,7 +66,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) * force other mappings to Oops if they'll try to access * this pte without first remap it */ - pte_clear(kmap_pte-idx); + pte_clear(&init_mm, vaddr, kmap_pte-idx); __flush_tlb_one(vaddr); #endif diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 74516dabf629..ceaeef55e505 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c @@ -216,7 +216,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, BUG_ON(end & (HPAGE_SIZE - 1)); for (address = start; address < end; address += HPAGE_SIZE) { - pte = ptep_get_and_clear(huge_pte_offset(mm, address)); + pte = ptep_get_and_clear(mm, address, huge_pte_offset(mm, address)); if (pte_none(pte)) continue; page = pte_page(pte); diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 13e0f7d7c291..1d46b16764bd 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -244,7 +244,7 @@ void unmap_hugepage_range(struct 
vm_area_struct *vma, unsigned long start, unsig continue; page = pte_page(*pte); put_page(page); - pte_clear(pte); + pte_clear(mm, address, pte); } mm->rss -= (end - start) >> PAGE_SHIFT; flush_tlb_range(vma, start, end); diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c index 14b020d39d0f..dd5e2e31885b 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c @@ -75,7 +75,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) * force other mappings to Oops if they'll try to access * this pte without first remap it */ - pte_clear(kmap_pte-idx); + pte_clear(&init_mm, vaddr, kmap_pte-idx); local_flush_tlb_one(vaddr); #endif diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 07850b6e1922..cd39863c1012 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -180,7 +180,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, end = PMD_SIZE; do { pte_t page = *pte; - pte_clear(pte); + pte_clear(&init_mm, vaddr, pte); purge_tlb_start(); pdtlb_kernel(orig_vaddr); purge_tlb_end(); diff --git a/arch/parisc/mm/kmap.c b/arch/parisc/mm/kmap.c index 20468fec5491..1b1acd5e2f6e 100644 --- a/arch/parisc/mm/kmap.c +++ b/arch/parisc/mm/kmap.c @@ -49,10 +49,10 @@ typedef void (*pte_iterator_t) (pte_t * pte, unsigned long arg); * unmap_uncached_page() and save a little code space but I didn't * do that since I'm not certain whether this is the right path. 
-PB */ -static void unmap_cached_pte(pte_t * pte, unsigned long arg) +static void unmap_cached_pte(pte_t * pte, unsigned long addr, unsigned long arg) { pte_t page = *pte; - pte_clear(pte); + pte_clear(&init_mm, addr, pte); if (!pte_none(page)) { if (pte_present(page)) { unsigned long map_nr = pte_pagenr(page); diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c index 59359d89d37d..1daa41d1cb0f 100644 --- a/arch/ppc/kernel/dma-mapping.c +++ b/arch/ppc/kernel/dma-mapping.c @@ -262,7 +262,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent); void __dma_free_coherent(size_t size, void *vaddr) { struct vm_region *c; - unsigned long flags; + unsigned long flags, addr; pte_t *ptep; size = PAGE_ALIGN(size); @@ -281,11 +281,13 @@ void __dma_free_coherent(size_t size, void *vaddr) } ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + addr = c->vm_start; do { - pte_t pte = ptep_get_and_clear(ptep); + pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); unsigned long pfn; ptep++; + addr += PAGE_SIZE; if (!pte_none(pte) && pte_present(pte)) { pfn = pte_pfn(pte); diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 3aa910f89369..e82414f21b09 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c @@ -149,7 +149,8 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) } static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, - struct page *page, pte_t *ptep, int write_access) + unsigned long addr, struct page *page, + pte_t *ptep, int write_access) { pte_t entry; @@ -163,7 +164,7 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, entry = pte_mkyoung(entry); entry = pte_mkhuge(entry); - set_pte(ptep, entry); + set_pte_at(mm, addr, ptep, entry); } /* @@ -316,7 +317,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, ptepage = pte_page(entry); get_page(ptepage); dst->rss += (HPAGE_SIZE / PAGE_SIZE); - set_pte(dst_pte, entry); + 
set_pte_at(dst, addr, dst_pte, entry); addr += HPAGE_SIZE; } @@ -421,7 +422,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, pte = *ptep; page = pte_page(pte); - pte_clear(ptep); + pte_clear(mm, addr, ptep); put_page(page); } @@ -486,7 +487,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) goto out; } } - set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE); + set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE); } out: spin_unlock(&mm->page_table_lock); diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c index 4cb05a070c01..dddc68d7c6eb 100644 --- a/arch/ppc64/mm/init.c +++ b/arch/ppc64/mm/init.c @@ -153,7 +153,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea); pa = abs_to_phys(pa); - set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); + set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); spin_unlock(&ioremap_mm.page_table_lock); } else { unsigned long va, vpn, hash, hpteg; @@ -305,7 +305,7 @@ static void unmap_im_area_pte(pmd_t *pmd, unsigned long address, do { pte_t page; - page = ptep_get_and_clear(pte); + page = ptep_get_and_clear(&ioremap_mm, address, pte); address += PAGE_SIZE; pte++; if (pte_none(page)) diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 7240782d7823..8e723bc7f795 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -145,7 +145,7 @@ void __init paging_init(void) for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) { pte = pfn_pte(pfn, PAGE_KERNEL); if (pfn >= max_low_pfn) - pte_clear(&pte); + pte_clear(&init_mm, 0, &pte); set_pte(pg_table, pte); pfn++; } @@ -229,7 +229,7 @@ void __init paging_init(void) for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { pte = pfn_pte(pfn, PAGE_KERNEL); if (pfn >= max_low_pfn) { - pte_clear(&pte); + pte_clear(&init_mm, 0, &pte); continue; } set_pte(pt_dir, pte); diff --git a/arch/sh/mm/hugetlbpage.c 
b/arch/sh/mm/hugetlbpage.c index 4d5d687d1106..ac807fdc6d99 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -202,7 +202,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, page = pte_page(*pte); put_page(page); for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - pte_clear(pte); + pte_clear(mm, address+(i*PAGE_SIZE), pte); pte++; } } diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index f6a76230872f..e5907c7330e5 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c @@ -56,7 +56,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page) local_irq_restore(flags); update_mmu_cache(NULL, p3_addr, entry); __clear_user_page((void *)p3_addr, to); - pte_clear(pte); + pte_clear(&init_mm, p3_addr, pte); up(&p3map_sem[(address & CACHE_ALIAS)>>12]); } } @@ -95,7 +95,7 @@ void copy_user_page(void *to, void *from, unsigned long address, local_irq_restore(flags); update_mmu_cache(NULL, p3_addr, entry); __copy_user_page((void *)p3_addr, from, to); - pte_clear(pte); + pte_clear(&init_mm, p3_addr, pte); up(&p3map_sem[(address & CACHE_ALIAS)>>12]); } } @@ -103,11 +103,11 @@ void copy_user_page(void *to, void *from, unsigned long address, /* * For SH-4, we have our own implementation for ptep_get_and_clear */ -inline pte_t ptep_get_and_clear(pte_t *ptep) +inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; - pte_clear(ptep); + pte_clear(mm, addr, ptep); if (!pte_not_present(pte)) { unsigned long pfn = pte_pfn(pte); if (pfn_valid(pfn)) { diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c index 4a6eea8fb1e1..ff9ece986cbc 100644 --- a/arch/sh/mm/pg-sh7705.c +++ b/arch/sh/mm/pg-sh7705.c @@ -117,11 +117,11 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg * For SH7705, we have our own implementation for ptep_get_and_clear * Copied from pg-sh4.c */ -inline pte_t ptep_get_and_clear(pte_t *ptep) +inline pte_t 
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; - pte_clear(ptep); + pte_clear(mm, addr, ptep); if (!pte_not_present(pte)) { unsigned long pfn = pte_pfn(pte); if (pfn_valid(pfn)) { diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c index 50b25735fb93..af566d7eee4b 100644 --- a/arch/sh64/mm/hugetlbpage.c +++ b/arch/sh64/mm/hugetlbpage.c @@ -202,7 +202,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, page = pte_page(*pte); put_page(page); for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - pte_clear(pte); + pte_clear(mm, address+(i*PAGE_SIZE), pte); pte++; } } diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c index 8e0549025161..89889e5f6671 100644 --- a/arch/sh64/mm/ioremap.c +++ b/arch/sh64/mm/ioremap.c @@ -400,7 +400,7 @@ static void shmedia_unmapioaddr(unsigned long vaddr) return; clear_page((void *)ptep); - pte_clear(ptep); + pte_clear(&init_mm, vaddr, ptep); } unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name) diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c index f4c6851b5c1a..70d2acb3d1bc 100644 --- a/arch/sparc/mm/generic.c +++ b/arch/sparc/mm/generic.c @@ -47,7 +47,7 @@ static inline void forget_pte(pte_t page) * They use a pgprot that sets PAGE_IO and does not check the * mem_map table as this is independent of normal memory. 
*/ -static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size, +static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size, unsigned long offset, pgprot_t prot, int space) { unsigned long end; @@ -58,7 +58,7 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign end = PMD_SIZE; do { pte_t oldpage = *pte; - pte_clear(pte); + pte_clear(mm, address, pte); set_pte(pte, mk_pte_io(offset, prot, space)); forget_pte(oldpage); address += PAGE_SIZE; @@ -67,7 +67,7 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign } while (address < end); } -static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size, +static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size, unsigned long offset, pgprot_t prot, int space) { unsigned long end; @@ -78,10 +78,10 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne end = PGDIR_SIZE; offset -= address; do { - pte_t * pte = pte_alloc_map(current->mm, pmd, address); + pte_t * pte = pte_alloc_map(mm, pmd, address); if (!pte) return -ENOMEM; - io_remap_pte_range(pte, address, end - address, address + offset, prot, space); + io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space); address = (address + PMD_SIZE) & PMD_MASK; pmd++; } while (address < end); @@ -107,7 +107,7 @@ int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned error = -ENOMEM; if (!pmd) break; - error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space); + error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space); if (error) break; from = (from + PGDIR_SIZE) & PGDIR_MASK; diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c index c85d4923dc33..4d8ed9c65182 100644 --- a/arch/sparc/mm/highmem.c +++ 
b/arch/sparc/mm/highmem.c @@ -88,7 +88,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) * force other mappings to Oops if they'll try to access * this pte without first remap it */ - pte_clear(kmap_pte-idx); + pte_clear(&init_mm, vaddr, kmap_pte-idx); /* XXX Fix - Anton */ #if 0 __flush_tlb_one(vaddr); diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 5954cd7791fb..c29692817022 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -207,7 +207,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, page = pte_page(*pte); put_page(page); for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - pte_clear(pte); + pte_clear(mm, address+(i*PAGE_SIZE), pte); pte++; } } diff --git a/fs/exec.c b/fs/exec.c index ee58e91a9d7f..1a065b2f8444 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -328,7 +328,7 @@ void install_arg_page(struct vm_area_struct *vma, } mm->rss++; lru_cache_add_active(page); - set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte( + set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( page, vma->vm_page_prot)))); page_add_anon_rmap(page, vma, address); pte_unmap(pte); diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h index a22e394a3d52..d27970940f03 100644 --- a/include/asm-alpha/pgtable.h +++ b/include/asm-alpha/pgtable.h @@ -22,6 +22,7 @@ * hook is made available. 
*/ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* PMD_SHIFT determines the size of the area a second-level page table can map */ #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) @@ -235,7 +236,10 @@ extern inline unsigned long pgd_page(pgd_t pgd) extern inline int pte_none(pte_t pte) { return !pte_val(pte); } extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } -extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; } +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; } diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index 10d747ba0ce0..d5ed84d427dd 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -254,7 +254,7 @@ extern struct page *empty_zero_page; #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) #define pte_none(pte) (!pte_val(pte)) -#define pte_clear(ptep) set_pte((ptep), __pte(0)) +#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) #define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) #define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) @@ -263,6 +263,7 @@ extern struct page *empty_zero_page; #define pte_unmap_nested(pte) do { } while (0) #define set_pte(ptep, pte) cpu_set_pte(ptep,pte) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* * The following only work if pte_present() is true. 
diff --git a/include/asm-arm26/pgtable.h b/include/asm-arm26/pgtable.h index fdebe1533405..95d16abeb189 100644 --- a/include/asm-arm26/pgtable.h +++ b/include/asm-arm26/pgtable.h @@ -154,7 +154,8 @@ extern struct page *empty_zero_page; #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define set_pte(pte_ptr, pte) ((*(pte_ptr)) = (pte)) -#define pte_clear(ptep) set_pte((ptep), __pte(0)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) +#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) /* macros to ease the getting of pointers to stuff... */ #define pgd_offset(mm, addr) ((pgd_t *)(mm)->pgd + __pgd_index(addr)) diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h index fc052d4a5032..957dd92d108d 100644 --- a/include/asm-cris/pgtable.h +++ b/include/asm-cris/pgtable.h @@ -34,6 +34,8 @@ extern void paging_init(void); * hook is made available. */ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
@@ -101,7 +103,7 @@ extern unsigned long empty_zero_page; #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0) +#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) #define pmd_none(x) (!pmd_val(x)) /* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h index eb56c87217d8..9d44b7e4a210 100644 --- a/include/asm-frv/pgtable.h +++ b/include/asm-frv/pgtable.h @@ -173,6 +173,7 @@ do { \ *(pteptr) = (pteval); \ asm volatile("dcf %M0" :: "U"(*pteptr)); \ } while(0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval)) @@ -353,7 +354,7 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address) #undef TEST_VERIFY_AREA #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -390,39 +391,33 @@ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pt static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); asm volatile("dcf %M0" :: "U"(*ptep)); return i; } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); asm volatile("dcf %M0" :: "U"(*ptep)); return i; } 
-static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long x = xchg(&ptep->pte, 0); asm volatile("dcf %M0" :: "U"(*ptep)); return __pte(x); } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { set_bit(_PAGE_BIT_WP, ptep); asm volatile("dcf %M0" :: "U"(*ptep)); } -static inline void ptep_mkdirty(pte_t *ptep) -{ - set_bit(_PAGE_BIT_DIRTY, ptep); - asm volatile("dcf %M0" :: "U"(*ptep)); -} - /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. @@ -512,7 +507,6 @@ static inline int pte_file(pte_t pte) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index cf791b073e76..51aa714405a2 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -16,7 +16,7 @@ #ifndef __HAVE_ARCH_SET_PTE_ATOMIC #define ptep_establish(__vma, __address, __ptep, __entry) \ do { \ - set_pte(__ptep, __entry); \ + set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ flush_tlb_page(__vma, __address); \ } while (0) #else /* __HAVE_ARCH_SET_PTE_ATOMIC */ @@ -37,26 +37,30 @@ do { \ */ #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ do { \ - set_pte(__ptep, __entry); \ + set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ flush_tlb_page(__vma, __address); \ } while (0) #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int ptep_test_and_clear_young(pte_t *ptep) -{ - pte_t pte = *ptep; - if (!pte_young(pte)) - return 0; - set_pte(ptep, pte_mkold(pte)); - return 1; -} +#define 
ptep_test_and_clear_young(__vma, __address, __ptep) \ +({ \ + pte_t __pte = *(__ptep); \ + int r = 1; \ + if (!pte_young(__pte)) \ + r = 0; \ + else \ + set_pte_at((__vma)->vm_mm, (__address), \ + (__ptep), pte_mkold(__pte)); \ + r; \ +}) #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young(__vma, __address, __ptep) \ ({ \ - int __young = ptep_test_and_clear_young(__ptep); \ + int __young; \ + __young = ptep_test_and_clear_young(__vma, __address, __ptep); \ if (__young) \ flush_tlb_page(__vma, __address); \ __young; \ @@ -64,20 +68,24 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY -static inline int ptep_test_and_clear_dirty(pte_t *ptep) -{ - pte_t pte = *ptep; - if (!pte_dirty(pte)) - return 0; - set_pte(ptep, pte_mkclean(pte)); - return 1; -} +#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \ +({ \ + pte_t __pte = *ptep; \ + int r = 1; \ + if (!pte_dirty(__pte)) \ + r = 0; \ + else \ + set_pte_at((__vma)->vm_mm, (__address), (__ptep), \ + pte_mkclean(__pte)); \ + r; \ +}) #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ ({ \ - int __dirty = ptep_test_and_clear_dirty(__ptep); \ + int __dirty; \ + __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \ if (__dirty) \ flush_tlb_page(__vma, __address); \ __dirty; \ @@ -85,36 +93,29 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep) #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(pte_t *ptep) -{ - pte_t pte = *ptep; - pte_clear(ptep); - return pte; -} +#define ptep_get_and_clear(__mm, __address, __ptep) \ +({ \ + pte_t __pte = *(__ptep); \ + pte_clear((__mm), (__address), (__ptep)); \ + __pte; \ +}) #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH #define ptep_clear_flush(__vma, __address, __ptep) \ ({ \ - pte_t __pte = ptep_get_and_clear(__ptep); \ + pte_t __pte; \ + __pte = 
ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \ flush_tlb_page(__vma, __address); \ __pte; \ }) #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(pte_t *ptep) -{ - pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -} -#endif - -#ifndef __HAVE_ARCH_PTEP_MKDIRTY -static inline void ptep_mkdirty(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte = *ptep; - set_pte(ptep, pte_mkdirty(old_pte)); + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h index 58026f6c987a..fa07bd6c7529 100644 --- a/include/asm-i386/pgtable-2level.h +++ b/include/asm-i386/pgtable-2level.h @@ -14,10 +14,11 @@ * hook is made available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) -#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0)) +#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0)) #define pte_same(a, b) ((a).pte_low == (b).pte_low) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!(x).pte_low) diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h index 0b8a0da75678..d609f9c2c1f0 100644 --- a/include/asm-i386/pgtable-3level.h +++ b/include/asm-i386/pgtable-3level.h @@ -56,6 +56,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte) smp_wmb(); ptep->pte_low = pte.pte_low; } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + #define __HAVE_ARCH_SET_PTE_ATOMIC #define set_pte_atomic(pteptr,pteval) \ set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) @@ -88,7 +90,7 @@ static inline void pud_clear (pud_t * pud) { } #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ 
pmd_index(address)) -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t res; diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index f34be5140a23..da4f252b7b84 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -201,7 +201,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -243,22 +243,24 @@ static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return p # include <asm/pgtable-2level.h> #endif -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); } -static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, &ptep->pte_low); } -static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); } +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + clear_bit(_PAGE_BIT_RW, &ptep->pte_low); +} /* * Macro to mark a page protection value as "uncacheable". 
On processors which do not support @@ -407,7 +409,6 @@ extern void noexec_setup(const char *str); #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index 594f73f8b574..b74428d554b2 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -202,6 +202,7 @@ ia64_phys_addr_valid (unsigned long addr) * the PTE in a page table. Nothing special needs to be on IA-64. */ #define set_pte(ptep, pteval) (*(ptep) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define RGN_SIZE (1UL << 61) #define RGN_KERNEL 7 @@ -243,7 +244,7 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE)) -#define pte_clear(pte) (pte_val(*(pte)) = 0UL) +#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL) /* pte_page() returns the "struct page *" corresponding to the PTE: */ #define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET)) @@ -345,7 +346,7 @@ pgd_offset (struct mm_struct *mm, unsigned long address) /* atomic versions of the some PTE manipulations: */ static inline int -ptep_test_and_clear_young (pte_t *ptep) +ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_young(*ptep)) @@ -355,13 +356,13 @@ ptep_test_and_clear_young (pte_t *ptep) pte_t pte = *ptep; if (!pte_young(pte)) return 0; - set_pte(ptep, pte_mkold(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; #endif } static inline int -ptep_test_and_clear_dirty (pte_t *ptep) +ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_dirty(*ptep)) @@ -371,25 +372,25 @@ ptep_test_and_clear_dirty (pte_t 
*ptep) pte_t pte = *ptep; if (!pte_dirty(pte)) return 0; - set_pte(ptep, pte_mkclean(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte)); return 1; #endif } static inline pte_t -ptep_get_and_clear (pte_t *ptep) +ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP return __pte(xchg((long *) ptep, 0)); #else pte_t pte = *ptep; - pte_clear(ptep); + pte_clear(mm, addr, ptep); return pte; #endif } static inline void -ptep_set_wrprotect (pte_t *ptep) +ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP unsigned long new, old; @@ -400,18 +401,7 @@ ptep_set_wrprotect (pte_t *ptep) } while (cmpxchg((unsigned long *) ptep, old, new) != old); #else pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -#endif -} - -static inline void -ptep_mkdirty (pte_t *ptep) -{ -#ifdef CONFIG_SMP - set_bit(_PAGE_D_BIT, ptep); -#else - pte_t old_pte = *ptep; - set_pte(ptep, pte_mkdirty(old_pte)); + set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); #endif } @@ -558,7 +548,6 @@ do { \ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #define __HAVE_ARCH_PGD_OFFSET_GATE #include <asm-generic/pgtable.h> diff --git a/include/asm-m32r/pgtable-2level.h b/include/asm-m32r/pgtable-2level.h index 8af66284b436..861727c20e8f 100644 --- a/include/asm-m32r/pgtable-2level.h +++ b/include/asm-m32r/pgtable-2level.h @@ -44,6 +44,7 @@ static inline int pgd_present(pgd_t pgd) { return 1; } * hook is made available. 
*/ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) /* * (pmds are folded into pgds so this doesnt get actually called, @@ -60,7 +61,7 @@ static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address) return (pmd_t *) dir; } -#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0)) +#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) #define pte_same(a, b) (pte_val(a) == pte_val(b)) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!pte_val(x)) diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h index d2b67741e381..1710d91d4e56 100644 --- a/include/asm-m32r/pgtable.h +++ b/include/asm-m32r/pgtable.h @@ -176,7 +176,7 @@ extern unsigned long empty_zero_page[1024]; /* page table for 0-4MB for everybody */ #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -282,26 +282,21 @@ static inline pte_t pte_mkwrite(pte_t pte) return pte; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { clear_bit(_PAGE_BIT_WRITE, ptep); } -static inline void ptep_mkdirty(pte_t *ptep) -{ - set_bit(_PAGE_BIT_DIRTY, ptep); -} - /* * Macro 
and implementation to make a page protection as uncachable. */ @@ -390,7 +385,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h index f9391dfbb710..1628723458f5 100644 --- a/include/asm-m68k/motorola_pgtable.h +++ b/include/asm-m68k/motorola_pgtable.h @@ -129,7 +129,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) #define pte_none(pte) (!pte_val(pte)) #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(ptep) ({ pte_val(*(ptep)) = 0; }) +#define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) #define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT)) #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h index 05b3d68cb261..186585d45d8c 100644 --- a/include/asm-m68k/pgtable.h +++ b/include/asm-m68k/pgtable.h @@ -26,6 +26,7 @@ do{ \ *(pteptr) = (pteval); \ } while(0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* PMD_SHIFT determines the size of the area a second-level page table can map */ diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h index e5485e830bd7..e974bb072047 100644 --- a/include/asm-m68k/sun3_pgtable.h +++ b/include/asm-m68k/sun3_pgtable.h @@ -123,7 +123,10 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) static inline int pte_none (pte_t pte) { return !pte_val (pte); } static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; } -static inline void pte_clear (pte_t *ptep) { pte_val (*ptep) = 0; } +static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_val (*ptep) = 0; 
+} #define pte_pfn(pte) (pte_val(pte) & SUN3_PAGE_PGNUM_MASK) #define pfn_pte(pfn, pgprot) \ diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index 0461ee1d3349..61983c3fc9c3 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -100,14 +100,15 @@ static inline void set_pte(pte_t *ptep, pte_t pte) buddy->pte_low |= _PAGE_GLOBAL; } } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) -static inline void pte_clear(pte_t *ptep) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); + set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); else - set_pte(ptep, __pte(0)); + set_pte_at(mm, addr, ptep, __pte(0)); } #else /* @@ -130,16 +131,17 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) } #endif } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) -static inline void pte_clear(pte_t *ptep) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); + set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); else #endif - set_pte(ptep, __pte(0)); + set_pte_at(mm, addr, ptep, __pte(0)); } #endif diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h index ccfc070fd055..149c17aea2fe 100644 --- a/include/asm-parisc/pgtable.h +++ b/include/asm-parisc/pgtable.h @@ -39,6 +39,7 @@ do{ \ *(pteptr) = (pteval); \ } while(0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #endif /* !__ASSEMBLY__ */ @@ -263,7 +264,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) do { pte_val(*(xp)) = 0; } 
while (0) +#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) @@ -431,7 +432,7 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_young(*ptep)) @@ -441,12 +442,12 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) pte_t pte = *ptep; if (!pte_young(pte)) return 0; - set_pte(ptep, pte_mkold(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; #endif } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP if (!pte_dirty(*ptep)) @@ -456,14 +457,14 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep) pte_t pte = *ptep; if (!pte_dirty(pte)) return 0; - set_pte(ptep, pte_mkclean(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte)); return 1; #endif } extern spinlock_t pa_dbit_lock; -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; pte_t pte; @@ -472,13 +473,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep) pte = old_pte = *ptep; pte_val(pte) &= ~_PAGE_PRESENT; pte_val(pte) |= _PAGE_FLUSH; - set_pte(ptep,pte); + set_pte_at(mm,addr,ptep,pte); spin_unlock(&pa_dbit_lock); return old_pte; } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_SMP unsigned long new, old; @@ -489,17 +490,7 @@ 
static inline void ptep_set_wrprotect(pte_t *ptep) } while (cmpxchg((unsigned long *) ptep, old, new) != old); #else pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -#endif -} - -static inline void ptep_mkdirty(pte_t *ptep) -{ -#ifdef CONFIG_SMP - set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep)); -#else - pte_t old_pte = *ptep; - set_pte(ptep, pte_mkdirty(old_pte)); + set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); #endif } @@ -518,7 +509,6 @@ static inline void ptep_mkdirty(pte_t *ptep) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h index 7cda32eb1237..614b5e8d1b47 100644 --- a/include/asm-ppc/highmem.h +++ b/include/asm-ppc/highmem.h @@ -114,7 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) * force other mappings to Oops if they'll try to access * this pte without first remap it */ - pte_clear(kmap_pte+idx); + pte_clear(&init_mm, vaddr, kmap_pte+idx); flush_tlb_page(NULL, vaddr); #endif dec_preempt_count(); diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 642c86b74121..b78974041062 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -448,7 +448,7 @@ extern unsigned long empty_zero_page[1024]; #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) -#define pte_clear(ptep) do { set_pte((ptep), __pte(0)); } while (0) +#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) @@ -550,6 +550,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) *ptep = pte; #endif } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) extern void 
flush_hash_one_pte(pte_t *ptep); @@ -557,7 +558,7 @@ extern void flush_hash_one_pte(pte_t *ptep); * 2.6 calles this without flushing the TLB entry, this is wrong * for our hash-based implementation, we fix that up here */ -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long old; old = pte_update(ptep, _PAGE_ACCESSED, 0); @@ -568,26 +569,21 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) return (old & _PAGE_ACCESSED) != 0; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; } -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); } -static inline void ptep_mkdirty(pte_t *ptep) -{ - pte_update(ptep, 0, _PAGE_DIRTY); -} - #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) { @@ -747,7 +743,6 @@ extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep); #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h index 0f6990ad957d..5b78582fd7da 100644 --- a/include/asm-ppc64/pgtable.h +++ b/include/asm-ppc64/pgtable.h @@ -317,7 +317,7 @@ static inline unsigned long pte_update(pte_t *p, 
unsigned long clr) */ extern void hpte_update(pte_t *ptep, unsigned long pte, int wrprot); -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long old; @@ -336,7 +336,7 @@ static inline int ptep_test_and_clear_young(pte_t *ptep) * moment we always flush but we need to fix hpte_update and test if the * optimisation is worth it. */ -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long old; @@ -348,7 +348,7 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep) return (old & _PAGE_DIRTY) != 0; } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; @@ -370,19 +370,21 @@ static inline void ptep_set_wrprotect(pte_t *ptep) #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young(__vma, __address, __ptep) \ ({ \ - int __young = ptep_test_and_clear_young(__ptep); \ + int __young; \ + __young = ptep_test_and_clear_young(__vma, __address, __ptep); \ __young; \ }) #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ ({ \ - int __dirty = ptep_test_and_clear_dirty(__ptep); \ + int __dirty; \ + __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \ flush_tlb_page(__vma, __address); \ __dirty; \ }) -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old = pte_update(ptep, ~0UL); @@ -391,7 +393,7 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep) return __pte(old); } -static inline void pte_clear(pte_t * ptep) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) { unsigned long old = 
pte_update(ptep, ~0UL); @@ -402,10 +404,11 @@ static inline void pte_clear(pte_t * ptep) /* * set_pte stores a linux PTE into the linux page table. */ -static inline void set_pte(pte_t *ptep, pte_t pte) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { if (pte_present(*ptep)) { - pte_clear(ptep); + pte_clear(mm, addr, ptep); flush_tlb_pending(); } *ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS; diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h index 90308137c8e6..91effd7c5c9f 100644 --- a/include/asm-s390/pgalloc.h +++ b/include/asm-s390/pgalloc.h @@ -130,8 +130,10 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); if (pte != NULL) { - for (i=0; i < PTRS_PER_PTE; i++) - pte_clear(pte+i); + for (i=0; i < PTRS_PER_PTE; i++) { + pte_clear(mm, vmaddr, pte+i); + vmaddr += PAGE_SIZE; + } } return pte; } diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 85f043c6d5cf..7a81a7c5d4ba 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -322,6 +322,7 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval) { *pteptr = pteval; } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* * pgd/pmd/pte query functions @@ -457,7 +458,7 @@ extern inline void pmd_clear(pmd_t * pmdp) #endif /* __s390x__ */ -extern inline void pte_clear(pte_t *ptep) +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID_EMPTY; } @@ -521,7 +522,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) return pte; } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return 0; } @@ -531,10 +532,10 @@ ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* No need to flush TLB; bits are in storage key */ - 
return ptep_test_and_clear_young(ptep); + return ptep_test_and_clear_young(vma, address, ptep); } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return 0; } @@ -544,13 +545,13 @@ ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* No need to flush TLB; bits are in storage key */ - return ptep_test_and_clear_dirty(ptep); + return ptep_test_and_clear_dirty(vma, address, ptep); } -static inline pte_t ptep_get_and_clear(pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; - pte_clear(ptep); + pte_clear(mm, addr, ptep); return pte; } @@ -573,19 +574,14 @@ ptep_clear_flush(struct vm_area_struct *vma, : "=m" (*ptep) : "m" (*ptep), "a" (ptep), "a" (address) ); #endif /* __s390x__ */ - pte_clear(ptep); + pte_clear(vma->vm_mm, address, ptep); return pte; } -static inline void ptep_set_wrprotect(pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte = *ptep; - set_pte(ptep, pte_wrprotect(old_pte)); -} - -static inline void ptep_mkdirty(pte_t *ptep) -{ - pte_mkdirty(*ptep); + set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); } static inline void @@ -802,7 +798,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h index b7d4561d8244..b0528aa3cb1f 100644 --- a/include/asm-sh/pgtable-2level.h +++ b/include/asm-sh/pgtable-2level.h @@ -41,6 +41,8 @@ static inline void pgd_clear (pgd_t * pgdp) { } * hook is made 
available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index 8853eb2485a5..e3592103f4d1 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -164,7 +164,7 @@ extern unsigned long empty_zero_page[1024]; #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) @@ -290,7 +290,7 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR -extern pte_t ptep_get_and_clear(pte_t *ptep); +extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); #endif #include <asm-generic/pgtable.h> diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h index 4b3f31ac62df..2e63fd5a64b9 100644 --- a/include/asm-sh64/pgtable.h +++ b/include/asm-sh64/pgtable.h @@ -136,6 +136,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval) */ *(xp) = (x & NPHYS_SIGN) ? 
(x | NPHYS_MASK) : x; } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) { @@ -383,7 +384,7 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd); */ #define _PTE_EMPTY 0x0 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(xp) (set_pte(xp, __pte(_PTE_EMPTY))) +#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY))) #define pte_none(x) (pte_val(x) == _PTE_EMPTY) /* diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index e62df8cbd99d..f45b1801be98 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -157,7 +157,7 @@ extern __inline__ int pte_none(pte_t pte) } #define pte_present(pte) BTFIXUP_CALL(pte_present)(pte) -#define pte_clear(pte) BTFIXUP_CALL(pte_clear)(pte) +#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte) BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t) BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) @@ -339,6 +339,7 @@ extern unsigned int pg_iobits; BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t) #define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) struct seq_file; BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *) diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 83adf8070928..bfd5c93845a2 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -343,8 +343,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) if (pte_present(orig)) tlb_batch_add(ptep, orig); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) -#define pte_clear(ptep) set_pte((ptep), __pte(0UL)) +#define pte_clear(mm,addr,ptep) \ + set_pte_at((mm), (addr), (ptep), __pte(0UL)) extern pgd_t swapper_pg_dir[1]; diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h index f6263d11f205..e5353c095f80 100644 --- a/include/asm-um/pgtable-2level.h +++ 
b/include/asm-um/pgtable-2level.h @@ -59,6 +59,7 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) *pteptr = pte_mknewpage(pteval); if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h index acebb593cfd6..faf051c2c418 100644 --- a/include/asm-um/pgtable-3level.h +++ b/include/asm-um/pgtable-3level.h @@ -84,6 +84,7 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) *pteptr = pte_mknewpage(*pteptr); if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval)) diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index e0a0a95639a7..71f9c0c78c0c 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h @@ -142,7 +142,7 @@ extern pte_t * __bad_pagetable(void); #define PAGE_PTR(address) \ ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) -#define pte_clear(xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) #define pmd_none(x) (!(pmd_val(x) & ~_PAGE_NEWPAGE)) #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 1e2cc99aebd8..262544f187ee 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -73,6 +73,7 @@ static inline void set_pte(pte_t *dst, pte_t val) { pte_val(*dst) = pte_val(val); } +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) static inline void set_pmd(pmd_t *dst, pmd_t val) { @@ -102,7 +103,7 @@ extern inline void pgd_clear (pgd_t * pgd) #define pud_page(pud) \ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) 
-#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0)) +#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) #define pte_same(a, b) ((a).pte == (b).pte) #define PMD_SIZE (1UL << PMD_SHIFT) @@ -224,7 +225,7 @@ static inline unsigned long pud_bad(pud_t pud) #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ @@ -263,22 +264,24 @@ extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } -static inline int ptep_test_and_clear_dirty(pte_t *ptep) +static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); } -static inline int ptep_test_and_clear_young(pte_t *ptep) +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); } -static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, ptep); } -static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, ptep); } +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + clear_bit(_PAGE_BIT_RW, ptep); +} /* * Macro to mark a page protection value as "uncacheable". 
@@ -419,7 +422,6 @@ extern int kern_addr_valid(unsigned long addr); #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME #include <asm-generic/pgtable.h> diff --git a/mm/fremap.c b/mm/fremap.c index bab48f238623..17d8c5b5d210 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -45,7 +45,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, } else { if (!pte_file(pte)) free_swap_and_cache(pte_to_swp_entry(pte)); - pte_clear(ptep); + pte_clear(mm, addr, ptep); } } @@ -94,7 +94,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, mm->rss++; flush_icache_page(vma, page); - set_pte(pte, mk_pte(page, prot)); + set_pte_at(mm, addr, pte, mk_pte(page, prot)); page_add_file_rmap(page); pte_val = *pte; pte_unmap(pte); @@ -139,7 +139,7 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, zap_pte(mm, vma, addr, pte); - set_pte(pte, pgoff_to_pte(pgoff)); + set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); pte_val = *pte; pte_unmap(pte); update_mmu_cache(vma, addr, pte_val); diff --git a/mm/highmem.c b/mm/highmem.c index c37d66cb1af1..d5646a39859a 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -90,7 +90,8 @@ static void flush_all_zero_pkmaps(void) * So no dangers, even with speculative execution. 
*/ page = pte_page(pkmap_page_table[i]); - pte_clear(&pkmap_page_table[i]); + pte_clear(&init_mm, (unsigned long)page_address(page), + &pkmap_page_table[i]); set_page_address(page, NULL); } @@ -138,7 +139,8 @@ start: } } vaddr = PKMAP_ADDR(last_pkmap_nr); - set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); + set_pte_at(&init_mm, vaddr, + &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); pkmap_count[last_pkmap_nr] = 1; set_page_address(page, (void *)vaddr); diff --git a/mm/memory.c b/mm/memory.c index 48cbd6b7b98b..a4def64ea8cd 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -278,7 +278,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, /* pte contains position in swap, so copy. */ if (!pte_present(pte)) { copy_swap_pte(dst_mm, src_mm, pte); - set_pte(dst_pte, pte); + set_pte_at(dst_mm, addr, dst_pte, pte); return; } pfn = pte_pfn(pte); @@ -292,7 +292,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, page = pfn_to_page(pfn); if (!page || PageReserved(page)) { - set_pte(dst_pte, pte); + set_pte_at(dst_mm, addr, dst_pte, pte); return; } @@ -301,7 +301,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, * in the parent and the child */ if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) { - ptep_set_wrprotect(src_pte); + ptep_set_wrprotect(src_mm, addr, src_pte); pte = *src_pte; } @@ -316,7 +316,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, dst_mm->rss++; if (PageAnon(page)) dst_mm->anon_rss++; - set_pte(dst_pte, pte); + set_pte_at(dst_mm, addr, dst_pte, pte); page_dup_rmap(page); } @@ -502,14 +502,15 @@ static void zap_pte_range(struct mmu_gather *tlb, page->index > details->last_index)) continue; } - pte = ptep_get_and_clear(ptep); + pte = ptep_get_and_clear(tlb->mm, address+offset, ptep); tlb_remove_tlb_entry(tlb, ptep, address+offset); if (unlikely(!page)) continue; if (unlikely(details) && details->nonlinear_vma && 
linear_page_index(details->nonlinear_vma, address+offset) != page->index) - set_pte(ptep, pgoff_to_pte(page->index)); + set_pte_at(tlb->mm, address+offset, + ptep, pgoff_to_pte(page->index)); if (pte_dirty(pte)) set_page_dirty(page); if (PageAnon(page)) @@ -529,7 +530,7 @@ static void zap_pte_range(struct mmu_gather *tlb, continue; if (!pte_file(pte)) free_swap_and_cache(pte_to_swp_entry(pte)); - pte_clear(ptep); + pte_clear(tlb->mm, address+offset, ptep); } pte_unmap(ptep-1); } @@ -987,8 +988,9 @@ out: EXPORT_SYMBOL(get_user_pages); -static void zeromap_pte_range(pte_t * pte, unsigned long address, - unsigned long size, pgprot_t prot) +static void zeromap_pte_range(struct mm_struct *mm, pte_t * pte, + unsigned long address, + unsigned long size, pgprot_t prot) { unsigned long end; @@ -999,7 +1001,7 @@ static void zeromap_pte_range(pte_t * pte, unsigned long address, do { pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot)); BUG_ON(!pte_none(*pte)); - set_pte(pte, zero_pte); + set_pte_at(mm, address, pte, zero_pte); address += PAGE_SIZE; pte++; } while (address && (address < end)); @@ -1019,7 +1021,7 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, pte_t * pte = pte_alloc_map(mm, pmd, base + address); if (!pte) return -ENOMEM; - zeromap_pte_range(pte, base + address, end - address, prot); + zeromap_pte_range(mm, pte, base + address, end - address, prot); pte_unmap(pte); address = (address + PMD_SIZE) & PMD_MASK; pmd++; @@ -1100,7 +1102,8 @@ int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, * in null mappings (currently treated as "copy-on-access") */ static inline void -remap_pte_range(pte_t * pte, unsigned long address, unsigned long size, +remap_pte_range(struct mm_struct *mm, pte_t * pte, + unsigned long address, unsigned long size, unsigned long pfn, pgprot_t prot) { unsigned long end; @@ -1112,7 +1115,7 @@ remap_pte_range(pte_t * pte, unsigned long address, unsigned long size, do { 
BUG_ON(!pte_none(*pte)); if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn))) - set_pte(pte, pfn_pte(pfn, prot)); + set_pte_at(mm, address, pte, pfn_pte(pfn, prot)); address += PAGE_SIZE; pfn++; pte++; @@ -1135,7 +1138,7 @@ remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, pte_t * pte = pte_alloc_map(mm, pmd, base + address); if (!pte) return -ENOMEM; - remap_pte_range(pte, base + address, end - address, + remap_pte_range(mm, pte, base + address, end - address, (address >> PAGE_SHIFT) + pfn, prot); pte_unmap(pte); address = (address + PMD_SIZE) & PMD_MASK; @@ -1758,7 +1761,7 @@ static int do_swap_page(struct mm_struct * mm, unlock_page(page); flush_icache_page(vma, page); - set_pte(page_table, pte); + set_pte_at(mm, address, page_table, pte); page_add_anon_rmap(page, vma, address); if (write_access) { @@ -1824,7 +1827,7 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, page_add_anon_rmap(page, vma, addr); } - set_pte(page_table, entry); + set_pte_at(mm, addr, page_table, entry); pte_unmap(page_table); /* No need to invalidate - it was non-present before */ @@ -1939,7 +1942,7 @@ retry: entry = mk_pte(new_page, vma->vm_page_prot); if (write_access) entry = maybe_mkwrite(pte_mkdirty(entry), vma); - set_pte(page_table, entry); + set_pte_at(mm, address, page_table, entry); if (anon) { lru_cache_add_active(new_page); page_add_anon_rmap(new_page, vma, address); @@ -1983,7 +1986,7 @@ static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma, */ if (!vma->vm_ops || !vma->vm_ops->populate || (write_access && !(vma->vm_flags & VM_SHARED))) { - pte_clear(pte); + pte_clear(mm, address, pte); return do_no_page(mm, vma, address, write_access, pte, pmd); } diff --git a/mm/mprotect.c b/mm/mprotect.c index 0427711ec10b..00a27529935f 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -26,7 +26,7 @@ #include <asm/tlbflush.h> static inline void -change_pte_range(pmd_t *pmd, unsigned long address, +change_pte_range(struct 
mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size, pgprot_t newprot) { pte_t * pte; @@ -52,8 +52,8 @@ change_pte_range(pmd_t *pmd, unsigned long address, * bits by wiping the pte and then setting the new pte * into place. */ - entry = ptep_get_and_clear(pte); - set_pte(pte, pte_modify(entry, newprot)); + entry = ptep_get_and_clear(mm, address, pte); + set_pte_at(mm, address, pte, pte_modify(entry, newprot)); } address += PAGE_SIZE; pte++; @@ -62,8 +62,8 @@ change_pte_range(pmd_t *pmd, unsigned long address, } static inline void -change_pmd_range(pud_t *pud, unsigned long address, - unsigned long size, pgprot_t newprot) +change_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long address, + unsigned long size, pgprot_t newprot) { pmd_t * pmd; unsigned long end; @@ -81,15 +81,15 @@ change_pmd_range(pud_t *pud, unsigned long address, if (end > PUD_SIZE) end = PUD_SIZE; do { - change_pte_range(pmd, address, end - address, newprot); + change_pte_range(mm, pmd, address, end - address, newprot); address = (address + PMD_SIZE) & PMD_MASK; pmd++; } while (address && (address < end)); } static inline void -change_pud_range(pgd_t *pgd, unsigned long address, - unsigned long size, pgprot_t newprot) +change_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long address, + unsigned long size, pgprot_t newprot) { pud_t * pud; unsigned long end; @@ -107,7 +107,7 @@ change_pud_range(pgd_t *pgd, unsigned long address, if (end > PGDIR_SIZE) end = PGDIR_SIZE; do { - change_pmd_range(pud, address, end - address, newprot); + change_pmd_range(mm, pud, address, end - address, newprot); address = (address + PUD_SIZE) & PUD_MASK; pud++; } while (address && (address < end)); @@ -130,7 +130,7 @@ change_protection(struct vm_area_struct *vma, unsigned long start, next = (start + PGDIR_SIZE) & PGDIR_MASK; if (next <= start || next > end) next = end; - change_pud_range(pgd, start, next - start, newprot); + change_pud_range(mm, pgd, start, next - start, newprot); start = 
next; pgd++; } diff --git a/mm/mremap.c b/mm/mremap.c index ebdf621984ac..708564b6d26d 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -150,7 +150,7 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr, if (dst) { pte_t pte; pte = ptep_clear_flush(vma, old_addr, src); - set_pte(dst, pte); + set_pte_at(mm, new_addr, dst, pte); } else error = -ENOMEM; pte_unmap_nested(src); diff --git a/mm/rmap.c b/mm/rmap.c index 4ff8183fa18e..83decd72c41a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -594,7 +594,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) list_add(&mm->mmlist, &init_mm.mmlist); spin_unlock(&mmlist_lock); } - set_pte(pte, swp_entry_to_pte(entry)); + set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); BUG_ON(pte_file(*pte)); mm->anon_rss--; } @@ -697,7 +697,7 @@ static void try_to_unmap_cluster(unsigned long cursor, /* If nonlinear, store the file page offset in the pte. */ if (page->index != linear_page_index(vma, address)) - set_pte(pte, pgoff_to_pte(page->index)); + set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); /* Move the dirty bit to the physical page now the pte is gone. 
*/ if (pte_dirty(pteval)) diff --git a/mm/swapfile.c b/mm/swapfile.c index 547ecd9c060d..bc711dd83e0a 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -434,7 +434,8 @@ unuse_pte(struct vm_area_struct *vma, unsigned long address, pte_t *dir, { vma->vm_mm->rss++; get_page(page); - set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot))); + set_pte_at(vma->vm_mm, address, dir, + pte_mkold(mk_pte(page, vma->vm_page_prot))); page_add_anon_rmap(page, vma, address); swap_free(entry); acct_update_integrals(); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e6516c208d62..c88295d99263 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -45,7 +45,7 @@ static void unmap_area_pte(pmd_t *pmd, unsigned long address, do { pte_t page; - page = ptep_get_and_clear(pte); + page = ptep_get_and_clear(&init_mm, address, pte); address += PAGE_SIZE; pte++; if (pte_none(page)) @@ -127,7 +127,7 @@ static int map_area_pte(pte_t *pte, unsigned long address, if (!page) return -ENOMEM; - set_pte(pte, mk_pte(page, prot)); + set_pte_at(&init_mm, address, pte, mk_pte(page, prot)); address += PAGE_SIZE; pte++; (*pages)++; |
