diff options
| author | David S. Miller <davem@nuts.davemloft.net> | 2005-02-22 23:42:56 -0800 |
|---|---|---|
| committer | David S. Miller <davem@nuts.davemloft.net> | 2005-02-22 23:42:56 -0800 |
| commit | ae3d0a847f4b38812241e4a5dc3371965c752a8c (patch) | |
| tree | 41ba29cfb3f0085b0f82a9387c6e492274678a37 /mm/memory.c | |
| parent | 75be0272cb9b49ee7cb753b008844b89c2269585 (diff) | |
[MM]: Add set_pte_at() which takes 'mm' and 'addr' args.
I'm taking a slightly different approach this time around so things
are easier to integrate. Here is the first patch which builds the
infrastructure. Basically:
1) Add set_pte_at() which is set_pte() with 'mm' and 'addr' arguments
added. All generic code uses set_pte_at().
Most platforms simply get this define:
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
I chose this method over simply changing all set_pte() call sites
because many platforms implement this in assembler and it would
take forever to preserve the build and stabilize things if modifying
that was necessary.
Soon, with platform maintainers' help, we can kill off set_pte() entirely.
To be honest, there are only a handful of set_pte() call sites in the
arch specific code.
Actually, in this patch ppc64 is completely set_pte() free and does not
define it.
2) pte_clear() gets 'mm' and 'addr' arguments now.
This had a cascading effect on many ptep_test_and_*() routines. Specifically:
a) ptep_test_and_clear_{young,dirty}() now take 'vma' and 'address' args.
b) ptep_get_and_clear() now takes 'mm' and 'address' args.
c) ptep_mkdirty was deleted, unused by any code.
d) ptep_set_wrprotect now takes 'mm' and 'address' args.
I've tested this patch as follows:
1) compile and run tested on sparc64/SMP
2) compile tested on:
a) ppc64/SMP
b) i386 both with and without PAE enabled
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm/memory.c')
| -rw-r--r-- | mm/memory.c | 39 |
1 files changed, 21 insertions, 18 deletions
diff --git a/mm/memory.c b/mm/memory.c index 48cbd6b7b98b..a4def64ea8cd 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -278,7 +278,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, /* pte contains position in swap, so copy. */ if (!pte_present(pte)) { copy_swap_pte(dst_mm, src_mm, pte); - set_pte(dst_pte, pte); + set_pte_at(dst_mm, addr, dst_pte, pte); return; } pfn = pte_pfn(pte); @@ -292,7 +292,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, page = pfn_to_page(pfn); if (!page || PageReserved(page)) { - set_pte(dst_pte, pte); + set_pte_at(dst_mm, addr, dst_pte, pte); return; } @@ -301,7 +301,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, * in the parent and the child */ if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) { - ptep_set_wrprotect(src_pte); + ptep_set_wrprotect(src_mm, addr, src_pte); pte = *src_pte; } @@ -316,7 +316,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, dst_mm->rss++; if (PageAnon(page)) dst_mm->anon_rss++; - set_pte(dst_pte, pte); + set_pte_at(dst_mm, addr, dst_pte, pte); page_dup_rmap(page); } @@ -502,14 +502,15 @@ static void zap_pte_range(struct mmu_gather *tlb, page->index > details->last_index)) continue; } - pte = ptep_get_and_clear(ptep); + pte = ptep_get_and_clear(tlb->mm, address+offset, ptep); tlb_remove_tlb_entry(tlb, ptep, address+offset); if (unlikely(!page)) continue; if (unlikely(details) && details->nonlinear_vma && linear_page_index(details->nonlinear_vma, address+offset) != page->index) - set_pte(ptep, pgoff_to_pte(page->index)); + set_pte_at(tlb->mm, address+offset, + ptep, pgoff_to_pte(page->index)); if (pte_dirty(pte)) set_page_dirty(page); if (PageAnon(page)) @@ -529,7 +530,7 @@ static void zap_pte_range(struct mmu_gather *tlb, continue; if (!pte_file(pte)) free_swap_and_cache(pte_to_swp_entry(pte)); - pte_clear(ptep); + pte_clear(tlb->mm, address+offset, ptep); } pte_unmap(ptep-1); } @@ -987,8 +988,9 @@ out: 
EXPORT_SYMBOL(get_user_pages); -static void zeromap_pte_range(pte_t * pte, unsigned long address, - unsigned long size, pgprot_t prot) +static void zeromap_pte_range(struct mm_struct *mm, pte_t * pte, + unsigned long address, + unsigned long size, pgprot_t prot) { unsigned long end; @@ -999,7 +1001,7 @@ static void zeromap_pte_range(pte_t * pte, unsigned long address, do { pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot)); BUG_ON(!pte_none(*pte)); - set_pte(pte, zero_pte); + set_pte_at(mm, address, pte, zero_pte); address += PAGE_SIZE; pte++; } while (address && (address < end)); @@ -1019,7 +1021,7 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, pte_t * pte = pte_alloc_map(mm, pmd, base + address); if (!pte) return -ENOMEM; - zeromap_pte_range(pte, base + address, end - address, prot); + zeromap_pte_range(mm, pte, base + address, end - address, prot); pte_unmap(pte); address = (address + PMD_SIZE) & PMD_MASK; pmd++; @@ -1100,7 +1102,8 @@ int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, * in null mappings (currently treated as "copy-on-access") */ static inline void -remap_pte_range(pte_t * pte, unsigned long address, unsigned long size, +remap_pte_range(struct mm_struct *mm, pte_t * pte, + unsigned long address, unsigned long size, unsigned long pfn, pgprot_t prot) { unsigned long end; @@ -1112,7 +1115,7 @@ remap_pte_range(pte_t * pte, unsigned long address, unsigned long size, do { BUG_ON(!pte_none(*pte)); if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn))) - set_pte(pte, pfn_pte(pfn, prot)); + set_pte_at(mm, address, pte, pfn_pte(pfn, prot)); address += PAGE_SIZE; pfn++; pte++; @@ -1135,7 +1138,7 @@ remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, pte_t * pte = pte_alloc_map(mm, pmd, base + address); if (!pte) return -ENOMEM; - remap_pte_range(pte, base + address, end - address, + remap_pte_range(mm, pte, base + address, end - address, (address >> PAGE_SHIFT) + pfn, 
prot); pte_unmap(pte); address = (address + PMD_SIZE) & PMD_MASK; @@ -1758,7 +1761,7 @@ static int do_swap_page(struct mm_struct * mm, unlock_page(page); flush_icache_page(vma, page); - set_pte(page_table, pte); + set_pte_at(mm, address, page_table, pte); page_add_anon_rmap(page, vma, address); if (write_access) { @@ -1824,7 +1827,7 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, page_add_anon_rmap(page, vma, addr); } - set_pte(page_table, entry); + set_pte_at(mm, addr, page_table, entry); pte_unmap(page_table); /* No need to invalidate - it was non-present before */ @@ -1939,7 +1942,7 @@ retry: entry = mk_pte(new_page, vma->vm_page_prot); if (write_access) entry = maybe_mkwrite(pte_mkdirty(entry), vma); - set_pte(page_table, entry); + set_pte_at(mm, address, page_table, entry); if (anon) { lru_cache_add_active(new_page); page_add_anon_rmap(new_page, vma, address); @@ -1983,7 +1986,7 @@ static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma, */ if (!vma->vm_ops || !vma->vm_ops->populate || (write_access && !(vma->vm_flags & VM_SHARED))) { - pte_clear(pte); + pte_clear(mm, address, pte); return do_no_page(mm, vma, address, write_access, pte, pmd); } |
