| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-05-24 23:04:59 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-05-24 23:04:59 -0700 |
| commit | 87050fae7607166d02c1eaf052f8a55d7eca8e5c | |
| tree | 261cd3c6f1e11ddfe61a1a653f192bb655902ace /include | |
| parent | c9e1750cf60872334f2425848dfc5d3a7dbd323d | |
Introduce architecture-specific "ptep_update_dirty_accessed()"
helper function to write back the dirty and accessed bits from
ptep_establish().
Right now this defaults to the same old "set_pte()" that we've
always done, except for x86 where we now fix the (unlikely)
race in updating accessed bits and dropping a concurrent dirty
bit.
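
The race being fixed is easiest to see in a standalone sketch. The program below is illustrative only (userspace C with made-up PTE bit names and a thread standing in for the MMU, not kernel code): it shows how writing back a stale entry with an unconditional set_pte()-style store can drop a dirty bit that "hardware" set in the meantime, while the conditional i386-style update leaves the live entry alone unless the kernel itself is setting the dirty bit.

```c
/*
 * Userspace sketch of the lost-dirty-bit race described above.
 * PTE_ACCESSED/PTE_DIRTY and the helper names are invented here.
 * Build: cc -O2 -pthread race_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define PTE_ACCESSED 0x1
#define PTE_DIRTY    0x2

static _Atomic unsigned long pte = 0;          /* the shared "page table entry" */

/* "Hardware" on another CPU marks the page dirty on a write access. */
static void *mmu_thread(void *arg)
{
    (void)arg;
    atomic_fetch_or(&pte, PTE_DIRTY);
    return NULL;
}

/* Old behaviour: unconditionally write back the stale value (set_pte-style). */
static void update_unconditional(unsigned long stale)
{
    atomic_store(&pte, stale | PTE_ACCESSED);  /* may overwrite a new DIRTY bit */
}

/* New behaviour: only write when the kernel itself is setting DIRTY;
 * otherwise leave the entry alone and let hardware keep its bits. */
static void update_dirty_accessed(unsigned long stale, int dirty)
{
    if (dirty)
        atomic_store(&pte, stale | PTE_ACCESSED | PTE_DIRTY);
}

int main(void)
{
    unsigned long stale = atomic_load(&pte);   /* kernel snapshots the PTE */
    pthread_t t;

    pthread_create(&t, NULL, mmu_thread, NULL);
    pthread_join(&t, NULL);                    /* hardware sets DIRTY meanwhile */

    update_unconditional(stale);               /* DIRTY silently lost */
    printf("unconditional write-back: dirty=%d\n",
           !!(atomic_load(&pte) & PTE_DIRTY));

    atomic_fetch_or(&pte, PTE_DIRTY);          /* redo the hardware update */
    update_dirty_accessed(stale, 0);           /* conditional path keeps it */
    printf("conditional write-back:   dirty=%d\n",
           !!(atomic_load(&pte) & PTE_DIRTY));
    return 0;
}
```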
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-generic/pgtable.h | 7 |
| -rw-r--r-- | include/asm-i386/pgtable.h | 10 |

2 files changed, 16 insertions(+), 1 deletion(-)
```diff
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index fc380ce905ad..e9a387cdd37a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -2,6 +2,11 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __HAVE_ARCH_PTEP_ESTABLISH
+
+#ifndef ptep_update_dirty_accessed
+#define ptep_update_dirty_accessed(__ptep, __entry, __dirty) set_pte(__ptep, __entry)
+#endif
+
 /*
  * Establish a new mapping:
  * - flush the old one
@@ -12,7 +17,7 @@
  */
 #define ptep_establish(__vma, __address, __ptep, __entry, __dirty)	\
 do {									\
-	set_pte(__ptep, __entry);					\
+	ptep_update_dirty_accessed(__ptep, __entry, __dirty);		\
 	flush_tlb_page(__vma, __address);				\
 } while (0)
 #endif
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e510ad494a5e..c408168b22e2 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -317,8 +317,18 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
  */
 #define update_mmu_cache(vma,address,pte) do { } while (0)
+#define ptep_update_dirty_accessed(__ptep, __entry, __dirty)	\
+	do {							\
+		if (__dirty) set_pte(__ptep, __entry);		\
+	} while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x1f)
```
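
For reference, the default-plus-override macro pattern the patch relies on can be exercised outside the kernel. The sketch below is illustrative only: it collapses the "generic header" and the "architecture header" into one file, uses a plain pointer store in place of set_pte(), and omits the TLB flush, so the names and behaviour are a simplified stand-in rather than the real kernel interface.

```c
/* Sketch of the override pattern: the "generic" part supplies a default
 * ptep_update_dirty_accessed() only if no architecture override exists. */
#include <stdio.h>

/* --- "architecture" part: comment this out to fall back to the default --- */
#define ptep_update_dirty_accessed(ptep, entry, dirty)  \
    do { if (dirty) *(ptep) = (entry); } while (0)

/* --- "asm-generic" part: default used when no arch override is defined --- */
#ifndef ptep_update_dirty_accessed
#define ptep_update_dirty_accessed(ptep, entry, dirty)  (*(ptep) = (entry))
#endif

#define ptep_establish(ptep, entry, dirty)              \
    do {                                                \
        ptep_update_dirty_accessed(ptep, entry, dirty); \
        /* flush_tlb_page() would follow in the kernel */ \
    } while (0)

int main(void)
{
    unsigned long pte = 0;

    ptep_establish(&pte, 0x42UL, 1);   /* dirty: the write goes through */
    printf("pte after dirty establish: %#lx\n", pte);

    ptep_establish(&pte, 0x99UL, 0);   /* clean: i386-style override skips the write */
    printf("pte after clean establish: %#lx\n", pte);
    return 0;
}
```

With the override in place the second call leaves the entry untouched, which is exactly the property that keeps a concurrently set hardware dirty bit from being overwritten.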
