diff options
| author | Christoph Lameter <clameter@sgi.com> | 2005-03-28 03:19:14 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-03-28 03:19:14 -0800 |
| commit | 9b65d113fe1cd4961ce94127584413cefd49aaed (patch) | |
| tree | eb716c7d5fb899ee2160d199cb7a5366e8314ac3 | |
| parent | ae102ac599f557ea3544875957dda84ed5bb020e (diff) | |
[PATCH] mm counter operations through macros
This patch extracts all the operations on counters protected by the page
table lock (currently rss and anon_rss) into definitions in
include/linux/sched.h. All rss operations are performed through the
following macros:
get_mm_counter(mm, member) -> Obtain the value of a counter
set_mm_counter(mm, member, value) -> Set the value of a counter
add_mm_counter(mm, member, value) -> Add to a counter
inc_mm_counter(mm, member) -> Increment a counter
dec_mm_counter(mm, member) -> Decrement a counter
With this patch it becomes easier to add new counters and it is possible to
redefine the method of counter handling. The counters are an issue for
scalability since they are used in frequently used code paths and may cause
cache line bouncing.
F.e. One may not use counters at all and count the pages when needed, switch
to atomic operations if the mm_struct locking changes or split the rss
into counters that can be locally incremented.
The relevant fields of the task_struct are renamed with a leading underscore
to catch out people who are not using the accessor macros.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
32 files changed, 87 insertions, 71 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index ceaeef55e505..f3e8b354c4ea 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c @@ -46,7 +46,7 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc { pte_t entry; - mm->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); if (write_access) { entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); @@ -86,7 +86,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, ptepage = pte_page(entry); get_page(ptepage); set_pte(dst_pte, entry); - dst->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); addr += HPAGE_SIZE; } return 0; @@ -222,7 +222,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, page = pte_page(pte); put_page(page); } - mm->rss -= (end - start) >> PAGE_SHIFT; + add_mm_counter(mm ,rss, -((end - start) >> PAGE_SHIFT)); flush_tlb_range(vma, start, end); } diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 1d46b16764bd..40ad8328ffd5 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -73,7 +73,7 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma, { pte_t entry; - mm->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); if (write_access) { entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); @@ -116,7 +116,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, ptepage = pte_page(entry); get_page(ptepage); set_pte(dst_pte, entry); - dst->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); addr += HPAGE_SIZE; } return 0; @@ -246,7 +246,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsig put_page(page); pte_clear(mm, address, pte); } - mm->rss -= (end - start) >> PAGE_SHIFT; + add_mm_counter(mm, rss, - ((end - start) >> PAGE_SHIFT)); 
flush_tlb_range(vma, start, end); } diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c index 7ed86e722698..5a3c106b40c8 100644 --- a/arch/m68k/atari/stram.c +++ b/arch/m68k/atari/stram.c @@ -635,7 +635,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long set_pte(dir, pte_mkdirty(mk_pte(page, vma->vm_page_prot))); swap_free(entry); get_page(page); - ++vma->vm_mm->rss; + inc_mm_counter(vma->vm_mm, rss); } static inline void unswap_pmd(struct vm_area_struct * vma, pmd_t *dir, diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c index 4bbf1274743f..4af20cd91f9f 100644 --- a/arch/mips/kernel/irixelf.c +++ b/arch/mips/kernel/irixelf.c @@ -692,7 +692,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) /* Do this so that we can load the interpreter, if need be. We will * change some of these later. */ - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); current->mm->start_stack = bprm->p; diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 9a4db04f0874..c62ddaff0720 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c @@ -154,7 +154,7 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, { pte_t entry; - mm->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); if (write_access) { entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); @@ -316,7 +316,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, ptepage = pte_page(entry); get_page(ptepage); - dst->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); set_pte_at(dst, addr, dst_pte, entry); addr += HPAGE_SIZE; @@ -426,7 +426,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, put_page(page); } - mm->rss -= (end - start) >> PAGE_SHIFT; + add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); 
flush_tlb_pending(); } diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index ac807fdc6d99..1f897bab2318 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -62,7 +62,7 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long i; pte_t entry; - mm->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); if (write_access) entry = pte_mkwrite(pte_mkdirty(mk_pte(page, @@ -115,7 +115,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, pte_val(entry) += PAGE_SIZE; dst_pte++; } - dst->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); addr += HPAGE_SIZE; } return 0; @@ -206,7 +206,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, pte++; } } - mm->rss -= (end - start) >> PAGE_SHIFT; + add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); flush_tlb_range(vma, start, end); } diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c index af566d7eee4b..bcad2aefa4ee 100644 --- a/arch/sh64/mm/hugetlbpage.c +++ b/arch/sh64/mm/hugetlbpage.c @@ -62,7 +62,7 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long i; pte_t entry; - mm->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); if (write_access) entry = pte_mkwrite(pte_mkdirty(mk_pte(page, @@ -115,7 +115,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, pte_val(entry) += PAGE_SIZE; dst_pte++; } - dst->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); addr += HPAGE_SIZE; } return 0; @@ -206,7 +206,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, pte++; } } - mm->rss -= (end - start) >> PAGE_SHIFT; + add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); flush_tlb_range(vma, start, end); } diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c index 
4de3e352e832..b2854ef221d0 100644 --- a/arch/sparc64/kernel/binfmt_aout32.c +++ b/arch/sparc64/kernel/binfmt_aout32.c @@ -241,7 +241,7 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs) current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); current->mm->mmap = NULL; compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 49b814f9c371..ffa207795f1d 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -68,7 +68,7 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long i; pte_t entry; - mm->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); if (write_access) entry = pte_mkwrite(pte_mkdirty(mk_pte(page, @@ -123,7 +123,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, dst_pte++; addr += PAGE_SIZE; } - dst->rss += (HPAGE_SIZE / PAGE_SIZE); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); } return 0; @@ -213,7 +213,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, pte++; } } - mm->rss -= (end - start) >> PAGE_SHIFT; + add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); flush_tlb_range(vma, start, end); } diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c index 58d2c8a38916..1965efc974dc 100644 --- a/arch/x86_64/ia32/ia32_aout.c +++ b/arch/x86_64/ia32/ia32_aout.c @@ -313,7 +313,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) (current->mm->start_brk = N_BSSADDR(ex)); current->mm->free_area_cache = TASK_UNMAPPED_BASE; - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); current->mm->mmap = NULL; compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index bb7d9032f64a..009b8920c1ff 100644 --- a/fs/binfmt_aout.c +++ 
b/fs/binfmt_aout.c @@ -317,7 +317,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) (current->mm->start_brk = N_BSSADDR(ex)); current->mm->free_area_cache = current->mm->mmap_base; - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); current->mm->mmap = NULL; compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index a7ce4ea2dab1..76ec9d8939ff 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -773,7 +773,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs) /* Do this so that we can load the interpreter, if need be. We will change some of these later */ - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); current->mm->free_area_cache = current->mm->mmap_base; retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 745e9b8b3a5e..134c9c0d1f54 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -299,7 +299,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, struct pt_regs *regs /* do this so that we can load the interpreter, if need be * - we will change some of these later */ - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); #ifdef CONFIG_MMU retval = setup_arg_pages(bprm, current->mm->start_stack, executable_stack); diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index fb440602cfc1..f0cd67d9d31b 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -650,7 +650,7 @@ static int load_flat_file(struct linux_binprm * bprm, current->mm->start_brk = datapos + data_len + bss_len; current->mm->brk = (current->mm->start_brk + 3) & ~3; current->mm->context.end_brk = memp + ksize((void *) memp) - stack_len; - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); } if (flags & FLAT_FLAG_KTRACE) diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index f02ee506dd65..227a2682d2bf 100644 --- a/fs/binfmt_som.c +++ 
b/fs/binfmt_som.c @@ -259,7 +259,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) create_som_tables(bprm); current->mm->start_stack = bprm->p; - current->mm->rss = 0; + set_mm_counter(current->mm, rss, 0); #if 0 printk("(start_brk) %08lx\n" , (unsigned long) current->mm->start_brk); diff --git a/fs/exec.c b/fs/exec.c index 98dcef04c830..a8394499926c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -326,7 +326,7 @@ void install_arg_page(struct vm_area_struct *vma, pte_unmap(pte); goto out; } - mm->rss++; + inc_mm_counter(mm, rss); lru_cache_add_active(page); set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( page, vma->vm_page_prot)))); diff --git a/fs/proc/array.c b/fs/proc/array.c index 254a8eb6df15..37668fe998ad 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -432,7 +432,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) jiffies_to_clock_t(it_real_value), start_time, vsize, - mm ? mm->rss : 0, /* you might want to shift this left 3 */ + mm ? get_mm_counter(mm, rss) : 0, /* you might want to shift this left 3 */ rsslim, mm ? mm->start_code : 0, mm ? 
mm->end_code : 0, diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 49de0bd2e623..28b4a0253a92 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -24,7 +24,7 @@ char *task_mem(struct mm_struct *mm, char *buffer) "VmPTE:\t%8lu kB\n", (mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), mm->locked_vm << (PAGE_SHIFT-10), - mm->rss << (PAGE_SHIFT-10), + get_mm_counter(mm, rss) << (PAGE_SHIFT-10), data << (PAGE_SHIFT-10), mm->stack_vm << (PAGE_SHIFT-10), text, lib, (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); @@ -39,11 +39,13 @@ unsigned long task_vsize(struct mm_struct *mm) int task_statm(struct mm_struct *mm, int *shared, int *text, int *data, int *resident) { - *shared = mm->rss - mm->anon_rss; + int rss = get_mm_counter(mm, rss); + + *shared = rss - get_mm_counter(mm, anon_rss); *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> PAGE_SHIFT; *data = mm->total_vm - mm->shared_vm; - *resident = mm->rss; + *resident = rss; return mm->total_vm; } diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h index 0843a876b3ce..b00ab7b8856b 100644 --- a/include/asm-arm/tlb.h +++ b/include/asm-arm/tlb.h @@ -54,11 +54,11 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { struct mm_struct *mm = tlb->mm; unsigned long freed = tlb->freed; - int rss = mm->rss; + int rss = get_mm_counter(mm, rss); if (rss < freed) freed = rss; - mm->rss = rss - freed; + add_mm_counter(mm, rss, -freed); if (freed) { flush_tlb_mm(mm); diff --git a/include/asm-arm26/tlb.h b/include/asm-arm26/tlb.h index 7a3ac8815c1d..1316352a58f3 100644 --- a/include/asm-arm26/tlb.h +++ b/include/asm-arm26/tlb.h @@ -37,11 +37,11 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { struct mm_struct *mm = tlb->mm; unsigned long freed = tlb->freed; - int rss = mm->rss; + int rss = get_mm_counter(mm, rss); if (rss < freed) freed = rss; - mm->rss = rss - freed; + add_mm_counter(mm, rss, -freed); if (freed) { 
flush_tlb_mm(mm); diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 0dac231d565b..faff403e1061 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -88,11 +88,11 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { int freed = tlb->freed; struct mm_struct *mm = tlb->mm; - int rss = mm->rss; + int rss = get_mm_counter(mm, rss); if (rss < freed) freed = rss; - mm->rss = rss - freed; + add_mm_counter(mm, rss, -freed); tlb_flush_mmu(tlb, start, end); /* keep the page table cache within bounds */ diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h index aec6e832ddbc..3a9a6d1be75c 100644 --- a/include/asm-ia64/tlb.h +++ b/include/asm-ia64/tlb.h @@ -161,11 +161,11 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) { unsigned long freed = tlb->freed; struct mm_struct *mm = tlb->mm; - unsigned long rss = mm->rss; + unsigned long rss = get_mm_counter(mm, rss); if (rss < freed) freed = rss; - mm->rss = rss - freed; + add_mm_counter(mm, rss, -freed); /* * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and * tlb->end_addr. 
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h index 9ff2b61533d8..fa0ebf6786fc 100644 --- a/include/asm-sparc64/tlb.h +++ b/include/asm-sparc64/tlb.h @@ -80,11 +80,11 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un { unsigned long freed = mp->freed; struct mm_struct *mm = mp->mm; - unsigned long rss = mm->rss; + unsigned long rss = get_mm_counter(mm, rss); if (rss < freed) freed = rss; - mm->rss = rss - freed; + add_mm_counter(mm, rss, -freed); tlb_flush_mmu(mp); diff --git a/include/linux/sched.h b/include/linux/sched.h index 814ee929cd67..a30e91f40da6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -204,6 +204,12 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, extern void arch_unmap_area(struct vm_area_struct *area); extern void arch_unmap_area_topdown(struct vm_area_struct *area); +#define set_mm_counter(mm, member, value) (mm)->_##member = (value) +#define get_mm_counter(mm, member) ((mm)->_##member) +#define add_mm_counter(mm, member, value) (mm)->_##member += (value) +#define inc_mm_counter(mm, member) (mm)->_##member++ +#define dec_mm_counter(mm, member) (mm)->_##member-- +typedef unsigned long mm_counter_t; struct mm_struct { struct vm_area_struct * mmap; /* list of VMAs */ @@ -220,7 +226,7 @@ struct mm_struct { atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ int map_count; /* number of VMAs */ struct rw_semaphore mmap_sem; - spinlock_t page_table_lock; /* Protects page tables, mm->rss, mm->anon_rss */ + spinlock_t page_table_lock; /* Protects page tables and some counters */ struct list_head mmlist; /* List of maybe swapped mm's. 
These are globally strung * together off init_mm.mmlist, and are protected @@ -230,9 +236,13 @@ struct mm_struct { unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; - unsigned long rss, anon_rss, total_vm, locked_vm, shared_vm; + unsigned long total_vm, locked_vm, shared_vm; unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes; + /* Special counters protected by the page_table_lock */ + mm_counter_t _rss; + mm_counter_t _anon_rss; + unsigned long saved_auxv[42]; /* for /proc/PID/auxv */ unsigned dumpable:1; diff --git a/kernel/acct.c b/kernel/acct.c index 035669624b6c..4168f631868e 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -542,7 +542,7 @@ void acct_update_integrals(struct task_struct *tsk) if (delta == 0) return; tsk->acct_stimexpd = tsk->stime; - tsk->acct_rss_mem1 += delta * tsk->mm->rss; + tsk->acct_rss_mem1 += delta * get_mm_counter(tsk->mm, rss); tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; } } diff --git a/kernel/fork.c b/kernel/fork.c index 5b67b3ebf3c0..f42a17f88699 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -195,8 +195,8 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm) mm->mmap_cache = NULL; mm->free_area_cache = oldmm->mmap_base; mm->map_count = 0; - mm->rss = 0; - mm->anon_rss = 0; + set_mm_counter(mm, rss, 0); + set_mm_counter(mm, anon_rss, 0); cpus_clear(mm->cpu_vm_mask); mm->mm_rb = RB_ROOT; rb_link = &mm->mm_rb.rb_node; @@ -492,7 +492,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) if (retval) goto free_pt; - mm->hiwater_rss = mm->rss; + mm->hiwater_rss = get_mm_counter(mm,rss); mm->hiwater_vm = mm->total_vm; good_mm: diff --git a/mm/fremap.c b/mm/fremap.c index 55bdaacff231..3235fb77c133 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -39,7 +39,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, set_page_dirty(page); 
page_remove_rmap(page); page_cache_release(page); - mm->rss--; + dec_mm_counter(mm, rss); } } } else { @@ -92,7 +92,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, zap_pte(mm, vma, addr, pte); - mm->rss++; + inc_mm_counter(mm,rss); flush_icache_page(vma, page); set_pte_at(mm, addr, pte, mk_pte(page, prot)); page_add_file_rmap(page); diff --git a/mm/memory.c b/mm/memory.c index db09f2089ed2..f9dfb7be08fa 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -309,9 +309,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte = pte_mkclean(pte); pte = pte_mkold(pte); get_page(page); - dst_mm->rss++; + inc_mm_counter(dst_mm, rss); if (PageAnon(page)) - dst_mm->anon_rss++; + inc_mm_counter(dst_mm, anon_rss); set_pte_at(dst_mm, addr, dst_pte, pte); page_dup_rmap(page); } @@ -475,7 +475,7 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd, if (pte_dirty(ptent)) set_page_dirty(page); if (PageAnon(page)) - tlb->mm->anon_rss--; + dec_mm_counter(tlb->mm, anon_rss); else if (pte_young(ptent)) mark_page_accessed(page); tlb->freed++; @@ -1219,9 +1219,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, page_table = pte_offset_map(pmd, address); if (likely(pte_same(*page_table, pte))) { if (PageAnon(old_page)) - mm->anon_rss--; + dec_mm_counter(mm, anon_rss); if (PageReserved(old_page)) - ++mm->rss; + inc_mm_counter(mm, rss); else page_remove_rmap(old_page); flush_cache_page(vma, address, pfn); @@ -1627,7 +1627,7 @@ static int do_swap_page(struct mm_struct * mm, if (vm_swap_full()) remove_exclusive_swap_page(page); - mm->rss++; + inc_mm_counter(mm, rss); pte = mk_pte(page, vma->vm_page_prot); if (write_access && can_share_swap_page(page)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); @@ -1691,7 +1691,7 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, spin_unlock(&mm->page_table_lock); goto out; } - mm->rss++; + inc_mm_counter(mm, rss); entry = maybe_mkwrite(pte_mkdirty(mk_pte(page, 
vma->vm_page_prot)), vma); @@ -1807,7 +1807,7 @@ retry: /* Only go through if we didn't race with anybody else... */ if (pte_none(*page_table)) { if (!PageReserved(new_page)) - ++mm->rss; + inc_mm_counter(mm, rss); flush_icache_page(vma, new_page); entry = mk_pte(new_page, vma->vm_page_prot); @@ -2112,8 +2112,10 @@ EXPORT_SYMBOL(vmalloc_to_pfn); void update_mem_hiwater(struct task_struct *tsk) { if (tsk->mm) { - if (tsk->mm->hiwater_rss < tsk->mm->rss) - tsk->mm->hiwater_rss = tsk->mm->rss; + unsigned long rss = get_mm_counter(tsk->mm, rss); + + if (tsk->mm->hiwater_rss < rss) + tsk->mm->hiwater_rss = rss; if (tsk->mm->hiwater_vm < tsk->mm->total_vm) tsk->mm->hiwater_vm = tsk->mm->total_vm; } diff --git a/mm/mmap.c b/mm/mmap.c index 942895cf5f5c..3937ce2d1d3f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1978,7 +1978,7 @@ void exit_mmap(struct mm_struct *mm) vma = mm->mmap; mm->mmap = mm->mmap_cache = NULL; mm->mm_rb = RB_ROOT; - mm->rss = 0; + set_mm_counter(mm, rss, 0); mm->total_vm = 0; mm->locked_vm = 0; diff --git a/mm/nommu.c b/mm/nommu.c index f72d40c31a96..a9cf49a0e035 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -961,9 +961,11 @@ void arch_unmap_area(struct vm_area_struct *area) void update_mem_hiwater(struct task_struct *tsk) { + unsigned long rss = get_mm_counter(tsk->mm, rss); + if (likely(tsk->mm)) { - if (tsk->mm->hiwater_rss < tsk->mm->rss) - tsk->mm->hiwater_rss = tsk->mm->rss; + if (tsk->mm->hiwater_rss < rss) + tsk->mm->hiwater_rss = rss; if (tsk->mm->hiwater_vm < tsk->mm->total_vm) tsk->mm->hiwater_vm = tsk->mm->total_vm; } diff --git a/mm/rmap.c b/mm/rmap.c index 521fa4fa17bf..884d6d1928bc 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -257,7 +257,7 @@ static int page_referenced_one(struct page *page, pte_t *pte; int referenced = 0; - if (!mm->rss) + if (!get_mm_counter(mm, rss)) goto out; address = vma_address(page, vma); if (address == -EFAULT) @@ -436,7 +436,7 @@ void page_add_anon_rmap(struct page *page, BUG_ON(PageReserved(page)); BUG_ON(!anon_vma); 
- vma->vm_mm->anon_rss++; + inc_mm_counter(vma->vm_mm, anon_rss); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; index = (address - vma->vm_start) >> PAGE_SHIFT; @@ -509,7 +509,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) pte_t pteval; int ret = SWAP_AGAIN; - if (!mm->rss) + if (!get_mm_counter(mm, rss)) goto out; address = vma_address(page, vma); if (address == -EFAULT) @@ -595,10 +595,10 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) } set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); BUG_ON(pte_file(*pte)); - mm->anon_rss--; + dec_mm_counter(mm, anon_rss); } - mm->rss--; + dec_mm_counter(mm, rss); page_remove_rmap(page); page_cache_release(page); @@ -703,7 +703,7 @@ static void try_to_unmap_cluster(unsigned long cursor, page_remove_rmap(page); page_cache_release(page); - mm->rss--; + dec_mm_counter(mm, rss); (*mapcount)--; } @@ -802,7 +802,7 @@ static int try_to_unmap_file(struct page *page) if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) continue; cursor = (unsigned long) vma->vm_private_data; - while (vma->vm_mm->rss && + while (get_mm_counter(vma->vm_mm, rss) && cursor < max_nl_cursor && cursor < vma->vm_end - vma->vm_start) { try_to_unmap_cluster(cursor, &mapcount, vma); diff --git a/mm/swapfile.c b/mm/swapfile.c index e2294292792b..a60e0075d55b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -422,7 +422,7 @@ void free_swap_and_cache(swp_entry_t entry) static void unuse_pte(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, swp_entry_t entry, struct page *page) { - vma->vm_mm->rss++; + inc_mm_counter(vma->vm_mm, rss); get_page(page); set_pte_at(vma->vm_mm, addr, pte, pte_mkold(mk_pte(page, vma->vm_page_prot))); |
