| author | Andrew Morton <akpm@zip.com.au> | 2002-07-18 21:09:17 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-07-18 21:09:17 -0700 |
| commit | e177ea28e7eded3490174487c81e5bef8a2c4d95 | |
| tree | 3a4422d4f04b7643fd14e809e7b8385246122bd9 /mm | |
| parent | 6a2ea3382b534e937ba2153f4a0c6021e04a1ef5 | |
[PATCH] VM instrumentation
A patch from Rik which adds some operational statistics to the VM.
In /proc/meminfo:
PageTables: Amount of memory used for process pagetables
PteChainTot: Amount of memory allocated for pte_chain objects
PteChainUsed: Amount of memory currently in use for pte chains.
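For reference, the new fields can be read from userspace like any other /proc/meminfo line. A minimal sketch, assuming the usual `Name: value kB` formatting (the exact formatting comes from the /proc side of the patch, which is outside the mm-only diff below):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Print only the three fields added by this patch. */
		if (strncmp(line, "PageTables:", 11) == 0 ||
		    strncmp(line, "PteChainTot:", 12) == 0 ||
		    strncmp(line, "PteChainUsed:", 13) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```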
In /proc/stat:
pageallocs: Number of pages allocated in the page allocator
pagefrees: Number of pages returned to the page allocator
(These can be used to measure the allocation rate)
pageactiv: Number of pages activated (moved to the active list)
pagedeact: Number of pages deactivated (moved to the inactive list)
pagefault: Total pagefaults
majorfault: Major pagefaults
pagescan: Number of pages which shrink_cache looked at
pagesteal: Number of pages which shrink_cache freed
pageoutrun: Number of calls to try_to_free_pages()
allocstall: Number of calls to balance_classzone()
Rik will be writing a userspace app which interprets these things.
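A minimal sketch of the kind of sampling such a tool might do, assuming each accumulator shows up in /proc/stat as its own `name value` line (the exact layout is produced by the /proc half of the patch, which is outside the mm-only diff below):

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Return the value of a "name value ..." line in /proc/stat, 0 if absent. */
static unsigned long read_counter(const char *name)
{
	char line[256], key[64];
	unsigned long val, result = 0;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%63s %lu", key, &val) == 2 &&
		    strcmp(key, name) == 0) {
			result = val;
			break;
		}
	}
	fclose(f);
	return result;
}

int main(void)
{
	unsigned long before = read_counter("pageallocs");

	sleep(1);	/* sample interval */
	printf("page allocations/sec: %lu\n",
	       read_counter("pageallocs") - before);
	return 0;
}
```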
The /proc/meminfo stats are efficient, but the /proc/stat accumulators
will cause undesirable cacheline bouncing. We need to break the disk
statistics out of struct kernel_stat and make everything else in there
per-cpu. If that doesn't happen in time for 2.6 then we disable
KERNEL_STAT_INC().
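For illustration only (hypothetical names, not the kernel_stat API): per-cpu counters keep the hot increment on a cacheline owned by the local CPU and push the cost of summing onto the comparatively rare /proc read, which is the direction described above.

```c
/*
 * Illustration of per-cpu counters vs. a single shared accumulator.
 * Names and sizes are made up for the sketch.
 */
#define NR_CPUS		32
#define CACHE_BYTES	64

struct vm_counters {
	unsigned long pgalloc;
	unsigned long pgfree;
} __attribute__((aligned(CACHE_BYTES)));	/* one cacheline per CPU */

static struct vm_counters per_cpu_vm[NR_CPUS];

/* Hot path: touches only the local CPU's cacheline. */
static inline void count_pgalloc(int cpu, unsigned long pages)
{
	per_cpu_vm[cpu].pgalloc += pages;
}

/* Slow path (e.g. a /proc read): fold all CPUs into one total. */
static unsigned long total_pgalloc(void)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += per_cpu_vm[cpu].pgalloc;
	return sum;
}
```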
Diffstat (limited to 'mm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | mm/filemap.c | 2 |
| -rw-r--r-- | mm/memory.c | 3 |
| -rw-r--r-- | mm/page_alloc.c | 9 |
| -rw-r--r-- | mm/rmap.c | 3 |
| -rw-r--r-- | mm/swap.c | 1 |
| -rw-r--r-- | mm/vmscan.c | 9 |
6 files changed, 27 insertions, 0 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index d7a4970c68a6..b11dcb824da5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
+#include <linux/kernel_stat.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
@@ -1533,6 +1534,7 @@ no_cached_page:
 	return NULL;
 
 page_not_uptodate:
+	KERNEL_STAT_INC(pgmajfault);
 	lock_page(page);
 
 	/* Did it get unhashed while we waited for it? */
diff --git a/mm/memory.c b/mm/memory.c
index c2455084c4d2..2937caaa907d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -36,6 +36,7 @@
  * (Gerhard.Wichert@pdb.siemens.de)
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
@@ -1177,6 +1178,7 @@ static int do_swap_page(struct mm_struct * mm,
 
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
+		KERNEL_STAT_INC(pgmajfault);
 	}
 
 	lock_page(page);
@@ -1419,6 +1421,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
 	current->state = TASK_RUNNING;
 	pgd = pgd_offset(mm, address);
+	KERNEL_STAT_INC(pgfault);
 	/*
 	 * We need the page table lock to synchronize with kswapd
 	 * and the SMP-safe atomic PTE updates.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5b6e175632d..2acac7c0aa80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/config.h>
+#include <linux/kernel_stat.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/interrupt.h>
@@ -86,6 +87,8 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 	struct page *base;
 	zone_t *zone;
 
+	KERNEL_STAT_ADD(pgfree, 1<<order);
+
 	BUG_ON(PagePrivate(page));
 	BUG_ON(page->mapping != NULL);
 	BUG_ON(PageLocked(page));
@@ -324,6 +327,8 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
 	struct page * page;
 	int freed;
 
+	KERNEL_STAT_ADD(pgalloc, 1<<order);
+
 	zone = zonelist->zones;
 	classzone = *zone;
 	if (classzone == NULL)
@@ -393,6 +398,7 @@ nopage:
 	if (!(gfp_mask & __GFP_WAIT))
 		goto nopage;
 
+	KERNEL_STAT_INC(allocstall);
 	page = balance_classzone(classzone, gfp_mask, order, &freed);
 	if (page)
 		return page;
@@ -563,6 +569,9 @@ void get_page_state(struct page_state *ret)
 		ret->nr_pagecache += ps->nr_pagecache;
 		ret->nr_active += ps->nr_active;
 		ret->nr_inactive += ps->nr_inactive;
+		ret->nr_page_table_pages += ps->nr_page_table_pages;
+		ret->nr_pte_chain_pages += ps->nr_pte_chain_pages;
+		ret->used_pte_chains_bytes += ps->used_pte_chains_bytes;
 	}
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 7d7c2a621944..98dd16cae14a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -391,6 +391,7 @@ static inline struct pte_chain * pte_chain_pop(void)
 static inline void pte_chain_free(struct pte_chain * pte_chain,
 		struct pte_chain * prev_pte_chain, struct page * page)
 {
+	mod_page_state(used_pte_chains_bytes, -sizeof(struct pte_chain));
 	if (prev_pte_chain)
 		prev_pte_chain->next = pte_chain->next;
 	else if (page)
@@ -423,6 +424,7 @@ static inline struct pte_chain * pte_chain_alloc()
 
 	spin_unlock(&pte_chain_freelist_lock);
 
+	mod_page_state(used_pte_chains_bytes, sizeof(struct pte_chain));
 	return pte_chain;
 }
 
@@ -443,6 +445,7 @@ static void alloc_new_pte_chains()
 	int i = PAGE_SIZE / sizeof(struct pte_chain);
 
 	if (pte_chain) {
+		inc_page_state(nr_pte_chain_pages);
 		for (; i-- > 0; pte_chain++)
 			pte_chain_push(pte_chain);
 	} else {
diff --git a/mm/swap.c b/mm/swap.c
index 7ea683cc9467..cc97e3c8c6b8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -41,6 +41,7 @@ static inline void activate_page_nolock(struct page * page)
 	if (PageLRU(page) && !PageActive(page)) {
 		del_page_from_inactive_list(page);
 		add_page_to_active_list(page);
+		KERNEL_STAT_INC(pgactivate);
 	}
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 925c82063e8b..17e53ac5a567 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -92,6 +92,7 @@ shrink_cache(int nr_pages, zone_t *classzone,
 
 		list_del(entry);
 		list_add(entry, &inactive_list);
+		KERNEL_STAT_INC(pgscan);
 
 		/*
 		 * Zero page counts can happen because we unlink the pages
@@ -142,6 +143,7 @@ shrink_cache(int nr_pages, zone_t *classzone,
 			add_page_to_active_list(page);
 			pte_chain_unlock(page);
 			unlock_page(page);
+			KERNEL_STAT_INC(pgactivate);
 			continue;
 		}
 
@@ -302,6 +304,7 @@ page_freeable:
 		/* effectively free the page here */
 		page_cache_release(page);
 
+		KERNEL_STAT_INC(pgsteal);
 		if (--nr_pages)
 			continue;
 		goto out;
@@ -315,6 +318,7 @@ page_active:
 		add_page_to_active_list(page);
 		pte_chain_unlock(page);
 		unlock_page(page);
+		KERNEL_STAT_INC(pgactivate);
 	}
 out:	spin_unlock(&pagemap_lru_lock);
 	return nr_pages;
@@ -339,6 +343,8 @@ static void refill_inactive(int nr_pages)
 
 		page = list_entry(entry, struct page, lru);
 		entry = entry->prev;
+		KERNEL_STAT_INC(pgscan);
+
 		pte_chain_lock(page);
 		if (page->pte.chain && page_referenced(page)) {
 			list_del(&page->lru);
@@ -349,6 +355,7 @@ static void refill_inactive(int nr_pages)
 		del_page_from_active_list(page);
 		add_page_to_inactive_list(page);
 		pte_chain_unlock(page);
+		KERNEL_STAT_INC(pgdeactivate);
 	}
 	spin_unlock(&pagemap_lru_lock);
 }
@@ -398,6 +405,8 @@ int try_to_free_pages(zone_t *classzone, unsigned int gfp_mask, unsigned int ord
 	int priority = DEF_PRIORITY;
 	int nr_pages = SWAP_CLUSTER_MAX;
 
+	KERNEL_STAT_INC(pageoutrun);
+
 	do {
 		nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages);
 		if (nr_pages <= 0)
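The KERNEL_STAT_INC()/KERNEL_STAT_ADD() and *_page_state() helpers used above are defined outside mm/ (the diff pulls in linux/kernel_stat.h), so their bodies do not appear in this mm-only diffstat. Presumably they are thin wrappers that bump fields of a shared global structure, along the lines of the sketch below; the struct layout and macro bodies are assumptions for illustration, not taken from the patch.

```c
/*
 * Sketch only: how the accumulators used in the diff presumably behave.
 * A single shared counter struct like this is exactly what causes the
 * cacheline bouncing the commit message warns about on SMP.
 */
struct vm_event_counters {		/* hypothetical, for illustration */
	unsigned long pgalloc, pgfree;
	unsigned long pgactivate, pgdeactivate;
	unsigned long pgfault, pgmajfault;
	unsigned long pgscan, pgsteal;
	unsigned long pageoutrun, allocstall;
};

static struct vm_event_counters vm_events;

#define KERNEL_STAT_INC(x)	(vm_events.x++)
#define KERNEL_STAT_ADD(x, n)	(vm_events.x += (n))
```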
