author     Andrew Morton <akpm@digeo.com>                 2003-02-03 16:59:17 -0800
committer  Linus Torvalds <torvalds@home.transmeta.com>   2003-02-03 16:59:17 -0800
commit     b29422e31ced8c0b37b23eff2eda8ec2cbfc3f3a (patch)
tree       d186bba0c0d4195ba9f9da5980d9e12759d0fa90
parent     f5585f5df513f0ebdd2d6ad2ff884c9c765010af (diff)
[PATCH] add stats for page reclaim via inode freeing
pagecache can be reclaimed via the page LRU and via prune_icache.  We
currently don't know how much reclaim is happening via each.

The patch adds instrumentation to display the number of pages which were
freed via prune_icache.  This is displayed in /proc/vmstat:pginodesteal
and /proc/vmstat:kswapd_inodesteal.

Turns out that under some workloads (well, dbench at least), fully half of
page reclaim is via the unused inode list.  Which seems quite OK to me.
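For reference, a minimal userspace sketch (not part of the patch) that prints
the two new counters.  The field names come from this patch; the usual
one-"name value"-pair-per-line layout of /proc/vmstat is assumed.

/*
 * Print pginodesteal and kswapd_inodesteal from /proc/vmstat.
 * Assumes each line of that file is "name value".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long val;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, "pginodesteal") ||
		    !strcmp(name, "kswapd_inodesteal"))
			printf("%s %lu\n", name, val);
	}
	fclose(f);
	return 0;
}

Sampling these before and after a workload such as dbench shows how much of
the reclaim went through prune_icache rather than the page LRU.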
-rw-r--r--  fs/inode.c                   8
-rw-r--r--  include/linux/fs.h           4
-rw-r--r--  include/linux/page-flags.h   5
-rw-r--r--  mm/page_alloc.c              5
-rw-r--r--  mm/truncate.c               27
5 files changed, 34 insertions, 15 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 257f33f98e05..4179e6a45075 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -16,6 +16,7 @@
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
+#include <linux/swap.h>
#include <linux/security.h>
/*
@@ -392,6 +393,7 @@ static void prune_icache(int nr_to_scan)
LIST_HEAD(freeable);
int nr_pruned = 0;
int nr_scanned;
+ unsigned long reap = 0;
spin_lock(&inode_lock);
for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
@@ -410,7 +412,7 @@ static void prune_icache(int nr_to_scan)
__iget(inode);
spin_unlock(&inode_lock);
if (remove_inode_buffers(inode))
- invalidate_inode_pages(&inode->i_data);
+ reap += invalidate_inode_pages(&inode->i_data);
iput(inode);
spin_lock(&inode_lock);
@@ -428,6 +430,10 @@ static void prune_icache(int nr_to_scan)
inodes_stat.nr_unused -= nr_pruned;
spin_unlock(&inode_lock);
dispose_list(&freeable);
+ if (current_is_kswapd())
+ mod_page_state(kswapd_inodesteal, reap);
+ else
+ mod_page_state(pginodesteal, reap);
}
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f4c994d02f5d..9a17c9819ae9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1102,9 +1102,9 @@ extern int full_check_disk_change(struct block_device *);
extern int __check_disk_change(dev_t);
extern int invalidate_inodes(struct super_block *);
extern int invalidate_device(kdev_t, int);
-extern void invalidate_mapping_pages(struct address_space *mapping,
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-extern void invalidate_inode_pages(struct address_space *mapping);
+unsigned long invalidate_inode_pages(struct address_space *mapping);
extern void invalidate_inode_pages2(struct address_space *mapping);
extern void write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a50e09ff79ea..0327a8421c9d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -97,15 +97,20 @@ struct page_state {
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */
unsigned long pgalloc; /* page allocations */
+
unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */
unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */
+
unsigned long pgscan; /* pages scanned by page reclaim */
unsigned long pgrefill; /* inspected in refill_inactive_zone */
unsigned long pgsteal; /* total pages reclaimed */
+ unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
+
+ unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
unsigned long pageoutrun; /* kswapd's calls to page reclaim */
unsigned long allocstall; /* direct reclaim calls */
unsigned long pgrotated; /* pages rotated to tail of the LRU */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3969ad493b35..0112d0eb63cf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1379,15 +1379,20 @@ static char *vmstat_text[] = {
"pswpin",
"pswpout",
"pgalloc",
+
"pgfree",
"pgactivate",
"pgdeactivate",
"pgfault",
"pgmajfault",
+
"pgscan",
"pgrefill",
"pgsteal",
+ "pginodesteal",
"kswapd_steal",
+
+ "kswapd_inodesteal",
"pageoutrun",
"allocstall",
"pgrotated",
diff --git a/mm/truncate.c b/mm/truncate.c
index 0c1dd53ae48f..56bdd03b1576 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -64,24 +64,25 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
* ->page_lock. That provides exclusion against the __set_page_dirty
* functions.
*/
-static void
+static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
if (page->mapping != mapping)
- return;
+ return 0;
if (PagePrivate(page) && !try_to_release_page(page, 0))
- return;
+ return 0;
write_lock(&mapping->page_lock);
if (PageDirty(page)) {
write_unlock(&mapping->page_lock);
- } else {
- __remove_from_page_cache(page);
- write_unlock(&mapping->page_lock);
- ClearPageUptodate(page);
- page_cache_release(page); /* pagecache ref */
+ return 0;
}
+ __remove_from_page_cache(page);
+ write_unlock(&mapping->page_lock);
+ ClearPageUptodate(page);
+ page_cache_release(page); /* pagecache ref */
+ return 1;
}
/**
@@ -189,11 +190,12 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
* invalidate pages which are dirty, locked, under writeback or mapped into
* pagetables.
*/
-void invalidate_mapping_pages(struct address_space *mapping,
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
struct pagevec pvec;
pgoff_t next = start;
+ unsigned long ret = 0;
int i;
pagevec_init(&pvec, 0);
@@ -213,18 +215,19 @@ void invalidate_mapping_pages(struct address_space *mapping,
goto unlock;
if (page_mapped(page))
goto unlock;
- invalidate_complete_page(mapping, page);
+ ret += invalidate_complete_page(mapping, page);
unlock:
unlock_page(page);
}
pagevec_release(&pvec);
cond_resched();
}
+ return ret;
}
-void invalidate_inode_pages(struct address_space *mapping)
+unsigned long invalidate_inode_pages(struct address_space *mapping)
{
- invalidate_mapping_pages(mapping, 0, ~0UL);
+ return invalidate_mapping_pages(mapping, 0, ~0UL);
}
/**