author		Andrew Morton <akpm@digeo.com>	2002-12-02 21:31:09 -0800
committer	Linus Torvalds <torvalds@home.transmeta.com>	2002-12-02 21:31:09 -0800
commit		3b0db538ef6782a1e2a549c68f1605ca8d35dd7e (patch)
tree		2b36774ee8fe53a6a0a643941f0c4e90ef0ddf7d /include
parent		3139a3ecc8756f79e00f72822c3168fb23353643 (diff)
[PATCH] Move reclaimable pages to the tail of the inactive list on I/O completion
The patch addresses some search complexity failures which occur when there is a large amount of dirty data on the inactive list. Normally we attempt to write out those pages and then move them to the head of the inactive list. But this goes against page aging, and means that the page has to traverse the entire list again before it can be reclaimed.

But the VM really wants to reclaim that page - it has reached the tail of the LRU. So what we do in this patch is to mark the page as needing reclamation, and then start I/O. In the I/O completion handler we check to see if the page is still probably reclaimable and if so, move it to the tail of the inactive list, where it can be reclaimed immediately.

Under really heavy swap-intensive loads this increases the page reclaim efficiency (pages reclaimed/pages scanned) from 10% to 25%. Which is OK for that sort of load. Not great, but OK.

This code path takes the LRU lock once per page. I didn't bother playing games with batching up the locking work - it's a rare code path, and the machine has plenty of CPU to spare when this is happening.
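The diff below covers only the header changes; the mechanism described above lives in mm/. As a rough sketch of the writeout half of the protocol (the function name, the single-argument ->writepage() signature and the error handling are illustrative assumptions, not the patch body):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Sketch of the shrink-list writeout step the changelog describes.
 * Assumed shape only: the real code lives in mm/vmscan.c, which this
 * include/-limited view does not show.
 */
static int start_reclaim_writeout(struct page *page,
                                  struct address_space *mapping)
{
        int err;

        SetPageReclaim(page);           /* ask I/O completion to rotate us */
        err = mapping->a_ops->writepage(page);
        if (err < 0)
                ClearPageReclaim(page); /* write failed: cancel the hint */
        return err;
}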
Diffstat (limited to 'include')
-rw-r--r--	include/linux/page-flags.h	6
-rw-r--r--	include/linux/swap.h		1
2 files changed, 7 insertions, 0 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f8aaadb1691e..7018961aea91 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -72,6 +72,7 @@
#define PG_direct 16 /* ->pte_chain points directly at pte */
#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */
+#define PG_reclaim 18 /* To be reclaimed asap */
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
@@ -239,6 +240,11 @@ extern void get_full_page_state(struct page_state *ret);
#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
+#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
+#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
+#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
+#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+
/*
* The PageSwapCache predicate doesn't use a PG_flag at this time,
* but it may again do so one day.
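The new accessors follow the PG_mappedtodisk pattern directly above them. Their intended protocol, per the changelog, is set-by-reclaim, consume-at-completion. A minimal sketch of the completion-side consumer follows; its placement on the end_page_writeback() path is an assumption, since the mm/ half is outside this diffstat:

#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Assumed completion-side consumer: if reclaim flagged this page for
 * immediate reclaim, rotate it to the tail of the inactive list.
 * TestClearPageReclaim consumes the hint atomically, so a page is
 * rotated at most once per writeout.
 */
static void rotate_if_reclaimable(struct page *page)
{
        if (TestClearPageReclaim(page))
                rotate_reclaimable_page(page);
}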
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d74f0800ab32..f6b1421f86b0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -150,6 +150,7 @@ extern void FASTCALL(lru_cache_add(struct page *));
extern void FASTCALL(lru_cache_add_active(struct page *));
extern void FASTCALL(activate_page(struct page *));
extern void lru_add_drain(void);
+extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
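The prototype above is the whole of the swap.h change. One plausible shape for the mm/swap.c body behind it, reconstructed from the changelog's own description (tail-of-inactive-list placement, one LRU-lock acquisition per page); the bail-out conditions and the per-zone inactive_list/lru_lock names are assumptions about the 2.5-era VM, not the literal patch:

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>

/*
 * Writeback has just finished against a page that was marked with
 * PG_reclaim.  If it still looks reclaimable, move it to the tail of
 * the inactive list so the next scan can free it immediately.
 * Returns 0 if the page was rotated, nonzero if it was left alone.
 */
int rotate_reclaimable_page(struct page *page)
{
        struct zone *zone = page_zone(page);
        unsigned long flags;
        int rotated = 0;

        /*
         * The page may have been redirtied, relocked or reactivated
         * since PG_reclaim was set; if so, respect page aging and
         * leave it where it is.
         */
        if (PageLocked(page) || PageDirty(page) ||
            PageActive(page) || !PageLRU(page))
                return 1;

        /* One LRU-lock round trip per page, as the changelog notes. */
        spin_lock_irqsave(&zone->lru_lock, flags);
        if (PageLRU(page) && !PageActive(page)) {
                list_del(&page->lru);
                list_add_tail(&page->lru, &zone->inactive_list);
                rotated = 1;
        }
        spin_unlock_irqrestore(&zone->lru_lock, flags);
        return !rotated;
}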