author    Andrew Morton <akpm@zip.com.au>    2002-08-30 01:49:17 -0700
committer Linus Torvalds <torvalds@home.transmeta.com>    2002-08-30 01:49:17 -0700
commit    8fd3d4584142a0d68eaeabecff9fa99831c9451a (patch)
tree      228245b567fdc5daece516de97eff464e279b50e /include/linux/swap.h
parent    2b3414431b7b631ffed4170de9e2101c4adbf31d (diff)
[PATCH] batched freeing of anon pages
A reworked version of the batched page freeing and lock amortisation for VMA teardown. It walks the existing 507-page list in the mmu_gather_t in 16-page chunks, drops their refcounts 16 pages at a time, and de-LRUs and frees any resulting zero-count pages in chunks of up to 16 pages.
Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--  include/linux/swap.h  |  1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index affa89d77eb1..8844b1408788 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -182,6 +182,7 @@ extern int move_to_swap_cache(struct page *page, swp_entry_t entry);
 extern int move_from_swap_cache(struct page *page, unsigned long index,
 		struct address_space *mapping);
 extern void free_page_and_swap_cache(struct page *page);
+extern void free_pages_and_swap_cache(struct page **pages, int nr);
 extern struct page * lookup_swap_cache(swp_entry_t);
 extern struct page * read_swap_cache_async(swp_entry_t);
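For context, the new prototype pairs with the per-page free_page_and_swap_cache() declared one line above it. The sketch below is illustrative only: it shows one plausible chunked walk over the page array, built from that per-page helper, and does not reproduce the actual patch body. In particular, the real change amortises the refcount drops and LRU-lock work across each 16-page chunk, which a per-page loop cannot capture; SWAP_CHUNK and the loop structure here are assumptions for the example.

	#include <linux/mm.h>
	#include <linux/swap.h>

	/* Assumed chunk size, matching the 16-page batching the changelog describes. */
	#define SWAP_CHUNK 16

	/*
	 * Naive sketch of a body for the new declaration: walk the array in
	 * SWAP_CHUNK-sized pieces and release each page's swap-cache entry and
	 * reference via the existing per-page helper.
	 */
	void free_pages_and_swap_cache(struct page **pages, int nr)
	{
		while (nr) {
			int i, todo = nr > SWAP_CHUNK ? SWAP_CHUNK : nr;

			for (i = 0; i < todo; i++)
				free_page_and_swap_cache(pages[i]);

			pages += todo;
			nr -= todo;
		}
	}

A caller such as the mmu_gather teardown path would hand its accumulated page array and count to this function once, with the chunking kept internal so the caller stays oblivious to the batch size.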