diff options
| author | Andrew Morton <akpm@zip.com.au> | 2002-08-30 01:49:17 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-08-30 01:49:17 -0700 |
| commit | 8fd3d4584142a0d68eaeabecff9fa99831c9451a (patch) | |
| tree | 228245b567fdc5daece516de97eff464e279b50e /include | |
| parent | 2b3414431b7b631ffed4170de9e2101c4adbf31d (diff) | |
[PATCH] batched freeing of anon pages
A reworked version of the batched page freeing and lock amortisation
for VMA teardown.
It walks the existing 507-page list in the mmu_gather_t in 16-page
chunks, drops their refcounts in 16-page chunks, and de-LRUs and
frees any resulting zero-count pages in up-to-16 page chunks.
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-generic/tlb.h | 4 |
| -rw-r--r-- | include/linux/pagemap.h | 1 |
| -rw-r--r-- | include/linux/swap.h | 1 |
3 files changed, 3 insertions, 3 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e629251cb7a7..f3564a558be4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -79,10 +79,8 @@ static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigne
 	tlb_flush(tlb);
 	nr = tlb->nr;
 	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
+		free_pages_and_swap_cache(tlb->pages, tlb->nr);
 		tlb->nr = 0;
-		for (i=0; i < nr; i++)
-			free_page_and_swap_cache(tlb->pages[i]);
 	}
 }
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7a41a4184a27..43390b2e2ef4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,6 +24,7 @@
 #define page_cache_get(page)		get_page(page)
 #define page_cache_release(page)	put_page(page)
 
+void release_pages(struct page **pages, int nr);
 
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index affa89d77eb1..8844b1408788 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -182,6 +182,7 @@ extern int move_to_swap_cache(struct page *page, swp_entry_t entry);
 extern int move_from_swap_cache(struct page *page, unsigned long index,
 		struct address_space *mapping);
 extern void free_page_and_swap_cache(struct page *page);
+extern void free_pages_and_swap_cache(struct page **pages, int nr);
 extern struct page * lookup_swap_cache(swp_entry_t);
 extern struct page * read_swap_cache_async(swp_entry_t);
