diff options
| author | Andrew Morton <akpm@osdl.org> | 2004-03-11 16:25:47 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-03-11 16:25:47 -0800 |
| commit | 349055d09d3ebcbc7cb0bd50a0488194dc68f2ee (patch) | |
| tree | 1b6bb1b39895cdef534c6f57b716be389b633173 | |
| parent | fb5b4abea5de73406d63f60aeb455edccff8eb6e (diff) | |
[PATCH] vmscan: add lru_to_page() helper
From: Nick Piggin <piggin@cyberone.com.au>
Add a little helper macro for a common list extraction operation in vmscan.c
| -rw-r--r-- | mm/vmscan.c | 23 |
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7768aca74d1d..be07be47f92b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -43,14 +43,15 @@ int vm_swappiness = 60; static long total_memory; +#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) + #ifdef ARCH_HAS_PREFETCH #define prefetch_prev_lru_page(_page, _base, _field) \ do { \ if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ - prev = list_entry(_page->lru.prev, \ - struct page, lru); \ + prev = lru_to_page(&(_page->lru)); \ prefetch(&prev->_field); \ } \ } while (0) @@ -64,8 +65,7 @@ static long total_memory; if ((_page)->lru.prev != _base) { \ struct page *prev; \ \ - prev = list_entry(_page->lru.prev, \ - struct page, lru); \ + prev = lru_to_page(&(_page->lru)); \ prefetchw(&prev->_field); \ } \ } while (0) @@ -260,7 +260,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned) int may_enter_fs; int referenced; - page = list_entry(page_list->prev, struct page, lru); + page = lru_to_page(page_list); list_del(&page->lru); if (TestSetPageLocked(page)) @@ -494,8 +494,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask, while (nr_scan++ < SWAP_CLUSTER_MAX && !list_empty(&zone->inactive_list)) { - page = list_entry(zone->inactive_list.prev, - struct page, lru); + page = lru_to_page(&zone->inactive_list); prefetchw_prev_lru_page(page, &zone->inactive_list, flags); @@ -540,7 +539,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask, * Put back any unfreeable pages. 
*/ while (!list_empty(&page_list)) { - page = list_entry(page_list.prev, struct page, lru); + page = lru_to_page(&page_list); if (TestSetPageLRU(page)) BUG(); list_del(&page->lru); @@ -599,7 +598,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, pgmoved = 0; spin_lock_irq(&zone->lru_lock); while (nr_pages && !list_empty(&zone->active_list)) { - page = list_entry(zone->active_list.prev, struct page, lru); + page = lru_to_page(&zone->active_list); prefetchw_prev_lru_page(page, &zone->active_list, flags); if (!TestClearPageLRU(page)) BUG(); @@ -650,7 +649,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, reclaim_mapped = 1; while (!list_empty(&l_hold)) { - page = list_entry(l_hold.prev, struct page, lru); + page = lru_to_page(&l_hold); list_del(&page->lru); if (page_mapped(page)) { if (!reclaim_mapped) { @@ -681,7 +680,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, pgmoved = 0; spin_lock_irq(&zone->lru_lock); while (!list_empty(&l_inactive)) { - page = list_entry(l_inactive.prev, struct page, lru); + page = lru_to_page(&l_inactive); prefetchw_prev_lru_page(page, &l_inactive, flags); if (TestSetPageLRU(page)) BUG(); @@ -710,7 +709,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, pgmoved = 0; while (!list_empty(&l_active)) { - page = list_entry(l_active.prev, struct page, lru); + page = lru_to_page(&l_active); prefetchw_prev_lru_page(page, &l_active, flags); if (TestSetPageLRU(page)) BUG(); |
