| author | Andrew Morton <akpm@zip.com.au> | 2002-08-27 21:03:37 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@penguin.transmeta.com> | 2002-08-27 21:03:37 -0700 |
| commit | e6f0e61d9ed94134f57bcf6c72b81848b9d3c2fe | |
| tree | ae2dadfcfdce68721c846dbee6213d2c72a5baac /include/linux | |
| parent | 4fce9c6f187c263e93b74c7db01b258ff77104b4 | |
[PATCH] per-zone-LRU
Replace the global page LRUs with per-zone LRUs.
This fixes the failure described at
http://mail.nl.linux.org/linux-mm/2002-08/msg00049.html
It also fixes the problem wherein a search for a reclaimable
ZONE_NORMAL page will undesirably move aged ZONE_HIGHMEM pages to the
head of the inactive list. (I haven't tried to measure any benefit
from this aspect).
It also reduces the amount of CPU time spent scanning pages in page
reclaim. I haven't instrumented this either.
This is a minimal conversion - the aging and reclaim logic is left
unchanged, as far as is possible.
I was bitten by the "incremental min" logic in __alloc_pages again.
There's a state in which the sum-of-mins exceeds zone->pages_high. So
we call into try_to_free_pages(), which does nothing at all (all zones
have free_pages > pages_high). The incremental min is unchanged and
the VM locks up.
This was fixed in __alloc_pages: if zone->free_pages is greater than
zone->pages_high then just go and grab a page.
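As a rough illustration, here is a minimal sketch of that fallback. It is not the
actual __alloc_pages() code: the function name, the zone-array walk, and the
rmqueue() helper are assumptions standing in for the real allocator plumbing.

```c
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Assumed helper standing in for the buddy allocator's per-zone
 * page grabber; not taken from this patch. */
static struct page *rmqueue(struct zone *zone, unsigned int order);

/*
 * Sketch of the fix described above, NOT the real __alloc_pages():
 * if a zone is already above its pages_high watermark, skip the
 * incremental-min accounting (and any call to try_to_free_pages())
 * and just grab a page from that zone.
 */
static struct page *alloc_pages_sketch(unsigned int order, struct zone **zones)
{
	struct zone *zone;
	int i;

	for (i = 0; (zone = zones[i]) != NULL; i++) {
		if (zone->free_pages > zone->pages_high)
			return rmqueue(zone, order);
	}

	/* Otherwise fall back to the usual incremental-min path and,
	 * if that still fails, try_to_free_pages() (not shown). */
	return NULL;
}
```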
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/mm_inline.h | 40 |
|---|---|---|
| -rw-r--r-- | include/linux/mmzone.h | 11 |
| -rw-r--r-- | include/linux/page-flags.h | 2 |
| -rw-r--r-- | include/linux/swap.h | 45 |
4 files changed, 49 insertions, 49 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
new file mode 100644
index 000000000000..47762ca695a5
--- /dev/null
+++ b/include/linux/mm_inline.h
@@ -0,0 +1,40 @@
+
+static inline void
+add_page_to_active_list(struct zone *zone, struct page *page)
+{
+	list_add(&page->lru, &zone->active_list);
+	zone->nr_active++;
+}
+
+static inline void
+add_page_to_inactive_list(struct zone *zone, struct page *page)
+{
+	list_add(&page->lru, &zone->inactive_list);
+	zone->nr_inactive++;
+}
+
+static inline void
+del_page_from_active_list(struct zone *zone, struct page *page)
+{
+	list_del(&page->lru);
+	zone->nr_active--;
+}
+
+static inline void
+del_page_from_inactive_list(struct zone *zone, struct page *page)
+{
+	list_del(&page->lru);
+	zone->nr_inactive--;
+}
+
+static inline void
+del_page_from_lru(struct zone *zone, struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		ClearPageActive(page);
+		zone->nr_active--;
+	} else {
+		zone->nr_inactive--;
+	}
+}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 63739682f8df..928000348e6b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/wait.h>
+#include <asm/atomic.h>
 
 /*
  * Free memory management - zoned buddy allocator.
@@ -43,6 +44,12 @@ struct zone {
 	unsigned long pages_min, pages_low, pages_high;
 	int need_balance;
 
+	struct list_head active_list;
+	struct list_head inactive_list;
+	atomic_t refill_counter;
+	unsigned long nr_active;
+	unsigned long nr_inactive;
+
 	/*
 	 * free areas of different sizes
 	 */
@@ -157,10 +164,10 @@ memclass(struct zone *pgzone, struct zone *classzone)
  * prototypes for the discontig memory code.
  */
 struct page;
-extern void show_free_areas_core(pg_data_t *pgdat);
-extern void free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
+void free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	unsigned long *zones_size, unsigned long paddr,
 	unsigned long *zholes_size, struct page *pmap);
+void get_zone_counts(unsigned long *active, unsigned long *inactive);
 
 extern pg_data_t contig_page_data;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0ceef121b662..cc74c699f3ad 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -76,8 +76,6 @@ extern struct page_state {
 	unsigned long nr_dirty;
 	unsigned long nr_writeback;
 	unsigned long nr_pagecache;
-	unsigned long nr_active;	/* on active_list LRU */
-	unsigned long nr_inactive;	/* on inactive_list LRU */
 	unsigned long nr_page_table_pages;
 	unsigned long nr_reverse_maps;
 } ____cacheline_aligned_in_smp page_states[NR_CPUS];
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ecc4eb3d3211..d9a4a9dc00ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -213,51 +213,6 @@ extern spinlock_t _pagemap_lru_lock;
 
 extern void FASTCALL(mark_page_accessed(struct page *));
 
-/*
- * List add/del helper macros. These must be called
- * with the pagemap_lru_lock held!
- */
-#define DEBUG_LRU_PAGE(page)			\
-do {						\
-	if (!PageLRU(page))			\
-		BUG();				\
-	if (PageActive(page))			\
-		BUG();				\
-} while (0)
-
-#define __add_page_to_active_list(page)		\
-do {						\
-	list_add(&(page)->lru, &active_list);	\
-	inc_page_state(nr_active);		\
-} while (0)
-
-#define add_page_to_active_list(page)		\
-do {						\
-	DEBUG_LRU_PAGE(page);			\
-	SetPageActive(page);			\
-	__add_page_to_active_list(page);	\
-} while (0)
-
-#define add_page_to_inactive_list(page)		\
-do {						\
-	DEBUG_LRU_PAGE(page);			\
-	list_add(&(page)->lru, &inactive_list);	\
-	inc_page_state(nr_inactive);		\
-} while (0)
-
-#define del_page_from_active_list(page)		\
-do {						\
-	list_del(&(page)->lru);			\
-	ClearPageActive(page);			\
-	dec_page_state(nr_active);		\
-} while (0)
-
-#define del_page_from_inactive_list(page)	\
-do {						\
-	list_del(&(page)->lru);			\
-	dec_page_state(nr_inactive);		\
-} while (0)
-
 extern spinlock_t swaplock;
 
 #define swap_list_lock()	spin_lock(&swaplock)
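For context, here is a hedged sketch of how a caller might promote a page from a
zone's inactive list to its active list using the new mm_inline.h helpers. The
function name is made up, the caller is assumed to already know the owning zone,
and locking still goes through the global pagemap_lru_lock that this patch leaves
in place (the spelling below follows the _pagemap_lru_lock declaration visible in
swap.h; the kernel's own lock wrappers may differ).

```c
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>

/*
 * Sketch only: move a page from the zone's inactive list to its active
 * list.  The helpers update only the list and the per-zone counters, so
 * the caller must hold the LRU lock and manage PG_active itself.
 */
static void activate_page_sketch(struct zone *zone, struct page *page)
{
	spin_lock(&_pagemap_lru_lock);	/* lock name per swap.h above */
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	spin_unlock(&_pagemap_lru_lock);
}
```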
