| author | Andrew Morton <akpm@digeo.com> | 2002-10-04 20:34:45 -0700 |
|---|---|---|
| committer | Russell King <rmk@flint.arm.linux.org.uk> | 2002-10-04 20:34:45 -0700 |
| commit | d39755802e430876d612bc792e4c29652ed8b99b | |
| tree | ba4b3c34d50324f18761c54f68debb1b73fbab14 | |
| parent | 9d66d9e91730e97f653c3143b637f1d63605f074 | |
[PATCH] distinguish between address span of a zone and the number of present pages
From David Mosberger
The patch below fixes a bug in nr_free_zone_pages() which shows up
when a zone has a hole.  The problem is that "struct zone" did not
keep track of the amount of real memory in a zone, so
nr_free_zone_pages() simply assumed that a zone consists entirely of
real memory.  On machines with large holes this has catastrophic
effects on VM performance, because the VM ends up thinking there is
plenty of memory left over in a zone when in fact it may be
completely full.
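
To make the failure mode concrete, here is a minimal userspace sketch
(not kernel code; the struct name and the numbers are hypothetical) of
the estimate nr_free_zone_pages() produces with the old and the new
accounting:

```c
#include <stdio.h>

/* Hypothetical, simplified stand-in for the fields involved. */
struct zone_model {
	unsigned long spanned_pages;	/* address span, including holes */
	unsigned long present_pages;	/* real memory only */
	unsigned long pages_high;	/* high watermark */
};

int main(void)
{
	struct zone_model z = {
		.spanned_pages = 1024,	/* the zone spans 1024 page frames... */
		.present_pages = 768,	/* ...but 256 of them are a hole */
		.pages_high = 64,
	};

	/* Old accounting: the whole span is treated as real memory. */
	unsigned long old_estimate = z.spanned_pages > z.pages_high ?
			z.spanned_pages - z.pages_high : 0;

	/* Fixed accounting: only pages that actually exist count. */
	unsigned long new_estimate = z.present_pages > z.pages_high ?
			z.present_pages - z.pages_high : 0;

	/* Prints "old: 960, new: 704". */
	printf("old: %lu, new: %lu\n", old_estimate, new_estimate);
	return 0;
}
```

With the old field the VM believes 960 pages of headroom exist when
only 704 do: the estimate is inflated by exactly the 256-page hole,
which is how a nearly full zone can look comfortably empty.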
The patch below fixes the problem by replacing the "size" member in
"struct zone" with two fields, "spanned_pages" and "present_pages",
and updating page_alloc.c accordingly.
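
For reference, a compilable sketch of the relationship the patch
establishes between the two fields; "zone_fields" and
"init_zone_sizes" are illustrative stand-ins mirroring the
free_area_init_core() hunk in the diff below:

```c
/* Minimal stand-in for the relevant part of "struct zone". */
struct zone_fields {
	unsigned long spanned_pages;	/* total size, including holes */
	unsigned long present_pages;	/* amount of memory (excluding holes) */
};

/* Mirrors the free_area_init_core() logic from the diff: the span
 * comes from the per-zone size array, and any holes are subtracted
 * to obtain the amount of real memory.
 */
static void init_zone_sizes(struct zone_fields *zone,
			    unsigned long size, unsigned long holes)
{
	unsigned long realsize = size - holes;

	zone->spanned_pages = size;		/* address span */
	zone->present_pages = realsize;		/* real memory only */
}
```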
| -rw-r--r-- | include/linux/mmzone.h | 3 |
|---|---|---|
| -rw-r--r-- | mm/page_alloc.c | 17 |

2 files changed, 11 insertions, 9 deletions
```diff
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d7d12a69f505..dab0f76cfb20 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -120,7 +120,8 @@ struct zone {
 	 * rarely used fields:
 	 */
 	char			*name;
-	unsigned long		size;
+	unsigned long		spanned_pages;	/* total size, including holes */
+	unsigned long		present_pages;	/* amount of memory (excluding holes) */
 } ____cacheline_maxaligned_in_smp;
 
 #define ZONE_DMA		0
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b9cced8d19a0..9694db4322b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,7 +48,7 @@ static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
  */
 static inline int bad_range(struct zone *zone, struct page *page)
 {
-	if (page_to_pfn(page) >= zone->zone_start_pfn + zone->size)
+	if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
 		return 1;
 	if (page_to_pfn(page) < zone->zone_start_pfn)
 		return 1;
@@ -509,7 +509,7 @@ static unsigned int nr_free_zone_pages(int offset)
 		struct zone *zone;
 
 		for (zone = *zonep++; zone; zone = *zonep++) {
-			unsigned long size = zone->size;
+			unsigned long size = zone->present_pages;
 			unsigned long high = zone->pages_high;
 			if (size > high)
 				sum += size - high;
@@ -681,7 +681,7 @@ void show_free_areas(void)
 		struct zone *zone = &pgdat->node_zones[type];
 		unsigned long nr, flags, order, total = 0;
 
-		if (!zone->size)
+		if (!zone->present_pages)
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
@@ -710,7 +710,7 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist
 		BUG();
 	case ZONE_HIGHMEM:
 		zone = pgdat->node_zones + ZONE_HIGHMEM;
-		if (zone->size) {
+		if (zone->present_pages) {
 #ifndef CONFIG_HIGHMEM
 			BUG();
 #endif
@@ -718,11 +718,11 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist
 		}
 	case ZONE_NORMAL:
 		zone = pgdat->node_zones + ZONE_NORMAL;
-		if (zone->size)
+		if (zone->present_pages)
 			zonelist->zones[j++] = zone;
 	case ZONE_DMA:
 		zone = pgdat->node_zones + ZONE_DMA;
-		if (zone->size)
+		if (zone->present_pages)
 			zonelist->zones[j++] = zone;
 	}
 
@@ -866,7 +866,8 @@ void __init free_area_init_core(pg_data_t *pgdat,
 			realsize -= zholes_size[j];
 		printk(" %s zone: %lu pages\n", zone_names[j], realsize);
 
-		zone->size = size;
+		zone->spanned_pages = size;
+		zone->present_pages = realsize;
 		zone->name = zone_names[j];
 		spin_lock_init(&zone->lock);
 		spin_lock_init(&zone->lru_lock);
@@ -1034,7 +1035,7 @@ static int frag_show(struct seq_file *m, void *arg)
 	int order;
 
 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
-		if (!zone->size)
+		if (!zone->present_pages)
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
```
