author    Andrew Morton <akpm@digeo.com>    2002-09-07 22:21:55 -0700
committer Linus Torvalds <torvalds@home.transmeta.com>    2002-09-07 22:21:55 -0700
commit    5f607d6ecf03d5fc3512d2c0a2fc3d447ccd4174 (patch)
tree      5104f861c8d59d9fe31181ca4642ca4b10d8b6f3
parent    9fdbd959bfec877913d4eab793e813cc7d447b3f (diff)
[PATCH] refill the inactive list more quickly
Fix a problem noticed by Ed Tomlinson: under shifting workloads the shrink_zone() logic will refill the inactive list too slowly.

Bale out of the zone scan when we've reclaimed enough pages.  Fixes a rarely-occurring problem wherein refill_inactive_zone() ends up shuffling 100,000 pages and generally goes silly.

This needs to be revisited - we should go on and rebalance the lower zones even if we reclaimed enough pages from highmem.
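The pacing mechanism the first hunk changes can be seen in a minimal user-space sketch (not the kernel code itself): refill "credit" accumulates in refill_counter and is paid off in SWAP_CLUSTER_MAX-sized batches.  The ratio formula, refill_counter and SWAP_CLUSTER_MAX come from the patch below; the harness, the helper names (refill_inactive, shrink_zone_sketch) and the workload numbers are made up for illustration.

/*
 * Minimal user-space sketch of the refill_counter pacing in
 * shrink_zone().  With the old "if", at most one SWAP_CLUSTER_MAX
 * batch of refill work is done per call; with the patched "while",
 * the whole accumulated backlog drains each call.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32

static int refill_counter;	/* accumulated refill credit */
static int pages_refilled;	/* stand-in for the real refill work */

/* stand-in for refill_inactive_zone(zone, SWAP_CLUSTER_MAX) */
static void refill_inactive(int nr)
{
	pages_refilled += nr;
}

/* the pacing logic from shrink_zone(), with the patched "while" */
static void shrink_zone_sketch(int nr_pages, int nr_active, int nr_inactive)
{
	int ratio = nr_pages * nr_active / ((nr_inactive | 1) * 2);

	refill_counter += ratio + 1;
	while (refill_counter > SWAP_CLUSTER_MAX) {	/* was: if (...) */
		refill_counter -= SWAP_CLUSTER_MAX;
		refill_inactive(SWAP_CLUSTER_MAX);
	}
}

int main(void)
{
	/* a shifted workload: large active list, tiny inactive list */
	shrink_zone_sketch(32, 100000, 64);
	printf("refilled %d pages, %d credit left over\n",
	       pages_refilled, refill_counter);
	return 0;
}

With the hypothetical numbers above the backlog is drained in many small batches in a single call, which is the "faster refill" behaviour the title describes; the second hunk then limits how far the zone scan itself goes.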
-rw-r--r--	mm/vmscan.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a00fe27159d9..5ed1ec3eda52 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -483,7 +483,7 @@ shrink_zone(struct zone *zone, int priority,
 	ratio = (unsigned long)nr_pages * zone->nr_active /
 				((zone->nr_inactive | 1) * 2);
 	atomic_add(ratio+1, &zone->refill_counter);
-	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
+	while (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
 		atomic_sub(SWAP_CLUSTER_MAX, &zone->refill_counter);
 		refill_inactive_zone(zone, SWAP_CLUSTER_MAX);
 	}
@@ -517,7 +517,7 @@ shrink_caches(struct zone *classzone, int priority,
 	first_classzone = classzone->zone_pgdat->node_zones;
 	zone = classzone;
-	while (zone >= first_classzone) {
+	while (zone >= first_classzone && nr_pages > 0) {
 		if (zone->free_pages <= zone->pages_high) {
 			nr_pages = shrink_zone(zone, priority,
 					gfp_mask, nr_pages);
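
The second hunk's bail-out can likewise be illustrated with a simplified user-space sketch.  Only the patched loop condition and the zone-walk shape come from the diff above; the struct, the zone sizes and the helper shrink_zone_sketch() are hypothetical stand-ins.

/*
 * Sketch of the shrink_caches() zone walk after the patch: the scan
 * walks from the class zone down towards the lowest zone, but now
 * stops as soon as nr_pages worth of reclaim has been done instead of
 * always visiting every zone.
 */
#include <stdio.h>

struct zone_sketch {
	const char *name;
	int free_pages;
	int pages_high;
	int reclaimable;	/* pages this zone could give back */
};

/* stand-in for shrink_zone(): returns how many pages are still needed */
static int shrink_zone_sketch(struct zone_sketch *zone, int nr_pages)
{
	int got = zone->reclaimable < nr_pages ? zone->reclaimable : nr_pages;

	printf("  reclaimed %d from %s\n", got, zone->name);
	return nr_pages - got;
}

int main(void)
{
	/* node_zones[] laid out low to high, as in the kernel's pgdat */
	struct zone_sketch node_zones[] = {
		{ "DMA",     10, 100, 500 },
		{ "Normal",  10, 100, 500 },
		{ "HighMem", 10, 100, 500 },
	};
	struct zone_sketch *first_classzone = node_zones;
	struct zone_sketch *zone = &node_zones[2];	/* classzone = HighMem */
	int nr_pages = 32;

	/* patched loop condition: stop once enough pages were reclaimed */
	while (zone >= first_classzone && nr_pages > 0) {
		if (zone->free_pages <= zone->pages_high)
			nr_pages = shrink_zone_sketch(zone, nr_pages);
		zone--;
	}
	printf("scan stopped with nr_pages = %d\n", nr_pages);
	return 0;
}

In this sketch the highmem zone satisfies the whole request, so the walk never reaches the Normal and DMA zones - exactly the trade-off the commit message flags as something to revisit.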