| author | Linus Torvalds <torvalds@athlon.transmeta.com> | 2002-02-04 20:18:59 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@athlon.transmeta.com> | 2002-02-04 20:18:59 -0800 |
| commit | dfc52b82fee5bc6713ecce3f81767a8565c4f874 (patch) | |
| tree | d48688e5ffd92742e5556791c1c8f086317203d5 /mm/page_alloc.c | |
| parent | a880f45a48be2956d2c78a839c472287d54435c1 (diff) | |
v2.4.9.11 -> v2.4.9.12
- Alan Cox: much more merging
- Pete Zaitcev: ymfpci race fixes
- Andrea Arcangeli: VM race fix and OOM tweak.
- Arjan van de Ven: merge RH kernel fixes
- Andi Kleen: use more readable 'likely()/unlikely()' instead of __builtin_expect() (see the sketch after this list)
- Keith Owens: fix 64-bit ELF types
- Gerd Knorr: mark more broken PCI bridges, update btaudio driver
- Paul Mackerras: powermac driver update
- me: clean up PTRACE_DETACH to use common infrastructure
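The likely()/unlikely() change is purely a readability wrapper around the GCC branch-prediction builtin. As a rough sketch (the exact contents of the 2.4 header may differ slightly), the new `<linux/compiler.h>` include maps the annotations onto `__builtin_expect()` and stubs the builtin out for compilers that predate it:

```c
/* Sketch of the <linux/compiler.h> wrappers that replace raw
 * __builtin_expect() calls; the real 2.4 header may differ in detail. */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

/* Older GCC (before 2.96) has no __builtin_expect(); define it away
 * so annotated code still compiles and simply loses the hint. */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

/* likely(x): the condition is expected to be true, so the compiler can
 * keep the taken branch on the straight-line (hot) path. */
#define likely(x)	__builtin_expect((x), 1)

/* unlikely(x): the condition is expected to be false. */
#define unlikely(x)	__builtin_expect((x), 0)

#endif /* __LINUX_COMPILER_H */
```

With these macros, `if (__builtin_expect(freed, 1))` and `if (likely(freed))` compile to the same code; the only difference is that the intent of the branch hint is obvious at the call site, which is the point of the cleanup below.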
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 18 |
1 file changed, 11 insertions, 7 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f1f3125630a..779c4af4f8e8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -17,6 +17,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/slab.h>
+#include <linux/compiler.h>
 
 int nr_swap_pages;
 int nr_active_pages;
@@ -253,7 +254,7 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
 	local_pages = &current->local_pages;
 
-	if (__freed) {
+	if (likely(__freed)) {
 		/* pick from the last inserted so we're lifo */
 		entry = local_pages->next;
 		do {
@@ -372,19 +373,21 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
 		return page;
 
 	zone = zonelist->zones;
-	if (__builtin_expect(freed, 1)) {
+	if (likely(freed)) {
 		for (;;) {
 			zone_t *z = *(zone++);
 			if (!z)
 				break;
 
-			if (zone_free_pages(z, order) > (gfp_mask & __GFP_HIGH ? z->pages_min / 2 : z->pages_min)) {
-				page = rmqueue(z, order);
-				if (page)
-					return page;
-			}
+			page = rmqueue(z, order);
+			if (page)
+				return page;
 		}
 	} else {
+		/*
+		 * Check that no other task is been killed meanwhile,
+		 * in such a case we can succeed the allocation.
+		 */
 		for (;;) {
 			zone_t *z = *(zone++);
 			if (!z)
@@ -683,6 +686,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		zone->lock = SPIN_LOCK_UNLOCKED;
 		zone->zone_pgdat = pgdat;
 		zone->free_pages = 0;
+		zone->need_balance = 0;
 		if (!size)
 			continue;
```
