| field | value | date |
|---|---|---|
| author | Andrew Morton <akpm@zip.com.au> | 2002-07-28 03:14:12 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-07-28 03:14:12 -0700 |
| commit | c1ab3459d0ce0820b4bcddfa55fde62bb88d13c1 (patch) | |
| tree | 262951d2239ed9182905bb3a6dff8d939c3189bc | |
| parent | cbb6e8ecaa8ec7e08ee2b0095236fabb87638742 (diff) | |
[PATCH] show_free_areas() cleanup
Cleanup to show_free_areas() from Bill Irwin:
show_free_areas() and show_free_areas_core() are a mess.
(1) it uses a bizarre and ugly form of list iteration to walk the buddy
    lists; use the standard list functions instead (a sketch of the change
    follows the message body)
(2) it prints the same information repeatedly, once per node; rationalize
    the braindamaged iteration logic
(3) show_free_areas_node() is useless and is not called from anywhere;
    remove it entirely
(4) show_free_areas() itself just calls show_free_areas_core(); remove
    show_free_areas_core() and do the work there directly
(5) SWAP_CACHE_INFO is always #defined; remove it, along with the
    #ifdef/#else blocks that test it
(6) INC_CACHE_INFO() doesn't use the do { } while (0) construct; convert
    it (see the sketch just below this list)
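For point (6), a quick illustration of why the do { } while (0) idiom matters. This is a minimal standalone sketch, not kernel code: the INC_BAD/INC_GOOD macros and the hits counter are invented for the example. A block-style macro without the wrapper breaks when it is used as the body of an if/else.

```c
#include <stdio.h>

/* Without do { } while (0): "if (x) INC_BAD(c); else ..." expands to
 * "{ (c)++; };" and the stray semicolon ends the if statement,
 * orphaning the else branch (a compile error). */
#define INC_BAD(c)  { (c)++; }

/* With do { } while (0): the expansion is a single statement that
 * absorbs the trailing semicolon and composes safely with if/else. */
#define INC_GOOD(c) do { (c)++; } while (0)

int main(void)
{
	int hits = 0;

	if (hits == 0)
		INC_GOOD(hits);		/* behaves as one statement */
	else
		puts("unreachable");	/* with INC_BAD this else would not compile */

	printf("hits = %d\n", hits);
	return 0;
}
```

The mm/swap_state.c hunk below converts INC_CACHE_INFO() to exactly this form.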
This patch also includes Matthew Dobson's patch removing
mm/numa.c:node_lock. The consensus is that the lock no longer protects
anything meaningful now that show_free_areas_node() is gone.
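For point (1), a sketch of what "use standard list functions" means in practice: the patch replaces an open-coded for (;;) walk of each buddy free list with the kernel's list_for_each() iterator. Below is a minimal userspace approximation; the struct list_head and list_for_each() here are simplified stand-ins for the kernel's <linux/list.h>, and the two counting functions are invented for illustration.

```c
#include <stdio.h>

/* Simplified stand-ins for struct list_head and list_for_each()
 * from the kernel's <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Old style: open-coded walk with an infinite loop and an explicit
 * wrap-around check, like the code the patch removes. */
static unsigned long count_open_coded(struct list_head *head)
{
	struct list_head *curr = head;
	unsigned long nr = 0;

	for (;;) {
		curr = curr->next;
		if (curr == head)
			break;
		nr++;
	}
	return nr;
}

/* New style: the standard iterator macro expresses the same walk
 * in one line, like the code the patch adds. */
static unsigned long count_with_iterator(struct list_head *head)
{
	struct list_head *elem;
	unsigned long nr = 0;

	list_for_each(elem, head)
		nr++;
	return nr;
}

int main(void)
{
	struct list_head head, a, b;

	/* Build a two-element circular list by hand. */
	head.next = &a; a.next = &b; b.next = &head;
	head.prev = &b; a.prev = &head; b.prev = &a;

	printf("%lu %lu\n", count_open_coded(&head),
	       count_with_iterator(&head));	/* prints "2 2" */
	return 0;
}
```

The mm/page_alloc.c hunk below makes the same move: `list_for_each(elem, &zone->free_area[order].free_list) ++nr;` replaces the manual curr/head loop.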
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/mm.h | 1 |
| -rw-r--r-- | include/linux/swap.h | 3 |
| -rw-r--r-- | mm/numa.c | 16 |
| -rw-r--r-- | mm/page_alloc.c | 73 |
| -rw-r--r-- | mm/swap_state.c | 6 |

5 files changed, 33 insertions, 66 deletions
```diff
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 685868b9e0e4..891b4b5e8e29 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -327,7 +327,6 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num)
 extern struct page *mem_map;
 
 extern void show_free_areas(void);
-extern void show_free_areas_node(pg_data_t *pgdat);
 
 extern int fail_writepage(struct page *);
 struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8ba0854d69af..c355567500e0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -176,10 +176,7 @@ int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page);
 /* linux/mm/page_alloc.c */
 
 /* linux/mm/swap_state.c */
-#define SWAP_CACHE_INFO
-#ifdef SWAP_CACHE_INFO
 extern void show_swap_cache_info(void);
-#endif
 extern int add_to_swap_cache(struct page *, swp_entry_t);
 extern int add_to_swap(struct page *);
 extern void __delete_from_swap_cache(struct page *page);
diff --git a/mm/numa.c b/mm/numa.c
index 773588e89c38..fc6e4445fb86 100644
--- a/mm/numa.c
+++ b/mm/numa.c
@@ -44,17 +44,6 @@ struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int orde
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
-
-void show_free_areas_node(pg_data_t *pgdat)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&node_lock, flags);
-	show_free_areas_core(pgdat);
-	spin_unlock_irqrestore(&node_lock, flags);
-}
-
 /*
  * Nodes can be initialized parallely, in no particular order.
  */
@@ -106,11 +95,10 @@ struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order)
 #ifdef CONFIG_NUMA
 	temp = NODE_DATA(numa_node_id());
 #else
-	spin_lock_irqsave(&node_lock, flags);
-	if (!next) next = pgdat_list;
+	if (!next)
+		next = pgdat_list;
 	temp = next;
 	next = next->node_next;
-	spin_unlock_irqrestore(&node_lock, flags);
 #endif
 	start = temp;
 	while (temp) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7e352a9eb127..5c857db1a60e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -601,12 +601,11 @@ void si_meminfo(struct sysinfo *val)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
  */
-void show_free_areas_core(pg_data_t *pgdat)
+void show_free_areas(void)
 {
-	unsigned int order;
-	unsigned type;
-	pg_data_t *tmpdat = pgdat;
+	pg_data_t *pgdat;
 	struct page_state ps;
+	int type;
 
 	get_page_state(&ps);
 
@@ -614,20 +613,20 @@
 		K(nr_free_pages()),
 		K(nr_free_highpages()));
 
-	while (tmpdat) {
-		zone_t *zone;
-		for (zone = tmpdat->node_zones;
-		     zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
-			printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
-			       "high:%6lukB\n",
-				zone->name,
-				K(zone->free_pages),
-				K(zone->pages_min),
-				K(zone->pages_low),
-				K(zone->pages_high));
-
-		tmpdat = tmpdat->node_next;
-	}
+	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
+		for (type = 0; type < MAX_NR_ZONES; ++type) {
+			zone_t *zone = &pgdat->node_zones[type];
+			printk("Zone:%s "
+				"freepages:%6lukB "
+				"min:%6lukB "
+				"low:%6lukB "
+				"high:%6lukB\n",
+				zone->name,
+				K(zone->free_pages),
+				K(zone->pages_min),
+				K(zone->pages_low),
+				K(zone->pages_high));
+		}
 
 	printk("( Active:%lu inactive:%lu dirty:%lu writeback:%lu free:%u )\n",
 		ps.nr_active,
 		ps.nr_inactive,
@@ -636,40 +635,28 @@
 		ps.nr_writeback,
 		nr_free_pages());
 
-	for (type = 0; type < MAX_NR_ZONES; type++) {
-		struct list_head *head, *curr;
-		zone_t *zone = pgdat->node_zones + type;
-		unsigned long nr, total, flags;
+	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
+		for (type = 0; type < MAX_NR_ZONES; type++) {
+			list_t *elem;
+			zone_t *zone = &pgdat->node_zones[type];
+			unsigned long nr, flags, order, total = 0;
+
+			if (!zone->size)
+				continue;
 
-		total = 0;
-		if (zone->size) {
 			spin_lock_irqsave(&zone->lock, flags);
-			for (order = 0; order < MAX_ORDER; order++) {
-				head = &(zone->free_area + order)->free_list;
-				curr = head;
+			for (order = 0; order < MAX_ORDER; order++) {
 				nr = 0;
-				for (;;) {
-					curr = curr->next;
-					if (curr == head)
-						break;
-					nr++;
-				}
-				total += nr * (1 << order);
+				list_for_each(elem, &zone->free_area[order].free_list)
+					++nr;
+				total += nr << order;
 				printk("%lu*%lukB ", nr, K(1UL) << order);
 			}
 			spin_unlock_irqrestore(&zone->lock, flags);
+			printk("= %lukB)\n", K(total));
 		}
-		printk("= %lukB)\n", K(total));
-	}
 
-#ifdef SWAP_CACHE_INFO
 	show_swap_cache_info();
-#endif
-}
-
-void show_free_areas(void)
-{
-	show_free_areas_core(pgdat_list);
 }
 
 /*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2124864f4cea..5fa036b0503c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -42,8 +42,7 @@ struct address_space swapper_space = {
 	private_list:	LIST_HEAD_INIT(swapper_space.private_list),
 };
 
-#ifdef SWAP_CACHE_INFO
-#define INC_CACHE_INFO(x)	(swap_cache_info.x++)
+#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 
 static struct {
 	unsigned long add_total;
@@ -61,9 +60,6 @@ void show_swap_cache_info(void)
 		swap_cache_info.find_success, swap_cache_info.find_total,
 		swap_cache_info.noent_race, swap_cache_info.exist_race);
 }
-#else
-#define INC_CACHE_INFO(x)	do { } while (0)
-#endif
 
 int add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
```
