author    Andrew Morton <akpm@digeo.com>  2002-09-19 08:36:22 -0700
committer Linus Torvalds <torvalds@home.transmeta.com>  2002-09-19 08:36:22 -0700
commit    ccc98a67de98c912840e0a35a24115ad64ae426d (patch)
tree      58c156d45127ae55fcfdc71c43f20538af37ac55 /include/linux
parent    e07316f9c849b0fe92eb273e7ada4652053d32d1 (diff)
[PATCH] _alloc_pages cleanup
Patch from Martin Bligh. It should only affect machines using discontigmem.

"This patch was originally from Andrea's tree (from SGI??), and has since been tweaked by both Christoph (who cleaned up all the code) and myself (who just hit it until it worked). It removes _alloc_pages, and adds all nodes to the zonelists directly, which also changes the fallback zone order to something more sensible. Instead of:

foreach (node) { foreach (zone) }

we now do something more like:

foreach (zone_type) { foreach (node) }

Christoph has a more recent version that's fancier and does a couple more cleanups, but it seems to have a bug in it that I can't track down easily, so I propose we do the simple thing for now and take the rest of the cleanups when it works; it seems to build nicely on top of this separately to me. Tested on 16-way NUMA-Q with discontigmem + NUMA support."
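For readers skimming the change, a minimal standalone sketch of the two fallback orders the message contrasts. This is illustration only, not the patch's code; the node/zone counts and the flat node * NZONES + zone encoding are invented here.

/*
 * Illustration only -- not from the patch. Builds a flat fallback list
 * both ways; entries are encoded as node * NZONES + zone for printing.
 */
#include <stdio.h>

#define NNODES 2
#define NZONES 3        /* e.g. DMA, NORMAL, HIGHMEM */

/* Old order: exhaust every zone of a node before moving to the next node. */
static void build_old(int list[])
{
        int i = 0, node, zone;

        for (node = 0; node < NNODES; node++)           /* foreach (node) */
                for (zone = 0; zone < NZONES; zone++)   /* foreach (zone) */
                        list[i++] = node * NZONES + zone;
}

/* New order: try one zone type on every node before falling back further. */
static void build_new(int list[])
{
        int i = 0, node, zone;

        for (zone = 0; zone < NZONES; zone++)           /* foreach (zone_type) */
                for (node = 0; node < NNODES; node++)   /* foreach (node) */
                        list[i++] = node * NZONES + zone;
}

int main(void)
{
        int old[NNODES * NZONES], new[NNODES * NZONES];
        int i;

        build_old(old);
        build_new(new);
        for (i = 0; i < NNODES * NZONES; i++)
                printf("%d: old node%d/zone%d  new node%d/zone%d\n", i,
                       old[i] / NZONES, old[i] % NZONES,
                       new[i] / NZONES, new[i] % NZONES);
        return 0;
}

The practical difference: with the new order, an allocation falls back to the same zone type on the other nodes before dipping into a more precious zone type on any node, rather than draining one node completely first.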
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/gfp.h     19
-rw-r--r--  include/linux/mmzone.h  14
2 files changed, 22 insertions, 11 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 10021357c093..437572e2240b 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,18 +39,25 @@
  * can allocate highmem pages, the *get*page*() variants return
  * virtual kernel addresses to the allocated page(s).
  */
-extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
 extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, struct zonelist *zonelist));
 extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);
 
+/*
+ * We get the zone list from the current node and the gfp_mask.
+ * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ *
+ * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
+ * optimized to &contig_page_data at compile-time.
+ */
 static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
 {
-        /*
-         * Gets optimized away by the compiler.
-         */
-        if (order >= MAX_ORDER)
+        pg_data_t *pgdat = NODE_DATA(numa_node_id());
+        unsigned int idx = (gfp_mask & GFP_ZONEMASK);
+
+        if (unlikely(order >= MAX_ORDER))
                 return NULL;
-        return _alloc_pages(gfp_mask, order);
+
+        return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + idx);
 }
 
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
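As a rough illustration of the indexing the new alloc_pages() performs: each node keeps one precomputed zonelist per possible combination of the low gfp_mask zone-modifier bits, and the caller just adds the masked value to the array base. The sketch below is a standalone mock-up with invented types and labels; only GFP_ZONEMASK and the node_zonelists arithmetic mirror the hunk above.

#include <stdio.h>

#define GFP_ZONEMASK 0x0f

struct zonelist_mock { const char *label; };

struct pgdat_mock {
        /* one precomputed zonelist per possible zone-modifier combination */
        struct zonelist_mock node_zonelists[GFP_ZONEMASK + 1];
};

/* Mirrors pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK) above. */
static struct zonelist_mock *pick_zonelist(struct pgdat_mock *pgdat,
                                           unsigned int gfp_mask)
{
        return pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
}

int main(void)
{
        struct pgdat_mock pgdat = { { [0] = { "default" }, [2] = { "highmem" } } };

        /* e.g. a gfp_mask of 0x12 selects zonelist index 0x2 */
        printf("%s\n", pick_zonelist(&pgdat, 0x12)->label);
        return 0;
}

Because the lookup is pure pointer arithmetic, every caller now resolves its zonelist inline from the current node, and the out-of-line _alloc_pages() hop this patch removes is no longer needed even on discontigmem.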
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5cdd464992da..580c39c4dcc1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -10,11 +10,14 @@
 #include <linux/wait.h>
 #include <linux/cache.h>
 #include <asm/atomic.h>
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/numnodes.h>
+#endif
+#ifndef MAX_NUMNODES
+#define MAX_NUMNODES 1
+#endif
 
-/*
- * Free memory management - zoned buddy allocator.
- */
-
+/* Free memory management - zoned buddy allocator. */
 #ifndef CONFIG_FORCE_MAX_ZONEORDER
 #define MAX_ORDER 11
 #else
@@ -137,7 +140,7 @@ struct zone {
  * footprint of this construct is very small.
  */
 struct zonelist {
-        struct zone *zones[MAX_NR_ZONES+1]; // NULL delimited
+        struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
 };
 
 #define GFP_ZONEMASK 0x0f
@@ -190,6 +193,7 @@ extern void calculate_totalpages (pg_data_t *pgdat, unsigned long *zones_size,
 extern void free_area_init_core(pg_data_t *pgdat, unsigned long *zones_size,
                 unsigned long *zholes_size);
 void get_zone_counts(unsigned long *active, unsigned long *inactive);
+extern void build_all_zonelists(void);
 
 extern pg_data_t contig_page_data;
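Since the zones[] array stays NULL-delimited while growing to MAX_NUMNODES * MAX_NR_ZONES + 1 slots, traversal by the allocator is unchanged. A hedged sketch with mock types (the zone names and walk() helper are invented for illustration):

#include <stdio.h>

#define MAX_NUMNODES 2
#define MAX_NR_ZONES 3

struct zone_mock { const char *name; };

struct zonelist_mock {
        /* the + 1 leaves room for the terminating NULL */
        struct zone_mock *zones[MAX_NUMNODES * MAX_NR_ZONES + 1];
};

/* The allocator tries each zone in order until one satisfies the request. */
static void walk(struct zonelist_mock *zl)
{
        struct zone_mock **z;

        for (z = zl->zones; *z != NULL; z++)
                printf("would try %s\n", (*z)->name);
}

int main(void)
{
        struct zone_mock n0 = { "node0/NORMAL" }, n1 = { "node1/NORMAL" };
        struct zonelist_mock zl = { { &n0, &n1, NULL } };

        walk(&zl);
        return 0;
}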