Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/io.h          2
-rw-r--r--  include/asm-alpha/machvec.h     6
-rw-r--r--  include/asm-alpha/mmzone.h    135
-rw-r--r--  include/asm-alpha/numnodes.h    8
-rw-r--r--  include/asm-alpha/pgalloc.h     3
-rw-r--r--  include/asm-alpha/pgtable.h    32
-rw-r--r--  include/asm-alpha/topology.h   47
7 files changed, 131 insertions(+), 102 deletions(-)
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 0e181d0d73ff..4eb067ecc60b 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -83,7 +83,7 @@ static inline void * phys_to_virt(unsigned long address)
}
#endif
-#define page_to_phys(page) PAGE_TO_PA(page)
+#define page_to_phys(page) page_to_pa(page)
/* This depends on working iommu. */
#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 5becb8ef34eb..d77895f2fc9f 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -92,6 +92,12 @@ struct alpha_machine_vector
const char *vector_name;
+ /* NUMA information */
+ int (*pa_to_nid)(unsigned long);
+ int (*cpuid_to_nid)(int);
+ unsigned long (*node_mem_start)(int);
+ unsigned long (*node_mem_size)(int);
+
/* System specific parameters. */
union {
struct {
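
The hunk above adds four optional NUMA callbacks to the machine vector. As a minimal userspace sketch (not kernel code) of how a platform might wire them up, the following mirrors the Wildfire-style layout being removed elsewhere in this patch (node = pa >> 36, four CPUs per node); the 64 GB-per-node figures and all names prefixed with demo_ are assumptions for illustration only.

/* Hypothetical userspace model of the new machine-vector NUMA hooks.
 * The node layout (64 GB per node, 4 CPUs per node) mirrors the
 * Wildfire constants removed by this patch and is illustrative only. */
#include <stdio.h>

struct alpha_machine_vector_numa {
	int (*pa_to_nid)(unsigned long);
	int (*cpuid_to_nid)(int);
	unsigned long (*node_mem_start)(int);
	unsigned long (*node_mem_size)(int);
};

static int demo_pa_to_nid(unsigned long pa)        { return (int)(pa >> 36); }
static int demo_cpuid_to_nid(int cpuid)            { return cpuid >> 2; }
static unsigned long demo_node_mem_start(int nid)  { return (unsigned long)nid << 36; }
static unsigned long demo_node_mem_size(int nid)   { (void)nid; return 1UL << 36; }

static const struct alpha_machine_vector_numa demo_mv = {
	.pa_to_nid      = demo_pa_to_nid,
	.cpuid_to_nid   = demo_cpuid_to_nid,
	.node_mem_start = demo_node_mem_start,
	.node_mem_size  = demo_node_mem_size,
};

int main(void)
{
	unsigned long pa = 0x1840000000UL;   /* arbitrary physical address */
	printf("pa %#lx -> node %d\n", pa, demo_mv.pa_to_nid(pa));
	printf("cpu 5 -> node %d\n", demo_mv.cpuid_to_nid(5));
	printf("node 1 starts at %#lx, size %#lx\n",
	       demo_mv.node_mem_start(1), demo_mv.node_mem_size(1));
	return 0;
}

The mmzone.h macros added below fall back to node 0 whenever a platform leaves these pointers NULL, so non-NUMA machine vectors need no changes.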
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
index 999e85fff3ad..d45af2ff9ae4 100644
--- a/include/asm-alpha/mmzone.h
+++ b/include/asm-alpha/mmzone.h
@@ -6,25 +6,7 @@
#define _ASM_MMZONE_H_
#include <linux/config.h>
-#ifdef CONFIG_NUMA_SCHED
-#include <linux/numa_sched.h>
-#endif
-#ifdef NOTYET
-#include <asm/sn/types.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/klkernvars.h>
-#endif /* NOTYET */
-
-typedef struct plat_pglist_data {
- pg_data_t gendata;
-#ifdef NOTYET
- kern_vars_t kern_vars;
-#endif
-#if defined(CONFIG_NUMA) && defined(CONFIG_NUMA_SCHED)
- struct numa_schedule_data schedule_data;
-#endif
-} plat_pg_data_t;
+#include <asm/smp.h>
struct bootmem_data_t; /* stupid forward decl. */
@@ -32,19 +14,26 @@ struct bootmem_data_t; /* stupid forward decl. */
* Following are macros that are specific to this numa platform.
*/
-extern plat_pg_data_t *plat_node_data[];
+extern pg_data_t node_data[];
-#ifdef CONFIG_ALPHA_WILDFIRE
-# define ALPHA_PA_TO_NID(pa) ((pa) >> 36) /* 16 nodes max due 43bit kseg */
-# define NODE_MAX_MEM_SIZE (64L * 1024L * 1024L * 1024L) /* 64 GB */
-#else
-# define ALPHA_PA_TO_NID(pa) (0)
-# define NODE_MAX_MEM_SIZE (~0UL)
-#endif
+#define alpha_pa_to_nid(pa) \
+ (alpha_mv.pa_to_nid \
+ ? alpha_mv.pa_to_nid(pa) \
+ : (0))
+#define node_mem_start(nid) \
+ (alpha_mv.node_mem_start \
+ ? alpha_mv.node_mem_start(nid) \
+ : (0UL))
+#define node_mem_size(nid) \
+ (alpha_mv.node_mem_size \
+ ? alpha_mv.node_mem_size(nid) \
+ : ((nid) ? (0UL) : (~0UL)))
+
+#define pa_to_nid(pa) alpha_pa_to_nid(pa)
+#define NODE_DATA(nid) (&node_data[(nid)])
+#define node_size(nid) (NODE_DATA(nid)->node_size)
-#define PHYSADDR_TO_NID(pa) ALPHA_PA_TO_NID(pa)
-#define PLAT_NODE_DATA(n) (plat_node_data[(n)])
-#define PLAT_NODE_DATA_SIZE(n) (PLAT_NODE_DATA(n)->gendata.node_size)
+#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n) \
@@ -68,46 +57,76 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
/*
* Given a kernel address, find the home node of the underlying memory.
*/
-#define KVADDR_TO_NID(kaddr) PHYSADDR_TO_NID(__pa(kaddr))
+#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
+#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
+#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(n) (&((PLAT_NODE_DATA(n))->gendata))
-
-/*
- * NODE_MEM_MAP gives the kaddr for the mem_map of the node.
- */
-#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
-
-/*
- * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
- * and returns the mem_map of that node.
- */
-#define ADDR_TO_MAPBASE(kaddr) \
- NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define local_mapnr(kvaddr) \
+ ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
/*
* Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
* and returns the kaddr corresponding to first physical page in the
* node's mem_map.
*/
-#define LOCAL_BASE_ADDR(kaddr) ((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_pfn << PAGE_SHIFT))
+#define LOCAL_BASE_ADDR(kaddr) \
+ ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
+ << PAGE_SHIFT))
-#define LOCAL_MAP_NR(kvaddr) \
- (((unsigned long)(kvaddr)-LOCAL_BASE_ADDR(kvaddr)) >> PAGE_SHIFT)
+#define kern_addr_valid(kaddr) \
+ test_bit(local_mapnr(kaddr), \
+ NODE_DATA(kvaddr_to_nid(kaddr))->valid_addr_bitmap)
-#define kern_addr_valid(kaddr) test_bit(LOCAL_MAP_NR(kaddr), \
- NODE_DATA(KVADDR_TO_NID(kaddr))->valid_addr_bitmap)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_to_page(kaddr) (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
-#ifdef CONFIG_NUMA
-#ifdef CONFIG_NUMA_SCHED
-#define NODE_SCHEDULE_DATA(nid) (&((PLAT_NODE_DATA(nid))->schedule_data))
-#endif
-#endif /* CONFIG_NUMA */
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
+#define pte_pfn(pte) (pte_val(pte) >> 32)
+
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ unsigned long pfn; \
+ \
+ pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
+ pfn += page_zone(page)->zone_start_pfn << 32; \
+ pte_val(pte) = pfn | pgprot_val(pgprot); \
+ \
+ pte; \
+})
+
+#define pte_page(x) \
+({ \
+ unsigned long kvirt; \
+ struct page * __xx; \
+ \
+ kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
+ __xx = virt_to_page(kvirt); \
+ \
+ __xx; \
+})
+
+#define pfn_to_page(pfn) \
+({ \
+ unsigned long kaddr = (unsigned long)__va(pfn << PAGE_SHIFT); \
+ (node_mem_map(kvaddr_to_nid(kaddr)) + local_mapnr(kaddr)); \
+})
+
+#define page_to_pfn(page) \
+ ((page) - page_zone(page)->zone_mem_map + \
+ (page_zone(page)->zone_start_pfn))
+
+#define page_to_pa(page) \
+ ((( (page) - page_zone(page)->zone_mem_map ) \
+ + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
+
+#define pfn_to_nid(pfn) pa_to_nid(((u64)pfn << PAGE_SHIFT))
+#define pfn_valid(pfn) \
+ (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
+ node_size(pfn_to_nid(pfn))) \
+
+#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
#endif /* CONFIG_DISCONTIGMEM */
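
The new mmzone.h derives every translation from the owning node's mem_map and start pfn rather than from a single global mem_map. A standalone userspace model of that arithmetic follows; the two-node layout, PAGE_SHIFT value, and function names are assumptions for illustration, not part of the patch.

/* Userspace model of the discontigmem translations above: each node owns
 * a contiguous pfn range, and a pfn's local number is its offset from the
 * owning node's start pfn.  Layout is a hypothetical two-node example. */
#include <stdio.h>

#define PAGE_SHIFT 13                    /* 8 KB pages, as on Alpha */

struct node {
	unsigned long start_pfn;         /* first pfn owned by this node */
	unsigned long size;              /* number of pages on this node */
};

/* Hypothetical layout: two nodes of 64 GB each (2^23 pages of 8 KB). */
static const struct node nodes[] = {
	{ .start_pfn = 0x000000, .size = 0x800000 },
	{ .start_pfn = 0x800000, .size = 0x800000 },
};

static int pa_to_nid(unsigned long pa)   { return (int)(pa >> 36); }
static int pfn_to_nid(unsigned long pfn) { return pa_to_nid(pfn << PAGE_SHIFT); }

/* Counterpart of local_mapnr(): offset of a pfn within its node. */
static unsigned long local_nr(unsigned long pfn)
{
	return pfn - nodes[pfn_to_nid(pfn)].start_pfn;
}

/* Counterpart of pfn_valid(): the offset must fall inside the node. */
static int pfn_is_valid(unsigned long pfn)
{
	return local_nr(pfn) < nodes[pfn_to_nid(pfn)].size;
}

int main(void)
{
	unsigned long pfn = 0x800123;
	printf("pfn %#lx: node %d, local nr %#lx, valid %d\n",
	       pfn, pfn_to_nid(pfn), local_nr(pfn), pfn_is_valid(pfn));
	return 0;
}

pfn_to_page()/page_to_pfn() in the patch do the same bookkeeping through page_zone(), so a struct page can be mapped back to its pfn without any reference to a flat mem_map.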
diff --git a/include/asm-alpha/numnodes.h b/include/asm-alpha/numnodes.h
index 4ff6b3ecfbed..3c370ca3aaa6 100644
--- a/include/asm-alpha/numnodes.h
+++ b/include/asm-alpha/numnodes.h
@@ -1,12 +1,6 @@
#ifndef _ASM_MAX_NUMNODES_H
#define _ASM_MAX_NUMNODES_H
-/*
- * Currently the Wildfire is the only discontigmem/NUMA capable Alpha core.
- */
-#if defined(CONFIG_ALPHA_WILDFIRE) || defined(CONFIG_ALPHA_GENERIC)
-# include <asm/core_wildfire.h>
-# define MAX_NUMNODES WILDFIRE_MAX_QBB
-#endif
+#define MAX_NUMNODES 128 /* Marvel */
#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index 6feaa69c0893..fc675efac381 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -3,6 +3,7 @@
#include <linux/config.h>
#include <linux/mm.h>
+#include <linux/mmzone.h>
/*
* Allocate and free page tables. The xxx_kernel() versions are
@@ -13,7 +14,7 @@
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
- pmd_set(pmd, (pte_t *)(((pte - mem_map) << PAGE_SHIFT) + PAGE_OFFSET));
+ pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
static inline void
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index ab896db01d1f..3defe6e5c82f 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -192,14 +192,8 @@ extern unsigned long __zero_page(void);
* and a page entry and page directory to the page they refer to.
*/
#ifndef CONFIG_DISCONTIGMEM
-#define PAGE_TO_PA(page) ((page - mem_map) << PAGE_SHIFT)
-#else
-#define PAGE_TO_PA(page) \
- ((( (page) - (page)->zone->zone_mem_map ) \
- + (page)->zone->zone_start_pfn) << PAGE_SHIFT)
-#endif
+#define page_to_pa(page) ((page - mem_map) << PAGE_SHIFT)
-#ifndef CONFIG_DISCONTIGMEM
#define pte_pfn(pte) (pte_val(pte) >> 32)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
@@ -209,28 +203,6 @@ extern unsigned long __zero_page(void);
pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
pte; \
})
-#else
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- unsigned long pfn; \
- \
- pfn = ((unsigned long)((page)-(page)->zone->zone_mem_map)) << 32; \
- pfn += (page)->zone->zone_start_pfn << 32); \
- pte_val(pte) = pfn | pgprot_val(pgprot); \
- \
- pte; \
-})
-#define pte_page(x) \
-({ \
- unsigned long kvirt; \
- struct page * __xx; \
- \
- kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
- __xx = virt_to_page(kvirt); \
- \
- __xx; \
-})
#endif
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
@@ -252,7 +224,9 @@ pmd_page_kernel(pmd_t pmd)
return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}
+#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
+#endif
extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
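
The contiguous-memory paths kept in pgtable.h encode the pfn in the upper 32 bits of the PTE, with the protection bits below it. A minimal userspace sketch of that encode/decode step follows; the protection value is a placeholder, not a real Alpha _PAGE_* constant.

/* Userspace model of the Alpha PTE layout used above: pfn in the top
 * 32 bits, protection bits in the low part.  Prot value is made up. */
#include <stdio.h>

typedef unsigned long pte_t;

static pte_t mk_pte(unsigned long pfn, unsigned long pgprot)
{
	return (pfn << 32) | pgprot;
}

static unsigned long pte_pfn(pte_t pte)
{
	return pte >> 32;
}

int main(void)
{
	pte_t pte = mk_pte(0x12345, 0x111);   /* hypothetical prot bits */
	printf("pte %#lx -> pfn %#lx\n", pte, pte_pfn(pte));
	return 0;
}
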
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index e80327049f70..074daa9353ae 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -1,15 +1,50 @@
#ifndef _ASM_ALPHA_TOPOLOGY_H
#define _ASM_ALPHA_TOPOLOGY_H
-#if defined(CONFIG_NUMA) && defined(CONFIG_ALPHA_WILDFIRE)
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <asm/machvec.h>
-/* With wildfire assume 4 CPUs per node */
-#define __cpu_to_node(cpu) ((cpu) >> 2)
+#ifdef CONFIG_NUMA
+static inline int __cpu_to_node(int cpu)
+{
+ int node;
+
+ if (!alpha_mv.cpuid_to_nid)
+ return 0;
-#else /* !CONFIG_NUMA || !CONFIG_ALPHA_WILDFIRE */
+ node = alpha_mv.cpuid_to_nid(cpu);
-#include <asm-generic/topology.h>
+#ifdef DEBUG_NUMA
+ if (node < 0)
+ BUG();
+#endif
-#endif /* CONFIG_NUMA && CONFIG_ALPHA_WILDFIRE */
+ return node;
+}
+
+static inline int __node_to_cpu_mask(int node)
+{
+ unsigned long node_cpu_mask = 0;
+ int cpu;
+
+ for(cpu = 0; cpu < NR_CPUS; cpu++) {
+ if (cpu_online(cpu) && (__cpu_to_node(cpu) == node))
+ node_cpu_mask |= 1UL << cpu;
+ }
+
+#if DEBUG_NUMA
+ printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
+#endif
+
+ return node_cpu_mask;
+}
+
+# define __node_to_memblk(node) (node)
+# define __memblk_to_node(memblk) (memblk)
+
+#else /* CONFIG_NUMA */
+# include <asm-generic/topology.h>
+#endif /* !CONFIG_NUMA */
#endif /* _ASM_ALPHA_TOPOLOGY_H */
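
For completeness, a userspace sketch of what the new __node_to_cpu_mask() computes: every online CPU whose node matches contributes one bit to the mask. The 4-CPUs-per-node mapping and the online set are illustrative assumptions.

/* Userspace model of __node_to_cpu_mask(): OR together the bits of all
 * online CPUs on the requested node.  Mapping and online set are fake. */
#include <stdio.h>

#define NR_CPUS 16

static int cpu_online(int cpu)  { return cpu < 8; }   /* pretend 8 CPUs are up */
static int cpu_to_node(int cpu) { return cpu >> 2; }  /* 4 CPUs per node */

static unsigned long node_to_cpu_mask(int node)
{
	unsigned long mask = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu) && cpu_to_node(cpu) == node)
			mask |= 1UL << cpu;
	return mask;
}

int main(void)
{
	printf("node 1 cpu mask: %#lx\n", node_to_cpu_mask(1));  /* prints 0xf0 */
	return 0;
}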