| author | Andrew Morton <akpm@osdl.org> | 2004-03-06 08:48:52 -0800 |
|---|---|---|
| committer | Jaroslav Kysela <perex@suse.cz> | 2004-03-06 08:48:52 -0800 |
| commit | 5b9c017de3fc96695bb606c191acce716f6d08fb | |
| tree | 763abbf07c9216dbf5d2c8bb2eb2958979b137c3 /include | |
| parent | 99e7e863ffe3d5d3d24b09096b424cb72cc17974 | |
[PATCH] ppc64: Convert mm_context_t to a struct
From: David Gibson <david@gibson.dropbear.id.au>
Converts the mm_context_t on ppc64 to be a struct. This lets us separate
the low_hpages flag into its own field rather than folding it into the
actual context id. That makes things neater, since the flag is
conceptually separate and, for example, should be propagated across a
fork, whereas the context ID obviously should not be. The mm_context_t is
the only place to put arch-specific information in the mm_struct.
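To make the fork behaviour concrete, here is a minimal userspace sketch (not kernel code) of the new layout. The type names mirror the patch; `dup_context()` is a hypothetical stand-in for the copying that fork() does before `init_new_context()` assigns a fresh id.

```c
/*
 * Minimal userspace sketch (not kernel code) of why the struct is neater.
 * Names mirror the patch; dup_context() is a hypothetical stand-in for
 * the copying fork() does before init_new_context() picks a fresh id.
 */
#include <stdio.h>

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;	/* per-mm context number, never inherited */
	int low_hpages;		/* conceptually separate flag; inherited */
} mm_context_t;

/* On fork: a plain struct copy propagates the flag... */
static mm_context_t dup_context(mm_context_t parent, mm_context_id_t fresh_id)
{
	mm_context_t child = parent;	/* low_hpages carried over */
	child.id = fresh_id;		/* ...but the id is reassigned */
	return child;
}

int main(void)
{
	mm_context_t parent = { .id = 42, .low_hpages = 1 };
	mm_context_t child = dup_context(parent, 43);

	/* The old scheme instead had to mask bit 63 in and out by hand:
	 *   mm->context = elements[head] | (old_context & CONTEXT_LOW_HPAGES);
	 */
	printf("child: id=%lu low_hpages=%d\n", child.id, child.low_hpages);
	return 0;
}
```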
This patch will also make some interesting extensions to the hugepage
support much easier, such as allowing dynamic resizing of the hugepage
address space, or using special pagetables for hugepages.
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-ppc64/mmu.h | 18 |
| -rw-r--r-- | include/asm-ppc64/mmu_context.h | 11 |
| -rw-r--r-- | include/asm-ppc64/page.h | 5 |
3 files changed, 20 insertions(+), 14 deletions(-)
```diff
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 3ecacc7de85f..a68e47f717e7 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -18,15 +18,25 @@
 
 #ifndef __ASSEMBLY__
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+/* Time to allow for more things here */
+typedef unsigned long mm_context_id_t;
+typedef struct {
+	mm_context_id_t id;
+#ifdef CONFIG_HUGETLB_PAGE
+	int low_hpages;
+#endif
+} mm_context_t;
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define CONTEXT_LOW_HPAGES	(1UL<<63)
+#define KERNEL_LOW_HPAGES	.low_hpages = 0,
 #else
-#define CONTEXT_LOW_HPAGES	0
+#define KERNEL_LOW_HPAGES
 #endif
 
+#define KERNEL_CONTEXT(ea) ({ \
+		mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
+		ctx; })
+
 /*
  * Hardware Segment Lookaside Buffer Entry
  * This structure has been padded out to two 64b doublewords (actual SLBE's are
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 57076c656f07..8be5b8d0a61b 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -52,7 +52,7 @@ struct mmu_context_queue_t {
 	long head;
 	long tail;
 	long size;
-	mm_context_t elements[LAST_USER_CONTEXT];
+	mm_context_id_t elements[LAST_USER_CONTEXT];
 };
 
 extern struct mmu_context_queue_t mmu_context_queue;
@@ -83,7 +83,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	long head;
 	unsigned long flags;
 	/* This does the right thing across a fork (I hope) */
-	unsigned long low_hpages = mm->context & CONTEXT_LOW_HPAGES;
 
 	spin_lock_irqsave(&mmu_context_queue.lock, flags);
 
@@ -93,8 +92,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	}
 
 	head = mmu_context_queue.head;
-	mm->context = mmu_context_queue.elements[head];
-	mm->context |= low_hpages;
+	mm->context.id = mmu_context_queue.elements[head];
 
 	head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
 	mmu_context_queue.head = head;
@@ -132,8 +130,7 @@ destroy_context(struct mm_struct *mm)
 #endif
 
 	mmu_context_queue.size++;
-	mmu_context_queue.elements[index] =
-		mm->context & ~CONTEXT_LOW_HPAGES;
+	mmu_context_queue.elements[index] = mm->context.id;
 
 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }
@@ -212,8 +209,6 @@ get_vsid( unsigned long context, unsigned long ea )
 {
 	unsigned long ordinal, vsid;
 
-	context &= ~CONTEXT_LOW_HPAGES;
-
 	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
 	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
 
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index 66b24246f50b..dd2c424fbef9 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -32,6 +32,7 @@
 /* For 64-bit processes the hugepage range is 1T-1.5T */
 #define TASK_HPAGE_BASE		(0x0000010000000000UL)
 #define TASK_HPAGE_END		(0x0000018000000000UL)
+
 /* For 32-bit processes the hugepage range is 2-3G */
 #define TASK_HPAGE_BASE_32	(0x80000000UL)
 #define TASK_HPAGE_END_32	(0xc0000000UL)
@@ -39,7 +40,7 @@
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(addr, len) \
 	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
-	  ((current->mm->context & CONTEXT_LOW_HPAGES) && \
+	  (current->mm->context.low_hpages && \
 	   (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
 #define hugetlb_free_pgtables	free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -47,7 +48,7 @@
 #define in_hugepage_area(context, addr) \
 	((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
 	 ((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
-	  (((context) & CONTEXT_LOW_HPAGES) && \
+	  ((context).low_hpages && \
 	   (((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
 
 #else /* !CONFIG_HUGETLB_PAGE */
```
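The new KERNEL_CONTEXT() macro leans on a GCC statement expression (`({ ... })`) to build and yield a struct value from a macro. Below is a standalone sketch of that pattern; REGION_ID() is stubbed here as the top four bits of the effective address, which is an assumption made for illustration rather than the kernel's definition.

```c
/*
 * Standalone sketch of the pattern behind KERNEL_CONTEXT(): a GCC
 * statement expression ({ ... }) that builds and yields a struct value.
 * REGION_ID is a stub here (assumed: top 4 bits of the effective address).
 */
#include <stdio.h>

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	int low_hpages;
} mm_context_t;

#define REGION_ID(ea)	((unsigned long)(ea) >> 60)	/* stub */

#define KERNEL_CONTEXT(ea) ({ \
		mm_context_t ctx = { .id = REGION_ID(ea), .low_hpages = 0 }; \
		ctx; })

int main(void)
{
	/* 0xC... is a kernel-region address on ppc64, so the stub yields 0xc */
	mm_context_t ctx = KERNEL_CONTEXT(0xC000000000000000UL);

	printf("kernel context: id=0x%lx\n", ctx.id);
	return 0;
}
```

Note the design choice in the patch: KERNEL_LOW_HPAGES expands to `.low_hpages = 0,` only when CONFIG_HUGETLB_PAGE is set and to nothing otherwise, so the same initializer list works in both configurations.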
