author    Andrew Morton <akpm@osdl.org>  2004-03-31 21:50:36 -0800
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2004-03-31 21:50:36 -0800
commit    e9acfc13953a90f474b88e1c0bd4cb7071a65e35 (patch)
tree      dfaef0281446f810cc29f0f5bcdd1ad380bcfa07
parent    ccfcbaed2b9e84641912bbdd6f52363bc0f3db7a (diff)
[PATCH] ppc64: allow MAP_FIXED hugepage mappings
From: David Gibson <david@gibson.dropbear.id.au>

On PowerPC64 the "low" hugepage range (at 2-3G, for use by 32-bit processes) needs to be activated before it can be used. hugetlb_get_unmapped_area() activates the range automatically when a 32-bit process makes a hugepage mapping that is not MAP_FIXED. However, a MAP_FIXED mmap(), even at a suitable address, will fail if the region has not already been activated, because there is no suitable callback from the generic MAP_FIXED code path into the arch code.

This patch corrects the problem and allows PPC64 to make MAP_FIXED hugepage mappings in the low hugepage range.
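For context, a minimal userspace sketch of the case this patch enables: a 32-bit process mapping a hugetlbfs file with MAP_FIXED at an address inside the low 2-3G hugepage window. The mount point /mnt/huge, the file name, the 16MB hugepage size, and the fixed address 0x80000000 are illustrative assumptions, not taken from the patch.

/* Illustrative sketch only: assumes hugetlbfs is mounted at /mnt/huge and
 * that 0x80000000 falls inside the low (2-3G) hugepage range. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE  (16UL * 1024 * 1024)     /* assumed hugepage size */
#define FIXED_ADDR  ((void *)0x80000000UL)   /* inside the 2-3G window */

int main(void)
{
	int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Before this patch, a MAP_FIXED request here would fail unless the
	 * low hugepage range had already been activated by a prior
	 * non-MAP_FIXED hugepage mmap() in the same process. */
	void *p = mmap(FIXED_ADDR, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_FIXED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("hugepage mapped at %p\n", p);
	munmap(p, HPAGE_SIZE);
	close(fd);
	return 0;
}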
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c  | 10
-rw-r--r--  include/asm-ppc64/page.h     | 13
-rw-r--r--  include/linux/hugetlb.h      |  8
-rw-r--r--  mm/mmap.c                    |  5
4 files changed, 31 insertions(+), 5 deletions(-)
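The hunks below are easier to follow with the resulting call path in mind. A condensed view, paraphrased from the diff itself rather than quoted verbatim from the kernel: get_unmapped_area() now calls prepare_hugepage_range() for hugepage-backed files (MAP_FIXED included); architectures that define ARCH_HAS_PREPARE_HUGEPAGE_RANGE supply their own hook, everyone else keeps the old alignment-only check.

/* Condensed from the diff below; not a literal kernel excerpt. */

/* include/linux/hugetlb.h: fall back to the old alignment check unless
 * the architecture provides its own hook. */
#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define prepare_hugepage_range(addr, len)  is_aligned_hugepage_range(addr, len)
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif

/* mm/mmap.c, get_unmapped_area(): the generic path, MAP_FIXED included,
 * now goes through the hook for hugepage files. */
if (file && is_file_hugepages(file))
	ret = prepare_hugepage_range(addr, len);

/* arch/ppc64/mm/hugetlbpage.c: ppc64 activates the low 2-3G segment on
 * demand when a 32-bit process asks for it. */
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (is_hugepage_high_range(addr, len))
		return 0;
	else if (is_hugepage_low_range(addr, len))
		return open_32bit_htlbpage_range(current->mm);

	return -EINVAL;
}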
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 1f620734b304..3a50af4962dd 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -295,6 +295,16 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
return 0;
}
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (is_hugepage_high_range(addr, len))
+		return 0;
+	else if (is_hugepage_low_range(addr, len))
+		return open_32bit_htlbpage_range(current->mm);
+
+	return -EINVAL;
+}
+
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index 6e32a197dba4..fd707bb57da5 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -38,10 +38,17 @@
#define TASK_HPAGE_END_32 (0xc0000000UL)
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+
+#define is_hugepage_low_range(addr, len) \
+ (((addr) > (TASK_HPAGE_BASE_32-(len))) && ((addr) < TASK_HPAGE_END_32))
+#define is_hugepage_high_range(addr, len) \
+ (((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
+
#define is_hugepage_only_range(addr, len) \
- ( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
- (current->mm->context.low_hpages && \
- (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+ (is_hugepage_high_range((addr), (len)) || \
+ (current->mm->context.low_hpages \
+ && is_hugepage_low_range((addr), (len))))
#define hugetlb_free_pgtables free_pgtables
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a88f16ccdc5a..5075a90bcd65 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -42,6 +42,13 @@ mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
#endif
+#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define prepare_hugepage_range(addr, len) \
+ is_aligned_hugepage_range(addr, len)
+#else
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
+#endif
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -62,6 +69,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
#define mark_mm_hugetlb(mm, vma) do { } while (0)
#define follow_huge_pmd(mm, addr, pmd, write) 0
#define is_aligned_hugepage_range(addr, len) 0
+#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(addr, len) 0
#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
diff --git a/mm/mmap.c b/mm/mmap.c
index 416817625cd3..31520c769854 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -807,9 +807,10 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
return -EINVAL;
if (file && is_file_hugepages(file)) {
/*
- * Make sure that addr and length are properly aligned.
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
*/
- ret = is_aligned_hugepage_range(addr, len);
+ ret = prepare_hugepage_range(addr, len);
} else {
/*
* Ensure that a normal request is not falling in a