author     Andrew Morton <akpm@digeo.com>    2003-02-05 16:57:17 -0800
committer  Linus Torvalds <torvalds@home.transmeta.com>    2003-02-05 16:57:17 -0800
commit     f93fcfa9e8a17bb8ef6a631ace1a14b02091e08f
tree       7fdcdc86133f5c5919e949fe2d5af052e92d97e3 /include
parent     08f16f8ff05d9de37ea187a3bde79806c64b82e2
[PATCH] Fix futexes in huge pages
Using a futex in a large page causes a kernel lockup in __pin_page(), because __pin_page()'s page revalidation uses follow_page(), and follow_page() doesn't work for hugepages.

The patch fixes up follow_page() to return the appropriate 4k page for hugepages.

This incurs a vma lookup for each follow_page(), which is considerable overhead in some situations. We only _need_ to do this if the architecture cannot determine a page's hugeness from the contents of the PMD. So this patch is a "reference" implementation for, say, PPC BAT-based hugepages.
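The mm/memory.c side of the change is not part of this diff. As a rough sketch of the mechanism the message describes, using the two helpers declared in hugetlb.h below (control flow assumed, not the literal patch):

	/*
	 * Sketch only: follow_page() first asks whether the address lies in a
	 * hugepage VMA.  If it does, the architecture's follow_huge_addr()
	 * returns the normal 4k page backing that address, so callers such as
	 * the futex code's __pin_page() keep working.
	 */
	struct page *follow_page(struct mm_struct *mm, unsigned long address, int write)
	{
		struct vm_area_struct *vma;

		vma = hugepage_vma(mm, address);	/* NULL unless in a huge mapping */
		if (vma)
			return follow_huge_addr(mm, vma, address, write);

		/* ... the existing pgd/pmd/pte walk handles 4k pages ... */
		return NULL;				/* placeholder for that path */
	}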
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hugetlb.h  18
-rw-r--r--  include/linux/sched.h     4
2 files changed, 19 insertions, 3 deletions
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 325d91ba012a..6b2f606c08f7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,16 +20,28 @@ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
void huge_page_release(struct page *);
int hugetlb_report_meminfo(char *);
int is_hugepage_mem_enough(size_t);
-
+struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write);
+struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
+ unsigned long address);
extern int htlbpage_max;
+static inline void
+mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ if (is_vm_hugetlb_page(vma))
+ mm->used_hugetlb = 1;
+}
+
#else /* !CONFIG_HUGETLB_PAGE */
+
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return 0;
}
-#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
+#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
+#define follow_huge_addr(mm, vma, addr, write) 0
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define zap_hugepage_range(vma, start, len) BUG()
@@ -37,6 +49,8 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
#define huge_page_release(page) BUG()
#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
+#define hugepage_vma(mm, addr) 0
+#define mark_mm_hugetlb(mm, vma) do { } while (0)
#endif /* !CONFIG_HUGETLB_PAGE */
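The message's point about vma-lookup overhead is carried by the new mm->used_hugetlb flag: mark_mm_hugetlb() above latches it once a hugetlb VMA appears in an mm, so an architecture's hugepage_vma() can bail out with a single test in the common case. A plausible arch-side implementation (assumed; the arch code is not in this diff):

	/*
	 * Sketch of an arch-provided hugepage_vma().  Most mms never map huge
	 * pages, so mm->used_hugetlb short-circuits the find_vma() lookup.
	 */
	struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long address)
	{
		if (mm->used_hugetlb) {
			struct vm_area_struct *vma = find_vma(mm, address);

			if (vma && is_vm_hugetlb_page(vma))
				return vma;
		}
		return NULL;	/* not huge: take the normal page-table walk */
	}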
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3a1367bacd1c..648d4d3ace3c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -201,7 +201,9 @@ struct mm_struct {
unsigned long swap_address;
unsigned dumpable:1;
-
+#ifdef CONFIG_HUGETLB_PAGE
+ int used_hugetlb;
+#endif
/* Architecture-specific MM context */
mm_context_t context;
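For completeness, mark_mm_hugetlb() only helps if it runs wherever a VMA is attached to an mm; a likely call site (assumed, not shown in this diff) is the common vma-insertion path, which makes used_hugetlb write-once for the mm's lifetime:

	/*
	 * Sketch of the assumed hook point: latch mm->used_hugetlb as VMAs
	 * are inserted.  The flag is never cleared, so follow_page()'s fast
	 * path stays a single load and branch.
	 */
	void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
	{
		mark_mm_hugetlb(mm, vma);	/* no-op unless vma is hugetlb */
		/* ... existing insertion into the mm's vma list/tree ... */
	}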