author     Andrew Morton <akpm@digeo.com>    2003-02-05 16:57:23 -0800
committer  Linus Torvalds <torvalds@home.transmeta.com>    2003-02-05 16:57:23 -0800
commit     1f1921fc15dc2408ab3900d036cffcf0d732801f (patch)
tree       8e17e9927b68558eab699161701642838b51cc43 /include
parent     f93fcfa9e8a17bb8ef6a631ace1a14b02091e08f (diff)
[PATCH] Optimise follow_page() for page-table-based hugepages
ia32 and others can determine a page's hugeness by inspecting the pmd's value directly; there is no need to perform a VMA lookup against the user's virtual address. This patch ifdefs away the VMA-based implementation of hugepage-aware follow_page() for ia32 and replaces it with a pmd-based implementation.

The intent is that architectures will implement one or the other. So the architecture either:

1: implements hugepage_vma()/follow_huge_addr() and stubs out pmd_huge()/follow_huge_pmd(), or

2: implements pmd_huge()/follow_huge_pmd() and stubs out hugepage_vma()/follow_huge_addr().
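For context, a minimal sketch of how the pmd-based path described above is intended to be used by follow_page(). This is not part of the patch (which is limited to include/); the walk is abbreviated, the wrapper name follow_page_sketch() is hypothetical, and only pmd_huge()/follow_huge_pmd() come from the declarations added below.

/*
 * Sketch only: test the pmd that the page-table walk has already
 * fetched instead of looking up the VMA for the address.  On ia32 a
 * huge mapping is visible in the pmd itself (the PSE bit), so this
 * avoids the VMA lookup entirely.
 */
static struct page *follow_page_sketch(struct mm_struct *mm,
				       unsigned long address, int write)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		return NULL;

	/* Hugeness is decided from the pmd value, not from the VMA. */
	if (pmd_huge(*pmd))
		return follow_huge_pmd(mm, address, pmd, write);

	/* ... continue with the normal pte-level lookup ... */
	return NULL;
}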
Diffstat (limited to 'include')
-rw-r--r--    include/linux/hugetlb.h    6
1 file changed, 6 insertions, 0 deletions
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b2f606c08f7..b51d51d05190 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -24,6 +24,10 @@ struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write);
struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
unsigned long address);
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write);
+int pmd_huge(pmd_t pmd);
+
 extern int htlbpage_max;

 static inline void
@@ -51,6 +55,8 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
#define hugetlb_report_meminfo(buf) 0
#define hugepage_vma(mm, addr) 0
#define mark_mm_hugetlb(mm, vma) do { } while (0)
+#define follow_huge_pmd(mm, addr, pmd, write) 0
+#define pmd_huge(x) 0
#endif /* !CONFIG_HUGETLB_PAGE */
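To make the two options from the changelog concrete, here is a hedged sketch of the per-architecture side. The function bodies, file placement, and return conventions are illustrative assumptions, not code from this patch, which only adds the generic declarations and the !CONFIG_HUGETLB_PAGE stubs shown above.

/* Option 1: a VMA-based architecture implements hugepage_vma() and
 * follow_huge_addr() for real, and stubs out the pmd-based hooks. */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

/* Option 2 (ia32): the architecture implements pmd_huge() and
 * follow_huge_pmd() for real, and stubs out the VMA-based hooks. */
struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
				    unsigned long address)
{
	return NULL;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long address, int write)
{
	return NULL;
}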