| field | value | date |
|---|---|---|
| author | Andrew Morton <akpm@osdl.org> | 2004-04-18 22:06:30 -0700 |
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-04-18 22:06:30 -0700 |
| commit | 49c18faa72569c6b2dfdfa6a833aa2414e5e3b86 (patch) | |
| tree | b6be2600a6438e5fa36f8c33be5c2a6c425cf362 /include | |
| parent | 3d9d1320db840f032f47b3ad1cd31dde18675057 (diff) | |
[PATCH] From: David Gibson <david@gibson.dropbear.id.au>
hugepage_vma() is both misleadingly named and unnecessary. On most archs it
always returns NULL, and on IA64 the vma it returns is never used. The
function's real purpose is to determine whether the address it is passed is a
special hugepage address which must be looked up in hugepage pagetables,
rather than being looked up in the normal pagetables (which might have
specially marked hugepage PMDs or PTEs).
This patch kills off hugepage_vma() and folds the logic it really needs into
follow_huge_addr(). That function now returns a struct page * if called on a
special hugepage address, and an error encoded with ERR_PTR() otherwise. This
also requires tweaking the IA64 code to check that the hugepage PTE is present
in follow_huge_addr(); previously this was guaranteed, since the function was
only called if the address lay in an existing hugepage VMA, and hugepages are
always prefaulted.
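
As a caller-side illustration (not part of this patch), the new return
convention lets generic code test the special-address case with the usual
IS_ERR() helper from linux/err.h and fall back to the normal pagetables
otherwise. The lookup_page_sketch() name and the fallback placeholder below
are hypothetical:

```c
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>

/* Hypothetical caller sketch: lookup_page_sketch() is not a real kernel
 * function; it only shows how the new follow_huge_addr() contract can be
 * consumed. */
static struct page *lookup_page_sketch(struct mm_struct *mm,
				       unsigned long address, int write)
{
	struct page *page;

	page = follow_huge_addr(mm, address, write);
	if (!IS_ERR(page))
		return page;	/* special hugepage address: page (or NULL) */

	/* ERR_PTR case: not a special hugepage address, so walk the normal
	 * pagetables, where hugepages may still show up as specially marked
	 * PMDs handled via follow_huge_pmd(). */
	return NULL;		/* placeholder for the normal lookup */
}
```

The !CONFIG_HUGETLB_PAGE stub in the hunk below returns ERR_PTR(-EINVAL) for
exactly this reason: callers can use a single IS_ERR() test whether or not
hugepage support is compiled in.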
Diffstat (limited to 'include')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/hugetlb.h | 9 |

1 file changed, 3 insertions, 6 deletions
```diff
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b0e98cfe15f9..2a8fa4fd9366 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,10 +20,8 @@ void huge_page_release(struct page *);
 int hugetlb_report_meminfo(char *);
 int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
-struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write);
-struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
-			unsigned long address);
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+			int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			pmd_t *pmd, int write);
 int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
@@ -65,7 +63,7 @@ static inline unsigned long hugetlb_total_pages(void)
 }
 
 #define follow_hugetlb_page(m,v,p,vs,a,b,i)	({ BUG(); 0; })
-#define follow_huge_addr(mm, vma, addr, write)	0
+#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
 #define zap_hugepage_range(vma, start, len)	BUG()
@@ -73,7 +71,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define huge_page_release(page)			BUG()
 #define is_hugepage_mem_enough(size)		0
 #define hugetlb_report_meminfo(buf)		0
-#define hugepage_vma(mm, addr)			0
 #define mark_mm_hugetlb(mm, vma)		do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)	0
 #define is_aligned_hugepage_range(addr, len)	0
```
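
For reference, here is a minimal sketch of the shape the folded-in logic takes
on the arch side. This is not the actual IA64 code from the patch:
addr_in_hugepage_region() is a made-up placeholder for the arch-specific test
of the special hugepage address range, and huge_pte_offset() stands in for the
arch's hugepage pagetable walk.

```c
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>

/* Illustrative only: addr_in_hugepage_region() is a hypothetical helper,
 * not a real kernel function. */
struct page *follow_huge_addr_sketch(struct mm_struct *mm,
				     unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	/* Not a special hugepage address: tell the caller to fall back to
	 * the normal pagetables. */
	if (!addr_in_hugepage_region(mm, address))
		return ERR_PTR(-EINVAL);

	/* Look the address up in the hugepage pagetables. */
	ptep = huge_pte_offset(mm, address);

	/* The presence check now lives here: with hugepage_vma() gone,
	 * nothing upstream guarantees the hugepage PTE exists. */
	if (!ptep || !pte_present(*ptep))
		return NULL;

	page = pte_page(*ptep);
	page += (address & ~HPAGE_MASK) >> PAGE_SHIFT;
	return page;
}
```

The write argument is carried through unused in this sketch purely to match
the new prototype.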
