diff options
| author | Andrew Morton <akpm@osdl.org> | 2004-05-10 00:08:49 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-05-10 00:08:49 -0700 |
| commit | 9008d35b80e72edc52a2f3d64cefe4e2ee858329 (patch) | |
| tree | b0916207b9f08fbf39c45e9de18d5c96c69bebbb | |
| parent | d755344394bdf8304e563f46479687c37aec1716 (diff) | |
[PATCH] hugepage: fix add_to_page_cache() error handling
From: David Gibson <david@gibson.dropbear.id.au>
add_to_page_cache() locks the given page if and only if it succeeds. The
hugepage code (every arch), however, does an unlock_page() after
add_to_page_cache() before checking the return code, which could trip the
BUG() in unlock_page() if add_to_page_cache() failed.
In practice we've never hit this bug, because the only ways
add_to_page_cache() can fail are when we fail to allocate a radix tree node
(very rare), or when there is already a page at that offset in the radix
tree, which never happens during prefault, obviously. We should probably
fix it anyway, though.
The analogous bug in some of the patches floating about to
demand-allocation of hugepages is more of a problem, because multiple
processes can race to instantiate a particular page in the radix tree -
that's been hit at least once (which is how I found this).
| -rw-r--r-- | arch/i386/mm/hugetlbpage.c | 5 | ||||
| -rw-r--r-- | arch/ia64/mm/hugetlbpage.c | 5 | ||||
| -rw-r--r-- | arch/ppc64/mm/hugetlbpage.c | 5 | ||||
| -rw-r--r-- | arch/sh/mm/hugetlbpage.c | 5 | ||||
| -rw-r--r-- | arch/sparc64/mm/hugetlbpage.c | 5 |
5 files changed, 15 insertions, 10 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 6e2fa1f3930f..0397fe963418 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c @@ -264,8 +264,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) goto out; } ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); - unlock_page(page); - if (ret) { + if (! ret) { + unlock_page(page); + } else { hugetlb_put_quota(mapping); free_huge_page(page); goto out; diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 114e3d96c6cc..56e409ec6800 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -293,8 +293,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) goto out; } ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); - unlock_page(page); - if (ret) { + if (! ret) { + unlock_page(page); + } else { hugetlb_put_quota(mapping); free_huge_page(page); goto out; diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 3ac622c60054..393feec3af32 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c @@ -452,8 +452,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) goto out; } ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); - unlock_page(page); - if (ret) { + if (! ret) { + unlock_page(page); + } else { hugetlb_put_quota(mapping); free_huge_page(page); goto out; diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 5309f67f6519..4d5d687d1106 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -248,8 +248,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) goto out; } ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); - unlock_page(page); - if (ret) { + if (! ret) { + unlock_page(page); + } else { hugetlb_put_quota(mapping); free_huge_page(page); goto out; diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 1df587bdd31c..6da2759c2517 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -245,8 +245,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) goto out; } ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); - unlock_page(page); - if (ret) { + if (! ret) { + unlock_page(page); + } else { hugetlb_put_quota(mapping); free_huge_page(page); goto out; |
