summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2004-05-22 08:07:26 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-05-22 08:07:26 -0700
commit16ceff2d5dc9f0347ab5a08abff3f4647c2fee04 (patch)
tree91bea65a789e1c8c766831d0925fc10c51ca922f /arch
parentb124bc14b39502c8b46e3af5b12f821670e82298 (diff)
[PATCH] rmap 22 flush_dcache_mmap_lock
From: Hugh Dickins <hugh@veritas.com>

arm and parisc __flush_dcache_page have been scanning the i_mmap(_shared) list without locking or disabling preemption. That may be even more unsafe now it's a prio tree instead of a list. It looks like we cannot use i_shared_lock for this protection: most uses of flush_dcache_page are okay, and only one would need lock ordering fixed (get_user_pages holds page_table_lock across flush_dcache_page); but there are a few (e.g. in net and ntfs) which look as if they're using it in I/O completion - and it would be restrictive to disallow it there. So, on arm and parisc only, define flush_dcache_mmap_lock(mapping) as spin_lock_irq(&(mapping)->tree_lock); on i386 (and other arches left to the next patch) define it away to nothing; and use where needed. While updating locking hierarchy in filemap.c, remove two layers of the fossil record from add_to_page_cache comment: no longer used for swap. I believe all the #includes will work out, but have only built i386. I can see several things about this patch which might cause revulsion: the name flush_dcache_mmap_lock? the reuse of the page radix_tree's tree_lock for this different purpose? spin_lock_irqsave instead? can't we somehow get i_shared_lock to handle the problem?
Diffstat (limited to 'arch')
-rw-r--r--arch/arm/mm/fault-armv.c5
-rw-r--r--arch/parisc/kernel/cache.c4
2 files changed, 8 insertions, 1 deletion
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 5ff9c9f8323f..81485b5b4de4 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -94,6 +94,8 @@ void __flush_dcache_page(struct page *page)
* and invalidate any user data.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
@@ -106,6 +108,7 @@ void __flush_dcache_page(struct page *page)
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset);
}
+ flush_dcache_mmap_unlock(mapping);
}
static void
@@ -129,6 +132,7 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
* space, then we need to handle them specially to maintain
* cache coherency.
*/
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
@@ -143,6 +147,7 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
}
+ flush_dcache_mmap_unlock(mapping);
if (aliases)
adjust_pte(vma, addr);
else
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 8f0bd9e72482..9fc061440122 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -249,6 +249,7 @@ void __flush_dcache_page(struct page *page)
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
@@ -266,8 +267,9 @@ void __flush_dcache_page(struct page *page)
__flush_cache_page(mpnt, addr);
- return;
+ break;
}
+ flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(__flush_dcache_page);