author     Andrew Morton <akpm@osdl.org>                2004-04-12 00:54:31 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2004-04-12 00:54:31 -0700
commit     fbf7adfafae19dd118facbbfe011510ba6aa8315 (patch)
tree       f3c91443c31fcf344095716b3885696c507828f8 /include
parent     da47ca23dfeb76d99add903af69726ffcb2d999d (diff)
[PATCH] rmap 3 arches + mapping_mapped
From: Hugh Dickins <hugh@veritas.com>

Some arches refer to page->mapping for their dcache flushing: use page_mapping(page) for safety, to avoid confusion on anon pages, which will store a different pointer there - though in most cases flush_dcache_page is being applied to pagecache pages.

arm has a useful mapping_mapped macro: move that to generic, and add mapping_writably_mapped, to avoid explicit list_empty checks on i_mmap and i_mmap_shared in several places.

Very tempted to add page_mapped(page) tests, perhaps along with the mapping_writably_mapped tests in do_generic_mapping_read and do_shmem_file_read, to cut down on wasted flush_dcache effort; but the serialization is not obvious, too unsafe to do in a hurry.
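
For illustration only (not part of the patch), a minimal sketch of how an arch's flush_dcache_page() can defer the flush using page_mapping() together with the new generic mapping_mapped() helper; PG_dcache_dirty and __flush_dcache_page() are the arch-private names used by the arm and parisc hunks below:

	/*
	 * Illustrative sketch: defer the dcache flush for pagecache pages
	 * that have no userspace mappings; flush anon or user-mapped pages
	 * immediately.  Mirrors the arm and parisc hunks in this patch.
	 */
	static inline void flush_dcache_page(struct page *page)
	{
		struct address_space *mapping = page_mapping(page);

		if (mapping && !mapping_mapped(mapping)) {
			/* no user mappings: mark dirty, flush lazily later */
			set_bit(PG_dcache_dirty, &page->flags);
		} else {
			/* anon or user-mapped: flush the kernel alias now */
			__flush_dcache_page(page);
		}
	}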
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/cacheflush.h      12
-rw-r--r--  include/asm-parisc/cacheflush.h    3
-rw-r--r--  include/asm-sh/pgalloc.h           4
-rw-r--r--  include/linux/fs.h                20
4 files changed, 27 insertions, 12 deletions
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 6968e8e90c3e..91b16cc3f502 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -283,23 +283,19 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
- * If this page isn't mapped (ie, page->mapping = NULL), or it has
- * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
- * then we _must_ always clean + invalidate the dcache entries associated
- * with the kernel mapping.
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
-#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
- !list_empty(&(map)->i_mmap_shared))
-
extern void __flush_dcache_page(struct page *);
static inline void flush_dcache_page(struct page *page)
{
- if (page->mapping && !mapping_mapped(page->mapping))
+ if (page_mapping(page) && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(page);
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 52b0c6a96aea..7a77986e3738 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -69,8 +69,7 @@ extern void __flush_dcache_page(struct page *page);
static inline void flush_dcache_page(struct page *page)
{
- if (page->mapping && list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
} else {
__flush_dcache_page(page);
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index bd02728a69d5..4584c9e37a75 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -101,8 +101,8 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (!page->mapping
- || list_empty(&page->mapping->i_mmap_shared))
+ if (!page_mapping(page) ||
+ !mapping_writably_mapped(page->mapping))
__clear_bit(PG_mapped, &page->flags);
}
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c7f0052b4abd..3d7c320d675e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -374,6 +374,26 @@ struct block_device {
int mapping_tagged(struct address_space *mapping, int tag);
/*
+ * Might pages of this file be mapped into userspace?
+ */
+static inline int mapping_mapped(struct address_space *mapping)
+{
+ return !list_empty(&mapping->i_mmap) ||
+ !list_empty(&mapping->i_mmap_shared);
+}
+
+/*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_shared holds all the VM_SHARED vmas: do_mmap_pgoff
+ * marks vma as VM_SHARED if it is shared, and the file was opened for
+ * writing i.e. vma may be mprotected writable even if now readonly.
+ */
+static inline int mapping_writably_mapped(struct address_space *mapping)
+{
+ return !list_empty(&mapping->i_mmap_shared);
+}
+
+/*
* Use sequence counter to get consistent i_size on 32-bit processors.
*/
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
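
A second illustrative sketch, not part of the patch: the pattern the asm-sh hunk above uses, clearing an arch-private PG_mapped bit only when the page has no mapping or no writable user mappings left (mapping_writably_mapped() checks i_mmap_shared only, as the fs.h comment explains). The helper name maybe_clear_mapped_bit is hypothetical:

	/*
	 * Illustration only: decide whether a user cache alias can still
	 * be dirty.  Mirrors the asm-sh ptep_get_and_clear() hunk above.
	 */
	static inline void maybe_clear_mapped_bit(struct page *page)
	{
		struct address_space *mapping = page_mapping(page);

		/*
		 * No pagecache mapping (e.g. anon), or no VM_SHARED vmas:
		 * userspace cannot have written through another alias, so
		 * the arch-private "mapped" flag can be cleared.
		 */
		if (!mapping || !mapping_writably_mapped(mapping))
			__clear_bit(PG_mapped, &page->flags);
	}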