path: root/include/linux/mm.h
author	Andrew Morton <akpm@osdl.org>	2004-05-22 08:01:30 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-05-22 08:01:30 -0700
commit	123e4df7e093329599a75ad8ad0eed9ebbd9aa27 (patch)
tree	286e401f5cc70586044b6ca44b1822c14d0ae6ce	/include/linux/mm.h
parent	b33a7bad9bcb5c8453d7a13a99b3151ebe69563f (diff)
[PATCH] rmap 9 remove pte_chains
From: Hugh Dickins <hugh@veritas.com>

Lots of deletions: the next patch will put in the new anon rmap, which should look clearer if first we remove all of the old pte-pointer-based rmap from the core in this patch - which therefore leaves anonymous rmap totally disabled, anon pages locked in memory until process frees them.

Leave arch files (and page table rmap) untouched for now, clean them up in a later batch.

A few constructive changes amidst all the deletions:

Choose names (e.g. page_add_anon_rmap) and args (e.g. no more pteps) now so we need not revisit so many files in the next patch.

Inline function page_dup_rmap for fork's copy_page_range, simply bumps mapcount under lock.

cond_resched_lock in copy_page_range.

Struct page rearranged: no pte union, just mapcount moved next to atomic count, so two ints can occupy one long on 64-bit; i386 struct page now 32 bytes even with PAE.

Never pass PageReserved to page_remove_rmap, only do_wp_page did so.

From: Hugh Dickins <hugh@veritas.com>

Move page_add_anon_rmap's BUG_ON(page_mapping(page)) inside the rmap_lock (well, might as well just check mapping if !mapcount then): if this page is being mapped or unmapped on another cpu at the same time, page_mapping's PageAnon(page) and page->mapping are volatile.

But page_mapping(page) is used more widely: I've a nasty feeling that clear_page_anon, page_add_anon_rmap and/or page_mapping need barriers added (also in 2.6.6 itself),
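A minimal sketch of the new page_dup_rmap described above, assuming page_map_lock()/page_map_unlock() are the helpers that take and release the PG_maplock bit mentioned in the struct page comment below (the helper names are an assumption, not shown in this diff excerpt):

/*
 * Hedged sketch, not necessarily the patch's verbatim code: fork's
 * copy_page_range duplicates an existing pte mapping, so it only
 * needs to bump mapcount under the per-page map lock.
 */
static inline void page_dup_rmap(struct page *page)
{
	page_map_lock(page);	/* take PG_maplock */
	page->mapcount++;	/* one more pte maps this page */
	page_map_unlock(page);	/* release PG_maplock */
}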
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	34	++++++++++++++--------------------
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 48781c75e53b..905e87989dd6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -147,8 +147,6 @@ struct vm_operations_struct {
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 };
 
-/* forward declaration; pte_chain is meant to be internal to rmap.c */
-struct pte_chain;
 
 struct mmu_gather;
 struct inode;
@@ -170,28 +168,26 @@ typedef unsigned long page_flags_t;
  *
  * The first line is data used in page cache lookup, the second line
  * is used for linear searches (eg. clock algorithm scans).
- *
- * TODO: make this structure smaller, it could be as small as 32 bytes.
  */
 struct page {
-	page_flags_t flags;		/* atomic flags, some possibly
-					   updated asynchronously */
+	page_flags_t flags;		/* Atomic flags, some possibly
+					 * updated asynchronously */
 	atomic_t _count;		/* Usage count, see below. */
-	struct address_space *mapping;	/* The inode (or ...) we belong to. */
-	pgoff_t index;			/* Our offset within mapping. */
-	struct list_head lru;		/* Pageout list, eg. active_list;
-					   protected by zone->lru_lock !! */
-	union {
-		struct pte_chain *chain;/* Reverse pte mapping pointer.
-					 * protected by PG_chainlock */
-		pte_addr_t direct;
-		unsigned int mapcount;	/* Count ptes mapped into mms */
-	} pte;
+	unsigned int mapcount;		/* Count of ptes mapped in mms,
+					 * to show when page is mapped
+					 * & limit reverse map searches,
+					 * protected by PG_maplock.
+					 */
 	unsigned long private;		/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
 					 * swp_entry_t if PageSwapCache
 					 */
+	struct address_space *mapping;	/* The inode (or ...) we belong to. */
+	pgoff_t index;			/* Our offset within mapping. */
+	struct list_head lru;		/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
+					 */
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
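The size claim in the commit message (two ints sharing one long on 64-bit; 32-byte struct page on i386 even with PAE) can be sanity-checked with a standalone mock-up. struct demo_page below is an illustrative stand-in with same-sized fields, not the kernel structure itself:

#include <stdio.h>

/*
 * Illustrative stand-in, not kernel code: fields sized like the
 * rearranged struct page above.  With _count and mapcount adjacent,
 * the two ints pack into a single 8-byte slot on 64-bit; with 32-bit
 * longs and pointers (i386), the whole mock-up is 32 bytes, with or
 * without PAE, because the 64-bit pte_addr_t union member is gone.
 */
struct demo_page {
	unsigned long flags;		/* page_flags_t */
	int _count;			/* stands in for atomic_t */
	unsigned int mapcount;		/* packs beside _count */
	unsigned long private;
	void *mapping;			/* struct address_space * */
	unsigned long index;		/* pgoff_t */
	void *lru_next, *lru_prev;	/* stands in for list_head */
};

int main(void)
{
	printf("sizeof(struct demo_page) = %zu\n", sizeof(struct demo_page));
	return 0;
}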
@@ -440,13 +436,11 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 /*
- * Return true if this page is mapped into pagetables. Subtle: test pte.direct
- * rather than pte.chain. Because sometimes pte.direct is 64-bit, and .chain
- * is only 32-bit.
+ * Return true if this page is mapped into pagetables.
  */
 static inline int page_mapped(struct page *page)
 {
-	return page->pte.direct != 0;
+	return page->mapcount != 0;
 }
 
 /*
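With pte_chains gone, callers test the new mapcount through page_mapped() exactly as before. A hedged sketch of a typical vmscan-style caller, assuming the era's try_to_unmap() return codes (SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL); this is an illustration, not code from this patch:

/*
 * Hypothetical caller sketch: reclaim only needs to unmap a page
 * if some pte still maps it, which page_mapped() now reports from
 * page->mapcount instead of the old pte.direct field.
 */
if (page_mapped(page) && mapping) {
	switch (try_to_unmap(page)) {
	case SWAP_FAIL:		/* couldn't unmap, keep it active */
		goto activate_locked;
	case SWAP_AGAIN:	/* transient failure, retry later */
		goto keep_locked;
	case SWAP_SUCCESS:	/* fully unmapped */
		;		/* fall through to pageout */
	}
}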