author    Andrew Morton <akpm@osdl.org>              2004-05-22 08:01:30 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2004-05-22 08:01:30 -0700
commit    123e4df7e093329599a75ad8ad0eed9ebbd9aa27 (patch)
tree      286e401f5cc70586044b6ca44b1822c14d0ae6ce /include
parent    b33a7bad9bcb5c8453d7a13a99b3151ebe69563f (diff)
[PATCH] rmap 9 remove pte_chains
From: Hugh Dickins <hugh@veritas.com>

Lots of deletions: the next patch will put in the new anon rmap, which should look clearer if first we remove all of the old pte-pointer-based rmap from the core in this patch. That leaves anonymous rmap totally disabled: anon pages stay locked in memory until the process frees them.

Leave arch files (and page table rmap) untouched for now; clean them up in a later batch.

A few constructive changes amidst all the deletions:

- Choose names (e.g. page_add_anon_rmap) and args (e.g. no more pteps) now, so we need not revisit so many files in the next patch.
- Inline function page_dup_rmap, for fork's copy_page_range, simply bumps mapcount under lock.
- cond_resched_lock in copy_page_range.
- Struct page rearranged: no pte union, just mapcount moved next to the atomic count, so two ints can occupy one long on 64-bit; i386 struct page is now 32 bytes even with PAE.
- Never pass PageReserved to page_remove_rmap; only do_wp_page did so.

From: Hugh Dickins <hugh@veritas.com>

Move page_add_anon_rmap's BUG_ON(page_mapping(page)) inside the rmap_lock (well, might as well just check mapping if !mapcount then): if this page is being mapped or unmapped on another cpu at the same time, page_mapping's PageAnon(page) and page->mapping are volatile. But page_mapping(page) is used more widely: I've a nasty feeling that clear_page_anon, page_add_anon_rmap and/or page_mapping need barriers added (also in 2.6.6 itself),
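[Editor's note] The struct page size claim above can be sanity-checked outside the kernel. Below is a minimal sketch, not kernel code: the kernel types are stubbed out, and the member order follows the rearranged struct in the diff. On i386 the members are 4+4+4+4+4+4+8 = 32 bytes, even with PAE, because no 64-bit pte_addr_t member remains; on a 64-bit machine, _count and mapcount are the two ints sharing one long.

/* Illustration only: stand-ins for the kernel types. */
#include <stdio.h>

typedef unsigned long page_flags_t;
typedef struct { int counter; } atomic_t;
struct list_head { void *next, *prev; };

struct page_sketch {
        page_flags_t flags;     /* 4 bytes on i386, 8 on 64-bit */
        atomic_t _count;        /* 4 bytes */
        unsigned int mapcount;  /* 4 bytes: packs with _count into
                                 * one 8-byte slot on 64-bit */
        unsigned long private;
        void *mapping;          /* struct address_space * in the kernel */
        unsigned long index;    /* pgoff_t in the kernel */
        struct list_head lru;   /* two pointers */
};

int main(void)
{
        /* Prints 32 on i386 (even with PAE), 56 on LP64. */
        printf("%zu\n", sizeof(struct page_sketch));
        return 0;
}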
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm.h          34
-rw-r--r--  include/linux/page-flags.h  12
-rw-r--r--  include/linux/rmap.h        28
3 files changed, 33 insertions(+), 41 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 48781c75e53b..905e87989dd6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -147,8 +147,6 @@ struct vm_operations_struct {
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 };
 
-/* forward declaration; pte_chain is meant to be internal to rmap.c */
-struct pte_chain;
 struct mmu_gather;
 struct inode;
 
@@ -170,28 +168,26 @@ typedef unsigned long page_flags_t;
  *
  * The first line is data used in page cache lookup, the second line
  * is used for linear searches (eg. clock algorithm scans).
- *
- * TODO: make this structure smaller, it could be as small as 32 bytes.
  */
 struct page {
-	page_flags_t flags;		/* atomic flags, some possibly
-					   updated asynchronously */
+	page_flags_t flags;		/* Atomic flags, some possibly
+					 * updated asynchronously */
 	atomic_t _count;		/* Usage count, see below. */
-	struct address_space *mapping;	/* The inode (or ...) we belong to. */
-	pgoff_t index;			/* Our offset within mapping. */
-	struct list_head lru;		/* Pageout list, eg. active_list;
-					   protected by zone->lru_lock !! */
-	union {
-		struct pte_chain *chain;/* Reverse pte mapping pointer.
-					 * protected by PG_chainlock */
-		pte_addr_t direct;
-		unsigned int mapcount;	/* Count ptes mapped into mms */
-	} pte;
+	unsigned int mapcount;		/* Count of ptes mapped in mms,
+					 * to show when page is mapped
+					 * & limit reverse map searches,
+					 * protected by PG_maplock.
+					 */
 	unsigned long private;		/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
 					 * swp_entry_t if PageSwapCache
 					 */
+	struct address_space *mapping;	/* The inode (or ...) we belong to. */
+	pgoff_t index;			/* Our offset within mapping. */
+	struct list_head lru;		/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
+					 */
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
@@ -440,13 +436,11 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 /*
- * Return true if this page is mapped into pagetables.  Subtle: test pte.direct
- * rather than pte.chain.  Because sometimes pte.direct is 64-bit, and .chain
- * is only 32-bit.
+ * Return true if this page is mapped into pagetables.
  */
 static inline int page_mapped(struct page *page)
 {
-	return page->pte.direct != 0;
+	return page->mapcount != 0;
 }
 
 /*
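[Editor's note] The deleted "Subtle" comment above deserves a gloss. Here is a stand-alone sketch of the old union (the types are stand-ins; under i386 PAE, pte_addr_t was 64-bit while a pointer is 32-bit):

#include <stdint.h>

typedef uint64_t pte_addr_t;	/* 64-bit under i386 PAE */

union pte_sketch {
        void *chain;            /* 32-bit pointer on i386: pte_chain */
        pte_addr_t direct;      /* 64-bit pte address */
};

/* The old page_mapped() had to test the widest member: a value
 * stored via 'direct' can be nonzero purely in its high 32 bits,
 * so testing the 32-bit 'chain' view could report a mapped page
 * as unmapped.  The new page_mapped() has no such trap: mapcount
 * is a plain unsigned int. */
static inline int old_page_mapped(const union pte_sketch *pte)
{
        return pte->direct != 0;
}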
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6959827c9f62..9f3864fbb5da 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -71,12 +71,12 @@
 #define PG_nosave		14	/* Used for system suspend/resume */
 #define PG_maplock		15	/* Lock bit for rmap to ptes */
 
-#define PG_direct		16	/* ->pte_chain points directly at pte */
+#define PG_swapcache		16	/* Swap page: swp_entry_t in private */
 #define PG_mappedtodisk		17	/* Has blocks allocated on-disk */
 #define PG_reclaim		18	/* To be reclaimed asap */
 #define PG_compound		19	/* Part of a compound page */
-#define PG_anon			20	/* Anonymous page: anon_vma in mapping*/
-#define PG_swapcache		21	/* Swap page: swp_entry_t in private */
+
+#define PG_anon			20	/* Anonymous page: anonmm in mapping */
 
 /*
@@ -281,12 +281,6 @@ extern void get_full_page_state(struct page_state *ret);
 #define ClearPageNosave(page)	clear_bit(PG_nosave, &(page)->flags)
 #define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags)
 
-#define PageDirect(page)	test_bit(PG_direct, &(page)->flags)
-#define SetPageDirect(page)	set_bit(PG_direct, &(page)->flags)
-#define TestSetPageDirect(page)	test_and_set_bit(PG_direct, &(page)->flags)
-#define ClearPageDirect(page)	clear_bit(PG_direct, &(page)->flags)
-#define TestClearPageDirect(page) test_and_clear_bit(PG_direct, &(page)->flags)
-
 #define PageMappedToDisk(page)	test_bit(PG_mappedtodisk, &(page)->flags)
 #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
 #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 65e0f74f5390..b87144a2dd44 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -15,21 +15,25 @@
 
 #ifdef CONFIG_MMU
 
-struct pte_chain;
-struct pte_chain *pte_chain_alloc(int gfp_flags);
-void __pte_chain_free(struct pte_chain *pte_chain);
-
-static inline void pte_chain_free(struct pte_chain *pte_chain)
+void fastcall page_add_anon_rmap(struct page *,
+			struct mm_struct *, unsigned long addr);
+void fastcall page_add_file_rmap(struct page *);
+void fastcall page_remove_rmap(struct page *);
+
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_rmap,
+ * avoiding unnecessary tests (already checked) so it's quicker.
+ */
+static inline void page_dup_rmap(struct page *page)
 {
-	if (pte_chain)
-		__pte_chain_free(pte_chain);
+	page_map_lock(page);
+	page->mapcount++;
+	page_map_unlock(page);
 }
 
-struct pte_chain * fastcall
-	page_add_rmap(struct page *, pte_t *, struct pte_chain *);
-void fastcall page_add_file_rmap(struct page *);
-void fastcall page_remove_rmap(struct page *, pte_t *);
-
 /*
  * Called from mm/vmscan.c to handle paging out
  */
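[Editor's note] For orientation, this is the calling convention the new declarations imply, as a sketch only: the real call sites arrive with the next patch in mm/memory.c and friends, and page, vma, and addr here are assumed fault-path locals, not code from this patch.

	/* Anonymous page freshly mapped by a fault handler: the mm and
	 * virtual address identify the mapping, not a pte pointer. */
	page_add_anon_rmap(page, vma->vm_mm, addr);

	/* File-backed page: mapping + index already locate its ptes. */
	page_add_file_rmap(page);

	/* fork(): copy_page_range duplicates an existing mapping by
	 * bumping mapcount under PG_maplock. */
	page_dup_rmap(page);

	/* Unmapping: no pte_t * argument, and no pte_chain to
	 * pre-allocate or free. */
	page_remove_rmap(page);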