author    Andrew Morton <akpm@osdl.org>             2004-05-22 08:10:00 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org> 2004-05-22 08:10:00 -0700
commit    a89cd0f027d187c9c639a171aee7ee80b5a37da5
tree      3009be607705b1d950b6674e7a215f7faa67e951 /include/linux
parent    e1fd9cc9cbb5ac6465601833ea1cc5d6d376a155
[PATCH] rmap 38 remove anonmm rmap
From: Hugh Dickins <hugh@veritas.com>

Before moving on to anon_vma rmap, remove now what's peculiar to anonmm rmap: the anonmm handling and the mremap move cows.  Temporarily reduce page_referenced_anon and try_to_unmap_anon to stubs, so a kernel built with this patch will not swap anonymous at all.
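The mm/rmap.c side of that stubbing is outside the include/linux diff shown below.  As a rough sketch only (the exact stub bodies are an assumption, not the actual hunk from this patch), "reduced to stubs" means the two anon walkers stop doing real work: report the page as referenced and refuse to unmap it, so vmscan never swaps anonymous pages:

/*
 * Illustrative sketch, not the real mm/rmap.c change from this patch.
 * Reporting "referenced" and returning SWAP_FAIL (defined in rmap.h)
 * is enough to keep anonymous pages from being swapped out.
 */
static int page_referenced_anon(struct page *page)
{
	return 1;		/* pretend the page was recently referenced */
}

static int try_to_unmap_anon(struct page *page)
{
	return SWAP_FAIL;	/* refuse to unmap anonymous pages for now */
}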
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/page-flags.h |  2
-rw-r--r--  include/linux/rmap.h       | 53
-rw-r--r--  include/linux/sched.h      |  1
3 files changed, 1 insertion, 55 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9f3864fbb5da..40028bc59127 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -76,7 +76,7 @@
#define PG_reclaim 18 /* To be reclaimed asap */
#define PG_compound 19 /* Part of a compound page */
-#define PG_anon 20 /* Anonymous page: anonmm in mapping */
+#define PG_anon 20 /* Anonymous: anon_vma in mapping */
/*
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 13e896e984aa..620987fa9607 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -35,54 +35,6 @@ static inline void page_dup_rmap(struct page *page)
page_map_unlock(page);
}
-int mremap_move_anon_rmap(struct page *page, unsigned long addr);
-
-/**
- * mremap_moved_anon_rmap - does new address clash with that noted?
- * @page: the page just brought back in from swap
- * @addr: the user virtual address at which it is mapped
- *
- * Returns boolean, true if addr clashes with address already in page.
- *
- * For do_swap_page and unuse_pte: anonmm rmap cannot find the page if
- * it's at different addresses in different mms, so caller must take a
- * copy of the page to avoid that: not very clever, but too rare a case
- * to merit cleverness.
- */
-static inline int mremap_moved_anon_rmap(struct page *page, unsigned long addr)
-{
- return page->index != (addr & PAGE_MASK);
-}
-
-/**
- * make_page_exclusive - try to make page exclusive to one mm
- * @vma the vm_area_struct covering this address
- * @addr the user virtual address of the page in question
- *
- * Assumes that the page at this address is anonymous (COWable),
- * and that the caller holds mmap_sem for reading or for writing.
- *
- * For mremap's move_page_tables and for swapoff's unuse_process:
- * not a general purpose routine, and in general may not succeed.
- * But move_page_tables loops until it succeeds, and unuse_process
- * holds the original page locked, which protects against races.
- */
-static inline int make_page_exclusive(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (handle_mm_fault(vma->vm_mm, vma, addr, 1) != VM_FAULT_OOM)
- return 0;
- return -ENOMEM;
-}
-
-/*
- * Called from kernel/fork.c to manage anonymous memory
- */
-void init_rmap(void);
-int exec_rmap(struct mm_struct *);
-int dup_rmap(struct mm_struct *, struct mm_struct *oldmm);
-void exit_rmap(struct mm_struct *);
-
/*
* Called from mm/vmscan.c to handle paging out
*/
@@ -91,11 +43,6 @@ int try_to_unmap(struct page *);
#else /* !CONFIG_MMU */
-#define init_rmap() do {} while (0)
-#define exec_rmap(mm) (0)
-#define dup_rmap(mm, oldmm) (0)
-#define exit_rmap(mm) do {} while (0)
-
#define page_referenced(page) TestClearPageReferenced(page)
#define try_to_unmap(page) SWAP_FAIL
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9cf8e5be3ed0..60e8bb20c43e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -207,7 +207,6 @@ struct mm_struct {
* together off init_mm.mmlist, and are protected
* by mmlist_lock
*/
- struct anonmm *anonmm; /* For rmap to track anon mem */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;