| author | Andrew Morton <akpm@osdl.org> | 2004-04-12 00:54:03 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-04-12 00:54:03 -0700 |
| commit | 4875a6018bcc53201ddbf745bff35ed723b468eb | |
| tree | a73a9da331ec044b4c3e1b9d6b12021058b921ae /include | |
| parent | 4c4acd2447ef473f23aee53f04518f93840a8693 | |
[PATCH] rmap 2 anon and swapcache
From: Hugh Dickins <hugh@veritas.com>
Tracking anonymous pages by anon_vma,pgoff or mm,address needs a
pointer,offset pair in struct page: mapping,index are the natural choice. But
swapcache currently uses those fields for &swapper_space,swp_entry_t.
It's trivial to separate swapcache from pagecache with the radix tree; most of
swapper_space is actually unused, just a fiction to make swap look like a
file; and page->private is a good place to keep the swp_entry_t, now that swap
never uses bufferheads.
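To make the field reuse concrete, here is a minimal userspace sketch (the struct page and swp_entry_t below are simplified stand-ins, not the kernel's definitions, and main() exists only to demonstrate the idea):

```c
#include <assert.h>

struct address_space;			/* opaque here */

typedef struct { unsigned long val; } swp_entry_t;

/* Simplified stand-in for the kernel's struct page */
struct page {
	struct address_space *mapping;	/* or anon_vma when PageAnon */
	unsigned long index;		/* pgoff within the mapping */
	unsigned long private;		/* swp_entry_t when PageSwapCache */
};

int main(void)
{
	struct page page = { 0 };
	swp_entry_t entry = { .val = 0x2a };

	/* Stash the swap entry in the private word... */
	page.private = entry.val;
	/* ...and recover it without touching mapping or index,
	 * leaving that pair free for anon_vma,pgoff. */
	assert(page.private == 0x2a);
	return 0;
}
```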
Define a PG_anon bit; have page_add_rmap SetPageAnon and put an oopsable
address in page->mapping, to test that we're not confused by it. Define a
page_mapping(page) helper to give NULL when PageAnon, whatever may be in
page->mapping. Define a PG_swapcache bit, and deduce swapper_space from that
in the few places we need it, as sketched below.
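A sketch of that deduction, assuming kernel context (page_swapper_space is a hypothetical helper name; only PageSwapCache, page_mapping, and swapper_space come from this patch):

```c
/*
 * Hypothetical helper, not part of this patch: the few remaining
 * users of swapper_space can deduce it from PG_swapcache instead
 * of reading page->mapping, which may now hold an anon_vma.
 */
extern struct address_space swapper_space;

static inline struct address_space *page_swapper_space(struct page *page)
{
	if (PageSwapCache(page))	/* swp_entry_t is in page->private */
		return &swapper_space;
	return page_mapping(page);	/* NULL when PageAnon */
}
```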
add_to_swap_cache is now distinct from add_to_page_cache. Separating the
caches somewhat simplifies the tmpfs swizzling in swap_state.c, since the page
can now briefly be in both caches.
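A rough sketch of what the separate insertion path could look like (assumed shape only: the name add_to_swap_cache_sketch, the omitted locking, and the error handling are illustrative, not copied from swap_state.c):

```c
/*
 * Assumed shape, not the patch's exact code: key the swap radix
 * tree by the swap entry itself, then mark the page so that
 * PageSwapCache() and page->private identify it as swapcache.
 */
static int add_to_swap_cache_sketch(struct page *page, swp_entry_t entry)
{
	int error;

	/* tree locking and radix_tree_preload omitted for brevity */
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (!error) {
		SetPageSwapCache(page);
		page->private = entry.val;	/* swp_entry_t lives here now */
	}
	return error;
}
```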
The rmap method remains pte chains; no change to that yet. But there is one
small functional difference: the use of PageAnon implies that a page truncated
while still mapped will no longer be found and freed (swapped out) by
try_to_unmap; it will only be freed by exit or munmap. But normally pages are
unmapped by vmtruncate, so this should only affect nonlinear mappings, and a
later patch not in this batch will fix that.
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/mm.h | 38 |
| -rw-r--r-- | include/linux/page-flags.h | 17 |
2 files changed, 29 insertions, 26 deletions
```diff
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6d6abe8c656e..796f498658d6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -189,8 +189,11 @@ struct page {
 					 * protected by PG_chainlock */
 		pte_addr_t direct;
 	} pte;
-	unsigned long private;		/* mapping-private opaque data */
-
+	unsigned long private;		/* Mapping-private opaque data:
+					 * usually used for buffer_heads
+					 * if PagePrivate set; used for
+					 * swp_entry_t if PageSwapCache
+					 */
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
@@ -403,6 +406,19 @@ void page_address_init(void);
 #endif
 
 /*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
+ */
+static inline struct address_space *page_mapping(struct page *page)
+{
+	return PageAnon(page)? NULL: page->mapping;
+}
+
+/*
  * Return true if this page is mapped into pagetables.  Subtle: test pte.direct
  * rather than pte.chain.  Because sometimes pte.direct is 64-bit, and .chain
  * is only 32-bit.
@@ -471,6 +487,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 
 int __set_page_dirty_buffers(struct page *page);
 int __set_page_dirty_nobuffers(struct page *page);
+int FASTCALL(set_page_dirty(struct page *page));
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
@@ -498,23 +515,6 @@ extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
 /*
- * If the mapping doesn't provide a set_page_dirty a_op, then
- * just fall through and assume that it wants buffer_heads.
- * FIXME: make the method unconditional.
- */
-static inline int set_page_dirty(struct page *page)
-{
-	if (page->mapping) {
-		int (*spd)(struct page *);
-
-		spd = page->mapping->a_ops->set_page_dirty;
-		if (spd)
-			return (*spd)(page);
-	}
-	return __set_page_dirty_buffers(page);
-}
-
-/*
  * On a two-level page table, this ends up being trivial. Thus the
  * inlining and the symmetry break with pte_alloc_map() that does all
  * of this out-of-line.
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 93f22640b6cb..6959827c9f62 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -75,6 +75,8 @@
 #define PG_mappedtodisk		17	/* Has blocks allocated on-disk */
 #define PG_reclaim		18	/* To be reclaimed asap */
 #define PG_compound		19	/* Part of a compound page */
+#define PG_anon			20	/* Anonymous page: anon_vma in mapping*/
+#define PG_swapcache		21	/* Swap page: swp_entry_t in private */
 
 
 /*
@@ -298,15 +300,16 @@ extern void get_full_page_state(struct page_state *ret);
 #define SetPageCompound(page)	set_bit(PG_compound, &(page)->flags)
 #define ClearPageCompound(page)	clear_bit(PG_compound, &(page)->flags)
 
-/*
- * The PageSwapCache predicate doesn't use a PG_flag at this time,
- * but it may again do so one day.
- */
+#define PageAnon(page)		test_bit(PG_anon, &(page)->flags)
+#define SetPageAnon(page)	set_bit(PG_anon, &(page)->flags)
+#define ClearPageAnon(page)	clear_bit(PG_anon, &(page)->flags)
+
 #ifdef CONFIG_SWAP
-extern struct address_space swapper_space;
-#define PageSwapCache(page)	((page)->mapping == &swapper_space)
+#define PageSwapCache(page)	test_bit(PG_swapcache, &(page)->flags)
+#define SetPageSwapCache(page)	set_bit(PG_swapcache, &(page)->flags)
+#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
 #else
-#define PageSwapCache(page)		0
+#define PageSwapCache(page)	0
 #endif
 
 struct page;	/* forward declaration */
```
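The inline set_page_dirty() removed above becomes the out-of-line FASTCALL declared in the same hunk. Its body is outside this diff; a plausible reconstruction (assumed, not quoted from the patch) keeps the old fallthrough but goes through the new page_mapping():

```c
/*
 * Assumed body of the out-of-line replacement (the real one lives
 * outside include/): same fallthrough as the removed inline, but
 * page_mapping() returns NULL for PageAnon pages, so the anon_vma
 * pointer now stored in page->mapping is never dereferenced as if
 * it were a struct address_space.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;

		if (spd)
			return (*spd)(page);
	}
	return __set_page_dirty_buffers(page);
}
```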
