| field | value | date |
|---|---|---|
| author | Andrew Morton <akpm@zip.com.au> | 2002-07-28 03:13:57 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-07-28 03:13:57 -0700 |
| commit | cbb6e8ecaa8ec7e08ee2b0095236fabb87638742 (patch) | |
| tree | 9ae8f7cb4bb69ad32e315d0908ac83d97e38ef11 | |
| parent | 1a40868e97c4f45b6388593bef1c80b41e0c8845 (diff) | |
[PATCH] use a slab cache for pte_chains
Patch from Bill Irwin.
It removes the custom pte_chain allocator in mm/rmap.c and replaces it
with a slab cache.
"This patch
(1) eliminates the pte_chain_freelist_lock and all contention on it
(2) gives the VM the ability to recover unused pte_chain pages
Anton Blanchard has reported (1) from prior incarnations of this patch.
Craig Kulesa has reported (2) in combination with slab-on-LRU patches.
I've left OOM detection out of this patch entirely as upcoming patches
will do real OOM handling for pte_chains and all the code changed anyway."
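
(For reference, the slab-cache pattern the patch adopts boils down to the calls below. This is a condensed sketch rather than the patch itself — `struct pte_chain` is abbreviated and `pte_chain_free_one()` is an illustrative name — but the `kmem_cache_t` type, the six-argument `kmem_cache_create()`, and the `GFP_ATOMIC` allocation match the mm/rmap.c hunks that follow.)

```c
/*
 * Condensed sketch of the allocation scheme this patch switches to.
 * Calls and flags mirror the mm/rmap.c hunks below (2.5-era slab API);
 * pte_chain_free_one() is an illustrative name, not from the patch.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/pgalloc.h>

struct pte_chain {
	struct pte_chain *next;		/* next entry in this page's chain */
	pte_t *ptep;			/* the pte mapping this page */
};

static kmem_cache_t *pte_chain_cache;

/* Called once from start_kernel(), after the slab allocator is up. */
void __init pte_chain_init(void)
{
	pte_chain_cache = kmem_cache_create("pte_chain",
					    sizeof(struct pte_chain),
					    0, 0, NULL, NULL);
	if (!pte_chain_cache)
		panic("failed to create pte_chain cache!\n");
}

/* Atomic allocation; the caller holds the page's pte_chain_lock. */
static inline struct pte_chain *pte_chain_alloc(void)
{
	return kmem_cache_alloc(pte_chain_cache, GFP_ATOMIC);
}

/* Objects go straight back to the slab: no private freelist, no lock. */
static inline void pte_chain_free_one(struct pte_chain *pte_chain)
{
	kmem_cache_free(pte_chain_cache, pte_chain);
}
```

Because the objects come from a regular slab cache, there is no global freelist lock to contend on, and the pages backing unused pte_chains can be handed back to the allocator — the two wins the changelog describes.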
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/proc/proc_misc.c | 6 |
| -rw-r--r-- | include/linux/page-flags.h | 3 |
| -rw-r--r-- | init/main.c | 4 |
| -rw-r--r-- | mm/page_alloc.c | 3 |
| -rw-r--r-- | mm/rmap.c | 84 |

5 files changed, 24 insertions, 76 deletions
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index a84b41f61ba9..715dcc586ee2 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -161,8 +161,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		"Dirty: %8lu kB\n"
 		"Writeback: %8lu kB\n"
 		"PageTables: %8lu kB\n"
-		"PteChainTot: %8lu kB\n"
-		"PteChainUsed: %8lu kB\n",
+		"ReverseMaps: %8lu\n",
 		K(i.totalram),
 		K(i.freeram),
 		K(i.sharedram),
@@ -179,8 +178,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
 		K(ps.nr_page_table_pages),
-		K(ps.nr_pte_chain_pages),
-		ps.used_pte_chains_bytes >> 10
+		ps.nr_reverse_maps
 		);
 	return proc_calc_metrics(page, start, off, count, eof, len);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d8e4c7779d05..1b2458455afa 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -79,8 +79,7 @@ extern struct page_state {
 	unsigned long nr_active;	/* on active_list LRU */
 	unsigned long nr_inactive;	/* on inactive_list LRU */
 	unsigned long nr_page_table_pages;
-	unsigned long nr_pte_chain_pages;
-	unsigned long used_pte_chains_bytes;
+	unsigned long nr_reverse_maps;
 } ____cacheline_aligned_in_smp page_states[NR_CPUS];
 
 extern void get_page_state(struct page_state *ret);
diff --git a/init/main.c b/init/main.c
index 32cad19bf432..292cd0cb2b3d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -70,7 +70,7 @@ extern void sbus_init(void);
 extern void sysctl_init(void);
 extern void signals_init(void);
 extern void buffer_init(void);
-
+extern void pte_chain_init(void);
 extern void radix_tree_init(void);
 extern void free_initmem(void);
@@ -432,7 +432,7 @@ asmlinkage void __init start_kernel(void)
 	mem_init();
 	kmem_cache_sizes_init();
 	pgtable_cache_init();
-
+	pte_chain_init();
 	mempages = num_physpages;
 	fork_init(mempages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8991e2ae6037..7e352a9eb127 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -566,8 +566,7 @@ void get_page_state(struct page_state *ret)
 		ret->nr_active += ps->nr_active;
 		ret->nr_inactive += ps->nr_inactive;
 		ret->nr_page_table_pages += ps->nr_page_table_pages;
-		ret->nr_pte_chain_pages += ps->nr_pte_chain_pages;
-		ret->used_pte_chains_bytes += ps->used_pte_chains_bytes;
+		ret->nr_reverse_maps += ps->nr_reverse_maps;
 	}
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 55cc05c40443..6287e9cacc2a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -23,6 +23,8 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swapops.h>
+#include <linux/slab.h>
+#include <linux/init.h>
 
 #include <asm/pgalloc.h>
 #include <asm/rmap.h>
@@ -50,10 +52,10 @@ struct pte_chain {
 	pte_t * ptep;
 };
 
+static kmem_cache_t *pte_chain_cache;
 static inline struct pte_chain * pte_chain_alloc(void);
 static inline void pte_chain_free(struct pte_chain *, struct pte_chain *,
 		struct page *);
-static void alloc_new_pte_chains(void);
 
 /**
  * page_referenced - test if the page was referenced
@@ -148,6 +150,7 @@ void page_add_rmap(struct page * page, pte_t * ptep)
 	}
 	pte_chain_unlock(page);
+	inc_page_state(nr_reverse_maps);
 }
 
 /**
@@ -210,9 +213,9 @@ void page_remove_rmap(struct page * page, pte_t * ptep)
 #endif
 out:
+	dec_page_state(nr_reverse_maps);
 	pte_chain_unlock(page);
 	return;
-
 }
 
 /**
@@ -357,27 +360,6 @@ int try_to_unmap(struct page * page)
  ** functions.
  **/
 
-struct pte_chain * pte_chain_freelist;
-spinlock_t pte_chain_freelist_lock = SPIN_LOCK_UNLOCKED;
-
-/* Maybe we should have standard ops for singly linked lists ... - Rik */
-static inline void pte_chain_push(struct pte_chain * pte_chain)
-{
-	pte_chain->ptep = NULL;
-	pte_chain->next = pte_chain_freelist;
-	pte_chain_freelist = pte_chain;
-}
-
-static inline struct pte_chain * pte_chain_pop(void)
-{
-	struct pte_chain *pte_chain;
-
-	pte_chain = pte_chain_freelist;
-	pte_chain_freelist = pte_chain->next;
-	pte_chain->next = NULL;
-
-	return pte_chain;
-}
 
 /**
  * pte_chain_free - free pte_chain structure
@@ -393,15 +375,12 @@ static inline struct pte_chain * pte_chain_pop(void)
 static inline void pte_chain_free(struct pte_chain * pte_chain,
 		struct pte_chain * prev_pte_chain, struct page * page)
 {
-	mod_page_state(used_pte_chains_bytes, -sizeof(struct pte_chain));
 	if (prev_pte_chain)
 		prev_pte_chain->next = pte_chain->next;
 	else if (page)
 		page->pte.chain = pte_chain->next;
 
-	spin_lock(&pte_chain_freelist_lock);
-	pte_chain_push(pte_chain);
-	spin_unlock(&pte_chain_freelist_lock);
+	kmem_cache_free(pte_chain_cache, pte_chain);
 }
 
 /**
@@ -411,47 +390,20 @@ static inline void pte_chain_free(struct pte_chain * pte_chain,
  * pte_chain structures as required.
  * Caller needs to hold the page's pte_chain_lock.
  */
-static inline struct pte_chain * pte_chain_alloc()
+static inline struct pte_chain *pte_chain_alloc(void)
 {
-	struct pte_chain * pte_chain;
-
-	spin_lock(&pte_chain_freelist_lock);
-
-	/* Allocate new pte_chain structs as needed. */
-	if (!pte_chain_freelist)
-		alloc_new_pte_chains();
-
-	/* Grab the first pte_chain from the freelist. */
-	pte_chain = pte_chain_pop();
-
-	spin_unlock(&pte_chain_freelist_lock);
-
-	mod_page_state(used_pte_chains_bytes, sizeof(struct pte_chain));
-	return pte_chain;
+	return kmem_cache_alloc(pte_chain_cache, GFP_ATOMIC);
 }
 
-/**
- * alloc_new_pte_chains - convert a free page to pte_chain structures
- *
- * Grabs a free page and converts it to pte_chain structures. We really
- * should pre-allocate these earlier in the pagefault path or come up
- * with some other trick.
- *
- * Note that we cannot use the slab cache because the pte_chain structure
- * is way smaller than the minimum size of a slab cache allocation.
- * Caller needs to hold the pte_chain_freelist_lock
- */
-static void alloc_new_pte_chains()
+void __init pte_chain_init(void)
{
-	struct pte_chain * pte_chain = (void *) get_zeroed_page(GFP_ATOMIC);
-	int i = PAGE_SIZE / sizeof(struct pte_chain);
-
-	if (pte_chain) {
-		inc_page_state(nr_pte_chain_pages);
-		for (; i-- > 0; pte_chain++)
-			pte_chain_push(pte_chain);
-	} else {
-		/* Yeah yeah, I'll fix the pte_chain allocation ... */
-		panic("Fix pte_chain allocation, you lazy bastard!\n");
-	}
+	pte_chain_cache = kmem_cache_create(	"pte_chain",
+						sizeof(struct pte_chain),
+						0,
+						0,
+						NULL,
+						NULL);
+
+	if (!pte_chain_cache)
+		panic("failed to create pte_chain cache!\n");
 }
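
(A note on the accounting half of the change: the new ReverseMaps figure in /proc/meminfo is a raw count of pte_chain entries rather than kilobytes. It lives in the per-CPU nr_reverse_maps field added to struct page_state, is bumped by inc_page_state()/dec_page_state() in page_add_rmap()/page_remove_rmap(), and is summed across CPUs by get_page_state(). The sketch below is illustrative only — total_reverse_maps() is a made-up helper and the plain NR_CPUS loop glosses over the details of the real get_page_state() — but it shows the shape of that summation.)

```c
/*
 * Illustrative only: how the per-CPU nr_reverse_maps counters feed the
 * ReverseMaps line in /proc/meminfo.  total_reverse_maps() is a made-up
 * helper; the actual code is the get_page_state() hunk above.
 */
#include <linux/page-flags.h>
#include <linux/threads.h>	/* NR_CPUS */

static unsigned long total_reverse_maps(void)
{
	unsigned long total = 0;
	int cpu;

	/* Sum the per-CPU counters; each CPU only updates its own slot. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += page_states[cpu].nr_reverse_maps;

	return total;
}
```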
