| author | Andrew Morton <akpm@osdl.org> | 2004-01-18 18:22:50 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.osdl.org> | 2004-01-18 18:22:50 -0800 |
| commit | d4361b6c4ee2dbd5eee1f6932b88532525bb50a7 (patch) | |
| tree | cea6411f86ae383a0d6a4f6bd6797b18637c6cef /include/asm-ppc64/pgalloc.h | |
| parent | 04879b04bf500585d6b015e440be3ee7bdaf9cd4 (diff) | |
[PATCH] ppc64: Fix {pte,pmd}_free vs. hash_page race by delaying actual deallocation with RCU, from Ben Herrenschmidt
From: Anton Blanchard <anton@samba.org>
Fix {pte,pmd}_free vs. hash_page race by delaying the actual deallocation with
RCU, from Ben Herrenschmidt.
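The header diff below only *declares* the slow-path helpers (`pte_free_now()` and `pte_free_submit()`); their definitions live in the ppc64 mm code, which this diffstat excludes. For orientation, here is a minimal sketch of how such helpers could defer the actual free behind an RCU grace period, assuming the 2.6.1-era three-argument `call_rcu()`; only the two extern names come from the patch, the bodies are illustrative:

```c
/* Sketch only: pte_free_now()/pte_free_submit() are declared extern in
 * the header below; these bodies are an illustration, not the patch's
 * actual implementation.  Assumes the 2.6.1-era signature
 * call_rcu(struct rcu_head *, void (*func)(void *), void *arg). */
#include <linux/rcupdate.h>

static void pte_free_rcu_callback(void *arg)
{
	struct pte_freelist_batch *batch = arg;
	unsigned int i;

	/* A grace period has elapsed: no CPU can still be traversing
	 * these page-table pages from a hash_page() walk, so the pages
	 * (and the batch itself) can really be freed now. */
	for (i = 0; i < batch->index; i++)
		pte_free(batch->pages[i]);
	free_page((unsigned long)batch);
}

/* Queue a full batch; the pages are only freed once every CPU has
 * passed through a quiescent state. */
void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback, batch);
}
```

The batching amortizes the per-callback cost: a whole page of `struct page` pointers rides behind a single `rcu_head`.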
Diffstat (limited to 'include/asm-ppc64/pgalloc.h')
-rw-r--r--  include/asm-ppc64/pgalloc.h | 55
1 file changed, 49 insertions, 6 deletions
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 23b0cde55e4e..43c70d211c57 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -3,7 +3,10 @@
 
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/tlb.h>
 
 extern kmem_cache_t *zero_cache;
 
@@ -40,8 +43,6 @@ pmd_free(pmd_t *pmd)
 	kmem_cache_free(zero_cache, pmd);
 }
 
-#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
-
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
 #define pmd_populate(mm, pmd, pte_page) \
 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
@@ -62,15 +63,57 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return NULL;
 }
 
-
-static inline void
-pte_free_kernel(pte_t *pte)
+
+static inline void pte_free_kernel(pte_t *pte)
 {
 	kmem_cache_free(zero_cache, pte);
 }
 
 #define pte_free(pte_page)	pte_free_kernel(page_address(pte_page))
 
-#define __pte_free_tlb(tlb, pte)	pte_free(pte)
+
+struct pte_freelist_batch
+{
+	struct rcu_head	rcu;
+	unsigned int	index;
+	struct page *	pages[0];
+};
+
+#define PTE_FREELIST_SIZE	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
+				 sizeof(struct page *))
+
+extern void pte_free_now(struct page *ptepage);
+extern void pte_free_submit(struct pte_freelist_batch *batch);
+
+DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+{
+	/* This is safe as we are holding page_table_lock */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pte_free(ptepage);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pte_free_now(ptepage);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->pages[(*batchp)->index++] = ptepage;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
+#define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))
 
 #define check_pgt_cache()	do { } while (0)
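For orientation (not part of the patch): generic mm code reaches these hooks through the `pte_free_tlb()`/`pmd_free_tlb()` wrappers in include/asm-generic/tlb.h, and the fast path at the top of `__pte_free_tlb()` bypasses the RCU machinery whenever no other CPU can be walking this mm's page tables (a single user, or a `cpu_vm_mask` equal to just the local CPU), since a concurrent `hash_page()` on another CPU is the only racer being defended against. A rough worked example of the batch capacity, assuming 4 KB pages and 8-byte pointers (the exact header size depends on `struct rcu_head`):

```c
/* Back-of-the-envelope batch sizing -- illustrative numbers only.
 *
 *   PTE_FREELIST_SIZE = (PAGE_SIZE - sizeof(struct pte_freelist_batch))
 *                        / sizeof(struct page *)
 *
 * With PAGE_SIZE = 4096 and a header of roughly 32-40 bytes (rcu_head
 * plus index, padded), that is about (4096 - 40) / 8 ~ 507 entries: one
 * GFP_ATOMIC page buffers around five hundred page-table pages before
 * a single RCU callback is queued for the whole batch.
 */
```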
