| author | Christoph Lameter <clameter@sgi.com> | 2005-01-04 23:36:59 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-01-04 23:36:59 -0800 |
| commit | 76af7e63a1304bc7ecd93c24320315d2e12df197 | |
| tree | 4a9ddfaef44d20d5b0f2818665e4889957311d81 | |
| parent | 904e737b7ccca4eb807db430320f0fea7c2a6c9a | |
[PATCH] Make page allocator aware of requests for zeroed memory
This introduces __GFP_ZERO as an additional gfp_mask element to allow
requesting zeroed pages from the page allocator:
- Modifies the page allocator so that it zeroes memory if __GFP_ZERO is set
- Replaces explicit page zeroing after allocation with allocations that pass
__GFP_ZERO (a representative before/after is sketched below)
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
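To make the per-architecture hunks easier to read, here is a representative before/after fragment, modeled on the alpha pte_alloc_one_kernel() hunk below; the same transformation repeats, file by file, across the rest of the patch (illustrative snippet, not standalone code):

```c
/* Before: allocate a page, then zero it by hand. */
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
if (pte)
        clear_page(pte);

/* After: ask the page allocator for an already-zeroed page. */
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
```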
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/alpha/mm/init.c | 7 |
| -rw-r--r-- | arch/i386/mm/pgtable.c | 11 |
| -rw-r--r-- | arch/ppc/mm/pgtable.c | 7 |
| -rw-r--r-- | arch/sparc64/mm/init.c | 3 |
| -rw-r--r-- | arch/um/kernel/mem.c | 8 |
| -rw-r--r-- | drivers/block/pktcdvd.c | 4 |
| -rw-r--r-- | include/asm-alpha/pgalloc.h | 4 |
| -rw-r--r-- | include/asm-arm/pgalloc.h | 6 |
| -rw-r--r-- | include/asm-cris/pgalloc.h | 8 |
| -rw-r--r-- | include/asm-ia64/pgalloc.h | 16 |
| -rw-r--r-- | include/asm-m32r/pgalloc.h | 14 |
| -rw-r--r-- | include/asm-m68k/motorola_pgalloc.h | 6 |
| -rw-r--r-- | include/asm-mips/pgalloc.h | 4 |
| -rw-r--r-- | include/asm-parisc/pgalloc.h | 8 |
| -rw-r--r-- | include/asm-sh/pgalloc.h | 8 |
| -rw-r--r-- | include/asm-sh64/pgalloc.h | 12 |
| -rw-r--r-- | include/asm-sparc64/pgalloc.h | 3 |
| -rw-r--r-- | include/linux/gfp.h | 2 |
| -rw-r--r-- | kernel/profile.c | 12 |
| -rw-r--r-- | mm/memory.c | 3 |
| -rw-r--r-- | mm/page_alloc.c | 20 |
| -rw-r--r-- | mm/shmem.c | 8 |
22 files changed, 58 insertions, 116 deletions
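The allocator-side core of the change, as it appears in the mm/page_alloc.c hunk below, is a small helper that clears every constituent page of an allocation when __GFP_ZERO is passed; buffered_rmqueue() calls it right after prep_new_page():

```c
/* Clear each page of an order-N allocation. clear_highpage() also handles
 * highmem pages, so __GFP_ZERO can be combined with __GFP_HIGHMEM. */
static inline void prep_zero_page(struct page *page, int order)
{
        int i;

        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

        /* ... in buffered_rmqueue(), after prep_new_page(page, order): */
        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order);
```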
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 3ea81ca1c6d9..90752f6d8867 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -42,10 +42,9 @@ pgd_alloc(struct mm_struct *mm)
 {
         pgd_t *ret, *init;
 
-        ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+        ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
         init = pgd_offset(&init_mm, 0UL);
         if (ret) {
-                clear_page(ret);
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
                 memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                         (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
@@ -63,9 +62,7 @@ pgd_alloc(struct mm_struct *mm)
 pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
-                clear_page(pte);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return pte;
 }
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 83c201b0fd56..77ffaf855fd1 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -140,10 +140,7 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
-                clear_page(pte);
-        return pte;
+        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 }
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -151,12 +148,10 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
         struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
 #else
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
-        if (pte)
-                clear_highpage(pte);
         return pte;
 }
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index 4adc48c695d6..bda12bd5786f 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -85,8 +85,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
         pgd_t *ret;
 
-        if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
-                clear_pages(ret, PGDIR_ORDER);
+        ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
         return ret;
 }
@@ -102,7 +101,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
         extern void *early_get_page(void);
 
         if (mem_init_done) {
-                pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+                pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
                 if (pte) {
                         struct page *ptepage = virt_to_page(pte);
                         ptepage->mapping = (void *) mm;
@@ -110,8 +109,6 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
                 }
         } else
                 pte = (pte_t *)early_get_page();
-        if (pte)
-                clear_page(pte);
         return pte;
 }
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index e85060445191..ebc49a45102a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1687,13 +1687,12 @@ void __init mem_init(void)
          * Set up the zero page, mark it reserved, so that page count
          * is not manipulated when freeing the page from user ptes.
          */
-        mem_map_zero = alloc_pages(GFP_KERNEL, 0);
+        mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
         if (mem_map_zero == NULL) {
                 prom_printf("paging_init: Cannot alloc zero page.\n");
                 prom_halt();
         }
         SetPageReserved(mem_map_zero);
-        clear_page(page_address(mem_map_zero));
 
         codepages = (((unsigned long) _etext) - ((unsigned long) _start));
         codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index e907e0521564..4279161c8849 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -327,9 +327,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;
 
-        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
-                clear_page(pte);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return pte;
 }
 
@@ -337,9 +335,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-        if (pte)
-                clear_highpage(pte);
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
         return pte;
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 03a9388049a4..aa4a3065b361 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -135,12 +135,10 @@ static struct packet_data *pkt_alloc_packet_data(void)
                 goto no_bio;
 
         for (i = 0; i < PAGES_PER_PACKET; i++) {
-                pkt->pages[i] = alloc_page(GFP_KERNEL);
+                pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
                 if (!pkt->pages[i])
                         goto no_page;
         }
-        for (i = 0; i < PAGES_PER_PACKET; i++)
-                clear_page(page_address(pkt->pages[i]));
 
         spin_lock_init(&pkt->lock);
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index b671bb3c20b4..308475642913 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -40,9 +40,7 @@ pgd_free(pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (ret)
-                clear_page(ret);
+        pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return ret;
 }
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index 23eb321f4af2..e814f8144f8b 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -50,9 +50,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
         pte_t *pte;
 
-        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         if (pte) {
-                clear_page(pte);
                 clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
                 pte += PTRS_PER_PTE;
         }
@@ -65,10 +64,9 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
         if (pte) {
                 void *page = page_address(pte);
-                clear_page(page);
                 clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
         }
diff --git a/include/asm-cris/pgalloc.h b/include/asm-cris/pgalloc.h
index ca769e060298..7f313d563c28 100644
--- a/include/asm-cris/pgalloc.h
+++ b/include/asm-cris/pgalloc.h
@@ -24,18 +24,14 @@ extern inline void pgd_free (pgd_t *pgd)
 
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
-                clear_page(pte);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return pte;
 }
 
 extern inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-        if (pte)
-                clear_page(page_address(pte));
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
         return pte;
 }
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 2bd68d4aa77c..fda0dba0b187 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -61,9 +61,7 @@ pgd_alloc (struct mm_struct *mm)
         pgd_t *pgd = pgd_alloc_one_fast(mm);
 
         if (unlikely(pgd == NULL)) {
-                pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-                if (likely(pgd != NULL))
-                        clear_page(pgd);
+                pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         }
         return pgd;
 }
@@ -106,10 +104,8 @@ pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
 static inline pmd_t*
 pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-        pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 
-        if (likely(pmd != NULL))
-                clear_page(pmd);
         return pmd;
 }
@@ -140,20 +136,16 @@ pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
 static inline struct page *
 pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-        struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 
-        if (likely(pte != NULL))
-                clear_page(page_address(pte));
         return pte;
 }
 
 static inline pte_t *
 pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 
-        if (likely(pte != NULL))
-                clear_page(pte);
         return pte;
 }
diff --git a/include/asm-m32r/pgalloc.h b/include/asm-m32r/pgalloc.h
index 8d5a44485211..da6dd7bd7529 100644
--- a/include/asm-m32r/pgalloc.h
+++ b/include/asm-m32r/pgalloc.h
@@ -23,10 +23,7 @@ static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-
-        if (pgd)
-                clear_page(pgd);
+        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 
         return pgd;
 }
@@ -39,10 +36,7 @@ static __inline__ void pgd_free(pgd_t *pgd)
 static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
         unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
-
-        if (pte)
-                clear_page(pte);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 
         return pte;
 }
@@ -50,10 +44,8 @@ static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
         unsigned long address)
 {
-        struct page *pte = alloc_page(GFP_KERNEL);
+        struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 
-        if (pte)
-                clear_page(page_address(pte));
         return pte;
 }
diff --git a/include/asm-m68k/motorola_pgalloc.h b/include/asm-m68k/motorola_pgalloc.h
index dda9618adc07..5158412cd54d 100644
--- a/include/asm-m68k/motorola_pgalloc.h
+++ b/include/asm-m68k/motorola_pgalloc.h
@@ -12,9 +12,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
         pte_t *pte;
 
-        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         if (pte) {
-                clear_page(pte);
                 __flush_page_to_ram(pte);
                 flush_tlb_kernel_page(pte);
                 nocache_page(pte);
@@ -31,7 +30,7 @@ static inline void pte_free_kernel(pte_t *pte)
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
         pte_t *pte;
 
         if(!page)
@@ -39,7 +38,6 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 
         pte = kmap(page);
         if (pte) {
-                clear_page(pte);
                 __flush_page_to_ram(pte);
                 flush_tlb_kernel_page(pte);
                 nocache_page(pte);
diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h
index 4f96427f89f4..2d63f5ba403f 100644
--- a/include/asm-mips/pgalloc.h
+++ b/include/asm-mips/pgalloc.h
@@ -56,9 +56,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
 
-        pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PTE_ORDER);
-        if (pte)
-                clear_page(pte);
+        pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
         return pte;
 }
diff --git a/include/asm-parisc/pgalloc.h b/include/asm-parisc/pgalloc.h
index 53a6fec7df95..22b6ae6c45e0 100644
--- a/include/asm-parisc/pgalloc.h
+++ b/include/asm-parisc/pgalloc.h
@@ -120,18 +120,14 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline struct page *
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
-        if (likely(page != NULL))
-                clear_page(page_address(page));
+        struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return page;
 }
 
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (likely(pte != NULL))
-                clear_page(pte);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return pte;
 }
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index 8a1b3597ff30..1a998085f961 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -44,9 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
 
-        pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
-        if (pte)
-                clear_page(pte);
+        pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
         return pte;
 }
@@ -56,9 +54,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-        if (pte)
-                clear_page(page_address(pte));
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
         return pte;
 }
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index 02723085d0d9..b843ec2478b1 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -112,9 +112,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
 
-        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-        if (pte)
-                clear_page(pte);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT|__GFP_ZERO);
         return pte;
 }
 
@@ -123,9 +121,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-        if (pte)
-                clear_page(page_address(pte));
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
         return pte;
 }
 
@@ -150,9 +146,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         pmd_t *pmd;
-        pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pmd)
-                clear_page(pmd);
+        pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         return pmd;
 }
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index fa5c704dedaf..6a365ffe30dc 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -73,10 +73,9 @@ static __inline__ pgd_t *get_pgd_fast(void)
                 struct page *page;
 
                 preempt_enable();
-                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+                page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
                 if (page) {
                         ret = (struct page *)page_address(page);
-                        clear_page(ret);
                         page->lru.prev = (void *) 2UL;
                         preempt_disable();
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 600f83c80aad..823589766569 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -37,6 +37,7 @@ struct vm_area_struct;
 #define __GFP_NORETRY   0x1000  /* Do not retry. Might fail */
 #define __GFP_NO_GROW   0x2000  /* Slab internal usage */
 #define __GFP_COMP      0x4000  /* Add compound page metadata */
+#define __GFP_ZERO      0x8000  /* Return zeroed page on success */
 
 #define __GFP_BITS_SHIFT 16     /* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -52,6 +53,7 @@ struct vm_area_struct;
 #define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
 #define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS)
 #define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_HIGHZERO    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM | __GFP_ZERO)
 
 /* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
    platforms, used as appropriate on others */
diff --git a/kernel/profile.c b/kernel/profile.c
index ff62fa98328a..1bf1ec5fe56b 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -326,17 +326,15 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
                 node = cpu_to_node(cpu);
                 per_cpu(cpu_profile_flip, cpu) = 0;
                 if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-                        page = alloc_pages_node(node, GFP_KERNEL, 0);
+                        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                         if (!page)
                                 return NOTIFY_BAD;
-                        clear_highpage(page);
                         per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                 }
                 if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-                        page = alloc_pages_node(node, GFP_KERNEL, 0);
+                        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                         if (!page)
                                 goto out_free;
-                        clear_highpage(page);
                         per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
                 }
                 break;
@@ -510,16 +508,14 @@ static int __init create_hash_tables(void)
                 int node = cpu_to_node(cpu);
                 struct page *page;
 
-                page = alloc_pages_node(node, GFP_KERNEL, 0);
+                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                 if (!page)
                         goto out_cleanup;
-                clear_highpage(page);
                 per_cpu(cpu_profile_hits, cpu)[1] = (struct profile_hit *)page_address(page);
-                page = alloc_pages_node(node, GFP_KERNEL, 0);
+                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                 if (!page)
                         goto out_cleanup;
-                clear_highpage(page);
                 per_cpu(cpu_profile_hits, cpu)[0] = (struct profile_hit *)page_address(page);
         }
diff --git a/mm/memory.c b/mm/memory.c
index b0c61dac23e6..ad9407594ba5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1673,10 +1673,9 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                 if (unlikely(anon_vma_prepare(vma)))
                         goto no_mem;
-                page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+                page = alloc_page_vma(GFP_HIGHZERO, vma, addr);
                 if (!page)
                         goto no_mem;
-                clear_user_highpage(page, addr);
 
                 spin_lock(&mm->page_table_lock);
                 page_table = pte_offset_map(pmd, addr);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4fb4da2dd5e2..fd6f8014689b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -558,6 +558,13 @@ void fastcall free_cold_page(struct page *page)
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
  * or two.
  */
+static inline void prep_zero_page(struct page *page, int order)
+{
+        int i;
+
+        for(i = 0; i < (1 << order); i++)
+                clear_highpage(page + i);
+}
 
 static struct page *
 buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
@@ -593,6 +600,10 @@ buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
                 BUG_ON(bad_range(zone, page));
                 mod_page_state_zone(zone, pgalloc, 1 << order);
                 prep_new_page(page, order);
+
+                if (gfp_flags & __GFP_ZERO)
+                        prep_zero_page(page, order);
+
                 if (order && (gfp_flags & __GFP_COMP))
                         prep_compound_page(page, order);
         }
@@ -805,12 +816,9 @@ fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
          */
         BUG_ON(gfp_mask & __GFP_HIGHMEM);
 
-        page = alloc_pages(gfp_mask, 0);
-        if (page) {
-                void *address = page_address(page);
-                clear_page(address);
-                return (unsigned long) address;
-        }
+        page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
+        if (page)
+                return (unsigned long) page_address(page);
         return 0;
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index e1ff7d74c249..2a97375d5ece 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -369,9 +369,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
                 }
 
                 spin_unlock(&info->lock);
-                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
+                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
                 if (page) {
-                        clear_highpage(page);
                         page->nr_swapped = 0;
                 }
                 spin_lock(&info->lock);
@@ -910,7 +909,7 @@ shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
         pvma.vm_pgoff = idx;
         pvma.vm_end = PAGE_SIZE;
-        page = alloc_page_vma(gfp, &pvma, 0);
+        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
         mpol_free(pvma.vm_policy);
         return page;
 }
@@ -926,7 +925,7 @@ static inline struct page *
 shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info,
                  unsigned long idx)
 {
-        return alloc_page(gfp);
+        return alloc_page(gfp | __GFP_ZERO);
 }
 #endif
@@ -1135,7 +1134,6 @@ repeat:
                 info->alloced++;
                 spin_unlock(&info->lock);
-                clear_highpage(filepage);
                 flush_dcache_page(filepage);
                 SetPageUptodate(filepage);
         }
