From 41d54d3bf83f62d3ff5948cb788fe6007e66a0d0 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 3 Jul 2008 09:14:26 -0500
Subject: slub: Do not use 192 byte sized cache if minimum alignment is 128 byte

The 192 byte cache is not necessary if we have a basic alignment of 128
bytes. If it were used, the 192 byte size would be aligned to the next
128 byte boundary, resulting in another 256 byte cache. Two 256 byte
kmalloc caches cause sysfs to complain about a duplicate entry.

MIPS needs 128 byte aligned kmalloc caches and spits out warnings on boot
without this patch.

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 include/linux/slub_def.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux/slub_def.h')

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 71e43a12ebbb..cef6f8fddd7d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -137,10 +137,12 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
+#if KMALLOC_MIN_SIZE <= 64
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
 		return 2;
+#endif
 	if (size <= 8) return 3;
 	if (size <= 16) return 4;
 	if (size <= 32) return 5;
--
cgit v1.2.3
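The arithmetic behind the new #if guard is easy to check in isolation. Below is a
minimal userspace sketch (not kernel code): it assumes a 128 byte minimum kmalloc
alignment, as on MIPS, and uses a local ALIGN_UP macro (a stand-in equivalent in
spirit to the kernel's ALIGN() rounding) to show that a 192 byte object rounds up
to 256 bytes, the size of the regular 2^8 cache, so a dedicated 192 byte cache
could only duplicate an existing one.

#include <stdio.h>

/* Round size up to the next multiple of align (align must be a power of two);
 * a local illustrative helper, not the kernel's own macro. */
#define ALIGN_UP(size, align)	(((size) + (align) - 1) & ~((align) - 1))

int main(void)
{
	/* Assumption for this sketch: a 128 byte minimum kmalloc alignment, as on MIPS. */
	unsigned long min_align = 128;

	/* 192 rounds up to 256, the size of the regular 2^8 kmalloc cache, so a
	 * dedicated 192 byte cache would only duplicate the existing 256 byte one
	 * and produce the duplicate sysfs entry the commit message describes. */
	printf("192 -> %lu\n", ALIGN_UP(192UL, min_align));	/* prints 192 -> 256 */

	/* By the same arithmetic, 96 rounds up to 128, the size of the 2^7 cache. */
	printf(" 96 -> %lu\n", ALIGN_UP(96UL, min_align));	/* prints  96 -> 128 */

	return 0;
}

With the guard in place, kmalloc_index() can no longer return indices 1 and 2 on
configurations whose minimum size exceeds 64 bytes, so the 96 and 192 byte special
cases are simply compiled out and the duplicate sysfs entry never appears.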
From cde53535991fbb5c34a1566f25955297c1487b8d Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 4 Jul 2008 09:59:22 -0700
Subject: Christoph has moved

Remove all clameter@sgi.com addresses from the kernel tree since they will
become invalid on June 27th. Change my maintainer email address for the
slab allocators to cl@linux-foundation.org (which will be the new email
address for the future).

Signed-off-by: Christoph Lameter
Signed-off-by: Christoph Lameter
Cc: Pekka Enberg
Cc: Stephen Rothwell
Cc: Matt Mackall
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 Documentation/vm/slabinfo.c  | 4 ++--
 Documentation/vm/slub.txt    | 2 +-
 MAINTAINERS                  | 2 +-
 include/asm-generic/atomic.h | 2 +-
 include/linux/slab.h         | 2 +-
 include/linux/slub_def.h     | 2 +-
 kernel/workqueue.c           | 2 +-
 lib/radix-tree.c             | 2 +-
 mm/allocpercpu.c             | 2 +-
 mm/migrate.c                 | 2 +-
 mm/slub.c                    | 2 +-
 mm/sparse-vmemmap.c          | 2 +-
 12 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'include/linux/slub_def.h')

diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index e4230ed16ee7..df3227605d59 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -1,7 +1,7 @@
 /*
  * Slabinfo: Tool to get reports about slabs
  *
- * (C) 2007 sgi, Christoph Lameter
+ * (C) 2007 sgi, Christoph Lameter
  *
  * Compile by:
  *
@@ -99,7 +99,7 @@ void fatal(const char *x, ...)
 
 void usage(void)
 {
-	printf("slabinfo 5/7/2007. (c) 2007 sgi. clameter@sgi.com\n\n"
+	printf("slabinfo 5/7/2007. (c) 2007 sgi.\n\n"
 		"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
 		"-a|--aliases           Show aliases\n"
 		"-A|--activity          Most active slabs first\n"
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 7c13f22a0c9e..bb1f5c6e28b3 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -266,4 +266,4 @@ of other objects.
 
 	slub_debug=FZ,dentry
 
-Christoph Lameter, , May 30, 2007
+Christoph Lameter, May 30, 2007
diff --git a/MAINTAINERS b/MAINTAINERS
index 460e699fd280..13b7b19692e6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3672,7 +3672,7 @@ S: Maintained
 
 SLAB ALLOCATOR
 P: Christoph Lameter
-M: clameter@sgi.com
+M: cl@linux-foundation.org
 P: Pekka Enberg
 M: penberg@cs.helsinki.fi
 P: Matt Mackall
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 85fd0aa27a8c..4ec0a296bdec 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -2,7 +2,7 @@
 #define _ASM_GENERIC_ATOMIC_H
 /*
  * Copyright (C) 2005 Silicon Graphics, Inc.
- * Christoph Lameter
+ * Christoph Lameter
  *
  * Allows to provide arch independent atomic definitions without the need to
  * edit all arch specific atomic.h files.
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c2ad35016599..9aa90a6f20e0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1,7 +1,7 @@
 /*
  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
  *
- * (C) SGI 2006, Christoph Lameter
+ * (C) SGI 2006, Christoph Lameter
  * Cleaned up and restructured to ease the addition of alternative
  * implementations of SLAB allocators.
  */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cef6f8fddd7d..d117ea2825a9 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -4,7 +4,7 @@
 /*
  * SLUB : A Slab allocator without object queues.
  *
- * (C) 2007 SGI, Christoph Lameter
+ * (C) 2007 SGI, Christoph Lameter
  */
 #include
 #include
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29fc39f1029c..ce7799540c91 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -13,7 +13,7 @@
  * Kai Petzke
  * Theodore Ts'o
  *
- * Made to use alloc_percpu by Christoph Lameter .
+ * Made to use alloc_percpu by Christoph Lameter.
  */
 
 #include
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 169a2f8dabcc..56ec21a7f73d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter
+ * Copyright (C) 2005 SGI, Christoph Lameter
  * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index f4026bae6eed..05f2b4009ccc 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -1,7 +1,7 @@
 /*
  * linux/mm/allocpercpu.c
  *
- * Separated from slab.c August 11, 2006 Christoph Lameter
+ * Separated from slab.c August 11, 2006 Christoph Lameter
  */
 #include
 #include
diff --git a/mm/migrate.c b/mm/migrate.c
index 112bcaeaa104..55bd355d170d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -9,7 +9,7 @@
  * IWAMOTO Toshihiro
  * Hirokazu Takahashi
  * Dave Hansen
- * Christoph Lameter
+ * Christoph Lameter
  */
 
 #include
diff --git a/mm/slub.c b/mm/slub.c
index 2c9a62d1f429..1a427c0ae83b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 99c4f36eb8a3..a91b5f8fcaf6 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -1,7 +1,7 @@
 /*
  * Virtual Memory Map support
  *
- * (C) 2007 sgi. Christoph Lameter .
+ * (C) 2007 sgi. Christoph Lameter.
  *
  * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
  * virt_to_page, page_address() to be implemented as a base offset
--
cgit v1.2.3