From e47c897a29491ade20b27612fdd3107c39a07357 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Fri, 23 Jan 2026 07:52:44 +0100 Subject: slab: add sheaves to most caches In the first step to replace cpu (partial) slabs with sheaves, enable sheaves for almost all caches. Treat args->sheaf_capacity as a minimum, and calculate sheaf capacity with a formula that roughly follows the formula for number of objects in cpu partial slabs in set_cpu_partial(). This should achieve roughly similar contention on the barn spin lock as there's currently for node list_lock without sheaves, to make benchmarking results comparable. It can be further tuned later. Don't enable sheaves for bootstrap caches as that wouldn't work. In order to recognize them by SLAB_NO_OBJ_EXT, make sure the flag exists even for !CONFIG_SLAB_OBJ_EXT. This limitation will be lifted for kmalloc caches after the necessary bootstrapping changes. Also do not enable sheaves for SLAB_NOLEAKTRACE caches to avoid recursion with kmemleak tracking (thanks to Breno Leitao). Reviewed-by: Suren Baghdasaryan Reviewed-by: Harry Yoo Reviewed-by: Hao Li Tested-by: Breno Leitao Reviewed-by: Liam R. Howlett Tested-by: Zhao Liu Signed-off-by: Vlastimil Babka --- include/linux/slab.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 2482992248dc..2682ee57ec90 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -57,9 +57,7 @@ enum _slab_flag_bits { #endif _SLAB_OBJECT_POISON, _SLAB_CMPXCHG_DOUBLE, -#ifdef CONFIG_SLAB_OBJ_EXT _SLAB_NO_OBJ_EXT, -#endif _SLAB_FLAGS_LAST_BIT }; @@ -238,11 +236,7 @@ enum _slab_flag_bits { #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ /* Slab created using create_boot_cache */ -#ifdef CONFIG_SLAB_OBJ_EXT #define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT) -#else -#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED -#endif /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. -- cgit v1.2.3 From a13b68d79d5caa5ec0d34b4c0fb2dedf3259fc32 Mon Sep 17 00:00:00 2001 From: Harry Yoo Date: Tue, 13 Jan 2026 15:18:38 +0900 Subject: mm/slab: allow specifying free pointer offset when using constructor When a slab cache has a constructor, the free pointer is placed after the object because certain fields must not be overwritten even after the object is freed. However, some fields that the constructor does not initialize can safely be overwritten after free. Allow specifying the free pointer offset within the object, reducing the overall object size when some fields can be reused for the free pointer. Adjust the document accordingly. Signed-off-by: Harry Yoo Link: https://patch.msgid.link/20260113061845.159790-3-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka --- include/linux/slab.h | 30 ++++++++++++++++-------------- mm/slab_common.c | 2 +- mm/slub.c | 6 ++++-- 3 files changed, 21 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 2482992248dc..4554c04a9bd7 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -299,24 +299,26 @@ struct kmem_cache_args { unsigned int usersize; /** * @freeptr_offset: Custom offset for the free pointer - * in &SLAB_TYPESAFE_BY_RCU caches + * in caches with &SLAB_TYPESAFE_BY_RCU or @ctor * - * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer - * outside of the object. This might cause the object to grow in size. 
- * Cache creators that have a reason to avoid this can specify a custom - * free pointer offset in their struct where the free pointer will be - * placed. + * By default, &SLAB_TYPESAFE_BY_RCU and @ctor caches place the free + * pointer outside of the object. This might cause the object to grow + * in size. Cache creators that have a reason to avoid this can specify + * a custom free pointer offset in their data structure where the free + * pointer will be placed. * - * Note that placing the free pointer inside the object requires the - * caller to ensure that no fields are invalidated that are required to - * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for - * details). + * For caches with &SLAB_TYPESAFE_BY_RCU, the caller must ensure that + * the free pointer does not overlay fields required to guard against + * object recycling (See &SLAB_TYPESAFE_BY_RCU for details). * - * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset - * is specified, %use_freeptr_offset must be set %true. + * For caches with @ctor, the caller must ensure that the free pointer + * does not overlay fields initialized by the constructor. + * + * Currently, only caches with &SLAB_TYPESAFE_BY_RCU or @ctor + * may specify @freeptr_offset. * - * Note that @ctor currently isn't supported with custom free pointers - * as a @ctor requires an external free pointer. + * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset + * is specified, @use_freeptr_offset must be set %true. */ unsigned int freeptr_offset; /** diff --git a/mm/slab_common.c b/mm/slab_common.c index b6836f8500b6..027bf64c2e35 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -239,7 +239,7 @@ static struct kmem_cache *create_cache(const char *name, err = -EINVAL; if (args->use_freeptr_offset && (args->freeptr_offset >= object_size || - !(flags & SLAB_TYPESAFE_BY_RCU) || + (!(flags & SLAB_TYPESAFE_BY_RCU) && !args->ctor) || !IS_ALIGNED(args->freeptr_offset, __alignof__(freeptr_t)))) goto out; diff --git a/mm/slub.c b/mm/slub.c index 2c000dddcf74..1b7ed91a2f15 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -7998,7 +7998,8 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) s->inuse = size; if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || - (flags & SLAB_POISON) || s->ctor || + (flags & SLAB_POISON) || + (s->ctor && !args->use_freeptr_offset) || ((flags & SLAB_RED_ZONE) && (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { /* @@ -8019,7 +8020,8 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) */ s->offset = size; size += sizeof(void *); - } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { + } else if (((flags & SLAB_TYPESAFE_BY_RCU) || s->ctor) && + args->use_freeptr_offset) { s->offset = args->freeptr_offset; } else { /* -- cgit v1.2.3 From fab0694646d75d5b03e9898ffb85899fb23320ea Mon Sep 17 00:00:00 2001 From: Harry Yoo Date: Tue, 13 Jan 2026 15:18:44 +0900 Subject: mm/slab: move [__]ksize and slab_ksize() to mm/slub.c To access SLUB's internal implementation details beyond cache flags in ksize(), move __ksize(), ksize(), and slab_ksize() to mm/slub.c. 
[vbabka@suse.cz: also make __ksize() static and move its kerneldoc to ksize() ] Signed-off-by: Harry Yoo Link: https://patch.msgid.link/20260113061845.159790-9-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka --- include/linux/slab.h | 1 - mm/slab.h | 27 ----------------- mm/slab_common.c | 61 ------------------------------------- mm/slub.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 89 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 4554c04a9bd7..93e367b6a5f6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -509,7 +509,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size void kfree(const void *objp); void kfree_nolock(const void *objp); void kfree_sensitive(const void *objp); -size_t __ksize(const void *objp); DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) diff --git a/mm/slab.h b/mm/slab.h index d5da2f69e2d5..43b7c5ababb5 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -661,33 +661,6 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void kvfree_rcu_cb(struct rcu_head *head); -size_t __ksize(const void *objp); - -static inline size_t slab_ksize(const struct kmem_cache *s) -{ -#ifdef CONFIG_SLUB_DEBUG - /* - * Debugging requires use of the padding between object - * and whatever may come after it. - */ - if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) - return s->object_size; -#endif - if (s->flags & SLAB_KASAN) - return s->object_size; - /* - * If we have the need to store the freelist pointer - * back there or track user information then we can - * only use the space before that information. - */ - if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) - return s->inuse; - /* - * Else we can use all the padding etc for the allocation - */ - return s->size; -} - static inline unsigned int large_kmalloc_order(const struct page *page) { return page[1].flags.f & 0xff; diff --git a/mm/slab_common.c b/mm/slab_common.c index 027bf64c2e35..b2db8f8f3cf0 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1021,43 +1021,6 @@ void __init create_kmalloc_caches(void) 0, SLAB_NO_MERGE, NULL); } -/** - * __ksize -- Report full size of underlying allocation - * @object: pointer to the object - * - * This should only be used internally to query the true size of allocations. - * It is not meant to be a way to discover the usable size of an allocation - * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond - * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS, - * and/or FORTIFY_SOURCE. 
- * - * Return: size of the actual memory used by @object in bytes - */ -size_t __ksize(const void *object) -{ - const struct page *page; - const struct slab *slab; - - if (unlikely(object == ZERO_SIZE_PTR)) - return 0; - - page = virt_to_page(object); - - if (unlikely(PageLargeKmalloc(page))) - return large_kmalloc_size(page); - - slab = page_slab(page); - /* Delete this after we're sure there are no users */ - if (WARN_ON(!slab)) - return page_size(page); - -#ifdef CONFIG_SLUB_DEBUG - skip_orig_size_check(slab->slab_cache, object); -#endif - - return slab_ksize(slab->slab_cache); -} - gfp_t kmalloc_fix_flags(gfp_t flags) { gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; @@ -1273,30 +1236,6 @@ void kfree_sensitive(const void *p) } EXPORT_SYMBOL(kfree_sensitive); -size_t ksize(const void *objp) -{ - /* - * We need to first check that the pointer to the object is valid. - * The KASAN report printed from ksize() is more useful, then when - * it's printed later when the behaviour could be undefined due to - * a potential use-after-free or double-free. - * - * We use kasan_check_byte(), which is supported for the hardware - * tag-based KASAN mode, unlike kasan_check_read/write(). - * - * If the pointed to memory is invalid, we return 0 to avoid users of - * ksize() writing to and potentially corrupting the memory region. - * - * We want to perform the check before __ksize(), to avoid potentially - * crashing in __ksize() due to accessing invalid metadata. - */ - if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp)) - return 0; - - return kfence_ksize(objp) ?: __ksize(objp); -} -EXPORT_SYMBOL(ksize); - #ifdef CONFIG_BPF_SYSCALL #include diff --git a/mm/slub.c b/mm/slub.c index 7b6d8df06ad9..782685433580 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -7028,6 +7028,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x) } EXPORT_SYMBOL(kmem_cache_free); +static inline size_t slab_ksize(const struct kmem_cache *s) +{ +#ifdef CONFIG_SLUB_DEBUG + /* + * Debugging requires use of the padding between object + * and whatever may come after it. + */ + if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) + return s->object_size; +#endif + if (s->flags & SLAB_KASAN) + return s->object_size; + /* + * If we have the need to store the freelist pointer + * back there or track user information then we can + * only use the space before that information. + */ + if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) + return s->inuse; + /* + * Else we can use all the padding etc for the allocation + */ + return s->size; +} + +static size_t __ksize(const void *object) +{ + const struct page *page; + const struct slab *slab; + + if (unlikely(object == ZERO_SIZE_PTR)) + return 0; + + page = virt_to_page(object); + + if (unlikely(PageLargeKmalloc(page))) + return large_kmalloc_size(page); + + slab = page_slab(page); + /* Delete this after we're sure there are no users */ + if (WARN_ON(!slab)) + return page_size(page); + +#ifdef CONFIG_SLUB_DEBUG + skip_orig_size_check(slab->slab_cache, object); +#endif + + return slab_ksize(slab->slab_cache); +} + +/** + * ksize -- Report full size of underlying allocation + * @objp: pointer to the object + * + * This should only be used internally to query the true size of allocations. + * It is not meant to be a way to discover the usable size of an allocation + * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond + * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS, + * and/or FORTIFY_SOURCE. 
+ * + * Return: size of the actual memory used by @objp in bytes + */ +size_t ksize(const void *objp) +{ + /* + * We need to first check that the pointer to the object is valid. + * The KASAN report printed from ksize() is more useful, then when + * it's printed later when the behaviour could be undefined due to + * a potential use-after-free or double-free. + * + * We use kasan_check_byte(), which is supported for the hardware + * tag-based KASAN mode, unlike kasan_check_read/write(). + * + * If the pointed to memory is invalid, we return 0 to avoid users of + * ksize() writing to and potentially corrupting the memory region. + * + * We want to perform the check before __ksize(), to avoid potentially + * crashing in __ksize() due to accessing invalid metadata. + */ + if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp)) + return 0; + + return kfence_ksize(objp) ?: __ksize(objp); +} +EXPORT_SYMBOL(ksize); + static void free_large_kmalloc(struct page *page, void *object) { unsigned int order = compound_order(page); -- cgit v1.2.3 From a77d6d338685025cbf84f6e3abd92a8e59a4d894 Mon Sep 17 00:00:00 2001 From: Harry Yoo Date: Tue, 13 Jan 2026 15:18:45 +0900 Subject: mm/slab: place slabobj_ext metadata in unused space within s->size When a cache has high s->align value and s->object_size is not aligned to it, each object ends up with some unused space because of alignment. If this wasted space is big enough, we can use it to store the slabobj_ext metadata instead of wasting it. On my system, this happens with caches like kmem_cache, mm_struct, pid, task_struct, sighand_cache, xfs_inode, and others. To place the slabobj_ext metadata within each object, the existing slab_obj_ext() logic can still be used by setting: - slab->obj_exts = slab_address(slab) + (slabobj_ext offset) - stride = s->size slab_obj_ext() doesn't need know where the metadata is stored, so this method works without adding extra overhead to slab_obj_ext(). A good example benefiting from this optimization is xfs_inode (object_size: 992, align: 64). To measure memory savings, 2 millions of files were created on XFS. [ MEMCG=y, MEM_ALLOC_PROFILING=n ] Before patch (creating ~2.64M directories on xfs): Slab: 5175976 kB SReclaimable: 3837524 kB SUnreclaim: 1338452 kB After patch (creating ~2.64M directories on xfs): Slab: 5152912 kB SReclaimable: 3838568 kB SUnreclaim: 1314344 kB (-23.54 MiB) Enjoy the memory savings! Suggested-by: Vlastimil Babka Signed-off-by: Harry Yoo Link: https://patch.msgid.link/20260113061845.159790-10-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka --- include/linux/slab.h | 9 +++++ mm/slab_common.c | 7 ++-- mm/slub.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 101 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 93e367b6a5f6..34db237319c1 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -59,6 +59,9 @@ enum _slab_flag_bits { _SLAB_CMPXCHG_DOUBLE, #ifdef CONFIG_SLAB_OBJ_EXT _SLAB_NO_OBJ_EXT, +#endif +#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT) + _SLAB_OBJ_EXT_IN_OBJ, #endif _SLAB_FLAGS_LAST_BIT }; @@ -244,6 +247,12 @@ enum _slab_flag_bits { #define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED #endif +#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT) +#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ) +#else +#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_UNUSED +#endif + /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 
* diff --git a/mm/slab_common.c b/mm/slab_common.c index b2db8f8f3cf0..886d02fa94fb 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -43,10 +43,13 @@ DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; /* - * Set of flags that will prevent slab merging + * Set of flags that will prevent slab merging. + * Any flag that adds per-object metadata should be included, + * since slab merging can update s->inuse that affects the metadata layout. */ #define SLAB_NEVER_MERGE (SLAB_DEBUG_FLAGS | SLAB_TYPESAFE_BY_RCU | \ - SLAB_NOLEAKTRACE | SLAB_FAILSLAB | SLAB_NO_MERGE) + SLAB_NOLEAKTRACE | SLAB_FAILSLAB | SLAB_NO_MERGE | \ + SLAB_OBJ_EXT_IN_OBJ) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ SLAB_CACHE_DMA32 | SLAB_ACCOUNT) diff --git a/mm/slub.c b/mm/slub.c index 782685433580..0805c09d4b55 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -972,6 +972,46 @@ static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab) { return false; } + +#endif + +#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT) +static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab) +{ + /* + * Note we cannot rely on the SLAB_OBJ_EXT_IN_OBJ flag here and need to + * check the stride. A cache can have SLAB_OBJ_EXT_IN_OBJ set, but + * allocations within_slab_leftover are preferred. And those may be + * possible or not depending on the particular slab's size. + */ + return obj_exts_in_slab(s, slab) && + (slab_get_stride(slab) == s->size); +} + +static unsigned int obj_exts_offset_in_object(struct kmem_cache *s) +{ + unsigned int offset = get_info_end(s); + + if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) + offset += sizeof(struct track) * 2; + + if (slub_debug_orig_size(s)) + offset += sizeof(unsigned long); + + offset += kasan_metadata_size(s, false); + + return offset; +} +#else +static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab) +{ + return false; +} + +static inline unsigned int obj_exts_offset_in_object(struct kmem_cache *s) +{ + return 0; +} #endif #ifdef CONFIG_SLUB_DEBUG @@ -1272,6 +1312,9 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) off += kasan_metadata_size(s, false); + if (obj_exts_in_object(s, slab)) + off += sizeof(struct slabobj_ext); + if (off != size_from_object(s)) /* Beginning of the filler is the free pointer */ print_section(KERN_ERR, "Padding ", p + off, @@ -1453,8 +1496,11 @@ skip_bug_print: * between metadata and the next object, independent of alignment. * - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set. * [Final alignment padding] - * - Any bytes added by ALIGN(size, s->align) to reach s->size. - * - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set. + * - Bytes added by ALIGN(size, s->align) to reach s->size. + * - When the padding is large enough, it can be used to store + * struct slabobj_ext for accounting metadata (obj_exts_in_object()). + * - The remaining bytes (if any) are filled with 0x5a (POISON_INUSE) + * when SLAB_POISON is set. * * Notes: * - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE. 
@@ -1485,6 +1531,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) off += kasan_metadata_size(s, false); + if (obj_exts_in_object(s, slab)) + off += sizeof(struct slabobj_ext); + if (size_from_object(s) == off) return 1; @@ -1510,7 +1559,7 @@ slab_pad_check(struct kmem_cache *s, struct slab *slab) length = slab_size(slab); end = start + length; - if (obj_exts_in_slab(s, slab)) { + if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) { remainder = length; remainder -= obj_exts_offset_in_slab(s, slab); remainder -= obj_exts_size_in_slab(slab); @@ -2384,6 +2433,24 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab) #endif slab->obj_exts = obj_exts; slab_set_stride(slab, sizeof(struct slabobj_ext)); + } else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) { + unsigned int offset = obj_exts_offset_in_object(s); + + obj_exts = (unsigned long)slab_address(slab); + obj_exts += s->red_left_pad; + obj_exts += offset; + + get_slab_obj_exts(obj_exts); + for_each_object(addr, s, slab_address(slab), slab->objects) + memset(kasan_reset_tag(addr) + offset, 0, + sizeof(struct slabobj_ext)); + put_slab_obj_exts(obj_exts); + +#ifdef CONFIG_MEMCG + obj_exts |= MEMCG_DATA_OBJEXTS; +#endif + slab->obj_exts = obj_exts; + slab_set_stride(slab, s->size); } } @@ -7028,8 +7095,10 @@ void kmem_cache_free(struct kmem_cache *s, void *x) } EXPORT_SYMBOL(kmem_cache_free); -static inline size_t slab_ksize(const struct kmem_cache *s) +static inline size_t slab_ksize(struct slab *slab) { + struct kmem_cache *s = slab->slab_cache; + #ifdef CONFIG_SLUB_DEBUG /* * Debugging requires use of the padding between object @@ -7042,11 +7111,13 @@ static inline size_t slab_ksize(const struct kmem_cache *s) return s->object_size; /* * If we have the need to store the freelist pointer - * back there or track user information then we can + * or any other metadata back there then we can * only use the space before that information. */ if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) return s->inuse; + else if (obj_exts_in_object(s, slab)) + return s->inuse; /* * Else we can use all the padding etc for the allocation */ @@ -7055,8 +7126,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s) static size_t __ksize(const void *object) { - const struct page *page; - const struct slab *slab; + struct page *page; + struct slab *slab; if (unlikely(object == ZERO_SIZE_PTR)) return 0; @@ -7075,7 +7146,7 @@ static size_t __ksize(const void *object) skip_orig_size_check(slab->slab_cache, object); #endif - return slab_ksize(slab->slab_cache); + return slab_ksize(slab); } /** @@ -8199,6 +8270,7 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) { slab_flags_t flags = s->flags; unsigned int size = s->object_size; + unsigned int aligned_size; unsigned int order; /* @@ -8308,7 +8380,13 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) * offset 0. In order to align the objects we have to simply size * each object to conform to the alignment. */ - size = ALIGN(size, s->align); + aligned_size = ALIGN(size, s->align); +#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT) + if (aligned_size - size >= sizeof(struct slabobj_ext)) + s->flags |= SLAB_OBJ_EXT_IN_OBJ; +#endif + size = aligned_size; + s->size = size; s->reciprocal_size = reciprocal_value(size); order = calculate_order(size); -- cgit v1.2.3
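A rough worked check of the xfs_inode case above, as a sketch only: assuming a 64-bit kernel with MEMCG=y and MEM_ALLOC_PROFILING=n, struct slabobj_ext holds a single obj_cgroup pointer, i.e. 8 bytes.

    object_size = 992, align = 64
    ALIGN(992, 64) = 1024                 -> 32 bytes of alignment padding per object
    32 >= sizeof(struct slabobj_ext) = 8  -> SLAB_OBJ_EXT_IN_OBJ gets set
    ~2.64M xfs inodes * 8 bytes           ~= 20 MiB no longer spent on separately
                                             allocated obj_exts vectors, in the same
                                             ballpark as the reported 23.54 MiB
                                             SUnreclaim reduction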
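A minimal usage sketch of the freeptr_offset-with-constructor support added earlier in the series, using the kmem_cache_args form of kmem_cache_create(). struct foo, foo_ctor() and foo_cache_init() are made up for illustration; the only requirements are that the reused field is not initialized by the constructor, lies within the object, and is aligned to freeptr_t:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/errno.h>

struct foo {
	spinlock_t lock;	/* initialized by the constructor */
	struct list_head node;	/* initialized by the constructor */
	u64 scratch;		/* not touched by the ctor, can hold the free pointer */
};

static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	spin_lock_init(&f->lock);
	INIT_LIST_HEAD(&f->node);
}

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	struct kmem_cache_args args = {
		.ctor = foo_ctor,
		/* inside the object and aligned to freeptr_t */
		.freeptr_offset = offsetof(struct foo, scratch),
		.use_freeptr_offset = true,
	};

	/* the object no longer grows by sizeof(void *) for an external free pointer */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), &args, 0);
	return foo_cache ? 0 : -ENOMEM;
}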