author	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-11 14:12:50 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-11 14:12:50 -0800
commit	148f95f75c513936d466bcc7e6bf73298da2212b (patch)
tree	be976c99c7d0c2446e7653b06ae82e4b59973f95 /include/linux
parent	41f1a08645abb5ef7d2a3ed8835c747334878774 (diff)
parent	815c8e35511d0b9a214e9f644983fe477af9d5cb (diff)
Merge tag 'slab-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - The percpu sheaves caching layer was introduced as opt-in in 6.18 and
   now we enable it for all caches and remove the previous cpu (partial)
   slab caching mechanism. Besides the lower locking overhead and much
   more likely fastpath when freeing, this removes the rather complicated
   code related to the cpu slab lockless fastpaths (using
   this_cpu_try_cmpxchg128/64) and all its complications for PREEMPT_RT
   or kmalloc_nolock(). The lockless slab freelist+counters update
   operation using try_cmpxchg128/64 remains and is crucial for freeing
   remote NUMA objects, and to allow flushing objects from sheaves to
   slabs mostly without the node list_lock (Vlastimil Babka)

 - Eliminate slabobj_ext metadata overhead when possible. Instead of
   using kmalloc() to allocate the array for memcg and/or allocation
   profiling tag pointers, use leftover space in a slab or per-object
   padding due to alignment (Harry Yoo)

 - Various followup improvements to the above (Hao Li)

* tag 'slab-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (39 commits)
  slub: let need_slab_obj_exts() return false if SLAB_NO_OBJ_EXT is set
  mm/slab: only allow SLAB_OBJ_EXT_IN_OBJ for unmergeable caches
  mm/slab: place slabobj_ext metadata in unused space within s->size
  mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
  mm/slab: save memory by allocating slabobj_ext array from leftover
  mm/memcontrol,alloc_tag: handle slabobj_ext access under KASAN poison
  mm/slab: use stride to access slabobj_ext
  mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
  ext4: specify the free pointer offset for ext4_inode_cache
  mm/slab: allow specifying free pointer offset when using constructor
  mm/slab: use unsigned long for orig_size to ensure proper metadata align
  slub: clarify object field layout comments
  mm/slab: avoid allocating slabobj_ext array from its own slab
  slub: avoid list_lock contention from __refill_objects_any()
  mm/slub: cleanup and repurpose some stat items
  mm/slub: remove DEACTIVATE_TO_* stat items
  slab: remove frozen slab checks from __slab_free()
  slab: update overview comments
  slab: refill sheaves from all nodes
  slab: remove unused PREEMPT_RT specific macros
  ...
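One change in the list above ("mm/slab: allow specifying free pointer offset when using constructor", used by the ext4 inode cache commit) lets caches that have a constructor keep the free pointer inside the object via @freeptr_offset. The following is a minimal sketch of what a cache creator might now write, assuming the kmem_cache_create() variant that takes a struct kmem_cache_args; the foo_object struct, its fields, and foo_init_once() are hypothetical and only illustrate placing the free pointer over a field the constructor does not initialize:

	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Hypothetical cache object: struct and field names are illustrative only. */
	struct foo_object {
		spinlock_t lock;		/* initialized once by the constructor */
		struct list_head list;		/* initialized once by the constructor */
		unsigned long scratch;		/* not touched by the ctor: safe to overlay */
	};

	static void foo_init_once(void *obj)
	{
		struct foo_object *foo = obj;

		spin_lock_init(&foo->lock);
		INIT_LIST_HEAD(&foo->list);
	}

	static struct kmem_cache *foo_cache;

	static int __init foo_cache_init(void)
	{
		/*
		 * Place the free pointer over a field the constructor does not
		 * initialize, so the object need not grow to hold an external
		 * free pointer.
		 */
		struct kmem_cache_args args = {
			.ctor			= foo_init_once,
			.freeptr_offset		= offsetof(struct foo_object, scratch),
			.use_freeptr_offset	= true,
		};

		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo_object),
					      &args, SLAB_HWCACHE_ALIGN);
		return foo_cache ? 0 : -ENOMEM;
	}

As the reworked kernel-doc in the diff below notes, the chosen offset must not overlay fields the constructor initializes (or, for SLAB_TYPESAFE_BY_RCU caches, fields that guard against object recycling).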
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/slab.h	40
1 file changed, 22 insertions, 18 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7701b38cedec..c5fde8740281 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -58,8 +58,9 @@ enum _slab_flag_bits {
#endif
_SLAB_OBJECT_POISON,
_SLAB_CMPXCHG_DOUBLE,
-#ifdef CONFIG_SLAB_OBJ_EXT
_SLAB_NO_OBJ_EXT,
+#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
+ _SLAB_OBJ_EXT_IN_OBJ,
#endif
_SLAB_FLAGS_LAST_BIT
};
@@ -239,10 +240,12 @@ enum _slab_flag_bits {
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Slab created using create_boot_cache */
-#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
+
+#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
+#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ)
#else
-#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
+#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_UNUSED
#endif
/*
@@ -300,24 +303,26 @@ struct kmem_cache_args {
unsigned int usersize;
/**
* @freeptr_offset: Custom offset for the free pointer
- * in &SLAB_TYPESAFE_BY_RCU caches
+ * in caches with &SLAB_TYPESAFE_BY_RCU or @ctor
*
- * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
- * outside of the object. This might cause the object to grow in size.
- * Cache creators that have a reason to avoid this can specify a custom
- * free pointer offset in their struct where the free pointer will be
- * placed.
+ * By default, &SLAB_TYPESAFE_BY_RCU and @ctor caches place the free
+ * pointer outside of the object. This might cause the object to grow
+ * in size. Cache creators that have a reason to avoid this can specify
+ * a custom free pointer offset in their data structure where the free
+ * pointer will be placed.
*
- * Note that placing the free pointer inside the object requires the
- * caller to ensure that no fields are invalidated that are required to
- * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
- * details).
+ * For caches with &SLAB_TYPESAFE_BY_RCU, the caller must ensure that
+ * the free pointer does not overlay fields required to guard against
+ * object recycling (See &SLAB_TYPESAFE_BY_RCU for details).
*
- * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
- * is specified, %use_freeptr_offset must be set %true.
+ * For caches with @ctor, the caller must ensure that the free pointer
+ * does not overlay fields initialized by the constructor.
*
- * Note that @ctor currently isn't supported with custom free pointers
- * as a @ctor requires an external free pointer.
+ * Currently, only caches with &SLAB_TYPESAFE_BY_RCU or @ctor
+ * may specify @freeptr_offset.
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, @use_freeptr_offset must be set %true.
*/
unsigned int freeptr_offset;
/**
@@ -508,7 +513,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
void kfree(const void *objp);
void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
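The unchanged context at the end of this hunk shows the DEFINE_FREE() scope-based cleanup hooks for kfree() and kfree_sensitive(). For context, a minimal sketch of how such a hook is consumed through __free() from <linux/cleanup.h>; the parse_config() function and its logic are illustrative only:

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Hypothetical caller: name and validation logic are illustrative only. */
	static int parse_config(const char *src, size_t len)
	{
		/*
		 * buf is handed to kfree() automatically when it goes out of
		 * scope, including on the early-return error path below.
		 */
		char *buf __free(kfree) = kmalloc(len + 1, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		memcpy(buf, src, len);
		buf[len] = '\0';

		if (!strchr(buf, '='))
			return -EINVAL;	/* no explicit kfree() needed here */

		return 0;
	}

The DEFINE_FREE(kfree, ...) body shown in the diff guards with IS_ERR_OR_NULL(), so the cleanup is safe even when the pointer was never successfully assigned.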