Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 31 +++++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a8fcc7e6f25a..d4367f25b20d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2052,9 +2052,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	}
 }
 
-static inline void mark_failed_objexts_alloc(struct slab *slab)
+static inline bool mark_failed_objexts_alloc(struct slab *slab)
 {
-	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+	return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
 }
 
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
@@ -2076,7 +2076,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
-static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 			struct slabobj_ext *vec, unsigned int objects) {}
 
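
The new bool return value lets the caller tell "this thread installed the failure marker" apart from "another thread already populated obj_exts". Below is a minimal userspace model of that contract, with C11 atomics standing in for the kernel's cmpxchg(); the struct and flag value are illustrative stand-ins, not the kernel definitions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OBJEXTS_ALLOC_FAIL 0x1UL	/* stand-in for the kernel flag */

struct slab_model { _Atomic unsigned long obj_exts; };

static bool mark_failed_objexts_alloc(struct slab_model *slab)
{
	unsigned long expected = 0;

	/* Mirrors cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0:
	 * succeeds only if obj_exts was still empty. */
	return atomic_compare_exchange_strong(&slab->obj_exts, &expected,
					      OBJEXTS_ALLOC_FAIL);
}

int main(void)
{
	struct slab_model s = { .obj_exts = 0 };

	printf("first mark:  %d\n", mark_failed_objexts_alloc(&s)); /* 1 */
	printf("second mark: %d\n", mark_failed_objexts_alloc(&s)); /* 0 */
	return 0;
}

Only one thread can win the compare-and-exchange on an empty word, which is exactly what the caller in alloc_slab_obj_exts() relies on below.
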
@@ -2124,8 +2124,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 				   slab_nid(slab));
 	}
 	if (!vec) {
-		/* Mark vectors which failed to allocate */
-		mark_failed_objexts_alloc(slab);
+		/*
+		 * Try to mark vectors which failed to allocate.
+		 * If this operation fails, there may be a racing process
+		 * that has already completed the allocation.
+		 */
+		if (!mark_failed_objexts_alloc(slab) &&
+		    slab_obj_exts(slab))
+			return 0;
 
 		return -ENOMEM;
 	}
@@ -2136,6 +2142,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #ifdef CONFIG_MEMCG
 	new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
+retry:
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
 	if (new_slab) {
@@ -2145,8 +2152,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		 * be simply assigned.
 		 */
 		slab->obj_exts = new_exts;
-	} else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
-		   cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+	} else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
 		/*
 		 * If the slab is already in use, somebody can allocate and
 		 * assign slabobj_exts in parallel. In this case the existing
@@ -2158,6 +2164,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		else
 			kfree(vec);
 		return 0;
+	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+		/* Retry if a racing thread changed slab->obj_exts from under us. */
+		goto retry;
 	}
 
 	if (allow_spin)
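
Splitting the old combined condition into a separate cmpxchg branch behind a retry label turns the publish step into a classic compare-and-swap loop: give up only when a rival vector is already installed, and re-read and retry when only flag bits (such as the failure marker) moved underneath us. A sketch of that pattern under the same assumptions as the model above (userspace C11 atomics, stand-in flag mask, hypothetical publish_obj_exts() helper):

#include <stdatomic.h>
#include <stdio.h>

#define OBJEXTS_FLAGS_MASK 0x7UL	/* stand-in for the kernel mask */

static _Atomic unsigned long obj_exts;

/* Returns 0 once new_exts is published; 1 if a rival vector already
 * exists, in which case the caller would free its own allocation. */
static int publish_obj_exts(unsigned long new_exts)
{
	unsigned long old_exts;

retry:
	old_exts = atomic_load(&obj_exts);
	if (old_exts & ~OBJEXTS_FLAGS_MASK)
		return 1;	/* rival vector installed first */
	/* CAS failure means only the flag bits changed under us
	 * (e.g. the failure marker was set); re-read and try again. */
	if (!atomic_compare_exchange_strong(&obj_exts, &old_exts, new_exts))
		goto retry;
	return 0;
}

int main(void)
{
	printf("published: %d\n", publish_obj_exts(0x1000UL)); /* 0 */
	printf("rival:     %d\n", publish_obj_exts(0x2000UL)); /* 1 */
	return 0;
}

The loop terminates because a failed CAS implies another writer made progress: either it installed a vector (caught by the mask check on the next pass) or it set a flag that the re-read picks up.
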
@@ -3419,7 +3428,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
 
 	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
 		/* Unlucky, discard newly allocated slab */
-		slab->frozen = 1;
 		defer_deactivate_slab(slab, NULL);
 		return NULL;
 	}
@@ -6468,9 +6476,12 @@ static void free_deferred_objects(struct irq_work *work)
 		struct slab *slab = container_of(pos, struct slab, llnode);
 
 #ifdef CONFIG_SLUB_TINY
-		discard_slab(slab->slab_cache, slab);
+		free_slab(slab->slab_cache, slab);
 #else
-		deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+		if (slab->frozen)
+			deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+		else
+			free_slab(slab->slab_cache, slab);
 #endif
 	}
 }
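
The last two hunks pair up: alloc_single_from_new_slab() no longer force-sets slab->frozen before deferring, so free_deferred_objects() uses the flag to separate slabs that still need deactivation from slabs that only need freeing. A stub sketch of that dispatch (all names below are illustrative stand-ins, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>

struct slab { bool frozen; };

static void deactivate_slab(struct slab *s) { puts("deactivate_slab()"); }
static void free_slab(struct slab *s)       { puts("free_slab()"); }

/* Mirrors the new branch in free_deferred_objects(): only slabs that
 * were actually frozen go through deactivation; the rest are freed. */
static void flush_deferred(struct slab *s)
{
	if (s->frozen)
		deactivate_slab(s);
	else
		free_slab(s);
}

int main(void)
{
	struct slab frozen = { .frozen = true };
	struct slab plain  = { .frozen = false };

	flush_deferred(&frozen);	/* deactivate_slab() */
	flush_deferred(&plain);		/* free_slab() */
	return 0;
}
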