author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2025-02-03 16:05:25 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2025-03-16 22:30:50 -0700
commit	b4dc0bee2a749083028afba346910e198653f42a (patch)
tree	6cca8e505189d9fde61d736a4f8dd605891b40de	/kernel/ucount.c
parent	5f01a22c5b231dd590f61a2591b3090665733bcb (diff)
ucount: use rcuref_t for reference counting
Use rcuref_t for reference counting. This eliminates the cmpxchg loop in the get and put path. It also eliminates the need to acquire the lock in the put path, because once the final user returns the reference, it can no longer be obtained.

Link: https://lkml.kernel.org/r/20250203150525.456525-5-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Lai jiangshan <jiangshanlai@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mengen Sun <mengensun@tencent.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: YueHong Wu <yuehongwu@tencent.com>
Cc: Zqiang <qiang.zhang1211@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
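For context, the sketch below is a minimal userspace analog written with C11 atomics; it is not the kernel's rcuref_t implementation. old_get() mirrors the atomic_inc_not_zero() cmpxchg retry loop that this patch removes, while new_get() shows the rcuref-style fast path of a single unconditional increment. The function names, the use of 0 as the "dead" value, and the omitted slow-path fixup are simplifications for illustration only.

/* Userspace sketch contrasting the two acquire schemes described above.
 * NOT kernel code: old_get()/new_get() are hypothetical stand-ins for
 * atomic_inc_not_zero() and rcuref_get(). Build with: cc -std=c11 demo.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Old scheme: retry loop. Re-read and compare-exchange until the count is
 * raised from a non-zero value, refusing once the final reference is gone. */
static bool old_get(atomic_uint *count)
{
	unsigned int old = atomic_load(count);

	do {
		if (old == 0)	/* final reference already dropped */
			return false;
	} while (!atomic_compare_exchange_weak(count, &old, old + 1));

	return true;
}

/* New scheme in spirit: one unconditional increment on the fast path.
 * The real rcuref_t reserves a "dead" range of counter values and fixes up
 * the rare hit on that range out of line; here 0 stands in for "dead" and
 * the fixup is omitted. */
static bool new_get(atomic_uint *count)
{
	return atomic_fetch_add(count, 1) != 0;
}

int main(void)
{
	atomic_uint live = 1;	/* object still referenced */
	atomic_uint dead = 0;	/* final reference already returned */

	printf("live: old_get=%d\n", old_get(&live));
	printf("live: new_get=%d\n", new_get(&live));
	printf("dead: old_get=%d\n", old_get(&dead));
	/* new_get() still bumps the dead counter; the real rcuref_t
	 * repairs that in its out-of-line slow path. */
	printf("dead: new_get=%d\n", new_get(&dead));
	return 0;
}

In the live case both helpers succeed, but old_get() may loop under contention while new_get() never retries; in the dead case both refuse the reference, which is why put_ucounts() below no longer needs to hold ucounts_lock while dropping the count.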
Diffstat (limited to 'kernel/ucount.c')
-rw-r--r--	kernel/ucount.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/ucount.c b/kernel/ucount.c
index b6abaf68cdcc..8686e329b8f2 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -11,7 +11,7 @@
struct ucounts init_ucounts = {
.ns = &init_user_ns,
.uid = GLOBAL_ROOT_UID,
- .count = ATOMIC_INIT(1),
+ .count = RCUREF_INIT(1),
};
#define UCOUNTS_HASHTABLE_BITS 10
@@ -138,7 +138,7 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid,
guard(rcu)();
hlist_nulls_for_each_entry_rcu(ucounts, pos, hashent, node) {
if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns)) {
- if (atomic_inc_not_zero(&ucounts->count))
+ if (rcuref_get(&ucounts->count))
return ucounts;
}
}
@@ -154,13 +154,6 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
spin_unlock_irq(&ucounts_lock);
}
-struct ucounts *get_ucounts(struct ucounts *ucounts)
-{
- if (atomic_inc_not_zero(&ucounts->count))
- return ucounts;
- return NULL;
-}
-
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
struct hlist_nulls_head *hashent = ucounts_hashentry(ns, uid);
@@ -176,7 +169,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
new->ns = ns;
new->uid = uid;
- atomic_set(&new->count, 1);
+ rcuref_init(&new->count, 1);
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@@ -196,7 +189,8 @@ void put_ucounts(struct ucounts *ucounts)
{
unsigned long flags;
- if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
+ if (rcuref_put(&ucounts->count)) {
+ spin_lock_irqsave(&ucounts_lock, flags);
hlist_nulls_del_rcu(&ucounts->node);
spin_unlock_irqrestore(&ucounts_lock, flags);