summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-16 12:37:13 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-16 12:37:13 -0800
commit543b9b63394ee67ecf5298fe42cbe65b21a16eac (patch)
treeddae1ef25280398551cf0077799e0a51dc3363b4 /kernel
parent57d76ceccee4b497eb835831206b50e72915a501 (diff)
parent3673dd3c7dc1f37baf0448164d323d7c7a44d1da (diff)
Merge tag 'kernel-7.0-rc1.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull pidfs updates from Christian Brauner: - pid: introduce task_ppid_vnr() helper - pidfs: convert rb-tree to rhashtable Mateusz reported performance penalties during task creation because pidfs uses pidmap_lock to add elements into the rbtree. Switch to an rhashtable to have separate fine-grained locking and to decouple from pidmap_lock moving all heavy manipulations outside of it Also move inode allocation outside of pidmap_lock. With this there's nothing happening for pidfs under pidmap_lock - pid: reorder fields in pid_namespace to reduce false sharing - Revert "pid: make __task_pid_nr_ns(ns => NULL) safe for zombie callers" - ipc: Add SPDX license id to mqueue.c * tag 'kernel-7.0-rc1.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: pid: introduce task_ppid_vnr() helper pidfs: implement ino allocation without the pidmap lock Revert "pid: make __task_pid_nr_ns(ns => NULL) safe for zombie callers" pid: reorder fields in pid_namespace to reduce false sharing pidfs: convert rb-tree to rhashtable ipc: Add SPDX license id to mqueue.c
Diffstat (limited to 'kernel')
-rw-r--r--kernel/pid.c15
1 file changed, 9 insertions, 6 deletions
diff --git a/kernel/pid.c b/kernel/pid.c
index f45ae56db7da..3b96571d0fe6 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -43,7 +43,6 @@
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
-#include <linux/seqlock.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
@@ -85,7 +84,6 @@ struct pid_namespace init_pid_ns = {
EXPORT_SYMBOL_GPL(init_pid_ns);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
-seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock);
void put_pid(struct pid *pid)
{
@@ -141,9 +139,9 @@ void free_pid(struct pid *pid)
idr_remove(&ns->idr, upid->nr);
}
- pidfs_remove_pid(pid);
spin_unlock(&pidmap_lock);
+ pidfs_remove_pid(pid);
call_rcu(&pid->rcu, delayed_put_pid);
}
@@ -200,6 +198,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
INIT_HLIST_HEAD(&pid->tasks[type]);
init_waitqueue_head(&pid->wait_pidfd);
INIT_HLIST_HEAD(&pid->inodes);
+ pidfs_prepare_pid(pid);
/*
* 2. perm check checkpoint_restore_ns_capable()
@@ -316,7 +315,6 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
retval = -ENOMEM;
if (unlikely(!(ns->pid_allocated & PIDNS_ADDING)))
goto out_free;
- pidfs_add_pid(pid);
for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
/* Make the PID visible to find_pid_ns. */
idr_replace(&upid->ns->idr, pid, upid->nr);
@@ -326,6 +324,12 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
idr_preload_end();
ns_ref_active_get(ns);
+ retval = pidfs_add_pid(pid);
+ if (unlikely(retval)) {
+ free_pid(pid);
+ pid = ERR_PTR(-ENOMEM);
+ }
+
return pid;
out_free:
@@ -554,8 +558,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
rcu_read_lock();
if (!ns)
ns = task_active_pid_ns(current);
- if (ns)
- nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
+ nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
rcu_read_unlock();
return nr;