| author | Ingo Molnar <mingo@elte.hu> | 2002-09-22 21:46:05 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-09-22 21:46:05 -0700 |
| commit | 817fdd72fae82f7b43b06ee7733c8ede31a4365b (patch) | |
| tree | 6472d9b86a7601e57d3e4448b13e2d7abd401e19 | /kernel |
| parent | 7641736676d476fb866d8750b2c94b671f9f8f9b (diff) | |
[PATCH] pidhash cleanups, tgid-2.5.38-F3
This does the following things:
- removes the ->thread_group list and uses a new PIDTYPE_TGID pid class
to handle thread groups. This cleans up lots of code in signal.c and
elsewhere.
- fixes sys_execve() if a non-leader thread calls it. (2.5.38 crashed in
this case.)
- renames list_for_each_noprefetch to __list_for_each.
- cleans up delayed-leader parent notification.
- introduces link_pid() to optimize PIDTYPE_TGID installation in the
thread-group case.
I've tested the patch with a number of threaded and non-threaded
workloads, and it works just fine. Compiles & boots on UP and SMP x86.
The session/pgrp bugs reported to lkml are probably still open; they are next
on my todo list - now that we have a clean pidhash architecture, they should be
easier to fix.
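
To make the new bookkeeping concrete, the following is a small self-contained user-space sketch of the idea, not kernel code: the `struct pid`, `pid_link` and `link_pid()` names mirror the patch below, while the list helpers, the `tgid_link` field and the `thread_group_empty()` definition are simplified stand-ins for what the kernel headers provide. Every thread hangs a `pid_link` off the single `struct pid` registered for its TGID, so walking a thread group and checking "am I alone in my group?" no longer need a per-task `->thread_group` list.

```c
/*
 * Simplified user-space model of the PIDTYPE_TGID bookkeeping (NOT kernel
 * code): every task hangs a pid_link off a shared "struct pid" instead of
 * being chained through a per-task ->thread_group list.  The list helpers
 * are a minimal stand-in for the kernel's <linux/list.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* One shared object per TGID; all member tasks are chained on task_list. */
struct pid {
        int nr;
        int count;
        struct list_head task_list;
};

struct pid_link {
        struct list_head pid_chain;
        struct pid *pidptr;
};

struct task {
        int pid, tgid;
        struct pid_link tgid_link;      /* stands in for pids[PIDTYPE_TGID] */
};

/* Mirrors the patch's link_pid(): join an already-registered struct pid. */
static void link_pid(struct task *t, struct pid *pid)
{
        pid->count++;
        list_add_tail(&t->tgid_link.pid_chain, &pid->task_list);
        t->tgid_link.pidptr = pid;
}

/* "Alone in the group" == exactly one pid_link on the shared task_list. */
static int thread_group_empty(struct task *t)
{
        struct list_head *head = &t->tgid_link.pidptr->task_list;
        return head->next == head->prev;
}

int main(void)
{
        struct pid tgid_pid = { .nr = 100, .count = 0 };
        struct task leader = { .pid = 100, .tgid = 100 };
        struct task worker = { .pid = 101, .tgid = 100 };
        struct list_head *l;

        INIT_LIST_HEAD(&tgid_pid.task_list);
        link_pid(&leader, &tgid_pid);   /* roughly what attach_pid() does for the leader */
        printf("leader alone: %d\n", thread_group_empty(&leader));     /* 1 */

        link_pid(&worker, &tgid_pid);
        printf("leader alone: %d\n", thread_group_empty(&leader));     /* 0 */

        /* Walking the thread group is a walk over the shared pid's task_list. */
        for (l = tgid_pid.task_list.next; l != &tgid_pid.task_list; l = l->next) {
                struct pid_link *link = container_of(l, struct pid_link, pid_chain);
                struct task *t = container_of(link, struct task, tgid_link);
                printf("tgid %d has member pid %d\n", t->tgid, t->pid);
        }
        return 0;
}
```

The wrap-around in the new `next_thread()` below follows the same shape: it simply advances along the shared pid's `task_list`, skipping the list head sentinel.
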
Diffstat (limited to 'kernel')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | kernel/exit.c | 32 |
| -rw-r--r-- | kernel/fork.c | 6 |
| -rw-r--r-- | kernel/pid.c | 61 |
| -rw-r--r-- | kernel/signal.c | 42 |

4 files changed, 99 insertions, 42 deletions

```diff
diff --git a/kernel/exit.c b/kernel/exit.c
index 4b051cb290c2..6b1629ef433a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -34,13 +34,13 @@ static struct dentry * __unhash_process(struct task_struct *p)
         struct dentry *proc_dentry;
 
         nr_threads--;
         detach_pid(p, PIDTYPE_PID);
+        detach_pid(p, PIDTYPE_TGID);
         if (thread_group_leader(p)) {
                 detach_pid(p, PIDTYPE_PGID);
                 detach_pid(p, PIDTYPE_SID);
         }
         REMOVE_LINKS(p);
-        p->pid = 0;
         proc_dentry = p->proc_dentry;
         if (unlikely(proc_dentry != NULL)) {
                 spin_lock(&dcache_lock);
@@ -74,6 +74,15 @@ void release_task(struct task_struct * p)
         write_lock_irq(&tasklist_lock);
         __exit_sighand(p);
         proc_dentry = __unhash_process(p);
+
+        /*
+         * If we are the last non-leader member of the thread
+         * group, and the leader is zombie, then notify the
+         * group leader's parent process.
+         */
+        if (p->group_leader != p && thread_group_empty(p))
+                do_notify_parent(p->group_leader, p->group_leader->exit_signal);
+
         p->parent->cutime += p->utime + p->cutime;
         p->parent->cstime += p->stime + p->cstime;
         p->parent->cmin_flt += p->min_flt + p->cmin_flt;
@@ -670,6 +679,25 @@ asmlinkage long sys_exit(int error_code)
         do_exit((error_code&0xff)<<8);
 }
 
+task_t *next_thread(task_t *p)
+{
+        struct pid_link *link = p->pids + PIDTYPE_TGID;
+        struct list_head *tmp, *head = &link->pidptr->task_list;
+
+#if CONFIG_SMP
+        if (!p->sig)
+                BUG();
+        if (!spin_is_locked(&p->sig->siglock) &&
+                        !rwlock_is_locked(&tasklist_lock))
+                BUG();
+#endif
+        tmp = link->pid_chain.next;
+        if (tmp == head)
+                tmp = head->next;
+
+        return pid_task(tmp, PIDTYPE_TGID);
+}
+
 /*
  * this kills every thread in the thread group. Note that any externally
  * wait4()-ing process will get the correct exit code - even if this
@@ -679,7 +707,7 @@ asmlinkage long sys_exit_group(int error_code)
 {
         unsigned int exit_code = (error_code & 0xff) << 8;
 
-        if (!list_empty(&current->thread_group)) {
+        if (!thread_group_empty(current)) {
                 struct signal_struct *sig = current->sig;
 
                 spin_lock_irq(&sig->siglock);
diff --git a/kernel/fork.c b/kernel/fork.c
index fceec868b97d..4688c232b9a5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -802,7 +802,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         */
        p->tgid = p->pid;
        p->group_leader = p;
-       INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);
 
@@ -830,7 +829,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                }
                p->tgid = current->tgid;
                p->group_leader = current->group_leader;
-               list_add(&p->thread_group, &current->thread_group);
                spin_unlock(&current->sig->siglock);
        }
 
@@ -840,9 +838,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        attach_pid(p, PIDTYPE_PID, p->pid);
        if (thread_group_leader(p)) {
+               attach_pid(p, PIDTYPE_TGID, p->tgid);
                attach_pid(p, PIDTYPE_PGID, p->pgrp);
                attach_pid(p, PIDTYPE_SID, p->session);
-       }
+       } else
+               link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
 
        nr_threads++;
        write_unlock_irq(&tasklist_lock);
diff --git a/kernel/pid.c b/kernel/pid.c
index eeb0a81ea4e6..b4da62f0aef2 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -142,7 +142,7 @@ inline struct pid *find_pid(enum pid_type type, int nr)
        struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
        struct pid *pid;
 
-       list_for_each_noprefetch(elem, bucket) {
+       __list_for_each(elem, bucket) {
                pid = list_entry(elem, struct pid, hash_chain);
                if (pid->nr == nr)
                        return pid;
@@ -150,6 +150,13 @@ inline struct pid *find_pid(enum pid_type type, int nr)
        return NULL;
 }
 
+void link_pid(task_t *task, struct pid_link *link, struct pid *pid)
+{
+       atomic_inc(&pid->count);
+       list_add_tail(&link->pid_chain, &pid->task_list);
+       link->pidptr = pid;
+}
+
 int attach_pid(task_t *task, enum pid_type type, int nr)
 {
        struct pid *pid = find_pid(type, nr);
@@ -165,13 +172,13 @@ int attach_pid(task_t *task, enum pid_type type, int nr)
                get_task_struct(task);
                list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]);
        }
-       list_add(&task->pids[type].pid_chain, &pid->task_list);
+       list_add_tail(&task->pids[type].pid_chain, &pid->task_list);
        task->pids[type].pidptr = pid;
 
        return 0;
 }
 
-void detach_pid(task_t *task, enum pid_type type)
+static inline int __detach_pid(task_t *task, enum pid_type type)
 {
        struct pid_link *link = task->pids + type;
        struct pid *pid = link->pidptr;
@@ -179,19 +186,34 @@ void detach_pid(task_t *task, enum pid_type type)
        list_del(&link->pid_chain);
        if (!atomic_dec_and_test(&pid->count))
-               return;
+               return 0;
 
        nr = pid->nr;
        list_del(&pid->hash_chain);
        put_task_struct(pid->task);
+
+       return nr;
+}
+
+static void _detach_pid(task_t *task, enum pid_type type)
+{
+       __detach_pid(task, type);
+}
+
+void detach_pid(task_t *task, enum pid_type type)
+{
+       int nr = __detach_pid(task, type);
+
+       if (!nr)
+               return;
+
        for (type = 0; type < PIDTYPE_MAX; ++type)
                if (find_pid(type, nr))
                        return;
        free_pidmap(nr);
 }
 
-extern task_t *find_task_by_pid(int nr)
+task_t *find_task_by_pid(int nr)
 {
        struct pid *pid = find_pid(PIDTYPE_PID, nr);
 
@@ -200,6 +222,35 @@ extern task_t *find_task_by_pid(int nr)
        return pid_task(pid->task_list.next, PIDTYPE_PID);
 }
 
+/*
+ * This function switches the PIDs if a non-leader thread calls
+ * sys_execve() - this must be done without releasing the PID.
+ * (which a detach_pid() would eventually do.)
+ */
+void switch_exec_pids(task_t *leader, task_t *thread)
+{
+       _detach_pid(leader, PIDTYPE_PID);
+       _detach_pid(leader, PIDTYPE_TGID);
+       _detach_pid(leader, PIDTYPE_PGID);
+       _detach_pid(leader, PIDTYPE_SID);
+
+       _detach_pid(thread, PIDTYPE_PID);
+       _detach_pid(thread, PIDTYPE_TGID);
+
+       leader->pid = leader->tgid = thread->pid;
+       thread->pid = thread->tgid;
+
+       attach_pid(thread, PIDTYPE_PID, thread->pid);
+       attach_pid(thread, PIDTYPE_TGID, thread->tgid);
+       attach_pid(thread, PIDTYPE_PGID, thread->pgrp);
+       attach_pid(thread, PIDTYPE_SID, thread->session);
+
+       attach_pid(leader, PIDTYPE_PID, leader->pid);
+       attach_pid(leader, PIDTYPE_TGID, leader->tgid);
+       attach_pid(leader, PIDTYPE_PGID, leader->pgrp);
+       attach_pid(leader, PIDTYPE_SID, leader->session);
+}
+
 void __init pidhash_init(void)
 {
        int i, j;
diff --git a/kernel/signal.c b/kernel/signal.c
index aa488b7dca7f..013c7a899152 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -254,7 +254,6 @@ static inline void __remove_thread_group(struct task_struct *tsk, struct signal_
 {
        if (tsk == sig->curr_target)
                sig->curr_target = next_thread(tsk);
-       list_del_init(&tsk->thread_group);
 }
 
 void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig)
@@ -281,15 +280,13 @@ void __exit_sighand(struct task_struct *tsk)
                BUG();
        spin_lock(&sig->siglock);
        spin_lock(&tsk->sigmask_lock);
-       tsk->sig = NULL;
        if (atomic_dec_and_test(&sig->count)) {
                __remove_thread_group(tsk, sig);
+               tsk->sig = NULL;
                spin_unlock(&sig->siglock);
                flush_sigqueue(&sig->shared_pending);
                kmem_cache_free(sigact_cachep, sig);
        } else {
-               struct task_struct *leader = tsk->group_leader;
-
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
@@ -298,24 +295,9 @@ void __exit_sighand(struct task_struct *tsk)
                        wake_up_process(sig->group_exit_task);
                        sig->group_exit_task = NULL;
                }
-               /*
-                * If we are the last non-leader member of the thread
-                * group, and the leader is zombie, then notify the
-                * group leader's parent process.
-                *
-                * (subtle: here we also rely on the fact that if we are the
-                * thread group leader then we are not zombied yet.)
-                */
-               if (atomic_read(&sig->count) == 1 &&
-                   leader->state == TASK_ZOMBIE) {
-
-                       __remove_thread_group(tsk, sig);
-                       spin_unlock(&sig->siglock);
-                       do_notify_parent(leader, leader->exit_signal);
-               } else {
-                       __remove_thread_group(tsk, sig);
-                       spin_unlock(&sig->siglock);
-               }
+               __remove_thread_group(tsk, sig);
+               tsk->sig = NULL;
+               spin_unlock(&sig->siglock);
        }
        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
        flush_sigqueue(&tsk->pending);
@@ -853,7 +835,7 @@ int load_balance_thread_group(struct task_struct *p, int sig,
                p->sig->curr_target = p;
        else
                for (;;) {
-                       if (list_empty(&p->thread_group))
+                       if (thread_group_empty(p))
                                BUG();
                        if (!tmp || tmp->tgid != p->tgid)
                                BUG();
@@ -882,17 +864,13 @@ int load_balance_thread_group(struct task_struct *p, int sig,
 int __broadcast_thread_group(struct task_struct *p, int sig)
 {
        struct task_struct *tmp;
-       struct list_head *entry;
+       struct list_head *l;
+       struct pid *pid;
        int err = 0;
 
-       /* send a signal to the head of the list */
-       err = __force_sig_info(sig, p);
-
-       /* send a signal to all members of the list */
-       list_for_each(entry, &p->thread_group) {
-               tmp = list_entry(entry, task_t, thread_group);
+       for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid)
                err = __force_sig_info(sig, tmp);
-       }
+
        return err;
 }
 
@@ -909,7 +887,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        spin_lock_irqsave(&p->sig->siglock, flags);
 
        /* not a thread group - normal signal behavior */
-       if (list_empty(&p->thread_group) || !sig)
+       if (thread_group_empty(p) || !sig)
                goto out_send;
 
        if (sig_user_defined(p, sig)) {
```
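
The new `switch_exec_pids()` has no caller inside this kernel/-limited diff; per the changelog it backs the fix for a non-leader thread calling sys_execve(), so the caller presumably sits on the execve path in fs/exec.c. The self-contained sketch below (illustrative values, not kernel code) models only the numeric pid/tgid swap; the detaching and re-attaching of the pid-hash links that the real function also performs is left out.

```c
/*
 * Toy model of the pid/tgid swap done by switch_exec_pids() (NOT kernel
 * code): after the swap the exec'ing non-leader thread owns the group's
 * original PID, and the old leader is parked on the thread's former pid.
 */
#include <stdio.h>

struct task { int pid, tgid; };

static void switch_exec_pids(struct task *leader, struct task *thread)
{
        /* Same assignments as the patch, minus the _detach/attach_pid calls. */
        leader->pid = leader->tgid = thread->pid;
        thread->pid = thread->tgid;
}

int main(void)
{
        struct task leader = { .pid = 100, .tgid = 100 };
        struct task thread = { .pid = 101, .tgid = 100 };

        switch_exec_pids(&leader, &thread);

        printf("exec'ing thread: pid=%d tgid=%d\n", thread.pid, thread.tgid); /* 100/100 */
        printf("old leader:      pid=%d tgid=%d\n", leader.pid, leader.tgid); /* 101/101 */
        return 0;
}
```

After the swap both tasks carry new numbers, which is why the real function then re-attaches each of them under all four pid classes.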
