summaryrefslogtreecommitdiff
path: root/kernel/fork.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2002-09-12 21:36:11 -0700
committerIngo Molnar <mingo@elte.hu>2002-09-12 21:36:11 -0700
commit2c66151cbc2cc68465489fdf64d16bd1c6ca4d0e (patch)
treea726906587dfd198db17c02be4694e617e7a9401 /kernel/fork.c
parentf2e3a5d66f21eacafe15a846f2010200ed0cb8b6 (diff)
[PATCH] sys_exit() threading improvements, BK-curr
This implements the 'keep the initial thread around until every thread in the group exits' concept in a different, less intrusive way, along the lines of your suggestions. There is no exit_done completion handling anymore; freeing of the task is still done by wait4(). This has the following side effect: detached threads/processes can only be started within a thread group, not in a standalone way. (This also fixes the bugs introduced by the ->exit_done code, which made it possible for a zombie task to be reactivated.) I've introduced the p->group_leader pointer, which can/will be used for other purposes in the future as well — since from now on the thread group leader always exists. Right now it's used to notify the parent of the thread group leader from the last non-leader thread that exits [if the thread group leader is already a zombie].
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--kernel/fork.c9
1 files changed, 8 insertions, 1 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index bd902cc45283..5bc7f2a07915 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -628,7 +628,6 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
atomic_set(&sig->count, 1);
sig->group_exit = 0;
sig->group_exit_code = 0;
- init_completion(&sig->group_exit_done);
memcpy(sig->action, current->sig->action, sizeof(sig->action));
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
@@ -672,6 +671,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
*/
if (clone_flags & CLONE_THREAD)
clone_flags |= CLONE_SIGHAND;
+ /*
+ * Detached threads can only be started up within the thread
+ * group.
+ */
+ if (clone_flags & CLONE_DETACHED)
+ clone_flags |= CLONE_THREAD;
retval = security_ops->task_create(clone_flags);
if (retval)
@@ -843,6 +848,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* Let it rip!
*/
p->tgid = p->pid;
+ p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
@@ -870,6 +876,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_namespace;
}
p->tgid = current->tgid;
+ p->group_leader = current->group_leader;
list_add(&p->thread_group, &current->thread_group);
spin_unlock(&current->sig->siglock);
}