summary | refs | log | tree | commit | diff
path: root/fs/exec.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2002-12-01 16:29:39 -0800
committerGreg Kroah-Hartman <greg@kroah.com>2002-12-01 16:29:39 -0800
commit8a0611595372910f0ead55d294a48a89136b7006 (patch)
tree81b0ab7c29568112f8d53479b27fd1576af785cf /fs/exec.c
parentfe43697ec79a56d0421930fd068202fc1012b1a8 (diff)
[PATCH] tcore-fixes-2.5.50-E6
This fixes threaded coredumps and streamlines the code. The old code caused crashes and hung coredumps. The new code has been tested for some time already and appears to be robust.

Changes:

- the code now uses completions instead of a semaphore and a waitqueue, attached to mm_struct:

    /* coredumping support */
    int core_waiters;
    struct completion *core_startup_done, core_done;

- extended the completion concept with a 'complete all' call - all pending threads are woken up in that case.

- core_waiters is a plain integer now - it's always accessed from under the mmap_sem. It's also used as the fastpath-check in the sys_exit() path, instead of ->dumpable (which was incorrect).

- got rid of the ->core_waiter task flag - it's not needed anymore.
Diffstat (limited to 'fs/exec.c')
-rw-r--r--fs/exec.c49
1 files changed, 29 insertions, 20 deletions
diff --git a/fs/exec.c b/fs/exec.c
index a0a4aa824ae6..28ff9cbfe0f9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1238,49 +1238,58 @@ static void zap_threads (struct mm_struct *mm)
{
struct task_struct *g, *p;
- /* give other threads a chance to run: */
- yield();
-
read_lock(&tasklist_lock);
do_each_thread(g,p)
- if (mm == p->mm && !p->core_waiter)
+ if (mm == p->mm && p != current) {
force_sig_specific(SIGKILL, p);
+ mm->core_waiters++;
+ }
while_each_thread(g,p);
+
read_unlock(&tasklist_lock);
}
static void coredump_wait(struct mm_struct *mm)
{
- DECLARE_WAITQUEUE(wait, current);
+ DECLARE_COMPLETION(startup_done);
+
+ mm->core_waiters++; /* let other threads block */
+ mm->core_startup_done = &startup_done;
+
+ /* give other threads a chance to run: */
+ yield();
- atomic_inc(&mm->core_waiters);
- add_wait_queue(&mm->core_wait, &wait);
zap_threads(mm);
- current->state = TASK_UNINTERRUPTIBLE;
- if (atomic_read(&mm->core_waiters) != atomic_read(&mm->mm_users))
- schedule();
- else
- current->state = TASK_RUNNING;
+ if (--mm->core_waiters) {
+ up_write(&mm->mmap_sem);
+ wait_for_completion(&startup_done);
+ } else
+ up_write(&mm->mmap_sem);
+ BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, struct pt_regs * regs)
{
- struct linux_binfmt * binfmt;
char corename[CORENAME_MAX_SIZE + 1];
- struct file * file;
+ struct mm_struct *mm = current->mm;
+ struct linux_binfmt * binfmt;
struct inode * inode;
+ struct file * file;
int retval = 0;
lock_kernel();
binfmt = current->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
- if (!current->mm->dumpable)
+ down_write(&mm->mmap_sem);
+ if (!mm->dumpable) {
+ up_write(&mm->mmap_sem);
goto fail;
- current->mm->dumpable = 0;
- if (down_trylock(&current->mm->core_sem))
- BUG();
- coredump_wait(current->mm);
+ }
+ mm->dumpable = 0;
+ init_completion(&mm->core_done);
+ coredump_wait(mm);
+
if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
goto fail_unlock;
@@ -1308,7 +1317,7 @@ int do_coredump(long signr, struct pt_regs * regs)
close_fail:
filp_close(file, NULL);
fail_unlock:
- up(&current->mm->core_sem);
+ complete_all(&mm->core_done);
fail:
unlock_kernel();
return retval;