| author | Ingo Molnar <mingo@elte.hu> | 2002-12-01 16:29:39 -0800 |
|---|---|---|
| committer | Greg Kroah-Hartman <greg@kroah.com> | 2002-12-01 16:29:39 -0800 |
| commit | 8a0611595372910f0ead55d294a48a89136b7006 (patch) | |
| tree | 81b0ab7c29568112f8d53479b27fd1576af785cf | |
| parent | fe43697ec79a56d0421930fd068202fc1012b1a8 (diff) | |
[PATCH] tcore-fixes-2.5.50-E6
This fixes threaded coredumps and streamlines the code. The old code
caused crashes and hung coredumps. The new code has been tested for
some time already and appears to be robust. Changes:
- the code now uses completions instead of a semaphore and a waitqueue,
attached to mm_struct:
/* coredumping support */
int core_waiters;
struct completion *core_startup_done, core_done;
- extended the completion concept with a 'complete all' call - all pending
threads are woken up in that case.
- core_waiters is a plain integer now - it's always accessed under the
mmap_sem. It's also used as the fastpath check in the sys_exit()
path, instead of ->dumpable (which was incorrect). See the sketch
after this list for how the pieces fit together.
- got rid of the ->core_waiter task flag - it's not needed anymore.
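
For readers who want to see the handshake end to end, here is a minimal userspace analogue (not kernel code): a pthread mutex and two condition variables stand in for mmap_sem, core_startup_done and core_done; the main thread plays the do_coredump()/coredump_wait() side and the worker threads play the __exit_mm() side. NTHREADS and all names are illustrative only.

```c
/*
 * Userspace analogue of the coredump handshake in this patch (NOT kernel
 * code): the main thread plays do_coredump()/coredump_wait(), the workers
 * play the __exit_mm() path.  pthread primitives stand in for mmap_sem
 * and the two completions; NTHREADS and all names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;       /* ~ mmap_sem */
static pthread_cond_t startup_done = PTHREAD_COND_INITIALIZER; /* ~ core_startup_done */
static pthread_cond_t core_done = PTHREAD_COND_INITIALIZER;    /* ~ core_done */
static int core_waiters;        /* plain int, only touched under 'lock' */
static int dump_finished;

static void *exiting_thread(void *arg)
{
	(void)arg;
	/* __exit_mm(): check in, then wait until the dump is written */
	pthread_mutex_lock(&lock);
	if (--core_waiters == 0)
		pthread_cond_signal(&startup_done);     /* complete(core_startup_done) */
	while (!dump_finished)                          /* wait_for_completion(&core_done) */
		pthread_cond_wait(&core_done, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	int i;

	core_waiters = NTHREADS;        /* zap_threads() counted the other threads */

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, exiting_thread, NULL);

	/* coredump_wait(): block until every other thread has checked in */
	pthread_mutex_lock(&lock);
	while (core_waiters > 0)
		pthread_cond_wait(&startup_done, &lock);
	pthread_mutex_unlock(&lock);

	printf("all threads quiesced, writing core...\n");

	/* complete_all(&mm->core_done): release every waiter at once */
	pthread_mutex_lock(&lock);
	dump_finished = 1;
	pthread_cond_broadcast(&core_done);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
```

Build with gcc -pthread. The patch below implements the same sequence, but with mm->mmap_sem guarding the counter and the scheduler's completions doing the sleeping and waking.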
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/exec.c | 49 |
| -rw-r--r-- | include/linux/completion.h | 1 |
| -rw-r--r-- | include/linux/sched.h | 7 |
| -rw-r--r-- | kernel/exit.c | 15 |
| -rw-r--r-- | kernel/fork.c | 5 |
| -rw-r--r-- | kernel/sched.c | 10 |

6 files changed, 50 insertions, 37 deletions
diff --git a/fs/exec.c b/fs/exec.c
index a0a4aa824ae6..28ff9cbfe0f9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1238,49 +1238,58 @@ static void zap_threads (struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 
-	/* give other threads a chance to run: */
-	yield();
-
 	read_lock(&tasklist_lock);
 	do_each_thread(g,p)
-		if (mm == p->mm && !p->core_waiter)
+		if (mm == p->mm && p != current) {
 			force_sig_specific(SIGKILL, p);
+			mm->core_waiters++;
+		}
 	while_each_thread(g,p);
+
 	read_unlock(&tasklist_lock);
 }
 
 static void coredump_wait(struct mm_struct *mm)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DECLARE_COMPLETION(startup_done);
+
+	mm->core_waiters++; /* let other threads block */
+	mm->core_startup_done = &startup_done;
+
+	/* give other threads a chance to run: */
+	yield();
 
-	atomic_inc(&mm->core_waiters);
-	add_wait_queue(&mm->core_wait, &wait);
 	zap_threads(mm);
-	current->state = TASK_UNINTERRUPTIBLE;
-	if (atomic_read(&mm->core_waiters) != atomic_read(&mm->mm_users))
-		schedule();
-	else
-		current->state = TASK_RUNNING;
+	if (--mm->core_waiters) {
+		up_write(&mm->mmap_sem);
+		wait_for_completion(&startup_done);
+	} else
+		up_write(&mm->mmap_sem);
+	BUG_ON(mm->core_waiters);
 }
 
 int do_coredump(long signr, struct pt_regs * regs)
 {
-	struct linux_binfmt * binfmt;
 	char corename[CORENAME_MAX_SIZE + 1];
-	struct file * file;
+	struct mm_struct *mm = current->mm;
+	struct linux_binfmt * binfmt;
 	struct inode * inode;
+	struct file * file;
 	int retval = 0;
 
 	lock_kernel();
 	binfmt = current->binfmt;
 	if (!binfmt || !binfmt->core_dump)
 		goto fail;
-	if (!current->mm->dumpable)
+	down_write(&mm->mmap_sem);
+	if (!mm->dumpable) {
+		up_write(&mm->mmap_sem);
 		goto fail;
-	current->mm->dumpable = 0;
-	if (down_trylock(&current->mm->core_sem))
-		BUG();
-	coredump_wait(current->mm);
+	}
+	mm->dumpable = 0;
+	init_completion(&mm->core_done);
+	coredump_wait(mm);
+
 	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
 		goto fail_unlock;
 
@@ -1308,7 +1317,7 @@ int do_coredump(long signr, struct pt_regs * regs)
 close_fail:
 	filp_close(file, NULL);
 fail_unlock:
-	up(&current->mm->core_sem);
+	complete_all(&mm->core_done);
 fail:
 	unlock_kernel();
 	return retval;
diff --git a/include/linux/completion.h b/include/linux/completion.h
index e4d6817775be..b8d7ed948470 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -29,6 +29,7 @@ static inline void init_completion(struct completion *x)
 
 extern void FASTCALL(wait_for_completion(struct completion *));
 extern void FASTCALL(complete(struct completion *));
+extern void FASTCALL(complete_all(struct completion *));
 
 #define INIT_COMPLETION(x)	((x).done = 0)
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f919a8ce52cb..80a9836df919 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -203,9 +203,8 @@ struct mm_struct {
 	mm_context_t context;
 
 	/* coredumping support */
-	struct semaphore core_sem;
-	atomic_t core_waiters;
-	wait_queue_head_t core_wait;
+	int core_waiters;
+	struct completion *core_startup_done, core_done;
 
 	/* aio bits */
 	rwlock_t ioctx_list_lock;
@@ -397,8 +396,6 @@ struct task_struct {
 	void *journal_info;
 	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
-/* threaded coredumping support */
-	int core_waiter;
 
 	unsigned long ptrace_message;
 };
diff --git a/kernel/exit.c b/kernel/exit.c
index 5b55b0f9b635..5e7fcdaa6221 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -425,14 +425,13 @@ static inline void __exit_mm(struct task_struct * tsk)
 	/*
 	 * Serialize with any possible pending coredump:
 	 */
-	if (!mm->dumpable) {
-		current->core_waiter = 1;
-		atomic_inc(&mm->core_waiters);
-		if (atomic_read(&mm->core_waiters) == atomic_read(&mm->mm_users))
-			wake_up(&mm->core_wait);
-		down(&mm->core_sem);
-		up(&mm->core_sem);
-		atomic_dec(&mm->core_waiters);
+	if (mm->core_waiters) {
+		down_write(&mm->mmap_sem);
+		if (!--mm->core_waiters)
+			complete(mm->core_startup_done);
+		up_write(&mm->mmap_sem);
+
+		wait_for_completion(&mm->core_done);
 	}
 	atomic_inc(&mm->mm_count);
 	if (mm != tsk->active_mm) BUG();
diff --git a/kernel/fork.c b/kernel/fork.c
index f1ec2c2d2fe0..e7c966432932 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -328,9 +328,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
-	init_MUTEX(&mm->core_sem);
-	init_waitqueue_head(&mm->core_wait);
-	atomic_set(&mm->core_waiters, 0);
+	mm->core_waiters = 0;
 	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
 	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
 	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
@@ -800,7 +798,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->start_time = jiffies;
 	p->security = NULL;
 
-	p->core_waiter = 0;
 	retval = -ENOMEM;
 	if (security_task_alloc(p))
 		goto bad_fork_cleanup;
diff --git a/kernel/sched.c b/kernel/sched.c
index b7c0cff29846..11678e2b7f09 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1192,6 +1192,16 @@ void complete(struct completion *x)
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
+void complete_all(struct completion *x)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done += UINT_MAX/2;
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+
 void wait_for_completion(struct completion *x)
 {
 	might_sleep();
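
The complete_all() added in kernel/sched.c bumps the completion's done counter by UINT_MAX/2 and wakes every sleeper, so both current and future wait_for_completion() callers fall straight through, which is exactly what do_coredump() needs to release all exiting threads at once. Below is a rough userspace model of that counter semantic, with pthreads standing in for the scheduler's wait queues; the types and helpers are illustrative, not the kernel API.

```c
/* Userspace model of the completion counter semantics (illustrative only). */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

struct completion {
	unsigned int done;
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

void init_completion(struct completion *x)
{
	x->done = 0;
	pthread_mutex_init(&x->lock, NULL);
	pthread_cond_init(&x->wait, NULL);
}

/* let exactly one waiter (present or future) through */
void complete(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	x->done++;
	pthread_cond_signal(&x->wait);
	pthread_mutex_unlock(&x->lock);
}

/* let everyone through: the huge increment means wait_for_completion()
 * keeps seeing done > 0, so no caller ever blocks again */
void complete_all(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	x->done += UINT_MAX / 2;
	pthread_cond_broadcast(&x->wait);
	pthread_mutex_unlock(&x->lock);
}

void wait_for_completion(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	while (!x->done)
		pthread_cond_wait(&x->wait, &x->lock);
	x->done--;	/* consume one unit, as the kernel version does */
	pthread_mutex_unlock(&x->lock);
}

static struct completion core_done;	/* plays mm->core_done */

static void *waiter(void *arg)
{
	(void)arg;
	wait_for_completion(&core_done);
	printf("waiter released\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	init_completion(&core_done);
	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, waiter, NULL);
	complete_all(&core_done);	/* both waiters get through */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```

Note how each wait_for_completion() consumes one unit of done, so a plain complete() releases exactly one waiter, while the oversized increment from complete_all() can never be used up.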
