diff options
| author | Ingo Molnar <mingo@elte.hu> | 2002-11-05 04:25:29 -0800 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2002-11-05 04:25:29 -0800 |
| commit | d89f3847def4a55a84cc42809994bde2a148e9e0 (patch) | |
| tree | 44e264429c0e2f71167e77ee272e22c40d6acef8 /kernel | |
| parent | 5a7728c6d3eb83df9d120944cca4cf476dd326a1 (diff) | |
[PATCH] thread-aware coredumps, 2.5.43-C3
This is the second iteration of thread-aware coredumps.
Changes:
- Ulrich Drepper has reviewed the data structures and checked actual
coredumps via readelf - everything looks fine and according to the spec.
- a serious bug has been fixed in the thread-state dumping code - it was
still based on the 2.4 assumption that the task struct points to the
kernel stack - it's task->thread_info in 2.5. This bug caused bogus
register info to be filled in for threads.
- properly wait for all threads that share the same MM to serialize with
the coredumping thread. This is CLONE_VM based, not tied to
CLONE_THREAD and/or signal semantics, ie. old-style (or different-style)
threaded apps will be properly stopped as well.
The locking might look a bit complex, but I wanted to keep the
__exit_mm() overhead as low as possible. It's not quite trivial to get
these bits right, because 'sharing the MM' is detached from signals
semantics, so we cannot rely on broadcast-kill catching all threads. So
zap_threads() iterates through every thread and zaps those which were
left out. (There's a minimal race left where a newly forked child
might escape the attention of zap_threads() - this race is fixed by the
OOM fixes in the mmap-speedup patch.)
- fill_psinfo() is now called with the thread group leader, for the
coredump to get 'process' state.
- initialize the elf_thread_status structure with zeroes.
the IA64 ELF bits are not yet included, to reduce the complexity of the
patch. The patch has been tested on x86 UP and SMP.
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/exit.c | 32 | ||||
| -rw-r--r-- | kernel/fork.c | 5 | ||||
| -rw-r--r-- | kernel/signal.c | 18 |
3 files changed, 43 insertions, 12 deletions
diff --git a/kernel/exit.c b/kernel/exit.c index c2b0f6eeff0f..9145e69f1d30 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -416,19 +416,31 @@ void end_lazy_tlb(struct mm_struct *mm) */ static inline void __exit_mm(struct task_struct * tsk) { - struct mm_struct * mm = tsk->mm; + struct mm_struct *mm = tsk->mm; mm_release(); - if (mm) { - atomic_inc(&mm->mm_count); - if (mm != tsk->active_mm) BUG(); - /* more a memory barrier than a real lock */ - task_lock(tsk); - tsk->mm = NULL; - enter_lazy_tlb(mm, current, smp_processor_id()); - task_unlock(tsk); - mmput(mm); + if (!mm) + return; + /* + * Serialize with any possible pending coredump: + */ + if (!mm->dumpable) { + current->core_waiter = 1; + atomic_inc(&mm->core_waiters); + if (atomic_read(&mm->core_waiters) ==atomic_read(&mm->mm_users)) + wake_up(&mm->core_wait); + down(&mm->core_sem); + up(&mm->core_sem); + atomic_dec(&mm->core_waiters); } + atomic_inc(&mm->mm_count); + if (mm != tsk->active_mm) BUG(); + /* more a memory barrier than a real lock */ + task_lock(tsk); + tsk->mm = NULL; + enter_lazy_tlb(mm, current, smp_processor_id()); + task_unlock(tsk); + mmput(mm); } void exit_mm(struct task_struct *tsk) diff --git a/kernel/fork.c b/kernel/fork.c index f8620b82f8f5..7dda73151778 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -305,6 +305,9 @@ static struct mm_struct * mm_init(struct mm_struct * mm) atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); + init_MUTEX(&mm->core_sem); + init_waitqueue_head(&mm->core_wait); + atomic_set(&mm->core_waiters, 0); mm->page_table_lock = SPIN_LOCK_UNLOCKED; mm->ioctx_list_lock = RW_LOCK_UNLOCKED; mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm); @@ -771,6 +774,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, INIT_LIST_HEAD(&p->local_pages); + p->core_waiter = 0; + retval = -ENOMEM; if (security_ops->task_alloc_security(p)) goto bad_fork_cleanup; diff --git a/kernel/signal.c 
b/kernel/signal.c index b037b12ce04b..2f2a5c233f61 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -768,7 +768,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) } static int -specific_force_sig_info(int sig, struct task_struct *t) +__specific_force_sig_info(int sig, struct task_struct *t) { if (!t->sig) return -ESRCH; @@ -781,6 +781,20 @@ specific_force_sig_info(int sig, struct task_struct *t) return specific_send_sig_info(sig, (void *)2, t, 0); } +void +force_sig_specific(int sig, struct task_struct *t) +{ + unsigned long int flags; + + spin_lock_irqsave(&t->sig->siglock, flags); + if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN) + t->sig->action[sig-1].sa.sa_handler = SIG_DFL; + sigdelset(&t->blocked, sig); + recalc_sigpending_tsk(t); + specific_send_sig_info(sig, (void *)2, t, 0); + spin_unlock_irqrestore(&t->sig->siglock, flags); +} + #define can_take_signal(p, sig) \ (((unsigned long) p->sig->action[sig-1].sa.sa_handler > 1) && \ !sigismember(&p->blocked, sig) && (task_curr(p) || !signal_pending(p))) @@ -846,7 +860,7 @@ int __broadcast_thread_group(struct task_struct *p, int sig) int err = 0; for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid) - err = specific_force_sig_info(sig, tmp); + err = __specific_force_sig_info(sig, tmp); return err; } |
