diff options
| author | Linus Torvalds <torvalds@home.transmeta.com> | 2003-02-06 20:25:24 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2003-02-06 20:25:24 -0800 |
| commit | 8eae299835cf161a93a5acd890cebf0f83f2a2ce (patch) | |
| tree | 36e8d2c6717ac5b716b87fbcc6847924c2271691 /fs/exec.c | |
| parent | fef31b0354eeec8ce9c71c7adcf08848c25727f6 (diff) | |
Split up "struct signal_struct" into "signal" and "sighand" parts.
This is required to make the old LinuxThread semantics work
together with the fixed-for-POSIX full signal sharing. A traditional
CLONE_SIGHAND thread (LinuxThread) will not see any other shared
signal state, while a new-style CLONE_THREAD thread will share all
of it.
This way the two methods don't confuse each other.
Diffstat (limited to 'fs/exec.c')
| -rw-r--r-- | fs/exec.c | 107 |
1 files changed, 63 insertions, 44 deletions
diff --git a/fs/exec.c b/fs/exec.c index 0b41239937b7..a63d5c43da1f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -559,31 +559,61 @@ static inline void put_proc_dentry(struct dentry *dentry) * disturbing other processes. (Other processes might share the signal * table via the CLONE_SIGHAND option to clone().) */ -static inline int de_thread(struct signal_struct *oldsig) +static inline int de_thread(struct task_struct *tsk) { - struct signal_struct *newsig; + struct signal_struct *newsig, *oldsig = tsk->signal; + struct sighand_struct *newsighand, *oldsighand = tsk->sighand; + spinlock_t *lock = &oldsighand->siglock; int count; - if (atomic_read(¤t->sig->count) <= 1) + /* + * If we don't share sighandlers, then we aren't sharing anything + * and we can just re-use it all. + */ + if (atomic_read(&oldsighand->count) <= 1) return 0; - newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL); - if (!newsig) + newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); + if (!newsighand) return -ENOMEM; + spin_lock_init(&newsighand->siglock); + atomic_set(&newsighand->count, 1); + memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action)); + + /* + * See if we need to allocate a new signal structure + */ + newsig = NULL; + if (atomic_read(&oldsig->count) > 1) { + newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + if (!newsig) { + kmem_cache_free(sighand_cachep, newsighand); + return -ENOMEM; + } + atomic_set(&newsig->count, 1); + newsig->group_exit = 0; + newsig->group_exit_code = 0; + newsig->group_exit_task = NULL; + newsig->group_stop_count = 0; + init_sigpending(&newsig->shared_pending); + } + if (thread_group_empty(current)) - goto out; + goto no_thread_group; /* * Kill all other threads in the thread group: */ - spin_lock_irq(&oldsig->siglock); + spin_lock_irq(lock); if (oldsig->group_exit) { /* * Another group action in progress, just * return so that the signal is processed. 
*/ - spin_unlock_irq(&oldsig->siglock); - kmem_cache_free(sigact_cachep, newsig); + spin_unlock_irq(lock); + kmem_cache_free(sighand_cachep, newsighand); + if (newsig) + kmem_cache_free(signal_cachep, newsig); return -EAGAIN; } oldsig->group_exit = 1; @@ -598,13 +628,13 @@ static inline int de_thread(struct signal_struct *oldsig) while (atomic_read(&oldsig->count) > count) { oldsig->group_exit_task = current; current->state = TASK_UNINTERRUPTIBLE; - spin_unlock_irq(&oldsig->siglock); + spin_unlock_irq(lock); schedule(); - spin_lock_irq(&oldsig->siglock); + spin_lock_irq(lock); if (oldsig->group_exit_task) BUG(); } - spin_unlock_irq(&oldsig->siglock); + spin_unlock_irq(lock); /* * At this point all other threads have exited, all we have to @@ -675,32 +705,29 @@ static inline int de_thread(struct signal_struct *oldsig) release_task(leader); } -out: - spin_lock_init(&newsig->siglock); - atomic_set(&newsig->count, 1); - newsig->group_exit = 0; - newsig->group_exit_code = 0; - newsig->group_exit_task = NULL; - newsig->group_stop_count = 0; - memcpy(newsig->action, current->sig->action, sizeof(newsig->action)); - init_sigpending(&newsig->shared_pending); +no_thread_group: write_lock_irq(&tasklist_lock); - spin_lock(&oldsig->siglock); - spin_lock(&newsig->siglock); + spin_lock(&oldsighand->siglock); + spin_lock(&newsighand->siglock); if (current == oldsig->curr_target) oldsig->curr_target = next_thread(current); - current->sig = newsig; + if (newsig) + current->signal = newsig; + current->sighand = newsighand; init_sigpending(¤t->pending); recalc_sigpending(); - spin_unlock(&newsig->siglock); - spin_unlock(&oldsig->siglock); + spin_unlock(&newsighand->siglock); + spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); - if (atomic_dec_and_test(&oldsig->count)) - kmem_cache_free(sigact_cachep, oldsig); + if (newsig && atomic_dec_and_test(&oldsig->count)) + kmem_cache_free(signal_cachep, oldsig); + + if (atomic_dec_and_test(&oldsighand->count)) + 
kmem_cache_free(sighand_cachep, oldsighand); if (!thread_group_empty(current)) BUG(); @@ -746,21 +773,20 @@ int flush_old_exec(struct linux_binprm * bprm) { char * name; int i, ch, retval; - struct signal_struct * oldsig = current->sig; /* * Release all of the old mmap stuff */ retval = exec_mmap(bprm->mm); if (retval) - goto mmap_failed; + goto out; /* * Make sure we have a private signal table and that * we are unassociated from the previous thread group. */ - retval = de_thread(oldsig); + retval = de_thread(current); if (retval) - goto flush_failed; + goto out; /* This is the point of no return */ @@ -794,14 +820,7 @@ int flush_old_exec(struct linux_binprm * bprm) return 0; -mmap_failed: -flush_failed: - spin_lock_irq(¤t->sig->siglock); - if (current->sig != oldsig) { - kmem_cache_free(sigact_cachep, current->sig); - current->sig = oldsig; - } - spin_unlock_irq(¤t->sig->siglock); +out: return retval; } @@ -885,7 +904,7 @@ void compute_creds(struct linux_binprm *bprm) if (must_not_trace_exec(current) || atomic_read(¤t->fs->count) > 1 || atomic_read(¤t->files->count) > 1 - || atomic_read(¤t->sig->count) > 1) { + || atomic_read(¤t->sighand->count) > 1) { if(!capable(CAP_SETUID)) { bprm->e_uid = current->uid; bprm->e_gid = current->gid; @@ -1302,8 +1321,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) } mm->dumpable = 0; init_completion(&mm->core_done); - current->sig->group_exit = 1; - current->sig->group_exit_code = exit_code; + current->signal->group_exit = 1; + current->signal->group_exit_code = exit_code; coredump_wait(mm); if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) @@ -1330,7 +1349,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) retval = binfmt->core_dump(signr, regs, file); - current->sig->group_exit_code |= 0x80; + current->signal->group_exit_code |= 0x80; close_fail: filp_close(file, NULL); fail_unlock: |
