diff options
| author | Ingo Molnar <mingo@elte.hu> | 2002-09-07 21:13:50 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-09-07 21:13:50 -0700 |
| commit | 6dfc88977e42a9d47c8e0c450c879610107bee36 (patch) | |
| tree | 22266adfaa056eceb8832f46ed3a1dc2aa95a43c /include/linux | |
| parent | 36780249955396f8c9a76192f360ac3a8e9d1e01 (diff) | |
[PATCH] shared thread signals
Support POSIX compliant thread signals on a kernel level with usable
debugging (broadcast SIGSTOP, SIGCONT) and thread group management
(broadcast SIGKILL), plus to load-balance 'process' signals between
threads for better signal performance.
Changes:
- POSIX thread semantics for signals
there are 7 'types' of actions a signal can take: specific, load-balance,
kill-all, kill-all+core, stop-all, continue-all and ignore. Depending on
the POSIX specifications each signal has one of the types defined for both
the 'handler defined' and the 'handler not defined (kernel default)' case.
Here is the table:
----------------------------------------------------------
| | userspace | kernel |
----------------------------------------------------------
| SIGHUP | load-balance | kill-all |
| SIGINT | load-balance | kill-all |
| SIGQUIT | load-balance | kill-all+core |
| SIGILL | specific | kill-all+core |
| SIGTRAP | specific | kill-all+core |
| SIGABRT/SIGIOT | specific | kill-all+core |
| SIGBUS | specific | kill-all+core |
| SIGFPE | specific | kill-all+core |
| SIGKILL | n/a | kill-all |
| SIGUSR1 | load-balance | kill-all |
| SIGSEGV | specific | kill-all+core |
| SIGUSR2 | load-balance | kill-all |
| SIGPIPE | specific | kill-all |
| SIGALRM | load-balance | kill-all |
| SIGTERM | load-balance | kill-all |
| SIGCHLD | load-balance | ignore |
| SIGCONT | load-balance | continue-all |
| SIGSTOP | n/a | stop-all |
| SIGTSTP | load-balance | stop-all |
| SIGTTIN | load-balance | stop-all |
| SIGTTOU | load-balance | stop-all |
| SIGURG | load-balance | ignore |
| SIGXCPU | specific | kill-all+core |
| SIGXFSZ | specific | kill-all+core |
| SIGVTALRM | load-balance | kill-all |
| SIGPROF | specific | kill-all |
| SIGPOLL/SIGIO | load-balance | kill-all |
| SIGSYS/SIGUNUSED | specific | kill-all+core |
| SIGSTKFLT | specific | kill-all |
| SIGWINCH | load-balance | ignore |
| SIGPWR | load-balance | kill-all |
| SIGRTMIN-SIGRTMAX | load-balance | kill-all |
----------------------------------------------------------
as you can see from the list, signals that have handlers defined never
get broadcasted - they are either specific or load-balanced.
- CLONE_THREAD implies CLONE_SIGHAND
It does not make much sense to have a thread group that does not share
signal handlers. In fact in the patch i'm using the signal spinlock to
lock access to the thread group. I made the siglock IRQ-safe, thus we can
load-balance signals from interrupt contexts as well. (we cannot take the
tasklist lock in write mode from IRQ handlers.)
this is not as clean as i'd like it to be, but it's the best i could come
up with so far.
- thread group list management reworked.
threads are now removed from the group if the thread is unhashed from the
PID table. This makes the most sense. This also helps with another feature
that relies on an intact thread group list: multithreaded coredumps.
- child reparenting reworked.
the O(N) algorithm in forget_original_parent() causes massive performance
problems if a large number of threads exit from the group. Performance
improves more than 10-fold if the following simple rules are followed
instead:
- reparent children to the *previous* thread [exiting or not]
- if a thread is detached then reparent to init.
- fast broadcasting of kernel-internal SIGSTOP, SIGCONT, SIGKILL, etc.
kernel-internal broadcasted signals are a potential DoS problem, since
they might generate massive amounts of GFP_ATOMIC allocations of siginfo
structures. The important thing to note is that the siginfo structure does
not actually have to be allocated and queued - the signal processing code
has all the information it needs, neither of these signals carries any
information in the siginfo structure. This makes a broadcast SIGKILL a
very simple operation: all threads get bit 9 set in their pending
bitmask. The speedup due to this was significant - and the robustness win
is invaluable.
- sys_execve() should not kill off 'all other' threads.
the 'exec kills all threads if the master thread does the exec()' is a
POSIX(-ish) thing that should not be hardcoded in the kernel in this case.
to handle POSIX exec() semantics, glibc uses a special syscall, which
kills 'all but self' threads: sys_exit_allbutself().
the straightforward exec() implementation just calls sys_exit_allbutself()
and then sys_execve().
(this syscall is also used internally if the thread group leader
thread sys_exit()s or sys_exec()s, to ensure the integrity of the thread
group.)
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/sched.h | 53 |
1 files changed, 34 insertions, 19 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 896b7f59941c..bd7073fdefaf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -211,6 +211,11 @@ struct signal_struct { atomic_t count; struct k_sigaction action[_NSIG]; spinlock_t siglock; + + /* current thread group signal load-balancing target: */ + task_t *curr_target; + + struct sigpending shared_pending; }; /* @@ -356,7 +361,7 @@ struct task_struct { spinlock_t sigmask_lock; /* Protects signal and blocked */ struct signal_struct *sig; - sigset_t blocked; + sigset_t blocked, real_blocked, shared_unblocked; struct sigpending pending; unsigned long sas_ss_sp; @@ -431,6 +436,7 @@ extern void set_cpus_allowed(task_t *p, unsigned long new_mask); extern void set_user_nice(task_t *p, long nice); extern int task_prio(task_t *p); extern int task_nice(task_t *p); +extern int task_curr(task_t *p); extern int idle_cpu(int cpu); void yield(void); @@ -535,7 +541,7 @@ extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *); extern void sig_exit(int, int, struct siginfo *); -extern int dequeue_signal(sigset_t *, siginfo_t *); +extern int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t *info); extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); extern void unblock_all_signals(void); @@ -654,6 +660,7 @@ extern void exit_thread(void); extern void exit_mm(struct task_struct *); extern void exit_files(struct task_struct *); extern void exit_sighand(struct task_struct *); +extern void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig); extern void reparent_to_init(void); extern void daemonize(void); @@ -786,8 +793,29 @@ static inline struct task_struct *younger_sibling(struct task_struct *p) #define for_each_thread(task) \ for (task = next_thread(current) ; task != current ; task = next_thread(task)) -#define next_thread(p) \ - 
list_entry((p)->thread_group.next, struct task_struct, thread_group) +static inline task_t *next_thread(task_t *p) +{ + if (!p->sig) + BUG(); +#if CONFIG_SMP + if (!spin_is_locked(&p->sig->siglock) && + !rwlock_is_locked(&tasklist_lock)) + BUG(); +#endif + return list_entry((p)->thread_group.next, task_t, thread_group); +} + +static inline task_t *prev_thread(task_t *p) +{ + if (!p->sig) + BUG(); +#if CONFIG_SMP + if (!spin_is_locked(&p->sig->siglock) && + !rwlock_is_locked(&tasklist_lock)) + BUG(); +#endif + return list_entry((p)->thread_group.prev, task_t, thread_group); +} #define thread_group_leader(p) (p->pid == p->tgid) @@ -903,21 +931,8 @@ static inline void cond_resched(void) This is required every time the blocked sigset_t changes. Athread cathreaders should have t->sigmask_lock. */ -static inline void recalc_sigpending_tsk(struct task_struct *t) -{ - if (has_pending_signals(&t->pending.signal, &t->blocked)) - set_tsk_thread_flag(t, TIF_SIGPENDING); - else - clear_tsk_thread_flag(t, TIF_SIGPENDING); -} - -static inline void recalc_sigpending(void) -{ - if (has_pending_signals(¤t->pending.signal, ¤t->blocked)) - set_thread_flag(TIF_SIGPENDING); - else - clear_thread_flag(TIF_SIGPENDING); -} +extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); +extern void recalc_sigpending(void); /* * Wrappers for p->thread_info->cpu access. No-op on UP. |
