author     Ingo Molnar <mingo@elte.hu>             2002-09-28 21:00:15 -0700
committer  Jeff Garzik <jgarzik@mandrakesoft.com>  2002-09-28 21:00:15 -0700
commit     5a5ec729b503205d5770f4f9543ca768db7b6a5b
tree       343437615e2d2b7f2c75bb5e34e8c13a695dc4b5 /fs/proc/array.c
parent     5360ccf4ac149920c4be3fddc6ad5d2ef6779f37
[PATCH] atomic-thread-signals
Avoid races between signal delivery and thread signal blocking in thread groups. The method is to eliminate the per-thread sigmask_lock and to use the per-group (per-'process') siglock for all signal-related activity. This immensely simplifies some of the locking interactions within signal.c and enables fixing the above category of signal-delivery races.

This became possible due to the earlier thread-signal patch, which made siglock irq-safe. (It used to be a process-context-only spinlock.) And this is even a speedup for non-threaded applications: only one lock is used.

I fixed all places within the kernel except the non-x86 arch sections. Even for them the transition is very straightforward; in almost every case the following is sufficient in arch/*/kernel/signal.c:

	:1,$s/->sigmask_lock/->sig->siglock/g
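To make the lock substitution concrete, the sketch below is illustrative only: read_task_blocked() is a hypothetical helper, not part of this patch, and it assumes the 2.5-era task_struct fields (sig, blocked). It shows the mechanical before/after pattern the patch applies throughout the tree:

/*
 * Hypothetical helper, not from this patch: snapshot another task's
 * blocked-signal mask.  The old per-thread lock is shown commented
 * out above its per-group replacement.
 */
static void read_task_blocked(struct task_struct *p, sigset_t *out)
{
	/* old: spin_lock_irq(&p->sigmask_lock); */
	spin_lock_irq(&p->sig->siglock);
	*out = p->blocked;
	/* old: spin_unlock_irq(&p->sigmask_lock); */
	spin_unlock_irq(&p->sig->siglock);
}

Since the earlier thread-signal patch made siglock irq-safe, taking it with spin_lock_irq() here is legal from process context, and a whole thread group now serializes its signal state on a single lock.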
Diffstat (limited to 'fs/proc/array.c')
-rw-r--r--  fs/proc/array.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index c1587b0cc89b..411dc0d1c49e 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -228,7 +228,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
 	sigemptyset(ign);
 	sigemptyset(catch);
 
-	spin_lock_irq(&p->sigmask_lock);
+	spin_lock_irq(&p->sig->siglock);
 	if (p->sig) {
 		k = p->sig->action;
 		for (i = 1; i <= _NSIG; ++i, ++k) {
@@ -238,7 +238,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
 			sigaddset(catch, i);
 		}
 	}
-	spin_unlock_irq(&p->sigmask_lock);
+	spin_unlock_irq(&p->sig->siglock);
 }
 
 static inline char * task_sig(struct task_struct *p, char *buffer)