diff options
| author | Roland McGrath <roland@redhat.com> | 2005-03-07 18:18:00 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-03-07 18:18:00 -0800 |
| commit | 98e4b451306be8eb831c9ed102cf34c54ef358ea (patch) | |
| tree | 0f3811ce39b38db95c8867a47a3eb6c7809e59fe /kernel | |
| parent | 9046dd435b58f6118d9fa5a9ab2e51ba4e0e9c93 (diff) | |
[PATCH] override RLIMIT_SIGPENDING for non-RT signals
POSIX can be read as requiring that the siginfo_t data also be available when
`kill' was used. This patch makes it allocate the siginfo_t, even
when that exceeds {RLIMIT_SIGPENDING}, for any non-RT signal (< SIGRTMIN)
not sent by sigqueue (actually, any signal that couldn't have been faked by
a sigqueue call). Of course, in an extreme memory shortage situation, you
are SOL and violate POSIX a little before you die horribly from being out
of memory anyway.
The LEGACY_QUEUE logic already ensures that, for non-RT signals, at most
one is ever on the queue. So there really is no risk at all of unbounded
resource consumption; the usage can reach {RLIMIT_SIGPENDING} + 31, is all.
It's already the case that the limit can be exceeded by (in theory) up to
{RLIMIT_NPROC}-1 in race conditions because the bump and the limit check
are not atomic. (Obviously you can only get anywhere near that many with
assloads of preemption, but exceeding it by a few is not too unlikely.)
This patch also fixes that accounting so that it should not be possible to
exceed {RLIMIT_SIGPENDING} + SIGRTMIN-1 queue items per user in races.
Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/signal.c | 18 |
1 file changed, 12 insertions, 6 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 80cd734e3570..61dabc9cdf92 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -261,19 +261,23 @@ next_signal(struct sigpending *pending, sigset_t *mask)
 	return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags)
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags,
+					 int override_rlimit)
 {
 	struct sigqueue *q = NULL;
 
-	if (atomic_read(&t->user->sigpending) <
+	atomic_inc(&t->user->sigpending);
+	if (override_rlimit ||
+	    atomic_read(&t->user->sigpending) <=
 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, flags);
-	if (q) {
+	if (unlikely(q == NULL)) {
+		atomic_dec(&t->user->sigpending);
+	} else {
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
 		q->lock = NULL;
 		q->user = get_uid(t->user);
-		atomic_inc(&q->user->sigpending);
 	}
 	return(q);
 }
@@ -805,7 +809,9 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	   make sure at least one signal gets delivered and don't
 	   pass on the info struct.  */
 
-	q = __sigqueue_alloc(t, GFP_ATOMIC);
+	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+					     ((unsigned long) info < 2 ||
+					      info->si_code >= 0)));
 	if (q) {
 		list_add_tail(&q->list, &signals->list);
 		switch ((unsigned long) info) {
@@ -1328,7 +1334,7 @@ struct sigqueue *sigqueue_alloc(void)
 {
 	struct sigqueue *q;
 
-	if ((q = __sigqueue_alloc(current, GFP_KERNEL)))
+	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
 		q->flags |= SIGQUEUE_PREALLOC;
 	return(q);
 }
