path: root/kernel/fork.c
author     David S. Miller <davem@nuts.ninka.net>  2003-07-18 09:29:39 -0700
committer  David S. Miller <davem@nuts.ninka.net>  2003-07-18 09:29:39 -0700
commit     86df0e66c0ea429cc38d966be1bfcd856941e05c (patch)
tree       1f6d70a8440bcfa3298e2a5d8bf2054b740d8e32 /kernel/fork.c
parent     911e72b5a6b5492938e6763798ca120353ea9355 (diff)
parent     1d02c2c0dfe098493f1359da83f1f2ba551f40a6 (diff)
Merge nuts.ninka.net:/home/davem/src/BK/network-2.5
into nuts.ninka.net:/home/davem/src/BK/net-2.5
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  66
1 file changed, 19 insertions(+), 47 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 2928684629e4..7c4c94b1a968 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -53,13 +53,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
-/*
- * A per-CPU task cache - this relies on the fact that
- * the very last portion of sys_exit() is executed with
- * preemption turned off.
- */
-static task_t *task_cache[NR_CPUS] __cacheline_aligned;
-
int nr_processes(void)
{
int cpu;
@@ -80,26 +73,8 @@ static kmem_cache_t *task_struct_cachep;
static void free_task(struct task_struct *tsk)
{
- /*
- * The task cache is effectively disabled right now.
- * Do we want it? The slab cache already has per-cpu
- * stuff, but the thread info (usually a order-1 page
- * allocation) doesn't.
- */
- if (tsk != current) {
- free_thread_info(tsk->thread_info);
- free_task_struct(tsk);
- } else {
- int cpu = get_cpu();
-
- tsk = task_cache[cpu];
- if (tsk) {
- free_thread_info(tsk->thread_info);
- free_task_struct(tsk);
- }
- task_cache[cpu] = current;
- put_cpu();
- }
+ free_thread_info(tsk->thread_info);
+ free_task_struct(tsk);
}
void __put_task_struct(struct task_struct *tsk)
@@ -220,25 +195,18 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
- int cpu = get_cpu();
prepare_to_copy(orig);
- tsk = task_cache[cpu];
- task_cache[cpu] = NULL;
- put_cpu();
- if (!tsk) {
- tsk = alloc_task_struct();
- if (!tsk)
- return NULL;
-
- ti = alloc_thread_info(tsk);
- if (!ti) {
- free_task_struct(tsk);
- return NULL;
- }
- } else
- ti = tsk->thread_info;
+ tsk = alloc_task_struct();
+ if (!tsk)
+ return NULL;
+
+ ti = alloc_thread_info(tsk);
+ if (!ti) {
+ free_task_struct(tsk);
+ return NULL;
+ }
*ti = *orig->thread_info;
*tsk = *orig;
@@ -791,8 +759,10 @@ struct task_struct *copy_process(unsigned long clone_flags,
goto fork_out;
retval = -EAGAIN;
- if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
+ if (atomic_read(&p->user->processes) >=
+ p->rlim[RLIMIT_NPROC].rlim_cur) {
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+ p->user != &root_user)
goto bad_fork_free;
}
@@ -1106,7 +1076,7 @@ long do_fork(unsigned long clone_flags,
init_completion(&vfork);
}
- if (p->ptrace & PT_PTRACED) {
+ if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
@@ -1114,7 +1084,9 @@ long do_fork(unsigned long clone_flags,
set_tsk_thread_flag(p, TIF_SIGPENDING);
}
- wake_up_forked_process(p); /* do this last */
+ p->state = TASK_STOPPED;
+ if (!(clone_flags & CLONE_STOPPED))
+ wake_up_forked_process(p); /* do this last */
++total_forks;
if (unlikely (trace)) {
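
Note on the CLONE_STOPPED hunk above: with this change, a child created
with CLONE_STOPPED is left in TASK_STOPPED rather than being woken by
do_fork(), so it does not run until it receives SIGCONT. A minimal
userspace sketch of how a caller of this era's clone(2) might use the
flag (child_fn, child_stack, and the stack size are illustrative names,
not part of this commit; CLONE_STOPPED was removed from later kernels):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#ifndef CLONE_STOPPED
#define CLONE_STOPPED 0x02000000	/* start the child in the stopped state */
#endif

static char child_stack[64 * 1024];

static int child_fn(void *arg)
{
	printf("child runs only after SIGCONT\n");
	return 0;
}

int main(void)
{
	/* The child is created in TASK_STOPPED; do_fork() skips
	 * wake_up_forked_process() when CLONE_STOPPED is set. */
	int pid = clone(child_fn, child_stack + sizeof(child_stack),
			CLONE_STOPPED | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		exit(1);
	}

	kill(pid, SIGCONT);	/* wake the stopped child */
	waitpid(pid, NULL, 0);
	return 0;
}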