| field | value | date |
|---|---|---|
| author | David Howells <dhowells@redhat.com> | 2002-02-06 22:56:27 -0800 |
| committer | Linus Torvalds <torvalds@penguin.transmeta.com> | 2002-02-06 22:56:27 -0800 |
| commit | 9b10610a79a288a4dbac366b32970573405c4ed1 (patch) | |
| tree | 7d28e9c2d36303f4cf2f63f2be855d5c711952ca /kernel | |
| parent | 4da68d0b68d5db2c10d9bbbdb6d4f3e766a4e3c0 (diff) | |
[PATCH] thread information block
syscall latency improvement
* There's now an asm/thread_info.h header file with the basic structure
  definition and asm offsets in it (an illustrative sketch follows this list).
* There's now a linux/thread_info.h header file which includes the asm
  version and wraps some bitops calls to make convenience functions for
  accessing the low-level flags.
* The task_struct has had some fields (and some flags) removed, and has
  acquired a pointer to the thread_info struct.
* task_structs are now allocated from a slab cache in kernel/fork.c, whereas
  thread_info structs are allocated at the bottom of the stack pages.
* Some more convenience functions are provided at the end of linux/sched.h to
  access the flags of other tasks (they live there because they need the
  task_struct); these are also sketched below.
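To make the diff easier to follow, here is a rough sketch of what the i386 asm/thread_info.h side of this scheme looks like: the thread_info struct sits at the bottom of the task's stack pages, so the current thread_info can be recovered by masking the stack pointer, and alloc_thread_info()/free_thread_info() simply hand out the stack pages themselves. This is an illustrative reconstruction, not the literal header added by the patch; the exact field list, THREAD_SIZE value, and allocation helpers are assumptions.

```c
/* Illustrative sketch of an i386-style asm/thread_info.h; details are
 * approximate and for orientation only. */
struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low-level flags (TIF_* bits) */
	__u32			cpu;		/* CPU this task is running on */
	/* ... further arch-specific fields (addr_limit, etc.) */
};

#define THREAD_SIZE	(2 * PAGE_SIZE)	/* thread_info + kernel stack share this */

/* thread_info lives at the bottom of the stack pages, so it can be found
 * from the stack pointer alone - no global "current" variable is needed. */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
	__asm__("andl %%esp,%0" : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
	return ti;
}

/* Allocation helpers of the kind dup_task_struct() in kernel/fork.c relies on:
 * the thread_info is simply the bottom of a freshly allocated stack area. */
#define alloc_thread_info()	((struct thread_info *) \
				 __get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti)	free_pages((unsigned long)(ti), 1)
```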
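Likewise, a minimal sketch of the convenience wrappers described above, assuming the flag word is thread_info->flags and the TIF_* bit numbers come from asm/thread_info.h: linux/thread_info.h wraps the bitops for the current task, and linux/sched.h adds the task-directed variants used throughout the diff below (set_tsk_thread_flag(), test_tsk_thread_flag(), set_tsk_need_resched(), set_need_resched(), and so on). Exact signatures here are illustrative, not quoted from the patch.

```c
/* linux/thread_info.h - operate on the *current* task's low-level flags. */
static inline void set_thread_flag(int flag)
{
	set_bit(flag, &current_thread_info()->flags);
}

static inline void clear_thread_flag(int flag)
{
	clear_bit(flag, &current_thread_info()->flags);
}

static inline int test_thread_flag(int flag)
{
	return test_bit(flag, &current_thread_info()->flags);
}

/* linux/sched.h - the same operations aimed at *another* task; these live
 * in sched.h because they need the task_struct -> thread_info pointer. */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_bit(flag, &tsk->thread_info->flags);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_bit(flag, &tsk->thread_info->flags);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_bit(flag, &tsk->thread_info->flags);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_bit(flag, &tsk->thread_info->flags);
}

/* Shorthands used heavily in kernel/sched.c and kernel/signal.c. */
#define set_tsk_need_resched(tsk)	set_tsk_thread_flag((tsk), TIF_NEED_RESCHED)
#define clear_tsk_need_resched(tsk)	clear_tsk_thread_flag((tsk), TIF_NEED_RESCHED)
#define set_need_resched()		set_thread_flag(TIF_NEED_RESCHED)
```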
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/exec_domain.c | 10 |
| -rw-r--r-- | kernel/exit.c | 4 |
| -rw-r--r-- | kernel/fork.c | 55 |
| -rw-r--r-- | kernel/ksyms.c | 3 |
| -rw-r--r-- | kernel/sched.c | 40 |
| -rw-r--r-- | kernel/signal.c | 12 |
6 files changed, 84 insertions, 40 deletions
```diff
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index fc2c88549dd8..7d5ea35982b8 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -67,8 +67,8 @@ default_handler(int segment, struct pt_regs *regp)
 	}
 	set_personality(pers);
-	if (current->exec_domain->handler != default_handler)
-		current->exec_domain->handler(segment, regp);
+	if (current_thread_info()->exec_domain->handler != default_handler)
+		current_thread_info()->exec_domain->handler(segment, regp);
 	else
 		send_sig(SIGSEGV, current, 1);
 }
@@ -162,7 +162,7 @@ __set_personality(u_long personality)
 	struct exec_domain *ep, *oep;
 	ep = lookup_exec_domain(personality);
-	if (ep == current->exec_domain) {
+	if (ep == current_thread_info()->exec_domain) {
 		current->personality = personality;
 		return 0;
 	}
@@ -190,8 +190,8 @@ __set_personality(u_long personality)
 	 */
 	current->personality = personality;
-	oep = current->exec_domain;
-	current->exec_domain = ep;
+	oep = current_thread_info()->exec_domain;
+	current_thread_info()->exec_domain = ep;
 	set_fs_altroot();
 	put_exec_domain(oep);
diff --git a/kernel/exit.c b/kernel/exit.c
index 429fd2908b46..6b5a7cba048e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -65,7 +65,7 @@ static void release_task(struct task_struct * p)
 	__restore_flags(flags);
 	p->pid = 0;
-	free_task_struct(p);
+	put_task_struct(p);
 }
 /*
@@ -529,7 +529,7 @@ fake_volatile:
 	if (current->leader)
 		disassociate_ctty(1);
-	put_exec_domain(tsk->exec_domain);
+	put_exec_domain(tsk->thread_info->exec_domain);
 	if (tsk->binfmt && tsk->binfmt->module)
 		__MOD_DEC_USE_COUNT(tsk->binfmt->module);
diff --git a/kernel/fork.c b/kernel/fork.c
index bfbae1b15439..3e49ad5c1ebc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -28,6 +28,8 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
+static kmem_cache_t *task_struct_cachep;
+
 /* The idle threads do not count.. */
 int nr_threads;
@@ -70,6 +72,14 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 void __init fork_init(unsigned long mempages)
 {
+	/* create a slab on which task_structs can be allocated */
+	task_struct_cachep =
+		kmem_cache_create("task_struct",
+				  sizeof(struct task_struct),0,
+				  SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!task_struct_cachep)
+		panic("fork_init(): cannot create task_struct SLAB cache");
+
 	/*
 	 * The default maximum number of threads is set to a safe
 	 * value: the thread structures can take up at most half
@@ -81,6 +91,35 @@ void __init fork_init(unsigned long mempages)
 	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
 }
+struct task_struct *dup_task_struct(struct task_struct *orig)
+{
+	struct task_struct *tsk;
+	struct thread_info *ti;
+
+	ti = alloc_thread_info();
+	if (!ti) return NULL;
+
+	tsk = kmem_cache_alloc(task_struct_cachep,GFP_ATOMIC);
+	if (!tsk) {
+		free_thread_info(ti);
+		return NULL;
+	}
+
+	*ti = *orig->thread_info;
+	*tsk = *orig;
+	tsk->thread_info = ti;
+	ti->task = tsk;
+	atomic_set(&tsk->usage,1);
+
+	return tsk;
+}
+
+void __put_task_struct(struct task_struct *tsk)
+{
+	free_thread_info(tsk->thread_info);
+	kmem_cache_free(task_struct_cachep,tsk);
+}
+
 /* Protects next_safe and last_pid. */
 spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
@@ -546,7 +585,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
-	new_flags &= ~(PF_SUPERPRIV | PF_USEDFPU);
+	new_flags &= ~PF_SUPERPRIV;
 	new_flags |= PF_FORKNOEXEC;
 	if (!(clone_flags & CLONE_PTRACE))
 		p->ptrace = 0;
@@ -585,12 +624,10 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
 	}
 	retval = -ENOMEM;
-	p = alloc_task_struct();
+	p = dup_task_struct(current);
 	if (!p)
 		goto fork_out;
-	*p = *current;
-
 	retval = -EAGAIN;
 	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
@@ -608,7 +645,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
 	if (nr_threads >= max_threads)
 		goto bad_fork_cleanup_count;
-	get_exec_domain(p->exec_domain);
+	get_exec_domain(p->thread_info->exec_domain);
 	if (p->binfmt && p->binfmt->module)
 		__MOD_INC_USE_COUNT(p->binfmt->module);
@@ -631,7 +668,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
 	}
 	spin_lock_init(&p->alloc_lock);
-	p->work.sigpending = 0;
+	clear_tsk_thread_flag(p,TIF_SIGPENDING);
 	init_sigpending(&p->pending);
 	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
@@ -755,7 +792,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
 	 * Let the child process run first, to avoid most of the
 	 * COW overhead when the child exec()s afterwards.
 	 */
-	current->work.need_resched = 1;
+	set_need_resched();
 fork_out:
 	return retval;
@@ -771,14 +808,14 @@ bad_fork_cleanup_fs:
 bad_fork_cleanup_files:
 	exit_files(p); /* blocking */
 bad_fork_cleanup:
-	put_exec_domain(p->exec_domain);
+	put_exec_domain(p->thread_info->exec_domain);
 	if (p->binfmt && p->binfmt->module)
 		__MOD_DEC_USE_COUNT(p->binfmt->module);
 bad_fork_cleanup_count:
 	atomic_dec(&p->user->processes);
 	free_uid(p->user);
 bad_fork_free:
-	free_task_struct(p);
+	put_task_struct(p);
 	goto fork_out;
 }
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 42ebfe7f746c..ae89152ce936 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -563,7 +563,8 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
 /* init task, for moving kthread roots - ought to export a function ?? */
-EXPORT_SYMBOL(init_task_union);
+EXPORT_SYMBOL(init_task);
+EXPORT_SYMBOL(init_thread_union);
 EXPORT_SYMBOL(tasklist_lock);
 EXPORT_SYMBOL(pidhash);
diff --git a/kernel/sched.c b/kernel/sched.c
index 5fcebb0346fb..56fb9a54e7fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -51,7 +51,7 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
 #define cpu_rq(cpu)	(runqueues + (cpu))
 #define this_rq()	cpu_rq(smp_processor_id())
-#define task_rq(p)	cpu_rq((p)->cpu)
+#define task_rq(p)	cpu_rq((p)->thread_info->cpu)
 #define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
 #define rt_task(p)	((p)->policy != SCHED_OTHER)
@@ -192,13 +192,19 @@ static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
 static inline void resched_task(task_t *p)
 {
-	int need_resched;
+#ifdef CONFIG_SMP
+	int need_resched, nrpolling;
+
+	/* minimise the chance of sending an interrupt to poll_idle() */
+	nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
+	need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
+	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
-	need_resched = p->work.need_resched;
-	wmb();
-	p->work.need_resched = 1;
-	if (!need_resched && (p->cpu != smp_processor_id()))
-		smp_send_reschedule(p->cpu);
+	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
+		smp_send_reschedule(p->thread_info->cpu);
+#else
+	set_tsk_need_resched(p);
+#endif
 }
 #ifdef CONFIG_SMP
@@ -236,7 +242,7 @@ repeat:
 void sched_task_migrated(task_t *new_task)
 {
 	wait_task_inactive(new_task);
-	new_task->cpu = smp_processor_id();
+	new_task->thread_info->cpu = smp_processor_id();
 	wake_up_process(new_task);
 }
@@ -299,7 +305,7 @@ void wake_up_forked_process(task_t * p)
 		current->sleep_avg = current->sleep_avg * PARENT_FORK_PENALTY / 100;
 	}
 	spin_lock_irq(&rq->lock);
-	p->cpu = smp_processor_id();
+	p->thread_info->cpu = smp_processor_id();
 	activate_task(p, rq);
 	spin_unlock_irq(&rq->lock);
 }
@@ -519,11 +525,11 @@ skip_queue:
 	 */
 	dequeue_task(next, array);
 	busiest->nr_running--;
-	next->cpu = this_cpu;
+	next->thread_info->cpu = this_cpu;
 	this_rq->nr_running++;
 	enqueue_task(next, this_rq->active);
 	if (next->prio < current->prio)
-		current->work.need_resched = 1;
+		set_need_resched();
 	if (!idle && --imbalance) {
 		if (array == busiest->expired) {
 			array = busiest->active;
@@ -572,7 +578,7 @@ void scheduler_tick(task_t *p)
 #endif
 	/* Task might have expired already, but not scheduled off yet */
 	if (p->array != rq->active) {
-		p->work.need_resched = 1;
+		set_tsk_need_resched(p);
 		return;
 	}
 	spin_lock(&rq->lock);
@@ -583,7 +589,7 @@ void scheduler_tick(task_t *p)
 	 */
 	if ((p->policy == SCHED_RR) && !--p->time_slice) {
 		p->time_slice = NICE_TO_TIMESLICE(p->__nice);
-		p->work.need_resched = 1;
+		set_tsk_need_resched(p);
 		/* put it at the end of the queue: */
 		dequeue_task(p, rq->active);
@@ -603,7 +609,7 @@ void scheduler_tick(task_t *p)
 		p->sleep_avg--;
 	if (!--p->time_slice) {
 		dequeue_task(p, rq->active);
-		p->work.need_resched = 1;
+		set_tsk_need_resched(p);
 		p->prio = effective_prio(p);
 		p->time_slice = NICE_TO_TIMESLICE(p->__nice);
@@ -684,7 +690,7 @@ pick_next_task:
 switch_tasks:
 	prefetch(next);
-	prev->work.need_resched = 0;
+	clear_tsk_need_resched(prev);
 	if (likely(prev != next)) {
 		rq->nr_switches++;
@@ -1316,9 +1322,9 @@ void __init init_idle(task_t *idle, int cpu)
 	idle->array = NULL;
 	idle->prio = MAX_PRIO;
 	idle->state = TASK_RUNNING;
-	idle->cpu = cpu;
+	idle->thread_info->cpu = cpu;
 	double_rq_unlock(idle_rq, rq);
-	idle->work.need_resched = 1;
+	set_tsk_need_resched(idle);
 	__restore_flags(flags);
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index d648cff08eb6..74d783557a24 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -23,7 +23,7 @@
 #define DEBUG_SIG 0
 #if DEBUG_SIG
-#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
+#define SIG_SLAB_DEBUG	(SLAB_RED_ZONE /* | SLAB_POISON */)
 #else
 #define SIG_SLAB_DEBUG	0
 #endif
@@ -105,7 +105,7 @@ static void flush_sigqueue(struct sigpending *queue)
 void flush_signals(struct task_struct *t)
 {
-	t->work.sigpending = 0;
+	clear_tsk_thread_flag(t,TIF_SIGPENDING);
 	flush_sigqueue(&t->pending);
 }
@@ -119,7 +119,7 @@ void exit_sighand(struct task_struct *tsk)
 		if (atomic_dec_and_test(&sig->count))
 			kmem_cache_free(sigact_cachep, sig);
 	}
-	tsk->work.sigpending = 0;
+	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
 	flush_sigqueue(&tsk->pending);
 	spin_unlock_irq(&tsk->sigmask_lock);
 }
@@ -275,7 +275,7 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
 	if (current->notifier) {
 		if (sigismember(current->notifier_mask, sig)) {
 			if (!(current->notifier)(current->notifier_data)) {
-				current->work.sigpending = 0;
+				clear_thread_flag(TIF_SIGPENDING);
 				return 0;
 			}
 		}
@@ -494,7 +494,7 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
 */
 static inline void signal_wake_up(struct task_struct *t)
 {
-	t->work.sigpending = 1;
+	set_tsk_thread_flag(t,TIF_SIGPENDING);
 #ifdef CONFIG_SMP
 	/*
@@ -507,7 +507,7 @@ static inline void signal_wake_up(struct task_struct *t)
 	 * process of changing - but no harm is done by that
 	 * other than doing an extra (lightweight) IPI interrupt.
 	 */
-	if ((t->state == TASK_RUNNING) && (t->cpu != smp_processor_id()))
+	if ((t->state == TASK_RUNNING) && (t->thread_info->cpu != smp_processor_id()))
		kick_if_running(t);
 #endif
 	if (t->state & TASK_INTERRUPTIBLE) {
```
