Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/bitops.h       |  2
-rw-r--r--  include/asm-i386/mmu_context.h  | 14
-rw-r--r--  include/linux/init_task.h       |  3
-rw-r--r--  include/linux/sched.h           | 71
4 files changed, 15 insertions, 75 deletions
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 4853f679166e..7ff7555040e1 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -358,7 +358,7 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*/
-static __inline__ int find_next_bit (void * addr, int size, int offset)
+static __inline__ int find_next_bit(void * addr, int size, int offset)
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
int set = 0, bit = offset & 31, res;
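A minimal sketch of how a caller might walk the set bits with find_next_bit (hypothetical caller; it assumes, as the kernel's callers do, that a return value >= size means no further bit was found):

	/* Visit every set bit in a hypothetical 96-bit map. */
	unsigned long map[3] = { 0x11, 0x0, 0x80000000 };	/* bits 0, 4, 95 */
	int bit;

	for (bit = find_next_bit(map, 96, 0); bit < 96;
	     bit = find_next_bit(map, 96, bit + 1))
		printk("bit %d is set\n", bit);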
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index c8bc24cd9a70..79df13ec9d6b 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -8,14 +8,10 @@
/*
* Every architecture must define this function. It's the fastest
- * way of searching a 168-bit bitmap where the first 128 bits are
- * unlikely to be set. It's guaranteed that at least one of the 168
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
-#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
-# error update this function.
-#endif
-
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
@@ -24,11 +20,9 @@ static inline int sched_find_first_bit(unsigned long *b)
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
- if (unlikely(b[3]))
+ if (b[3])
return __ffs(b[3]) + 96;
- if (b[4])
- return __ffs(b[4]) + 128;
- return __ffs(b[5]) + 32 + 128;
+ return __ffs(b[4]) + 128;
}
/*
* possibly do the LDT unload here?
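A standalone, userspace re-creation of the new five-word search; a sketch assuming 32-bit longs and a portable stand-in for __ffs(), not the kernel's inline asm:

	#include <stdio.h>

	/* Portable stand-in for the kernel's __ffs(): index of the least
	 * significant set bit; the caller guarantees word != 0. */
	static int ffs_long(unsigned long word)
	{
		int bit = 0;

		while (!(word & 1UL)) {
			word >>= 1;
			bit++;
		}
		return bit;
	}

	/* The 140-bit priority bitmap fits in five 32-bit words; the first
	 * 100 bits (RT priorities) are rarely set, hence the unlikely()
	 * hints on the early words in the kernel version. */
	static int sched_find_first_bit(unsigned long *b)
	{
		if (b[0]) return ffs_long(b[0]);
		if (b[1]) return ffs_long(b[1]) + 32;
		if (b[2]) return ffs_long(b[2]) + 64;
		if (b[3]) return ffs_long(b[3]) + 96;
		return ffs_long(b[4]) + 128;
	}

	int main(void)
	{
		/* Only bit 135 set: a SCHED_OTHER priority. */
		unsigned long bitmap[5] = { 0, 0, 0, 0, 1UL << 7 };

		printf("first bit: %d\n", sched_find_first_bit(bitmap));	/* 135 */
		return 0;
	}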
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9007fe56c6ac..9f34e057079a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -45,7 +45,8 @@
thread_info: &init_thread_info, \
flags: 0, \
lock_depth: -1, \
- __nice: DEF_USER_NICE, \
+ prio: 120, \
+ static_prio: 120, \
policy: SCHED_OTHER, \
cpus_allowed: -1, \
mm: NULL, \
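The hardcoded 120 follows from the shrunken priority range. A sketch of the arithmetic, assuming MAX_RT_PRIO drops to 100 and MAX_PRIO to 140 (inferred from the 140-bit bitmap comment in mmu_context.h above) and that the nice-to-priority mapping keeps its old shape:

	#define MAX_RT_PRIO	100	/* assumed: first 100 bits are RT */
	#define MAX_PRIO	140	/* assumed: 140-bit bitmap */
	#define NICE_TO_PRIO(n)	(MAX_RT_PRIO + (n) + 20)	/* old shape, new base */

	/* NICE_TO_PRIO(0) == 100 + 0 + 20 == 120,
	 * matching INIT_TASK's prio and static_prio above. */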
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dd6dc6983e04..f7e14aced6b9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -150,7 +150,7 @@ extern void trap_init(void);
extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu);
-extern void scheduler_tick(struct task_struct *p);
+extern void scheduler_tick(int user_tick, int system);
extern void sched_task_migrated(struct task_struct *p);
extern void smp_migrate_task(int cpu, task_t *task);
extern unsigned long cache_decay_ticks;
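scheduler_tick() no longer takes the task: the timer code now reports where the tick landed, and the scheduler consults current itself. A sketch of the new call shape (hypothetical wrapper; the real caller is presumably update_process_times(), declared just above):

	void on_timer_tick(int user_mode)	/* hypothetical */
	{
		/* one tick of CPU time, charged to user or system context */
		scheduler_tick(user_mode, !user_mode);
	}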
@@ -241,18 +241,16 @@ struct task_struct {
int lock_depth; /* Lock depth */
- int prio;
- long __nice;
+ int prio, static_prio;
list_t run_list;
prio_array_t *array;
- unsigned int time_slice;
-
unsigned long sleep_avg;
unsigned long sleep_timestamp;
unsigned long policy;
unsigned long cpus_allowed;
+ unsigned int time_slice;
struct task_struct *next_task, *prev_task;
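The single __nice field becomes two priorities. A sketch of the presumed division of labor (the diff only shows the split itself):

	/* static_prio: the nice-derived base priority, changed only via
	 * set_user_nice(); prio: the dynamic priority the scheduler actually
	 * queues on, recomputed from static_prio plus an interactivity bonus
	 * driven by sleep_avg. Hypothetical illustration: */
	p->static_prio = NICE_TO_PRIO(nice);		/* fixed by nice */
	p->prio = p->static_prio - bonus(p);		/* bonus() is hypothetical */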
@@ -385,66 +383,12 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
*/
#define _STK_LIM (8*1024*1024)
-/*
- * RT priorities go from 0 to 99, but internally we max
- * them out at 128 to make it easier to search the
- * scheduler bitmap.
- */
-#define MAX_RT_PRIO 128
-/*
- * The lower the priority of a process, the more likely it is
- * to run. Priority of a process goes from 0 to 167. The 0-99
- * priority range is allocated to RT tasks, the 128-167 range
- * is for SCHED_OTHER tasks.
- */
-#define MAX_PRIO (MAX_RT_PRIO + 40)
-
-/*
- * Scales user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ 128 ... 167 (MAX_PRIO-1) ]
- *
- * User-nice value of -20 == static priority 128, and
- * user-nice value 19 == static priority 167. The lower
- * the priority value, the higher the task's priority.
- */
-#define NICE_TO_PRIO(n) (MAX_RT_PRIO + (n) + 20)
-#define DEF_USER_NICE 0
-
-/*
- * Default timeslice is 150 msecs, maximum is 300 msecs.
- * Minimum timeslice is 10 msecs.
- *
- * These are the 'tuning knobs' of the scheduler:
- */
-#define MIN_TIMESLICE ( 10 * HZ / 1000)
-#define MAX_TIMESLICE (300 * HZ / 1000)
-#define CHILD_FORK_PENALTY 95
-#define PARENT_FORK_PENALTY 100
-#define EXIT_WEIGHT 3
-#define PRIO_INTERACTIVE_RATIO 20
-#define PRIO_CPU_HOG_RATIO 60
-#define PRIO_BONUS_RATIO 70
-#define INTERACTIVE_DELTA 3
-#define MAX_SLEEP_AVG (2*HZ)
-#define STARVATION_LIMIT (2*HZ)
-
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
- * NICE_TO_TIMESLICE scales nice values [ -20 ... 19 ]
- * to time slice values.
- *
- * The higher a process's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority process gets MIN_TIMESLICE worth of execution time.
- */
-
-#define NICE_TO_TIMESLICE(n) (MIN_TIMESLICE + \
- ((MAX_TIMESLICE - MIN_TIMESLICE) * (19-(n))) / 39)
-
extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
extern void set_user_nice(task_t *p, long nice);
+extern int task_prio(task_t *p);
+extern int task_nice(task_t *p);
+extern int idle_cpu(int cpu);
+
asmlinkage long sys_sched_yield(void);
#define yield() sys_sched_yield()
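As a cross-check, the removed timeslice mapping does pencil out to the 150 msec default its comment claimed; arithmetic sketch assuming HZ=100, the i386 default of the era:

	/* MIN_TIMESLICE = 10*100/1000  =  1 tick   (10 ms)
	 * MAX_TIMESLICE = 300*100/1000 = 30 ticks (300 ms)
	 * NICE_TO_TIMESLICE(0) = 1 + (30 - 1) * (19 - 0) / 39
	 *                      = 1 + 14 = 15 ticks = 150 ms */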
@@ -526,6 +470,7 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
+extern void FASTCALL(sched_exit(task_t * p));
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
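A guess at the call shape for the new sched_exit() hook, which presumably lets the scheduler reconcile a dying task's remaining timeslice with its parent (assumed purpose; only the prototype is visible here):

	/* Hypothetical call site in do_exit(): */
	sched_exit(current);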