| author    | Patrick Mochel <mochel@bambi.(none)> | 2003-07-26 07:53:20 -0500 |
|-----------|--------------------------------------|---------------------------|
| committer | Patrick Mochel <mochel@bambi.(none)> | 2003-07-26 07:53:20 -0500 |
| commit    | e2f1995e370df60f6a3da62da6fdd02516bf1d98 | |
| tree      | a2c7d0c3119e0d816a872a9f8ad530baa7930fb0 /kernel | |
| parent    | 5c8c7a21cc09ac0bf0b2d5e3f83605301522ead7 | |
| parent    | 5d6a21146966ca1d7fb1d43e7174f51f148720c5 | |
Merge bk://ldm.bkbits.net/linux-2.5-power
into bambi.(none):/home/mochel/src/kernel/devel/linux-2.5-power
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/compat.c  |  4 |
| -rw-r--r-- | kernel/extable.c |  2 |
| -rw-r--r-- | kernel/fork.c    | 66 |
| -rw-r--r-- | kernel/module.c  | 22 |
| -rw-r--r-- | kernel/profile.c |  5 |
| -rw-r--r-- | kernel/sched.c   |  6 |
| -rw-r--r-- | kernel/sys.c     | 18 |
| -rw-r--r-- | kernel/time.c    | 15 |
8 files changed, 71 insertions, 67 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 868edf9665f1..de5820d09d71 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -425,9 +425,11 @@ asmlinkage int compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
 			    &kernel_mask);
 	set_fs(old_fs);
 
-	if (ret > 0)
+	if (ret > 0) {
+		ret = sizeof(compat_ulong_t);
 		if (put_user(kernel_mask, user_mask_ptr))
 			return -EFAULT;
+	}
 
 	return ret;
 }
diff --git a/kernel/extable.c b/kernel/extable.c
index d49099854024..6f1fb8c6b75b 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -17,10 +17,10 @@
  */
 #include <linux/module.h>
 #include <asm/uaccess.h>
+#include <asm/sections.h>
 
 extern const struct exception_table_entry __start___ex_table[];
 extern const struct exception_table_entry __stop___ex_table[];
-extern char _stext[], _etext[], _sinittext[], _einittext[];
 
 /* Given an address, look for it in the exception tables. */
 const struct exception_table_entry *search_exception_tables(unsigned long addr)
diff --git a/kernel/fork.c b/kernel/fork.c
index 2928684629e4..7c4c94b1a968 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -53,13 +53,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
 
-/*
- * A per-CPU task cache - this relies on the fact that
- * the very last portion of sys_exit() is executed with
- * preemption turned off.
- */
-static task_t *task_cache[NR_CPUS] __cacheline_aligned;
-
 int nr_processes(void)
 {
 	int cpu;
@@ -80,26 +73,8 @@ static kmem_cache_t *task_struct_cachep;
 
 static void free_task(struct task_struct *tsk)
 {
-	/*
-	 * The task cache is effectively disabled right now.
-	 * Do we want it? The slab cache already has per-cpu
-	 * stuff, but the thread info (usually a order-1 page
-	 * allocation) doesn't.
-	 */
-	if (tsk != current) {
-		free_thread_info(tsk->thread_info);
-		free_task_struct(tsk);
-	} else {
-		int cpu = get_cpu();
-
-		tsk = task_cache[cpu];
-		if (tsk) {
-			free_thread_info(tsk->thread_info);
-			free_task_struct(tsk);
-		}
-		task_cache[cpu] = current;
-		put_cpu();
-	}
+	free_thread_info(tsk->thread_info);
+	free_task_struct(tsk);
 }
 
 void __put_task_struct(struct task_struct *tsk)
@@ -220,25 +195,18 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
-	int cpu = get_cpu();
 
 	prepare_to_copy(orig);
 
-	tsk = task_cache[cpu];
-	task_cache[cpu] = NULL;
-	put_cpu();
-	if (!tsk) {
-		tsk = alloc_task_struct();
-		if (!tsk)
-			return NULL;
-
-		ti = alloc_thread_info(tsk);
-		if (!ti) {
-			free_task_struct(tsk);
-			return NULL;
-		}
-	} else
-		ti = tsk->thread_info;
+	tsk = alloc_task_struct();
+	if (!tsk)
+		return NULL;
+
+	ti = alloc_thread_info(tsk);
+	if (!ti) {
+		free_task_struct(tsk);
+		return NULL;
+	}
 
 	*ti = *orig->thread_info;
 	*tsk = *orig;
@@ -791,8 +759,10 @@ struct task_struct *copy_process(unsigned long clone_flags,
 		goto fork_out;
 
 	retval = -EAGAIN;
-	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
-		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
+	if (atomic_read(&p->user->processes) >=
+			p->rlim[RLIMIT_NPROC].rlim_cur) {
+		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+				p->user != &root_user)
 			goto bad_fork_free;
 	}
 
@@ -1106,7 +1076,7 @@ long do_fork(unsigned long clone_flags,
 			init_completion(&vfork);
 		}
 
-		if (p->ptrace & PT_PTRACED) {
+		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
 			/*
 			 * We'll start up with an immediate SIGSTOP.
 			 */
@@ -1114,7 +1084,9 @@ long do_fork(unsigned long clone_flags,
 			set_tsk_thread_flag(p, TIF_SIGPENDING);
 		}
 
-		wake_up_forked_process(p);	/* do this last */
+		p->state = TASK_STOPPED;
+		if (!(clone_flags & CLONE_STOPPED))
+			wake_up_forked_process(p);	/* do this last */
 		++total_forks;
 
 		if (unlikely (trace)) {
diff --git a/kernel/module.c b/kernel/module.c
index ea1cf83ee2b3..58d73701bbde 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -98,6 +98,17 @@ int init_module(void)
 }
 EXPORT_SYMBOL(init_module);
 
+/* A thread that wants to hold a reference to a module only while it
+ * is running can call ths to safely exit.
+ * nfsd and lockd use this.
+ */
+void __module_put_and_exit(struct module *mod, long code)
+{
+	module_put(mod);
+	do_exit(code);
+}
+EXPORT_SYMBOL(__module_put_and_exit);
+
 /* Find a module section: 0 means not found. */
 static unsigned int find_sec(Elf_Ehdr *hdr,
 			     Elf_Shdr *sechdrs,
@@ -374,9 +385,9 @@ static void module_unload_init(struct module *mod)
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
 	for (i = 0; i < NR_CPUS; i++)
-		atomic_set(&mod->ref[i].count, 0);
+		local_set(&mod->ref[i].count, 0);
 	/* Hold reference count during initialization. */
-	atomic_set(&mod->ref[smp_processor_id()].count, 1);
+	local_set(&mod->ref[smp_processor_id()].count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -599,7 +610,7 @@ unsigned int module_refcount(struct module *mod)
 	unsigned int i, total = 0;
 
 	for (i = 0; i < NR_CPUS; i++)
-		total += atomic_read(&mod->ref[i].count);
+		total += local_read(&mod->ref[i].count);
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -610,7 +621,10 @@ static void free_module(struct module *mod);
 #ifdef CONFIG_MODULE_FORCE_UNLOAD
 static inline int try_force(unsigned int flags)
 {
-	return (flags & O_TRUNC);
+	int ret = (flags & O_TRUNC);
+	if (ret)
+		tainted |= TAINT_FORCED_MODULE;
+	return ret;
 }
 #else
 static inline int try_force(unsigned int flags)
diff --git a/kernel/profile.c b/kernel/profile.c
index 4f6d22ce33f2..5c02ac0fbbda 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -8,8 +8,7 @@
 #include <linux/bootmem.h>
 #include <linux/notifier.h>
 #include <linux/mm.h>
-
-extern char _stext, _etext;
+#include <asm/sections.h>
 
 unsigned int * prof_buffer;
 unsigned long prof_len;
@@ -36,7 +35,7 @@ void __init profile_init(void)
 		return;
 
 	/* only text is profiled */
-	prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
+	prof_len = _etext - _stext;
 	prof_len >>= prof_shift;
 	size = prof_len * sizeof(unsigned int) + PAGE_SIZE - 1;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index ba4897671015..21ee4d138388 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1151,6 +1151,8 @@ static inline void rebalance_tick(runqueue_t *this_rq, int idle)
 
 DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
 
+EXPORT_PER_CPU_SYMBOL(kstat);
+
 /*
  * We place interactive tasks back into the active array, if possible.
  *
@@ -2080,7 +2082,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
 }
 
 /**
- * sys_sched_get_priority_mix - return minimum RT priority.
+ * sys_sched_get_priority_min - return minimum RT priority.
  * @policy: scheduling class.
  *
 * this syscall returns the minimum rt_priority that can be used
@@ -2541,7 +2543,7 @@ void __might_sleep(char *file, int line)
 		if (time_before(jiffies, prev_jiffy + HZ))
 			return;
 		prev_jiffy = jiffies;
-		printk(KERN_ERR "Debug: sleeping function called from illegal"
+		printk(KERN_ERR "Debug: sleeping function called from invalid"
 				" context at %s:%d\n", file, line);
 		dump_stack();
 	}
diff --git a/kernel/sys.c b/kernel/sys.c
index 4c3f48c93148..0bd0c5a399a6 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -601,6 +601,14 @@ static int set_user(uid_t new_ruid, int dumpclear)
 	new_user = alloc_uid(new_ruid);
 	if (!new_user)
 		return -EAGAIN;
+
+	if (atomic_read(&new_user->processes) >=
+			current->rlim[RLIMIT_NPROC].rlim_cur &&
+			new_user != &root_user) {
+		free_uid(new_user);
+		return -EAGAIN;
+	}
+
 	switch_uid(new_user);
 
 	if(dumpclear)
@@ -1159,6 +1167,7 @@ asmlinkage long sys_newuname(struct new_utsname __user * name)
 asmlinkage long sys_sethostname(char __user *name, int len)
 {
 	int errno;
+	char tmp[__NEW_UTS_LEN];
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1166,7 +1175,8 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 		return -EINVAL;
 	down_write(&uts_sem);
 	errno = -EFAULT;
-	if (!copy_from_user(system_utsname.nodename, name, len)) {
+	if (!copy_from_user(tmp, name, len)) {
+		memcpy(system_utsname.nodename, tmp, len);
 		system_utsname.nodename[len] = 0;
 		errno = 0;
 	}
@@ -1198,6 +1208,7 @@ asmlinkage long sys_gethostname(char __user *name, int len)
 asmlinkage long sys_setdomainname(char __user *name, int len)
 {
 	int errno;
+	char tmp[__NEW_UTS_LEN];
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1206,9 +1217,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
 
 	down_write(&uts_sem);
 	errno = -EFAULT;
-	if (!copy_from_user(system_utsname.domainname, name, len)) {
-		errno = 0;
+	if (!copy_from_user(tmp, name, len)) {
+		memcpy(system_utsname.domainname, tmp, len);
 		system_utsname.domainname[len] = 0;
+		errno = 0;
 	}
 	up_write(&uts_sem);
 	return errno;
diff --git a/kernel/time.c b/kernel/time.c
index beb7b007b443..fd23f6774b56 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -66,7 +66,7 @@ asmlinkage long sys_time(int * tloc)
  * architectures that need it).
  */
 
-asmlinkage long sys_stime(int * tptr)
+asmlinkage long sys_stime(time_t *tptr)
 {
 	struct timespec tv;
 
@@ -160,22 +160,25 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
 	return 0;
 }
 
-asmlinkage long sys_settimeofday(struct timeval __user *tv, struct timezone __user *tz)
+asmlinkage long sys_settimeofday(struct timeval __user *tv,
+				 struct timezone __user *tz)
 {
-	struct timespec new_tv;
+	struct timeval user_tv;
+	struct timespec new_ts;
 	struct timezone new_tz;
 
 	if (tv) {
-		if (copy_from_user(&new_tv, tv, sizeof(*tv)))
+		if (copy_from_user(&user_tv, tv, sizeof(*tv)))
 			return -EFAULT;
-		new_tv.tv_nsec *= NSEC_PER_USEC;
+		new_ts.tv_sec = user_tv.tv_sec;
+		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
 	}
 	if (tz) {
 		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
 			return -EFAULT;
 	}
 
-	return do_sys_settimeofday(tv ? &new_tv : NULL, tz ? &new_tz : NULL);
+	return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
 }
 
 long pps_offset;		/* pps time offset (us) */
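The kernel/fork.c hunks give CLONE_STOPPED its semantics: every child is now created with p->state = TASK_STOPPED, and wake_up_forked_process() is skipped when the flag is set, so the child sits stopped until someone sends it SIGCONT. A minimal userspace sketch of how a caller might exercise this; the fallback flag value matches the 2.5-era <linux/sched.h>, and the stack handling is deliberately simplified, so treat this as illustrative rather than canonical:

```c
/* Sketch only: start a child stopped via CLONE_STOPPED, then kick it
 * with SIGCONT. Assumes a 2.5-era kernel with this patch applied. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef CLONE_STOPPED
#define CLONE_STOPPED	0x02000000	/* child starts in TASK_STOPPED */
#endif

static char child_stack[64 * 1024];

static int child_fn(void *arg)
{
	(void)arg;
	printf("child running\n");	/* only reached after SIGCONT */
	return 0;
}

int main(void)
{
	/* do_fork() sets p->state = TASK_STOPPED and, because of the
	 * flag, skips wake_up_forked_process(). */
	pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
			  CLONE_STOPPED | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		return 1;
	}
	kill(pid, SIGCONT);	/* moves the child out of TASK_STOPPED */
	waitpid(pid, NULL, 0);
	return 0;
}
```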
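kernel/module.c's switch from atomic_t to local_t keeps each CPU's reference count in a counter that only that CPU modifies, so the hot get/put path needs no cross-CPU atomics; module_refcount() recovers the true count by summing. A hedged userspace sketch of that split-counter idea (NCPUS, the plain long array, and the helper names are invented for illustration; the kernel uses local_set()/local_read() on mod->ref[cpu].count):

```c
#include <stdio.h>

#define NCPUS 4

/* One counter per CPU; writes stay CPU-local and cheap. */
static long ref[NCPUS];

static void get_ref(int cpu) { ref[cpu]++; }	/* __module_get() analogue */
static void put_ref(int cpu) { ref[cpu]--; }	/* module_put() analogue */

/* Mirrors module_refcount(): the true count is the sum over all CPUs,
 * so an individual per-CPU value may even go negative. */
static long refcount(void)
{
	long total = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		total += ref[cpu];
	return total;
}

int main(void)
{
	get_ref(0);
	get_ref(2);
	put_ref(1);	/* grabbed on one CPU, dropped on another */
	printf("refcount = %ld\n", refcount());	/* prints 1 */
	return 0;
}
```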
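The sys_sethostname()/sys_setdomainname() hunks copy user data into a bounded on-stack buffer first and commit it to system_utsname only on success, so a faulting copy_from_user() can no longer leave the global half-written. A sketch of the same commit-only-on-success pattern; all names here are hypothetical, and copy_in() merely stands in for copy_from_user():

```c
#include <stdio.h>
#include <string.h>

#define NAME_LEN 64

static char g_hostname[NAME_LEN + 1];	/* stands in for system_utsname.nodename */

/* Stand-in for copy_from_user(): nonzero means the source "faulted". */
static int copy_in(char *dst, const char *src, size_t len)
{
	if (src == NULL)
		return 1;
	memcpy(dst, src, len);
	return 0;
}

/* Copy into a temporary first and update the global only if the whole
 * source was read -- the same pattern as the tmp[__NEW_UTS_LEN] fix. */
static int set_name(const char *src, size_t len)
{
	char tmp[NAME_LEN];

	if (len > sizeof(tmp))
		return -1;	/* -EINVAL in the kernel version */
	if (copy_in(tmp, src, len))
		return -1;	/* global left untouched on a fault */
	memcpy(g_hostname, tmp, len);
	g_hostname[len] = '\0';
	return 0;
}

int main(void)
{
	if (set_name("example", 7) == 0)
		printf("hostname: %s\n", g_hostname);
	return 0;
}
```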
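Finally, sys_settimeofday() previously copied a userspace timeval byte-for-byte into a struct timespec (relying on the two layouts lining up) and then scaled tv_nsec in place; the new code reads into a real timeval and converts explicitly. The conversion itself is just seconds carried over and microseconds multiplied by NSEC_PER_USEC (1000), as this small self-contained sketch shows:

```c
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

#define NSEC_PER_USEC 1000L

/* Same arithmetic as the sys_settimeofday() hunk: seconds copy over,
 * microseconds scale to nanoseconds. */
static struct timespec timeval_to_timespec(struct timeval tv)
{
	struct timespec ts;

	ts.tv_sec  = tv.tv_sec;
	ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC;
	return ts;
}

int main(void)
{
	struct timeval tv = { .tv_sec = 1059202400, .tv_usec = 250000 };
	struct timespec ts = timeval_to_timespec(tv);

	/* 250000 us -> 250000000 ns */
	printf("%ld s, %ld ns\n", (long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}
```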
