| author | Linus Torvalds <torvalds@evo.osdl.org> | 2005-03-28 04:43:20 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-03-28 04:43:20 -0800 |
| commit | 5a00e89e1cc9dcea7507aca74eabf5bceb26967f (patch) | |
| tree | 7242cca7835e740c81d6707b6b00d965f504cf37 /kernel | |
| parent | ae55089235bcc2741c3a3466d30c0a7d62208886 (diff) | |
| parent | b489072eefdaaf41fb6a25cb940c40d2410bb3f1 (diff) | |
Merge whitespace and __nocast changes
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/acct.c | 2 |
| -rw-r--r-- | kernel/cpuset.c | 34 |
| -rw-r--r-- | kernel/fork.c | 6 |
| -rw-r--r-- | kernel/futex.c | 89 |
| -rw-r--r-- | kernel/irq/proc.c | 10 |
| -rw-r--r-- | kernel/kprobes.c | 5 |
| -rw-r--r-- | kernel/posix-cpu-timers.c | 106 |
| -rw-r--r-- | kernel/posix-timers.c | 12 |
| -rw-r--r-- | kernel/power/main.c | 8 |
| -rw-r--r-- | kernel/power/swsusp.c | 23 |
| -rw-r--r-- | kernel/printk.c | 5 |
| -rw-r--r-- | kernel/sched.c | 5 |
| -rw-r--r-- | kernel/signal.c | 2 |
| -rw-r--r-- | kernel/spinlock.c | 2 |
| -rw-r--r-- | kernel/stop_machine.c | 7 |
| -rw-r--r-- | kernel/sys_ni.c | 1 |
| -rw-r--r-- | kernel/time.c | 11 |
17 files changed, 213 insertions, 115 deletions
diff --git a/kernel/acct.c b/kernel/acct.c
index 035669624b6c..4168f631868e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -542,7 +542,7 @@ void acct_update_integrals(struct task_struct *tsk)
 		if (delta == 0)
 			return;
 		tsk->acct_stimexpd = tsk->stime;
-		tsk->acct_rss_mem1 += delta * tsk->mm->rss;
+		tsk->acct_rss_mem1 += delta * get_mm_counter(tsk->mm, rss);
 		tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
 	}
 }
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 44c03c666b01..cd942ce30b73 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -505,6 +505,35 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 }
 
 /*
+ * Refresh current tasks mems_allowed and mems_generation from
+ * current tasks cpuset. Call with cpuset_sem held.
+ *
+ * Be sure to call refresh_mems() on any cpuset operation which
+ * (1) holds cpuset_sem, and (2) might possibly alloc memory.
+ * Call after obtaining cpuset_sem lock, before any possible
+ * allocation. Otherwise one risks trying to allocate memory
+ * while the task cpuset_mems_generation is not the same as
+ * the mems_generation in its cpuset, which would deadlock on
+ * cpuset_sem in cpuset_update_current_mems_allowed().
+ *
+ * Since we hold cpuset_sem, once refresh_mems() is called, the
+ * test (current->cpuset_mems_generation != cs->mems_generation)
+ * in cpuset_update_current_mems_allowed() will remain false,
+ * until we drop cpuset_sem. Anyone else who would change our
+ * cpusets mems_generation needs to lock cpuset_sem first.
+ */
+
+static void refresh_mems(void)
+{
+	struct cpuset *cs = current->cpuset;
+
+	if (current->cpuset_mems_generation != cs->mems_generation) {
+		guarantee_online_mems(cs, &current->mems_allowed);
+		current->cpuset_mems_generation = cs->mems_generation;
+	}
+}
+
+/*
  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
@@ -1224,6 +1253,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
 		return -ENOMEM;
 
 	down(&cpuset_sem);
+	refresh_mems();
 	cs->flags = 0;
 	if (notify_on_release(parent))
 		set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1277,6 +1307,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 
 	/* the vfs holds both inode->i_sem already */
 	down(&cpuset_sem);
+	refresh_mems();
 	if (atomic_read(&cs->count) > 0) {
 		up(&cpuset_sem);
 		return -EBUSY;
@@ -1433,8 +1464,7 @@ void cpuset_update_current_mems_allowed()
 		return;		/* task is exiting */
 	if (current->cpuset_mems_generation != cs->mems_generation) {
 		down(&cpuset_sem);
-		guarantee_online_mems(cs, &current->mems_allowed);
-		current->cpuset_mems_generation = cs->mems_generation;
+		refresh_mems();
 		up(&cpuset_sem);
 	}
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 5b67b3ebf3c0..f42a17f88699 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -195,8 +195,8 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 	mm->mmap_cache = NULL;
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->map_count = 0;
-	mm->rss = 0;
-	mm->anon_rss = 0;
+	set_mm_counter(mm, rss, 0);
+	set_mm_counter(mm, anon_rss, 0);
 	cpus_clear(mm->cpu_vm_mask);
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
@@ -492,7 +492,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	if (retval)
 		goto free_pt;
 
-	mm->hiwater_rss = mm->rss;
+	mm->hiwater_rss = get_mm_counter(mm,rss);
 	mm->hiwater_vm = mm->total_vm;
 
 good_mm:
diff --git a/kernel/futex.c b/kernel/futex.c
index 7f9f4a012190..7b54a672d0ad 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -97,7 +97,6 @@ struct futex_q {
  */
 struct futex_hash_bucket {
 	spinlock_t lock;
-	unsigned int nqueued;
 	struct list_head chain;
 };
 
@@ -265,7 +264,6 @@ static inline int get_futex_value_locked(int *dest, int __user *from)
 	inc_preempt_count();
 	ret = __copy_from_user_inatomic(dest, from, sizeof(int));
 	dec_preempt_count();
-	preempt_check_resched();
 
 	return ret ? -EFAULT : 0;
 }
@@ -339,7 +337,6 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
 	struct list_head *head1;
 	struct futex_q *this, *next;
 	int ret, drop_count = 0;
-	unsigned int nqueued;
 
  retry:
 	down_read(&current->mm->mmap_sem);
@@ -354,23 +351,22 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
 	bh1 = hash_futex(&key1);
 	bh2 = hash_futex(&key2);
 
-	nqueued = bh1->nqueued;
+	if (bh1 < bh2)
+		spin_lock(&bh1->lock);
+	spin_lock(&bh2->lock);
+	if (bh1 > bh2)
+		spin_lock(&bh1->lock);
+
 	if (likely(valp != NULL)) {
 		int curval;
 
-		/* In order to avoid doing get_user while
-		   holding bh1->lock and bh2->lock, nqueued
-		   (monotonically increasing field) must be first
-		   read, then *uaddr1 fetched from userland and
-		   after acquiring lock nqueued field compared with
-		   the stored value. The smp_mb () below
-		   makes sure that bh1->nqueued is read from memory
-		   before *uaddr1. */
-		smp_mb();
-
 		ret = get_futex_value_locked(&curval, (int __user *)uaddr1);
 
 		if (unlikely(ret)) {
+			spin_unlock(&bh1->lock);
+			if (bh1 != bh2)
+				spin_unlock(&bh2->lock);
+
 			/* If we would have faulted, release mmap_sem, fault
 			 * it in and start all over again.
 			 */
@@ -385,21 +381,10 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
 		}
 		if (curval != *valp) {
 			ret = -EAGAIN;
-			goto out;
+			goto out_unlock;
 		}
 	}
 
-	if (bh1 < bh2)
-		spin_lock(&bh1->lock);
-	spin_lock(&bh2->lock);
-	if (bh1 > bh2)
-		spin_lock(&bh1->lock);
-
-	if (unlikely(nqueued != bh1->nqueued && valp != NULL)) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
-
 	head1 = &bh1->chain;
 	list_for_each_entry_safe(this, next, head1, list) {
 		if (!match_futex (&this->key, &key1))
@@ -435,13 +420,9 @@ out:
 	return ret;
 }
 
-/*
- * queue_me and unqueue_me must be called as a pair, each
- * exactly once. They are called with the hashed spinlock held.
- */
-
 /* The key must be already stored in q->key. */
-static void queue_me(struct futex_q *q, int fd, struct file *filp)
+static inline struct futex_hash_bucket *
+queue_lock(struct futex_q *q, int fd, struct file *filp)
 {
 	struct futex_hash_bucket *bh;
 
@@ -455,11 +436,35 @@ static void queue_me(struct futex_q *q, int fd, struct file *filp)
 
 	q->lock_ptr = &bh->lock;
 	spin_lock(&bh->lock);
-	bh->nqueued++;
+	return bh;
+}
+
+static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh)
+{
 	list_add_tail(&q->list, &bh->chain);
 	spin_unlock(&bh->lock);
 }
 
+static inline void
+queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
+{
+	spin_unlock(&bh->lock);
+	drop_key_refs(&q->key);
+}
+
+/*
+ * queue_me and unqueue_me must be called as a pair, each
+ * exactly once. They are called with the hashed spinlock held.
+ */
+
+/* The key must be already stored in q->key. */
+static void queue_me(struct futex_q *q, int fd, struct file *filp)
+{
+	struct futex_hash_bucket *bh;
+	bh = queue_lock(q, fd, filp);
+	__queue_me(q, bh);
+}
+
 /* Return 1 if we were still queued (ie. 0 means we were woken) */
 static int unqueue_me(struct futex_q *q)
 {
@@ -503,6 +508,7 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
 	DECLARE_WAITQUEUE(wait, current);
 	int ret, curval;
 	struct futex_q q;
+	struct futex_hash_bucket *bh;
 
  retry:
 	down_read(&current->mm->mmap_sem);
@@ -511,7 +517,7 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
 	if (unlikely(ret != 0))
 		goto out_release_sem;
 
-	queue_me(&q, -1, NULL);
+	bh = queue_lock(&q, -1, NULL);
 
 	/*
 	 * Access the page AFTER the futex is queued.
@@ -537,14 +543,13 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
 	ret = get_futex_value_locked(&curval, (int __user *)uaddr);
 
 	if (unlikely(ret)) {
+		queue_unlock(&q, bh);
+
 		/* If we would have faulted, release mmap_sem, fault it in and
 		 * start all over again.
 		 */
 		up_read(&current->mm->mmap_sem);
 
-		if (!unqueue_me(&q)) /* There's a chance we got woken already */
-			return 0;
-
 		ret = get_user(curval, (int __user *)uaddr);
 
 		if (!ret)
@@ -553,9 +558,13 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
 
 	if (curval != val) {
 		ret = -EWOULDBLOCK;
-		goto out_unqueue;
+		queue_unlock(&q, bh);
+		goto out_release_sem;
 	}
 
+	/* Only actually queue if *uaddr contained val. */
+	__queue_me(&q, bh);
+
 	/*
 	 * Now the futex is queued and we have checked the data, we
 	 * don't want to hold mmap_sem while we sleep.
@@ -596,10 +605,6 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
 	 * have handled it for us already.
 	 */
 	return -EINTR;
- out_unqueue:
-	/* If we were woken (and unqueued), we succeeded, whatever. */
-	if (!unqueue_me(&q))
-		ret = 0;
 out_release_sem:
 	up_read(&current->mm->mmap_sem);
 	return ret;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 33fe32e114cb..85d08daa6600 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,6 +19,13 @@ static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS];
  */
 static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
 
+void __attribute__((weak))
+proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
+{
+	irq_affinity[irq] = mask_val;
+	irq_desc[irq].handler->set_affinity(irq, mask_val);
+}
+
 static int irq_affinity_read_proc(char *page, char **start, off_t off,
 				  int count, int *eof, void *data)
 {
@@ -53,8 +60,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (cpus_empty(tmp))
 		return -EINVAL;
 
-	irq_affinity[irq] = new_value;
-	irq_desc[irq].handler->set_affinity(irq, new_value);
+	proc_set_irq_affinity(irq, new_value);
 
 	return full_count;
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4a331aed0866..1d5dd1337bd1 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -79,7 +79,7 @@ int register_kprobe(struct kprobe *p)
 	unsigned long flags = 0;
 
 	if ((ret = arch_prepare_kprobe(p)) != 0) {
-		goto out;
+		goto rm_kprobe;
 	}
 	spin_lock_irqsave(&kprobe_lock, flags);
 	INIT_HLIST_NODE(&p->hlist);
@@ -96,8 +96,9 @@ int register_kprobe(struct kprobe *p)
 	*p->addr = BREAKPOINT_INSTRUCTION;
 	flush_icache_range((unsigned long) p->addr,
 			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
- out:
+out:
 	spin_unlock_irqrestore(&kprobe_lock, flags);
+rm_kprobe:
 	if (ret == -EEXIST)
 		arch_remove_kprobe(p);
 	return ret;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 5dfd280631ae..ad85d3f0dcc4 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -38,7 +38,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
 	} else {
-		ret.cpu = timespec_to_jiffies(tp);
+		ret.cpu = timespec_to_cputime(tp);
 	}
 	return ret;
 }
@@ -94,28 +94,46 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
 static inline void bump_cpu_timer(struct k_itimer *timer,
 				  union cpu_time_count now)
 {
+	int i;
+
 	if (timer->it.cpu.incr.sched == 0)
 		return;
 
 	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
-		long long delta;
-		delta = now.sched - timer->it.cpu.expires.sched;
-		if (delta >= 0) {
-			do_div(delta, timer->it.cpu.incr.sched);
-			delta++;
-			timer->it.cpu.expires.sched +=
-				delta * timer->it.cpu.incr.sched;
-			timer->it_overrun += (int) delta;
+		unsigned long long delta, incr;
+
+		if (now.sched < timer->it.cpu.expires.sched)
+			return;
+		incr = timer->it.cpu.incr.sched;
+		delta = now.sched + incr - timer->it.cpu.expires.sched;
+		/* Don't use (incr*2 < delta), incr*2 might overflow. */
+		for (i = 0; incr < delta - incr; i++)
+			incr = incr << 1;
+		for (; i >= 0; incr >>= 1, i--) {
+			if (delta <= incr)
+				continue;
+			timer->it.cpu.expires.sched += incr;
+			timer->it_overrun += 1 << i;
+			delta -= incr;
 		}
-	} else if (cputime_le(now.cpu, timer->it.cpu.expires.cpu)) {
-		cputime_t delta = cputime_sub(now.cpu,
-					      timer->it.cpu.expires.cpu);
-		if (cputime_ge(delta, cputime_zero)) {
-			long orun = 1 + (delta / timer->it.cpu.incr.cpu);
+	} else {
+		cputime_t delta, incr;
+
+		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
+			return;
+		incr = timer->it.cpu.incr.cpu;
+		delta = cputime_sub(cputime_add(now.cpu, incr),
+				    timer->it.cpu.expires.cpu);
+		/* Don't use (incr*2 < delta), incr*2 might overflow. */
+		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
+			incr = cputime_add(incr, incr);
+		for (; i >= 0; incr = cputime_halve(incr), i--) {
+			if (cputime_le(delta, incr))
+				continue;
 			timer->it.cpu.expires.cpu =
-				cputime_add(timer->it.cpu.expires.cpu,
-					    orun * timer->it.cpu.incr.cpu);
-			timer->it_overrun += orun;
+				cputime_add(timer->it.cpu.expires.cpu, incr);
+			timer->it_overrun += 1 << i;
+			delta = cputime_sub(delta, incr);
 		}
 	}
 }
@@ -479,8 +497,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		BUG();
 		break;
 	case CPUCLOCK_PROF:
-		left = cputime_sub(expires.cpu, val.cpu)
-			/ nthreads;
+		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
+				   nthreads);
 		do {
 			if (!unlikely(t->exit_state)) {
 				ticks = cputime_add(prof_ticks(t), left);
@@ -494,8 +512,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		} while (t != p);
 		break;
 	case CPUCLOCK_VIRT:
-		left = cputime_sub(expires.cpu, val.cpu)
-			/ nthreads;
+		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
+				   nthreads);
 		do {
 			if (!unlikely(t->exit_state)) {
 				ticks = cputime_add(virt_ticks(t), left);
@@ -587,17 +605,25 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		default:
 			BUG();
-#define UPDATE_CLOCK(WHICH, c, n) \
-		case CPUCLOCK_##WHICH: \
-			if (p->it_##c##_expires == 0 || \
-			    p->it_##c##_expires > nt->expires.n) { \
-				p->it_##c##_expires = nt->expires.n; \
-			} \
-			break
-		UPDATE_CLOCK(PROF, prof, cpu);
-		UPDATE_CLOCK(VIRT, virt, cpu);
-		UPDATE_CLOCK(SCHED, sched, sched);
-#undef UPDATE_CLOCK
+		case CPUCLOCK_PROF:
+			if (cputime_eq(p->it_prof_expires,
+				       cputime_zero) ||
+			    cputime_gt(p->it_prof_expires,
+				       nt->expires.cpu))
+				p->it_prof_expires = nt->expires.cpu;
+			break;
+		case CPUCLOCK_VIRT:
+			if (cputime_eq(p->it_virt_expires,
+				       cputime_zero) ||
+			    cputime_gt(p->it_virt_expires,
+				       nt->expires.cpu))
+				p->it_virt_expires = nt->expires.cpu;
+			break;
+		case CPUCLOCK_SCHED:
+			if (p->it_sched_expires == 0 ||
+			    p->it_sched_expires > nt->expires.sched)
+				p->it_sched_expires = nt->expires.sched;
+			break;
 		}
 	} else {
 		/*
@@ -934,7 +960,7 @@ static void check_thread_timers(struct task_struct *tsk,
 {
 	struct list_head *timers = tsk->cpu_timers;
 
-	tsk->it_prof_expires = 0;
+	tsk->it_prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
@@ -948,7 +974,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
-	tsk->it_virt_expires = 0;
+	tsk->it_virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
@@ -1044,7 +1070,7 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
-	sched_expires = cputime_zero;
+	sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
@@ -1132,9 +1158,11 @@ static void check_process_timers(struct task_struct *tsk,
 		unsigned long long sched_left, sched;
 		const unsigned int nthreads = atomic_read(&sig->live);
 
-		prof_left = cputime_sub(prof_expires,
-					cputime_add(utime, stime)) / nthreads;
-		virt_left = cputime_sub(virt_expires, utime) / nthreads;
+		prof_left = cputime_sub(prof_expires, utime);
+		prof_left = cputime_sub(prof_left, stime);
+		prof_left = cputime_div(prof_left, nthreads);
+		virt_left = cputime_sub(virt_expires, utime);
+		virt_left = cputime_div(virt_left, nthreads);
 		if (sched_expires) {
 			sched_left = sched_expires - sched_time;
 			do_div(sched_left, nthreads);
@@ -1245,7 +1273,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	BUG_ON(!irqs_disabled());
 
 #define UNEXPIRED(clock) \
-	(tsk->it_##clock##_expires == 0 || \
+	(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
 	 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
 
 	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5f0fbcf511ba..fd316c272260 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -46,6 +46,7 @@
 #include <linux/syscalls.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <linux/module.h>
 
 #ifndef div_long_long_rem
 #include <asm/div64.h>
@@ -460,6 +461,7 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
 					      timr->it_process);
 	}
 }
+EXPORT_SYMBOL_GPL(posix_timer_event);
 
 /*
  * This function gets called when a POSIX.1b interval timer expires. It
@@ -555,6 +557,7 @@ void register_posix_clock(clockid_t clock_id, struct k_clock *new_clock)
 
 	posix_clocks[clock_id] = *new_clock;
 }
+EXPORT_SYMBOL_GPL(register_posix_clock);
 
 static struct k_itimer * alloc_posix_timer(void)
 {
@@ -935,6 +938,10 @@ static int adjust_abs_time(struct k_clock *clock, struct timespec *tp,
 	 */
 	if (oc.tv_sec < 0)
 		oc.tv_sec = oc.tv_nsec = 0;
+
+	if (oc.tv_sec | oc.tv_nsec)
+		set_normalized_timespec(&oc, oc.tv_sec,
+					oc.tv_nsec + clock->res);
 	tstojiffie(&oc, clock->res, exp);
 
 	/*
@@ -1246,16 +1253,17 @@ int do_posix_clock_monotonic_gettime(struct timespec *tp)
 	return do_posix_clock_monotonic_get(CLOCK_MONOTONIC, tp);
 }
 
-
 int do_posix_clock_nosettime(clockid_t clockid, struct timespec *tp)
 {
 	return -EINVAL;
 }
+EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
 
 int do_posix_clock_notimer_create(struct k_itimer *timer)
 {
 	return -EINVAL;
 }
+EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);
 
 int do_posix_clock_nonanosleep(clockid_t clock, int flags, struct timespec *t)
 {
@@ -1265,6 +1273,7 @@ int do_posix_clock_nonanosleep(clockid_t clock, int flags, struct timespec *t)
 	return -ENOTSUP;
 #endif
 }
+EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
 
 asmlinkage long
 sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp)
@@ -1502,7 +1511,6 @@ static int common_nsleep(clockid_t which_clock,
 	if (abs || !rq_time) {
 		adjust_abs_time(&posix_clocks[which_clock], &t, abs,
 				&rq_time, &dum);
-		rq_time += (t.tv_sec || t.tv_nsec);
 	}
 
 	left = rq_time - get_jiffies_64();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index b0315cbad9b9..7960ddf04a57 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -65,8 +65,10 @@ static int suspend_prepare(suspend_state_t state)
 		goto Thaw;
 	}
 
-	if ((error = device_suspend(PMSG_SUSPEND)))
+	if ((error = device_suspend(PMSG_SUSPEND))) {
+		printk(KERN_ERR "Some devices failed to suspend\n");
 		goto Finish;
+	}
 	return 0;
 Finish:
 	if (pm_ops->finish)
@@ -85,8 +87,10 @@ static int suspend_enter(suspend_state_t state)
 
 	local_irq_save(flags);
 
-	if ((error = device_power_down(PMSG_SUSPEND)))
+	if ((error = device_power_down(PMSG_SUSPEND))) {
+		printk(KERN_ERR "Some devices failed to power down\n");
 		goto Done;
+	}
 	error = pm_ops->enter(state);
 	device_power_up();
 Done:
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index cf555c10d3b5..ae5bebc3b18f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -98,7 +98,6 @@ unsigned int nr_copy_pages __nosavedata = 0;
  */
 suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
 static suspend_pagedir_t *pagedir_save;
-static int pagedir_order __nosavedata = 0;
 
 #define SWSUSP_SIG	"S1SUSPEND"
 
@@ -893,34 +892,29 @@ int swsusp_suspend(void)
 	 * at resume time, and evil weirdness ensues.
 	 */
 	if ((error = device_power_down(PMSG_FREEZE))) {
+		printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
 		local_irq_enable();
+		swsusp_free();
 		return error;
 	}
 	save_processor_state();
-	error = swsusp_arch_suspend();
+	if ((error = swsusp_arch_suspend()))
+		swsusp_free();
 	/* Restore control flow magically appears here */
 	restore_processor_state();
+	BUG_ON (nr_copy_pages_check != nr_copy_pages);
 	restore_highmem();
 	device_power_up();
 	local_irq_enable();
 	return error;
 }
 
-asmlinkage int swsusp_restore(void)
-{
-	BUG_ON (nr_copy_pages_check != nr_copy_pages);
-
-	/* Even mappings of "global" things (vmalloc) need to be fixed */
-	__flush_tlb_global();
-	return 0;
-}
-
 int swsusp_resume(void)
 {
 	int error;
 	local_irq_disable();
-	device_power_down(PMSG_FREEZE);
+	if (device_power_down(PMSG_FREEZE))
+		printk(KERN_ERR "Some devices failed to power down, very bad\n");
 	/* We'll ignore saved state, but this gets preempt count (etc) right */
 	save_processor_state();
 	error = swsusp_arch_resume();
@@ -1219,7 +1213,6 @@ static int check_header(void)
 		return -EPERM;
 	}
 	nr_copy_pages = swsusp_info.image_pages;
-	pagedir_order = get_bitmask_order(SUSPEND_PD_PAGES(nr_copy_pages));
 	return error;
 }
 
@@ -1238,7 +1231,7 @@ static int check_sig(void)
 		 */
 		error = bio_write_page(0, &swsusp_header);
 	} else {
-		pr_debug(KERN_ERR "swsusp: Suspend partition has wrong signature?\n");
+		printk(KERN_ERR "swsusp: Suspend partition has wrong signature?\n");
 		return -EINVAL;
 	}
 	if (!error)
diff --git a/kernel/printk.c b/kernel/printk.c
index e5a2222f477e..5d5754964bf4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -861,6 +861,11 @@ void register_console(struct console * console)
 	if (!(console->flags & CON_ENABLED))
 		return;
 
+	if (console_drivers && (console_drivers->flags & CON_BOOT)) {
+		unregister_console(console_drivers);
+		console->flags &= ~CON_PRINTBUFFER;
+	}
+
 	/*
 	 * Put this console in the list - keep the
 	 * preferred driver at the head of the list.
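The kernel/futex.c hunks earlier in this diff drop the `nqueued` generation counter and instead take both hash-bucket spinlocks up front, always locking the lower-addressed bucket first, so two requeue operations that pick the same pair of buckets in opposite roles cannot deadlock. A minimal user-space sketch of that ordering idiom, with pthread mutexes standing in for the kernel spinlocks (the `bucket` type and helper names below are illustrative, not taken from the kernel):

```c
#include <pthread.h>

struct bucket {
	pthread_mutex_t lock;
};

/* Lock two buckets in address order; if both arguments name the same
 * bucket, take the lock only once. */
static void double_lock(struct bucket *b1, struct bucket *b2)
{
	if (b1 < b2)
		pthread_mutex_lock(&b1->lock);
	pthread_mutex_lock(&b2->lock);
	if (b1 > b2)
		pthread_mutex_lock(&b1->lock);
}

static void double_unlock(struct bucket *b1, struct bucket *b2)
{
	pthread_mutex_unlock(&b1->lock);
	if (b1 != b2)
		pthread_mutex_unlock(&b2->lock);
}

int main(void)
{
	struct bucket a = { PTHREAD_MUTEX_INITIALIZER };
	struct bucket b = { PTHREAD_MUTEX_INITIALIZER };

	double_lock(&a, &b);	/* lower address is locked first */
	double_unlock(&a, &b);
	double_lock(&b, &a);	/* same order even with swapped arguments */
	double_unlock(&b, &a);
	return 0;
}
```

Because every caller agrees on one global order (the buckets' addresses), the classic ABBA deadlock between two concurrent requeues is impossible.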
diff --git a/kernel/sched.c b/kernel/sched.c
index c32f9389978f..dff94ba6df38 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3741,14 +3741,11 @@ EXPORT_SYMBOL(cond_resched);
  */
 int cond_resched_lock(spinlock_t * lock)
 {
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-	if (lock->break_lock) {
-		lock->break_lock = 0;
+	if (need_lockbreak(lock)) {
 		spin_unlock(lock);
 		cpu_relax();
 		spin_lock(lock);
 	}
-#endif
 	if (need_resched()) {
 		_raw_spin_unlock(lock);
 		preempt_enable_no_resched();
diff --git a/kernel/signal.c b/kernel/signal.c
index 8dc483007847..f00a1d610f0b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2219,6 +2219,8 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 			current->state = TASK_INTERRUPTIBLE;
 			timeout = schedule_timeout(timeout);
 
+			if (current->flags & PF_FREEZE)
+				refrigerator(PF_FREEZE);
 			spin_lock_irq(&current->sighand->siglock);
 			sig = dequeue_signal(current, &these, &info);
 			current->blocked = current->real_blocked;
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index b8e76ca8a001..e15ed17863f1 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -187,6 +187,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock)			\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
+	(lock)->break_lock = 0;						\
 }									\
 									\
 EXPORT_SYMBOL(_##op##_lock);						\
@@ -209,6 +210,7 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
+	(lock)->break_lock = 0;						\
 	return flags;							\
 }									\
 									\
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e31b1cb8e503..c39ed70af174 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -6,6 +6,7 @@
 #include <linux/syscalls.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
+#include <asm/uaccess.h>
 
 /* Since we effect priority and affinity (both of which are visible
  * to, and settable by outside processes) we do indirection via a
@@ -86,9 +87,13 @@ static int stop_machine(void)
 {
 	int i, ret = 0;
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	mm_segment_t old_fs = get_fs();
 
 	/* One high-prio thread per cpu. We'll do this one. */
-	sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);
+	set_fs(KERNEL_DS);
+	sys_sched_setscheduler(current->pid, SCHED_FIFO,
+			       (struct sched_param __user *)&param);
+	set_fs(old_fs);
 
 	atomic_set(&stopmachine_thread_ack, 0);
 	stopmachine_num_threads = 0;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 623eaf517534..1802a311dd3f 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -83,3 +83,4 @@ cond_syscall(sys_pciconfig_write);
 cond_syscall(sys_pciconfig_iobase);
 cond_syscall(sys32_ipc);
 cond_syscall(sys32_sysctl);
+cond_syscall(ppc_rtas);
diff --git a/kernel/time.c b/kernel/time.c
index d5400f6af052..96fd0f499631 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/security.h>
 #include <linux/fs.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -215,6 +216,14 @@ long pps_stbcnt;	/* stability limit exceeded */
 /* hook for a loadable hardpps kernel module */
 void (*hardpps_ptr)(struct timeval *);
 
+/* we call this to notify the arch when the clock is being
+ * controlled. If no such arch routine, do nothing.
+ */
+void __attribute__ ((weak)) notify_arch_cmos_timer(void)
+{
+	return;
+}
+
 /* adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
@@ -398,6 +407,7 @@ leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
 	txc->stbcnt = pps_stbcnt;
 	write_sequnlock_irq(&xtime_lock);
 	do_gettimeofday(&txc->time);
+	notify_arch_cmos_timer();
 	return(result);
 }
 
@@ -494,6 +504,7 @@ void getnstimeofday (struct timespec *tv)
 	tv->tv_sec = sec;
 	tv->tv_nsec = nsec;
 }
+EXPORT_SYMBOL_GPL(getnstimeofday);
 
 int do_settimeofday (struct timespec *tv)
 {
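The rewritten bump_cpu_timer() in the kernel/posix-cpu-timers.c hunks above advances an expired periodic timer past `now` without a 64-bit division and without ever forming `incr * 2` where it could overflow: it doubles the increment while the doubled step still fits inside the remaining delta, then walks back down through the halved steps, accumulating the overrun count from the powers of two it consumed. A rough user-space sketch of that doubling/halving idea, with plain `unsigned long long` standing in for the kernel's `cputime_t` (the function and variable names are illustrative only):

```c
#include <assert.h>
#include <stdio.h>

/* Advance *expires in steps of incr until it is past now, returning how
 * many whole periods were skipped. No incr * n product or division is
 * ever formed, mirroring the overflow-avoiding loops in the patch. */
static unsigned long long bump(unsigned long long *expires,
			       unsigned long long incr,
			       unsigned long long now)
{
	unsigned long long delta, step = incr, overrun = 0;
	int i;

	if (incr == 0 || now < *expires)
		return 0;
	delta = now + incr - *expires;
	/* Don't test (step * 2 < delta); step * 2 might overflow. */
	for (i = 0; step < delta - step; i++)
		step <<= 1;
	for (; i >= 0; step >>= 1, i--) {
		if (delta <= step)
			continue;
		*expires += step;
		overrun += 1ULL << i;
		delta -= step;
	}
	return overrun;
}

int main(void)
{
	unsigned long long expires = 100;
	unsigned long long n = bump(&expires, 7, 130);

	/* Expiries were 100, 107, ..., 128; the first one after 130 is 135,
	 * so five periods have elapsed. */
	printf("overrun=%llu expires=%llu\n", n, expires);
	assert(n == 5 && expires == 135);
	return 0;
}
```

The walk-down loop lands on the first expiry strictly after `now`, and the accumulated powers of two give the same overrun count the old code obtained by dividing, which is exactly what the kernel hunks compute with cputime_add()/cputime_halve() for the non-SCHED clocks.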
