author    Anton Altaparmakov <aia21@cantab.net>  2004-06-26 21:53:53 +0100
committer Anton Altaparmakov <aia21@cantab.net>  2004-06-26 21:53:53 +0100
commit    320ed1994ecf7ccadaaa95196467565b34d8d686 (patch)
tree      b36f16a87596469091c97d35002faa1b5c0360ab /arch/mips/kernel
parent    702fdfcae9a47ec4976d82d3d0b4b4a41bd72a52 (diff)
parent    f6a7507c1714f5cb4faaebc76a1d02260830be01 (diff)
Merge cantab.net:/home/src/bklinux-2.6
into cantab.net:/home/src/ntfs-2.6
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile            3
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c       40
-rw-r--r--  arch/mips/kernel/cpu-probe.c        46
-rw-r--r--  arch/mips/kernel/irq.c               2
-rw-r--r--  arch/mips/kernel/module-elf32.c     11
-rw-r--r--  arch/mips/kernel/module-elf64.c     11
-rw-r--r--  arch/mips/kernel/module.c           53
-rw-r--r--  arch/mips/kernel/scall32-o32.S       1
-rw-r--r--  arch/mips/kernel/scall64-64.S        1
-rw-r--r--  arch/mips/kernel/scall64-n32.S       1
-rw-r--r--  arch/mips/kernel/scall64-o32.S       1
-rw-r--r--  arch/mips/kernel/semaphore.c       328
-rw-r--r--  arch/mips/kernel/setup.c            15
-rw-r--r--  arch/mips/kernel/syscall.c           2
-rw-r--r--  arch/mips/kernel/sysirix.c           2
-rw-r--r--  arch/mips/kernel/time.c              5
-rw-r--r--  arch/mips/kernel/traps.c            59
17 files changed, 251 insertions, 330 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9ce8090eff68..3a7f766fd961 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
time.o traps.o unaligned.o
ifdef CONFIG_MODULES
-obj-y += mips_ksyms.o
+obj-y += mips_ksyms.o module.o
obj-$(CONFIG_MIPS32) += module-elf32.o
obj-$(CONFIG_MIPS64) += module-elf64.o
endif
@@ -23,6 +23,7 @@ obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 31c17ced894e..1375d448308b 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -177,7 +177,7 @@ static inline void check_daddi(void)
extern asmlinkage void handle_daddi_ov(void);
unsigned long flags;
void *handler;
- long v;
+ long v, tmp;
printk("Checking for the daddi bug... ");
@@ -197,13 +197,15 @@ static inline void check_daddi(void)
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
+ "addiu %1, $0, %2\n\t"
+ "dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
- "daddi %0, %1, %2\n\t"
+ "daddi %0, %1, %3\n\t"
".set pop"
- : "=r" (v)
- : "r" (0x7fffffffffffedcd), "I" (0x1234));
+ : "=r" (v), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9a), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);
@@ -217,9 +219,11 @@ static inline void check_daddi(void)
local_irq_save(flags);
handler = set_except_vector(12, handle_daddi_ov);
asm volatile(
- "daddi %0, %1, %2"
- : "=r" (v)
- : "r" (0x7fffffffffffedcd), "I" (0x1234));
+ "addiu %1, $0, %2\n\t"
+ "dsrl %1, %1, 1\n\t"
+ "daddi %0, %1, %3"
+ : "=r" (v), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9a), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);
@@ -240,7 +244,7 @@ static inline void check_daddi(void)
static inline void check_daddiu(void)
{
- long v, w;
+ long v, w, tmp;
printk("Checking for the daddiu bug... ");
@@ -265,15 +269,17 @@ static inline void check_daddiu(void)
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
+ "addiu %2, $0, %3\n\t"
+ "dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
- "daddiu %0, %2, %3\n\t"
- "addiu %1, $0, %3\n\t"
+ "daddiu %0, %2, %4\n\t"
+ "addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
- : "=&r" (v), "=&r" (w)
- : "r" (0x7fffffffffffedcd), "I" (0x1234));
+ : "=&r" (v), "=&r" (w), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9a), "I" (0x1234));
if (v == w) {
printk("no.\n");
@@ -283,11 +289,13 @@ static inline void check_daddiu(void)
printk("yes, workaround... ");
asm volatile(
- "daddiu %0, %2, %3\n\t"
- "addiu %1, $0, %3\n\t"
+ "addiu %2, $0, %3\n\t"
+ "dsrl %2, %2, 1\n\t"
+ "daddiu %0, %2, %4\n\t"
+ "addiu %1, $0, %4\n\t"
"daddu %1, %2"
- : "=&r" (v), "=&r" (w)
- : "r" (0x7fffffffffffedcd), "I" (0x1234));
+ : "=&r" (v), "=&r" (w), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9a), "I" (0x1234));
if (v == w) {
printk("yes.\n");
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5013599347e3..36777476dae1 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1,6 +1,8 @@
/*
* Processor capabilities determination functions.
*
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 2003 Maciej W. Rozycki
* Copyright (C) 1994 - 2003 Ralf Baechle
* Copyright (C) 2001 MIPS Inc.
*
@@ -49,6 +51,14 @@ static void r4k_wait(void)
".set\tmips0");
}
+/*
+ * The Au1xxx wait is available only if we run CONFIG_PM and
+ * the timer setup found we had a 32KHz counter available.
+ * There are still problems with functions that may call au1k_wait
+ * directly, but that will be discovered pretty quickly.
+ */
+extern void (*au1k_wait_ptr)(void);
+
void au1k_wait(void)
{
#ifdef CONFIG_PM
@@ -90,7 +100,6 @@ static inline void check_wait(void)
case CPU_R5000:
case CPU_NEVADA:
case CPU_RM7000:
-/* case CPU_RM9000: */
case CPU_TX49XX:
case CPU_4KC:
case CPU_4KEC:
@@ -102,12 +111,19 @@ static inline void check_wait(void)
cpu_wait = r4k_wait;
printk(" available.\n");
break;
+#ifdef CONFIG_PM
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
- cpu_wait = au1k_wait;
- printk(" available.\n");
+ if (au1k_wait_ptr != NULL) {
+ cpu_wait = au1k_wait_ptr;
+ printk(" available.\n");
+ }
+ else {
+ printk(" unavailable.\n");
+ }
break;
+#endif
default:
printk(" unavailable.\n");
break;
@@ -238,8 +254,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
break;
default:
printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
- c->cputype = CPU_VR41XX;
- break;
+ c->cputype = CPU_VR41XX;
+ break;
}
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS;
@@ -371,7 +387,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
c->cputype = CPU_RM9000;
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
@@ -407,9 +423,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
}
}
@@ -475,9 +488,6 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
/* Probe for L2 cache */
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
}
}
@@ -505,9 +515,6 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
break;
}
c->isa_level = MIPS_CPU_ISA_M32;
- break;
- default:
- c->cputype = CPU_UNKNOWN;
break;
}
}
@@ -528,9 +535,6 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
#endif
break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
}
}
@@ -542,14 +546,11 @@ static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
c->cputype = CPU_SR71000;
c->isa_level = MIPS_CPU_ISA_M64;
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_4KTLB | MIPS_CPU_FPU |
+ MIPS_CPU_4KTLB | MIPS_CPU_FPU |
MIPS_CPU_COUNTER | MIPS_CPU_MCHECK;
c->scache.ways = 8;
c->tlbsize = 64;
break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
}
}
@@ -563,7 +564,6 @@ __init void cpu_probe(void)
c->processor_id = read_c0_prid();
switch (c->processor_id & 0xff0000) {
-
case PRID_COMP_LEGACY:
cpu_probe_legacy(c);
break;
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 845e0914ffe5..13acf876b46f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -487,7 +487,7 @@ int request_irq(unsigned int irq,
action->handler = handler;
action->flags = irqflags;
- action->mask = 0;
+ cpus_clear(action->mask);
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
diff --git a/arch/mips/kernel/module-elf32.c b/arch/mips/kernel/module-elf32.c
index 35818bb1a359..ffd216d6d6dc 100644
--- a/arch/mips/kernel/module-elf32.c
+++ b/arch/mips/kernel/module-elf32.c
@@ -248,14 +248,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
me->name);
return -ENOEXEC;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/mips/kernel/module-elf64.c b/arch/mips/kernel/module-elf64.c
index da7295d53113..e804792ee1ee 100644
--- a/arch/mips/kernel/module-elf64.c
+++ b/arch/mips/kernel/module-elf64.c
@@ -272,14 +272,3 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
new file mode 100644
index 000000000000..581687080082
--- /dev/null
+++ b/arch/mips/kernel/module.c
@@ -0,0 +1,53 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+static LIST_HEAD(dbe_list);
+static spinlock_t dbe_lock = SPIN_LOCK_UNLOCKED;
+
+/* Given an address, look for it in the module exception tables. */
+const struct exception_table_entry *search_module_dbetables(unsigned long addr)
+{
+ unsigned long flags;
+ const struct exception_table_entry *e = NULL;
+ struct mod_arch_specific *dbe;
+
+ spin_lock_irqsave(&dbe_lock, flags);
+ list_for_each_entry(dbe, &dbe_list, dbe_list) {
+ e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
+ if (e)
+ break;
+ }
+ spin_unlock_irqrestore(&dbe_lock, flags);
+
+ /* Now, if we found one, we are running inside it now, hence
+ we cannot unload the module, hence no refcnt needed. */
+ return e;
+}
+
+/* Put in dbe list if necessary. */
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ INIT_LIST_HEAD(&me->arch.dbe_list);
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
+ continue;
+ me->arch.dbe_start = (void *)s->sh_addr;
+ me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
+ spin_lock_irq(&dbe_lock);
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ spin_lock_irq(&dbe_lock);
+ list_del(&mod->arch.dbe_list);
+ spin_unlock_irq(&dbe_lock);
+}
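[Editor's note: search_module_dbetables() above walks each module's __dbe_table section with search_extable(), which performs a binary search over exception-table entries sorted by faulting-instruction address. A hedged, self-contained sketch of that lookup follows; the struct field names are illustrative, not the exact 2004 MIPS layout:]

#include <stddef.h>

struct extable_entry {
	unsigned long insn;   /* address of the faulting instruction */
	unsigned long fixup;  /* address to branch to on a bus error */
};

/* Binary search [first, last] for the entry covering addr, if any. */
static const struct extable_entry *
search_table(const struct extable_entry *first,
	     const struct extable_entry *last, unsigned long addr)
{
	while (first <= last) {
		const struct extable_entry *mid = first + (last - first) / 2;

		if (mid->insn < addr)
			first = mid + 1;
		else if (mid->insn > addr)
			last = mid - 1;
		else
			return mid;
	}
	return NULL;
}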
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 09477c1e3e7b..24eab2f9d7dd 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -627,6 +627,7 @@ out: jr ra
sys sys_mq_timedreceive 5
sys sys_mq_notify 2 /* 4275 */
sys sys_mq_getsetattr 3
+ sys sys_ni_syscall 0 /* sys_vserver */
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 19e430d62c47..3125b634faec 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -447,3 +447,4 @@ sys_call_table:
PTR sys_mq_timedreceive
PTR sys_mq_notify
PTR sys_mq_getsetattr /* 5235 */
+ PTR sys_ni_syscall /* sys_vserver */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 9993a8a15397..c00459f8f59d 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -357,3 +357,4 @@ EXPORT(sysn32_call_table)
PTR compat_sys_mq_timedreceive
PTR compat_sys_mq_notify
PTR compat_sys_mq_getsetattr /* 6239 */
+ PTR sys_ni_syscall /* sys_vserver */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b351656863af..3a89bf425bf6 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -535,6 +535,7 @@ out: jr ra
sys compat_sys_mq_timedreceive 5
sys compat_sys_mq_notify 2 /* 4275 */
sys compat_sys_mq_getsetattr 3
+ sys sys_ni_syscall 0 /* sys_vserver */
.endm
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
index 51c3e772c029..4197b4109dc3 100644
--- a/arch/mips/kernel/semaphore.c
+++ b/arch/mips/kernel/semaphore.c
@@ -1,273 +1,165 @@
/*
- * Copyright (C) 1999, 2001, 02, 03 Ralf Baechle
+ * MIPS-specific semaphore code.
*
- * Heavily inspired by the Alpha implementation
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
+ * to eliminate the SMP races in the old version between the updates
+ * of `count' and `waking'. Now we use negative `count' values to
+ * indicate that some process(es) are waiting for the semaphore.
*/
+
#include <linux/config.h>
-#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
-#ifdef CONFIG_CPU_HAS_LLDSCD
-/*
- * On machines without lld/scd we need a spinlock to make the manipulation of
- * sem->count and sem->waking atomic. Scalability isn't an issue because
- * this lock is used on UP only so it's just an empty variable.
- */
-spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
-EXPORT_SYMBOL(semaphore_lock);
-#endif
+#ifdef CONFIG_CPU_HAS_LLSC
/*
- * Semaphores are implemented using a two-way counter: The "count" variable is
- * decremented for each process that tries to sleep, while the "waking" variable
- * is incremented when the "up()" code goes to wake up waiting processes.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently test if
- * they need to do any extra work (up needs to do something only if count was
- * negative before the increment operation.
- *
- * waking_non_zero() must execute atomically.
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
*
- * When __up() is called, the count was negative before incrementing it, and we
- * need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to wake up and
- * exit. ALL waiting processes actually wake up but only the one that gets to
- * the "waking" field first will gate through and acquire the semaphore. The
- * others will go back to sleep.
- *
- * Note that these functions are only called when there is contention on the
- * lock, and as such all this is the "non-critical" part of the whole semaphore
- * business. The critical part is the inline stuff in <asm/semaphore.h> where
- * we want to avoid any extra jumps and calls.
+ * old_count = sem->count;
+ * tmp = MAX(old_count, 0) + incr;
+ * sem->count = tmp;
+ * return old_count;
*/
-void __up_wakeup(struct semaphore *sem)
+static inline int __sem_update_count(struct semaphore *sem, int incr)
{
- wake_up(&sem->wait);
-}
-
-EXPORT_SYMBOL(__up_wakeup);
-
-#ifdef CONFIG_CPU_HAS_LLSC
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret, tmp;
+ int old_count, tmp;
__asm__ __volatile__(
- "1: ll %1, %2 # waking_non_zero \n"
- " blez %1, 2f \n"
- " subu %0, %1, 1 \n"
- " sc %0, %2 \n"
- " beqz %0, 1b \n"
- "2: \n"
- : "=r" (ret), "=r" (tmp), "+m" (sem->waking)
- : "0" (0));
-
- return ret;
+ "1: ll %0, %2 \n"
+ " sra %1, %0, 31 \n"
+ " not %1 \n"
+ " and %1, %0, %1 \n"
+ " add %1, %1, %3 \n"
+ " sc %1, %2 \n"
+ " beqz %1, 1b \n"
+ : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+ : "r" (incr), "m" (sem->count));
+
+ return old_count;
}
-#else /* !CONFIG_CPU_HAS_LLSC */
+#else
-static inline int waking_non_zero(struct semaphore *sem)
+/*
+ * On machines without lld/scd we need a spinlock to make the manipulation of
+ * sem->count and sem->waking atomic. Scalability isn't an issue because
+ * this lock is used on UP only so it's just an empty variable.
+ */
+static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
+
+static inline int __sem_update_count(struct semaphore *sem, int incr)
{
unsigned long flags;
- int waking, ret = 0;
+ int old_count, tmp;
spin_lock_irqsave(&semaphore_lock, flags);
- waking = atomic_read(&sem->waking);
- if (waking > 0) {
- atomic_set(&sem->waking, waking - 1);
- ret = 1;
- }
+ old_count = atomic_read(&sem->count);
+ tmp = max_t(int, old_count, 0) + incr;
+ atomic_set(&sem->count, tmp);
spin_unlock_irqrestore(&semaphore_lock, flags);
- return ret;
+ return old_count;
}
-#endif /* !CONFIG_CPU_HAS_LLSC */
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired, return
- * negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is not
- * interruptible. This means that a task waiting on a semaphore using "down()"
- * cannot be killed until someone does an "up()" on the semaphore.
- *
- * If called from down_interruptible, the return value gets checked upon return.
- * If the return value is negative then the task continues with the negative
- * value in the return register (it can be tested by the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
+#endif
-void __sched __down_failed(struct semaphore * sem)
+void __up(struct semaphore *sem)
{
- struct task_struct *tsk = current;
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, tsk);
- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue_exclusive(&sem->wait, &wait);
-
/*
- * Ok, we're set up. sem->count is known to be less than zero
- * so we must wait.
- *
- * We can let go the lock for purposes of waiting.
- * We re-acquire it after awaking so as to protect
- * all semaphore operations.
- *
- * If "up()" is called before we call waking_non_zero() then
- * we will catch it right away. If it is called later then
- * we will have to go through a wakeup cycle to catch it.
- *
- * Multiple waiters contend for the semaphore lock to see
- * who gets to gate through and who has to wait some more.
+ * Note that we incremented count in up() before we came here,
+ * but that was ineffective since the result was <= 0, and
+ * any negative value of count is equivalent to 0.
+ * This ends up setting count to 1, unless count is now > 0
+ * (i.e. because some other cpu has called up() in the meantime),
+ * in which case we just increment count.
*/
- for (;;) {
- if (waking_non_zero(sem))
- break;
- schedule();
- __set_current_state(TASK_UNINTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&sem->wait, &wait);
+ __sem_update_count(sem, 1);
+ wake_up(&sem->wait);
}
-EXPORT_SYMBOL(__down_failed);
-
-#ifdef CONFIG_CPU_HAS_LLDSCD
+EXPORT_SYMBOL(__up);
/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible decrement
- * simultaneously and atomically with the sem->waking adjustment,
- * otherwise we can race with wake_one_more.
- *
- * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
- *
- * This is crazy. Normally it's strictly forbidden to use 64-bit operations
- * in the 32-bit MIPS kernel. In this case it's however ok because if an
- * interrupt has destroyed the upper half of registers sc will fail.
- * Note also that this will not work for MIPS32 CPUs!
- *
- * Pseudocode:
- *
- * If(sem->waking > 0) {
- * Decrement(sem->waking)
- * Return(SUCCESS)
- * } else If(signal_pending(tsk)) {
- * Increment(sem->count)
- * Return(-EINTR)
- * } else {
- * Return(SLEEP)
- * }
+ * Note that when we come in to __down or __down_interruptible,
+ * we have already decremented count, but that decrement was
+ * ineffective since the result was < 0, and any negative value
+ * of count is equivalent to 0.
+ * Thus it is only when we decrement count from some value > 0
+ * that we have actually got the semaphore.
*/
-
-static inline int
-waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
-{
- long ret, tmp;
-
- __asm__ __volatile__(
- " .set push # waking_non_zero_interruptible \n"
- " .set mips3 \n"
- " .set noat \n"
- "0: lld %1, %2 \n"
- " li %0, 0 \n"
- " sll $1, %1, 0 \n"
- " blez $1, 1f \n"
- " daddiu %1, %1, -1 \n"
- " li %0, 1 \n"
- " b 2f \n"
- "1: beqz %3, 2f \n"
- " li %0, %4 \n"
- " dli $1, 0x0000000100000000 \n"
- " daddu %1, %1, $1 \n"
- "2: scd %1, %2 \n"
- " beqz %1, 0b \n"
- " .set pop \n"
- : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
- : "r" (signal_pending(tsk)), "i" (-EINTR));
-
- return ret;
-}
-
-#else /* !CONFIG_CPU_HAS_LLDSCD */
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
+void __sched __down(struct semaphore *sem)
{
- int waking, pending, ret = 0;
- unsigned long flags;
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
- pending = signal_pending(tsk);
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ add_wait_queue_exclusive(&sem->wait, &wait);
- spin_lock_irqsave(&semaphore_lock, flags);
- waking = atomic_read(&sem->waking);
- if (waking > 0) {
- atomic_set(&sem->waking, waking - 1);
- ret = 1;
- } else if (pending) {
- atomic_set(&sem->count, atomic_read(&sem->count) + 1);
- ret = -EINTR;
+ /*
+ * Try to get the semaphore. If the count is > 0, then we've
+ * got the semaphore; we decrement count and exit the loop.
+ * If the count is 0 or negative, we set it to -1, indicating
+ * that we are asleep, and then sleep.
+ */
+ while (__sem_update_count(sem, -1) <= 0) {
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
- spin_unlock_irqrestore(&semaphore_lock, flags);
+ remove_wait_queue(&sem->wait, &wait);
+ __set_task_state(tsk, TASK_RUNNING);
- return ret;
+ /*
+ * If there are any more sleepers, wake one of them up so
+ * that it can either get the semaphore, or set count to -1
+ * indicating that there are still processes sleeping.
+ */
+ wake_up(&sem->wait);
}
-#endif /* !CONFIG_CPU_HAS_LLDSCD */
+EXPORT_SYMBOL(__down);
-int __sched __down_failed_interruptible(struct semaphore * sem)
+int __sched __down_interruptible(struct semaphore * sem)
{
+ int retval = 0;
struct task_struct *tsk = current;
- wait_queue_t wait;
- int ret = 0;
+ DECLARE_WAITQUEUE(wait, tsk);
- init_waitqueue_entry(&wait, tsk);
- __set_current_state(TASK_INTERRUPTIBLE);
+ __set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
- /*
- * Ok, we're set up. sem->count is known to be less than zero
- * so we must wait.
- *
- * We can let go the lock for purposes of waiting.
- * We re-acquire it after awaking so as to protect
- * all semaphore operations.
- *
- * If "up()" is called before we call waking_non_zero() then
- * we will catch it right away. If it is called later then
- * we will have to go through a wakeup cycle to catch it.
- *
- * Multiple waiters contend for the semaphore lock to see
- * who gets to gate through and who has to wait some more.
- */
- for (;;) {
- ret = waking_non_zero_interruptible(sem, tsk);
- if (ret) {
- if (ret == 1)
- /* ret != 0 only if we get interrupted -arca */
- ret = 0;
+ while (__sem_update_count(sem, -1) <= 0) {
+ if (signal_pending(current)) {
+ /*
+ * A signal is pending - give up trying.
+ * Set sem->count to 0 if it is negative,
+ * since we are no longer sleeping.
+ */
+ __sem_update_count(sem, 0);
+ retval = -EINTR;
break;
}
schedule();
- __set_current_state(TASK_INTERRUPTIBLE);
+ set_task_state(tsk, TASK_INTERRUPTIBLE);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(&sem->wait, &wait);
+ __set_task_state(tsk, TASK_RUNNING);
- return ret;
+ wake_up(&sem->wait);
+ return retval;
}
-EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_interruptible);
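[Editor's note: the rewritten semaphore keeps a single count field in which any negative value means "sleepers exist", and an acquirer owns the semaphore only when the pre-update count was positive. A hedged single-threaded model of __sem_update_count() and the acquire/release protocol; it ignores the atomic fast paths in <asm/semaphore.h> and the LL/SC loop, and is illustrative only:]

#include <stdio.h>

static int count;

/* Model of __sem_update_count(): count = MAX(count, 0) + incr,
 * returning the old value. */
static int sem_update_count(int incr)
{
	int old = count;
	int base = old > 0 ? old : 0;
	count = base + incr;
	return old;
}

int main(void)
{
	count = 1;                                /* one resource free */
	printf("A: %d\n", sem_update_count(-1));  /* 1 -> A acquires */
	printf("B: %d\n", sem_update_count(-1));  /* 0 -> B must sleep */
	sem_update_count(1);                      /* up(): count back to 1 */
	printf("B: %d\n", sem_update_count(-1));  /* 1 -> B acquires now */
	return 0;
}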
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a1336d16c57e..c7d1d76c9c8b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -71,7 +71,6 @@ EXPORT_SYMBOL(mips_machgroup);
struct boot_mem_map boot_mem_map;
static char command_line[CL_SIZE];
- char saved_command_line[CL_SIZE];
char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE;
/*
@@ -453,14 +452,18 @@ static void __init do_earlyinitcalls(void)
void __init setup_arch(char **cmdline_p)
{
+ unsigned int status;
+
cpu_probe();
prom_init();
cpu_report();
#ifdef CONFIG_MIPS32
/* Disable coprocessors and set FPU for 16/32 FPR register model */
- clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_FR);
- set_c0_status(ST0_CU0);
+ status = read_c0_status();
+ status &= ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_FR);
+ status |= ST0_CU0;
+ write_c0_status(status);
#endif
#ifdef CONFIG_MIPS64
/*
@@ -468,8 +471,10 @@ void __init setup_arch(char **cmdline_p)
* Maybe because the kernel is in ckseg0 and not xkphys? Clear it
* anyway ...
*/
- clear_c0_status(ST0_BEV|ST0_TS|ST0_CU1|ST0_CU2|ST0_CU3);
- set_c0_status(ST0_CU0|ST0_KX|ST0_SX|ST0_FR);
+ status = read_c0_status();
+ status &= ~(ST0_BEV|ST0_TS|ST0_CU1|ST0_CU2|ST0_CU3);
+ status |= (ST0_CU0|ST0_KX|ST0_SX|ST0_FR);
+ write_c0_status(status);
#endif
#if defined(CONFIG_VT)
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 7e1eca9736b1..16519f7e8c3b 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -36,7 +36,7 @@
#include <asm/sysmips.h>
#include <asm/uaccess.h>
-asmlinkage int sys_pipe(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sys_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
int fd[2];
int error, res;
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 25d7e97edfca..494d1872df32 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -1639,7 +1639,7 @@ asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf)
printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n",
current->comm, current->pid, fname, buf);
- error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statvfs));
+ error = verify_area(VERIFY_WRITE, buf, sizeof(struct irix_statvfs64));
if(error)
goto out;
error = user_path_walk(fname, &nd);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index d0c980a9b859..0199485a4a8f 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -789,3 +789,8 @@ EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL(to_tm);
EXPORT_SYMBOL(rtc_set_time);
EXPORT_SYMBOL(rtc_get_time);
+
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies*(1000000000/HZ);
+}
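[Editor's note: the new sched_clock() derives nanoseconds from the jiffies counter, so its resolution is one timer tick (1000000000 / HZ ns). A quick hedged check of the arithmetic, with HZ assumed to be 100 purely for illustration:]

#include <stdio.h>

#define HZ 100  /* assumed tick rate, for illustration only */

static unsigned long long sched_clock_model(unsigned long jiffies)
{
	/* same arithmetic as the patch: ticks times nanoseconds per tick */
	return (unsigned long long)jiffies * (1000000000 / HZ);
}

int main(void)
{
	printf("%llu ns\n", sched_clock_model(250)); /* 2500000000 ns = 2.5 s */
	return 0;
}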
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 752dbd3e93fb..362c1fd66b12 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -234,6 +234,7 @@ void show_regs(struct pt_regs *regs)
void show_registers(struct pt_regs *regs)
{
show_regs(regs);
+ print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
current->comm, current->pid, current_thread_info(), current);
show_stack(current, (long *) regs->regs[29]);
@@ -278,47 +279,8 @@ void __declare_dbe_table(void)
);
}
-#ifdef CONFIG_MDULES
-
-/* Given an address, look for it in the module exception tables. */
-const struct exception_table_entry *search_module_dbetables(unsigned long addr)
-{
- unsigned long flags;
- const struct exception_table_entry *e = NULL;
- struct module *mod;
-
- spin_lock_irqsave(&modlist_lock, flags);
- list_for_each_entry(mod, &modules, list) {
- if (mod->arch.num_dbeentries == 0)
- continue;
-
- e = search_extable(mod->arch.dbe_table_start,
- mod->arch.dbe_table_end +
- mod->arch.num_dbeentries - 1,
- addr);
- if (e)
- break;
- }
- spin_unlock_irqrestore(&modlist_lock, flags);
-
- /* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
- return e;
-}
-
-#else
-
/* Given an address, look for it in the exception tables. */
-static inline const struct exception_table_entry *
-search_module_dbetables(unsigned long addr)
-{
- return NULL;
-}
-
-#endif
-
-/* Given an address, look for it in the exception tables. */
-const struct exception_table_entry *search_dbe_tables(unsigned long addr)
+static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
@@ -745,12 +707,25 @@ asmlinkage void do_reserved(struct pt_regs *regs)
static inline void parity_protection_init(void)
{
switch (current_cpu_data.cputype) {
+ case CPU_24K:
+ /* 24K cache parity not currently implemented in FPGA */
+ printk(KERN_INFO "Disable cache parity protection for "
+ "MIPS 24K CPU.\n");
+ write_c0_ecc(read_c0_ecc() & ~0x80000000);
+ break;
case CPU_5KC:
/* Set the PE bit (bit 31) in the c0_ecc register. */
- printk(KERN_INFO "Enable the cache parity protection for "
- "MIPS 5KC CPUs.\n");
+ printk(KERN_INFO "Enable cache parity protection for "
+ "MIPS 5KC/24K CPUs.\n");
write_c0_ecc(read_c0_ecc() | 0x80000000);
break;
+ case CPU_20KC:
+ case CPU_25KF:
+ /* Clear the DE bit (bit 16) in the c0_status register. */
+ printk(KERN_INFO "Enable cache parity protection for "
+ "MIPS 20KC/25KF CPUs.\n");
+ clear_c0_status(ST0_DE);
+ break;
default:
break;
}