| field | value | date |
|---|---|---|
| author | Ralf Bächle <ralf@linux-mips.org> | 2003-06-22 22:07:17 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2003-06-22 22:07:17 -0700 |
| commit | f6d64aeef94909c98a054590af61906d2ce5acbf (patch) | |
| tree | 38b4b00a94874a5da38eae4c81ab14bcf3b237e6 /arch/mips/kernel | |
| parent | 2e7f53ec14475d56559bcdd07acfb737b7bff1e9 (diff) | |
[PATCH] MIPS merge, generic mips bits.
This contains all the generic 32-bit MIPS code, so all arch/mips/ and
include/asm-mips/ stuff.
Diffstat (limited to 'arch/mips/kernel')
45 files changed, 4829 insertions, 5303 deletions
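Editor's orientation before the patch body: the largest new file below, cpu-probe.c, replaces scattered CPU detection with a single cpu_probe() that reads the Coprocessor 0 PRId register and switches on its fields. The sketch below is a minimal user-space illustration of that decoding only — the field masks are taken from the patch itself, but the function name, labels, and sample value are illustrative stand-ins, not the kernel's read_c0_prid() or PRID_* definitions.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the PRId decoding done by cpu_probe() in the new
 * cpu-probe.c below. The c0_prid register packs three fields:
 *   bits 23..16  company        (the patch switches on PRID_COMP_*)
 *   bits 15..8   implementation (PRID_IMP_*)
 *   bits  7..0   revision       (compared against PRID_REV_*)
 * All names and the sample value here are illustrative only.
 */
static void decode_prid(uint32_t prid)
{
	uint32_t company = prid & 0xff0000; /* e.g. PRID_COMP_LEGACY */
	uint32_t imp     = prid & 0x00ff00; /* e.g. PRID_IMP_R4000 */
	uint32_t rev     = prid & 0x0000ff; /* e.g. PRID_REV_R4400 */

	printf("company=%06x imp=%04x rev=%02x\n", company, imp, rev);
}

int main(void)
{
	decode_prid(0x000440); /* hypothetical R4400-class PRId value */
	return 0;
}
```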
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index d2761db3285c..03ce3434d023 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,45 +2,45 @@
 # Makefile for the Linux/MIPS kernel.
 #
-# EXTRA_AFLAGS = -mips3 -mcpu=r4000 # not used?
+extra-y := head.o init_task.o
+
+obj-y += branch.o cpu-probe.o process.o signal.o entry.o traps.o \
+   ptrace.o irq.o reset.o semaphore.o setup.o syscall.o \
+   sysmips.o ipc.o scall_o32.o time.o unaligned.o
-extra-y := head.o init_task.o
-obj-y += branch.o process.o signal.o entry.o \
-   traps.o ptrace.o vm86.o ioport.o reset.o \
-   semaphore.o setup.o syscall.o sysmips.o \
-   ipc.o scall_o32.o unaligned.o
 obj-$(CONFIG_MODULES) += mips_ksyms.o
-ifdef CONFIG_CPU_R3000
-obj-y += r2300_misc.o r2300_fpu.o r2300_switch.o
-else
-obj-y += r4k_misc.o r4k_switch.o
-ifdef CONFIG_CPU_R6000
-obj-y += r6000_fpu.o
-else
-obj-y += r4k_fpu.o
-endif
-endif
+obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
 obj-$(CONFIG_SMP) += smp.o
-# Old style irq support, going to die in 2.5.
-obj-$(CONFIG_NEW_IRQ) += irq.o
-obj-$(CONFIG_ROTTEN_IRQ) += old-irq.o
 obj-$(CONFIG_I8259) += i8259.o
-
-# transition from old time.c to new time.c
-# some boards uses old-time.c, some use time.c, and some use their own ones
-obj-$(CONFIG_OLD_TIME_C) += old-time.o
-obj-$(CONFIG_NEW_TIME_C) += time.o
+obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
 obj-$(CONFIG_BINFMT_IRIX) += irixelf.o irixioctl.o irixsig.o sysirix.o \
    irixinv.o
-obj-$(CONFIG_REMOTE_DEBUG) += gdb-low.o gdb-stub.o
-obj-$(CONFIG_PCI) += pci-dma.o
+obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o
 obj-$(CONFIG_PROC_FS) += proc.o
-ifdef CONFIG_PCI
-obj-$(CONFIG_NEW_PCI) += pci.o
-obj-$(CONFIG_PCI_AUTO) += pci_auto.o
+ifndef CONFIG_MAPPED_PCI_IO
+obj-y += pci-dma.o
 endif
+
+obj-$(CONFIG_MODULES) += module.o
+
+EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 6bba0684b71b..021aff38c946 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -16,7 +16,6 @@
 #include <asm/inst.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
-#include <asm/bootinfo.h>
 #include <asm/processor.h>

 /*
@@ -164,10 +163,10 @@ int __compute_return_epc(struct pt_regs *regs)
  * And now the FPA/cp1 branch instructions.
*/ case cop1_op: - if(!(mips_cpu.options & MIPS_CPU_FPU)) + if (!cpu_has_fpu) fcr31 = current->thread.fpu.soft.sr; else - asm ("cfc1\t%0,$31":"=r" (fcr31)); + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); bit = (insn.i_format.rt >> 2); bit += (bit != 0); bit += 23; diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c new file mode 100644 index 000000000000..beba0d1e82f7 --- /dev/null +++ b/arch/mips/kernel/cpu-probe.c @@ -0,0 +1,509 @@ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/stddef.h> +#include <asm/bugs.h> +#include <asm/cpu.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> + +/* + * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, + * the implementation of the "wait" feature differs between CPU families. This + * points to the function that implements CPU specific wait. + * The wait instruction stops the pipeline and reduces the power consumption of + * the CPU very much. + */ +void (*cpu_wait)(void) = NULL; + +static void r3081_wait(void) +{ + unsigned long cfg = read_c0_conf(); + write_c0_conf(cfg | R30XX_CONF_HALT); +} + +static void r39xx_wait(void) +{ + unsigned long cfg = read_c0_conf(); + write_c0_conf(cfg | TX39_CONF_HALT); +} + +static void r4k_wait(void) +{ + __asm__(".set\tmips3\n\t" + "wait\n\t" + ".set\tmips0"); +} + +void au1k_wait(void) +{ +#ifdef CONFIG_PM + /* using the wait instruction makes CP0 counter unusable */ + __asm__(".set\tmips3\n\t" + "wait\n\t" + "nop\n\t" + "nop\n\t" + "nop\n\t" + "nop\n\t" + ".set\tmips0"); +#else + __asm__("nop\n\t" + "nop"); +#endif +} + +static inline void check_wait(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + printk("Checking for 'wait' instruction... "); + switch (c->cputype) { + case CPU_R3081: + case CPU_R3081E: + cpu_wait = r3081_wait; + printk(" available.\n"); + break; + case CPU_TX3927: + cpu_wait = r39xx_wait; + printk(" available.\n"); + break; + case CPU_R4200: +/* case CPU_R4300: */ + case CPU_R4600: + case CPU_R4640: + case CPU_R4650: + case CPU_R4700: + case CPU_R5000: + case CPU_NEVADA: + case CPU_RM7000: + case CPU_TX49XX: + case CPU_4KC: + case CPU_4KEC: + case CPU_4KSC: + case CPU_5KC: +/* case CPU_20KC:*/ + cpu_wait = r4k_wait; + printk(" available.\n"); + break; + case CPU_AU1000: + case CPU_AU1100: + case CPU_AU1500: + cpu_wait = au1k_wait; + printk(" available.\n"); + break; + default: + printk(" unavailable.\n"); + break; + } +} + +void __init check_bugs(void) +{ + check_wait(); +} + +/* + * Probe whether cpu has config register by trying to play with + * alternate cache bit and see whether it matters. + * It's used by cpu_probe to distinguish between R3000A and R3081. + */ +static inline int cpu_has_confreg(void) +{ +#ifdef CONFIG_CPU_R3000 + extern unsigned long r3k_cache_size(unsigned long); + unsigned long size1, size2; + unsigned long cfg = read_c0_conf(); + + size1 = r3k_cache_size(ST0_ISC); + write_c0_conf(cfg ^ R30XX_CONF_AC); + size2 = r3k_cache_size(ST0_ISC); + write_c0_conf(cfg); + return size1 != size2; +#else + return 0; +#endif +} + +/* + * Get the FPU Implementation/Revision. + */ +static inline unsigned long cpu_get_fpu_id(void) +{ + unsigned long tmp, fpu_id; + + tmp = read_c0_status(); + __enable_fpu(); + fpu_id = read_32bit_cp1_register(CP1_REVISION); + write_c0_status(tmp); + return fpu_id; +} + +/* + * Check the CPU has an FPU the official way. 
+ */ +static inline int __cpu_has_fpu(void) +{ + return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); +} + +#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4KTLB \ + | MIPS_CPU_COUNTER | MIPS_CPU_CACHE_CDEX) + +__init void cpu_probe(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + unsigned long config0 = read_c0_config(); + unsigned long config1; + + c->processor_id = PRID_IMP_UNKNOWN; + c->fpu_id = FPIR_IMP_NONE; + c->cputype = CPU_UNKNOWN; + + if (config0 & (1 << 31)) { + /* MIPS32 or MIPS64 compliant CPU. Read Config 1 register. */ + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | + MIPS_CPU_LLSC; + config1 = read_c0_config1(); + if (config1 & (1 << 3)) + c->options |= MIPS_CPU_WATCH; + if (config1 & (1 << 2)) + c->options |= MIPS_CPU_MIPS16; + if (config1 & (1 << 1)) + c->options |= MIPS_CPU_EJTAG; + if (config1 & 1) { + c->options |= MIPS_CPU_FPU; + c->options |= MIPS_CPU_32FPR; + } + c->scache.flags = MIPS_CACHE_NOT_PRESENT; + + c->tlbsize = ((config1 >> 25) & 0x3f) + 1; + } + + c->processor_id = read_c0_prid(); + switch (c->processor_id & 0xff0000) { + case PRID_COMP_LEGACY: + switch (c->processor_id & 0xff00) { + case PRID_IMP_R2000: + c->cputype = CPU_R2000; + c->isa_level = MIPS_CPU_ISA_I; + c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX | + MIPS_CPU_LLSC; + if (__cpu_has_fpu()) + c->options |= MIPS_CPU_FPU; + c->tlbsize = 64; + break; + case PRID_IMP_R3000: + if ((c->processor_id & 0xff) == PRID_REV_R3000A) + if (cpu_has_confreg()) + c->cputype = CPU_R3081E; + else + c->cputype = CPU_R3000A; + else + c->cputype = CPU_R3000; + c->isa_level = MIPS_CPU_ISA_I; + c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX | + MIPS_CPU_LLSC; + if (__cpu_has_fpu()) + c->options |= MIPS_CPU_FPU; + c->tlbsize = 64; + break; + case PRID_IMP_R4000: + if ((c->processor_id & 0xff) >= PRID_REV_R4400) + c->cputype = CPU_R4400SC; + else + c->cputype = CPU_R4000SC; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_WATCH | MIPS_CPU_VCE | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_VR41XX: + switch (c->processor_id & 0xf0) { +#ifndef CONFIG_VR4181 + case PRID_REV_VR4111: + c->cputype = CPU_VR4111; + break; +#else + case PRID_REV_VR4181: + c->cputype = CPU_VR4181; + break; +#endif + case PRID_REV_VR4121: + c->cputype = CPU_VR4121; + break; + case PRID_REV_VR4122: + if ((c->processor_id & 0xf) < 0x3) + c->cputype = CPU_VR4122; + else + c->cputype = CPU_VR4181A; + break; + case PRID_REV_VR4131: + c->cputype = CPU_VR4131; + break; + default: + printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); + c->cputype = CPU_VR41XX; + break; + } + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS; + c->tlbsize = 32; + break; + case PRID_IMP_R4300: + c->cputype = CPU_R4300; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; + case PRID_IMP_R4600: + c->cputype = CPU_R4600; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + #if 0 + case PRID_IMP_R4650: + /* + * This processor doesn't have an MMU, so it's not + * "real easy" to run Linux on it. It is left purely + * for documentation. Commented out because it shares + * it's c0_prid id number with the TX3900. 
+ */ + c->cputype = CPU_R4650; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + #endif + case PRID_IMP_TX39: + c->isa_level = MIPS_CPU_ISA_I; + c->options = MIPS_CPU_TLB; + + if ((c->processor_id & 0xf0) == + (PRID_REV_TX3927 & 0xf0)) { + c->cputype = CPU_TX3927; + c->tlbsize = 64; + } else { + switch (c->processor_id & 0xff) { + case PRID_REV_TX3912: + c->cputype = CPU_TX3912; + c->tlbsize = 32; + break; + case PRID_REV_TX3922: + c->cputype = CPU_TX3922; + c->tlbsize = 64; + break; + default: + c->cputype = CPU_UNKNOWN; + break; + } + } + break; + case PRID_IMP_R4700: + c->cputype = CPU_R4700; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_TX49: + c->cputype = CPU_TX49XX; + c->isa_level = MIPS_CPU_ISA_III; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R5000: + c->cputype = CPU_R5000; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R5432: + c->cputype = CPU_R5432; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_WATCH | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R5500: + c->cputype = CPU_R5500; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_WATCH | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_NEVADA: + c->cputype = CPU_NEVADA; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_DIVEC | MIPS_CPU_LLSC; + c->tlbsize = 48; + break; + case PRID_IMP_R6000: + c->cputype = CPU_R6000; + c->isa_level = MIPS_CPU_ISA_II; + c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; + case PRID_IMP_R6000A: + c->cputype = CPU_R6000A; + c->isa_level = MIPS_CPU_ISA_II; + c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | + MIPS_CPU_LLSC; + c->tlbsize = 32; + break; + case PRID_IMP_RM7000: + c->cputype = CPU_RM7000; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + /* + * Undocumented RM7000: Bit 29 in the info register of + * the RM7000 v2.0 indicates if the TLB has 48 or 64 + * entries. + * + * 29 1 => 64 entry JTLB + * 0 => 48 entry JTLB + */ + c->tlbsize = (read_c0_info() & (1 << 29)) ? 
64 : 48; + break; + case PRID_IMP_R8000: + c->cputype = CPU_R8000; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_LLSC; + c->tlbsize = 384; /* has weird TLB: 3-way x 128 */ + break; + case PRID_IMP_R10000: + c->cputype = CPU_R10000; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | + MIPS_CPU_LLSC; + c->tlbsize = 64; + break; + case PRID_IMP_R12000: + c->cputype = CPU_R12000; + c->isa_level = MIPS_CPU_ISA_IV; + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_FPU | MIPS_CPU_32FPR | + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | + MIPS_CPU_LLSC; + c->tlbsize = 64; + break; + default: + c->cputype = CPU_UNKNOWN; + break; + } + break; + case PRID_COMP_MIPS: + switch (c->processor_id & 0xff00) { + case PRID_IMP_4KC: + c->cputype = CPU_4KC; + c->isa_level = MIPS_CPU_ISA_M32; + break; + case PRID_IMP_4KEC: + c->cputype = CPU_4KEC; + c->isa_level = MIPS_CPU_ISA_M32; + break; + case PRID_IMP_4KSC: + c->cputype = CPU_4KSC; + c->isa_level = MIPS_CPU_ISA_M32; + break; + case PRID_IMP_5KC: + c->cputype = CPU_5KC; + c->isa_level = MIPS_CPU_ISA_M64; + break; + case PRID_IMP_20KC: + c->cputype = CPU_20KC; + c->isa_level = MIPS_CPU_ISA_M64; + break; + default: + c->cputype = CPU_UNKNOWN; + break; + } + break; + case PRID_COMP_ALCHEMY: + switch (c->processor_id & 0xff00) { + case PRID_IMP_AU1_REV1: + case PRID_IMP_AU1_REV2: + switch ((c->processor_id >> 24) & 0xff) { + case 0: + c->cputype = CPU_AU1000; + break; + case 1: + c->cputype = CPU_AU1500; + break; + case 2: + c->cputype = CPU_AU1100; + break; + default: + panic("Unknown Au Core!"); + break; + } + c->isa_level = MIPS_CPU_ISA_M32; + break; + default: + c->cputype = CPU_UNKNOWN; + break; + } + break; + case PRID_COMP_SIBYTE: + switch (c->processor_id & 0xff00) { + case PRID_IMP_SB1: + c->cputype = CPU_SB1; + c->isa_level = MIPS_CPU_ISA_M64; + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | + MIPS_CPU_MCHECK | MIPS_CPU_EJTAG | + MIPS_CPU_WATCH | MIPS_CPU_LLSC; +#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS + /* FPU in pass1 is known to have issues. */ + c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; +#endif + break; + default: + c->cputype = CPU_UNKNOWN; + break; + } + break; + + case PRID_COMP_SANDCRAFT: + switch (c->processor_id & 0xff00) { + case PRID_IMP_SR71000: + c->cputype = CPU_SR71000; + c->isa_level = MIPS_CPU_ISA_M64; + c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | + MIPS_CPU_4KTLB | MIPS_CPU_FPU | + MIPS_CPU_COUNTER | MIPS_CPU_MCHECK; + c->scache.ways = 8; + c->tlbsize = 64; + break; + default: + c->cputype = CPU_UNKNOWN; + break; + } + break; + default: + c->cputype = CPU_UNKNOWN; + c->tlbsize = ((config1 >> 25) & 0x3f) + 1; + } + if (c->options & MIPS_CPU_FPU) + c->fpu_id = cpu_get_fpu_id(); +} + +__init void cpu_report(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + printk("CPU revision is: %08x\n", c->processor_id); + if (c->options & MIPS_CPU_FPU) + printk("FPU revision is: %08x\n", c->fpu_id); +} diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 3d816c8bc385..ddc3eb8e8109 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -1,5 +1,4 @@ /* -/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. @@ -9,79 +8,155 @@ * Copyright (C) 2001 MIPS Technologies, Inc. 
*/ #include <linux/config.h> +#include <linux/init.h> #include <linux/sys.h> #include <asm/addrspace.h> #include <asm/asm.h> -#include <asm/current.h> +#include <asm/cacheops.h> #include <asm/errno.h> #include <asm/mipsregs.h> #include <asm/page.h> -#include <asm/pgtable.h> +#include <asm/pgtable-bits.h> +#include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/processor.h> -#include <asm/regdef.h> #include <asm/fpregdef.h> #include <asm/unistd.h> #include <asm/isadep.h> +#include <asm/thread_info.h> -/* This duplicates the definition from <linux/sched.h> */ -#define PT_TRACESYS 0x00000002 /* tracing system calls */ +#ifdef CONFIG_PREEMPT + .macro preempt_stop + cli + .endm + .macro init_ret_intr temp + mfc0 t0, CP0_STATUS # cli + ori t0, t0, 1 + xori t0, t0, 1 + mtc0 t0, CP0_STATUS + SSNOP; SSNOP; SSNOP + + lw \temp, TI_PRE_COUNT($28) + subu \temp, \temp, 1 + sw \temp, TI_PRE_COUNT($28) + .endm +#else + .macro preempt_stop + .endm + + .macro init_ret_intr + .endm + +#define resume_kernel restore_all +#endif .text - .align 4 + .align 5 .set push .set reorder -EXPORT(ret_from_fork) - move a0, v0 # prev - jal schedule_tail -#error lw t0, TASK_PTRACE($28) # syscall tracing enabled? - andi t0, PT_TRACESYS - bnez t0, tracesys_exit - j ret_from_sys_call - -tracesys_exit: jal syscall_trace - b ret_from_sys_call - -EXPORT(ret_from_irq) -EXPORT(ret_from_exception) +FEXPORT(ret_from_irq) +FEXPORT(ret_from_exception) lw t0, PT_STATUS(sp) # returning to kernel mode? andi t0, t0, KU_USER - bnez t0, ret_from_sys_call + beqz t0, resume_kernel + +FEXPORT(resume_userspace) + mfc0 t0, CP0_STATUS # make sure we dont miss an + ori t0, t0, 1 # interrupt setting need_resched + xori t0, t0, 1 # between sampling and return + mtc0 t0, CP0_STATUS + SSNOP; SSNOP; SSNOP + + LONG_L a2, TI_FLAGS($28) + andi a2, _TIF_WORK_MASK # current->work (ignoring + # syscall_trace) + bnez a2, work_pending j restore_all -reschedule: jal schedule +#ifdef CONFIG_PREEMPT +ENTRY(resume_kernel) + lw t0, TI_PRE_COUNT($28) + bnez t0, restore_all + LONG_L t0, TI_FLAGS($28) + andi t1, t0, _TIF_NEED_RESCHED + beqz restore_all +#ifdef CONFIG_SMP + lw t0, TI_CPU($28) + la t1, irq_stat + sll t0, 5 # *sizeof(irq_cpustat_t) + addu t0, t1 + lw t1, local_bh_count(t0) + addl t0, local_irq_count(t0) +#else + lw t1, irq_stat+local_bh_count + addl t0, irq_stat+local_irq_count +#endif + addu t0, t1 + bnez t0, restore_all + lw t0, TI_PRE_COUNT($28) + addiu t0, 1 + sw t0, TI_PRE_COUNT($28) + sti + movl t0, TI_TASK($28) # ti->task + sw zero, TASK_STATE(t0) # current->state = TASK_RUNNING + jal schedule + j ret_from_intr +#endif -EXPORT(ret_from_sys_call) - .type ret_from_irq,@function +FEXPORT(ret_from_fork) + jal schedule_tail - mfc0 t0, CP0_STATUS # need_resched and signals atomic test - ori t0, t0, 1 - xori t0, t0, 1 +FEXPORT(syscall_exit) + mfc0 t0, CP0_STATUS # make sure need_resched and + ori t0, t0, 1 # signals dont change between + xori t0, t0, 1 # sampling and return mtc0 t0, CP0_STATUS - nop; nop; nop + SSNOP; SSNOP; SSNOP -#error lw v0, TASK_NEED_RESCHED($28) -#error lw v1, TASK_SIGPENDING($28) - bnez v0, reschedule - bnez v1, signal_return -restore_all: .set noat + LONG_L a2, TI_FLAGS($28) # current->work + bnez a2, syscall_exit_work + +FEXPORT(restore_all) + .set noat RESTORE_ALL_AND_RET .set at -/* Put this behind restore_all for the sake of the branch prediction. 
*/ -signal_return: - .type signal_return, @function +FEXPORT(work_pending) + andi t0, a2, _TIF_NEED_RESCHED + bnez t0, work_notifysig +work_resched: + jal schedule - mfc0 t0, CP0_STATUS - ori t0, t0, 1 + mfc0 t0, CP0_STATUS # make sure need_resched and + ori t0, t0, 1 # signals dont change between + xori t0, t0, 1 # sampling and return mtc0 t0, CP0_STATUS + SSNOP; SSNOP; SSNOP + + LONG_L a2, TI_FLAGS($28) + andi a2, _TIF_WORK_MASK # is there any work to be done + # other than syscall tracing? + beqz a2, restore_all + andi t0, a2, _TIF_NEED_RESCHED + bnez t0, work_resched - move a0, zero - move a1, sp -#error jal do_signal - b restore_all +work_notifysig: # deal with pending signals and + # notify-resume requests + move a0, sp + li a1, 0 + jal do_notify_resume # a2 already loaded + j restore_all + +FEXPORT(syscall_exit_work) + LONG_L t0, TI_FLAGS($28) + bgez t0, work_pending # trace bit is set + mfc0 t0, CP0_STATUS # could let do_syscall_trace() + ori t0, t0, 1 # call schedule() instead + mtc0 t0, CP0_STATUS + jal do_syscall_trace + b resume_userspace /* * Common spurious interrupt handler. @@ -93,25 +168,105 @@ LEAF(spurious_interrupt) * Someone tried to fool us by sending an interrupt but we * couldn't find a cause for it. */ - lui t1,%hi(spurious_count) - .set reorder - lw t0,%lo(spurious_count)(t1) - .set noreorder + lui t1,%hi(irq_err_count) + lw t0,%lo(irq_err_count)(t1) addiu t0,1 - sw t0,%lo(spurious_count)(t1) + sw t0,%lo(irq_err_count)(t1) j ret_from_irq END(spurious_interrupt) + __INIT + + .set reorder + + NESTED(except_vec1_generic, 0, sp) + PANIC("Exception vector 1 called") + END(except_vec1_generic) + + /* + * General exception vector. Used for all CPUs except R4000 + * and R4400 SC and MC versions. + */ + NESTED(except_vec3_generic, 0, sp) +#if R5432_CP0_INTERRUPT_WAR + mfc0 k0, CP0_INDEX +#endif + mfc0 k1, CP0_CAUSE + la k0, exception_handlers + andi k1, k1, 0x7c + addu k0, k0, k1 + lw k0, (k0) + jr k0 + END(except_vec3_generic) + .set at + + /* General exception vector R4000 version. */ + NESTED(except_vec3_r4000, 0, sp) + .set push + .set mips3 + .set noat + mfc0 k1, CP0_CAUSE + li k0, 31<<2 + andi k1, k1, 0x7c + .set noreorder + beq k1, k0, handle_vced + li k0, 14<<2 + beq k1, k0, handle_vcei + lui k0, %hi(exception_handlers) + addiu k0, %lo(exception_handlers) + .set reorder + addu k0, k0, k1 + lw k0, (k0) + jr k0 + + /* + * Big shit, we now may have two dirty primary cache lines for + * the same physical address. We can savely invalidate the + * line pointed to by c0_badvaddr because after return from + * this exception handler the load / store will be re-executed. + */ +handle_vced: + mfc0 k0, CP0_BADVADDR + li k1, -4 + and k0, k1 + mtc0 zero, CP0_TAGLO + cache Index_Store_Tag_D,(k0) + cache Hit_Writeback_Inv_SD,(k0) +#ifdef CONFIG_PROC_FS + lui k0, %hi(vced_count) + lw k1, %lo(vced_count)(k0) + addiu k1, 1 + sw k1, %lo(vced_count)(k0) +#endif + eret + +handle_vcei: + mfc0 k0, CP0_BADVADDR + cache Hit_Writeback_Inv_SD, (k0) # also cleans pi +#ifdef CONFIG_PROC_FS + lui k0, %hi(vcei_count) + lw k1, %lo(vcei_count)(k0) + addiu k1, 1 + sw k1, %lo(vcei_count)(k0) +#endif + eret + .set pop + END(except_vec3_r4000) + + __FINIT + /* * Build a default exception handler for the exceptions that don't need * special handlers. If you didn't know yet - I *like* playing games with * the C preprocessor ... 
*/ #define __BUILD_clear_none(exception) -#define __BUILD_clear_sti(exception) \ +#define __BUILD_clear_sti(exception) \ STI -#define __BUILD_clear_cli(exception) \ +#define __BUILD_clear_cli(exception) \ CLI +#define __BUILD_clear_kmode(exception) \ + KMODE #define __BUILD_clear_fpe(exception) \ cfc1 a1,fcr31; \ li a2,~(0x3f<<12); \ @@ -148,6 +303,7 @@ EXPORT(exception_count_##exception); \ NESTED(handle_##exception, PT_SIZE, sp); \ .set noat; \ SAVE_ALL; \ + FEXPORT(handle_##exception##_int); \ __BUILD_clear_##clear(exception); \ .set at; \ __BUILD_##verbose(exception); \ @@ -159,16 +315,18 @@ EXPORT(exception_count_##exception); \ BUILD_HANDLER(adel,ade,ade,silent) /* #4 */ BUILD_HANDLER(ades,ade,ade,silent) /* #5 */ - BUILD_HANDLER(ibe,ibe,cli,verbose) /* #6 */ - BUILD_HANDLER(dbe,dbe,cli,silent) /* #7 */ - BUILD_HANDLER(bp,bp,sti,silent) /* #9 */ - BUILD_HANDLER(ri,ri,sti,silent) /* #10 */ - BUILD_HANDLER(cpu,cpu,sti,silent) /* #11 */ - BUILD_HANDLER(ov,ov,sti,silent) /* #12 */ - BUILD_HANDLER(tr,tr,sti,silent) /* #13 */ + BUILD_HANDLER(ibe,be,cli,silent) /* #6 */ + BUILD_HANDLER(dbe,be,cli,silent) /* #7 */ + BUILD_HANDLER(bp,bp,kmode,silent) /* #9 */ + BUILD_HANDLER(ri,ri,kmode,silent) /* #10 */ + BUILD_HANDLER(cpu,cpu,kmode,silent) /* #11 */ + BUILD_HANDLER(ov,ov,kmode,silent) /* #12 */ + BUILD_HANDLER(tr,tr,kmode,silent) /* #13 */ BUILD_HANDLER(fpe,fpe,fpe,silent) /* #15 */ - BUILD_HANDLER(watch,watch,sti,verbose) /* #23 */ - BUILD_HANDLER(reserved,reserved,sti,verbose) /* others */ + BUILD_HANDLER(mdmx,mdmx,sti,silent) /* #22 */ + BUILD_HANDLER(watch,watch,sti,silent) /* #23 */ + BUILD_HANDLER(mcheck,mcheck,cli,silent) /* #24 */ + BUILD_HANDLER(reserved,reserved,kmode,silent) /* others */ .set pop diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S index 221eeb254e6b..c189294c9c4b 100644 --- a/arch/mips/kernel/gdb-low.S +++ b/arch/mips/kernel/gdb-low.S @@ -14,6 +14,16 @@ #include <asm/gdb-stub.h> /* + * [jsun] We reserves about 2x GDB_FR_SIZE in stack. The lower (addressed) + * part is used to store registers and passed to exception handler. + * The upper part is reserved for "call func" feature where gdb client + * saves some of the regs, setups call frame and passes args. + * + * A trace shows about 200 bytes are used to store about half of all regs. + * The rest should be big enough for frame setup and passing args. 
+ */ + +/* * The low level trap handler */ .align 5 @@ -38,7 +48,7 @@ nop 1: move k0,sp - subu sp,k1,GDB_FR_SIZE + subu sp,k1,GDB_FR_SIZE*2 # see comment above sw k0,GDB_FR_REG29(sp) sw v0,GDB_FR_REG2(sp) @@ -97,7 +107,7 @@ sw ra,GDB_FR_REG31(sp) CLI /* disable interrupts */ - + /* * Followed by the floating point registers */ @@ -145,9 +155,9 @@ * FPU control registers */ - mfc1 v0,CP1_STATUS + cfc1 v0,CP1_STATUS sw v0,GDB_FR_FSR(sp) - mfc1 v0,CP1_REVISION + cfc1 v0,CP1_REVISION sw v0,GDB_FR_FIR(sp) /* @@ -211,7 +221,7 @@ lw v0,GDB_FR_CP0_CONTEXT(sp) mtc0 v1,CP0_INDEX mtc0 v0,CP0_CONTEXT - + /* * Next, the floating point registers @@ -304,7 +314,7 @@ lw v1,GDB_FR_REG3(sp) lw v0,GDB_FR_REG2(sp) lw $1,GDB_FR_REG1(sp) -#ifdef CONFIG_CPU_R3000 +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) lw k0, GDB_FR_EPC(sp) lw sp, GDB_FR_REG29(sp) /* Deallocate stack */ jr k0 @@ -321,11 +331,7 @@ END(trap_low) LEAF(kgdb_read_byte) - .set push - .set noreorder - .set nomacro 4: lb t0, (a0) - .set pop sb t0, (a1) li v0, 0 jr ra @@ -335,11 +341,7 @@ LEAF(kgdb_read_byte) END(kgdb_read_byte) LEAF(kgdb_write_byte) - .set push - .set noreorder - .set nomacro 5: sb a0, (a1) - .set pop li v0, 0 jr ra .section __ex_table,"a" @@ -349,6 +351,7 @@ LEAF(kgdb_write_byte) .type kgdbfault@function .ent kgdbfault + kgdbfault: li v0, -EFAULT jr ra .end kgdbfault diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c index e279dc48ca8a..a3e698849da9 100644 --- a/arch/mips/kernel/gdb-stub.c +++ b/arch/mips/kernel/gdb-stub.c @@ -11,8 +11,6 @@ * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de> * * Copyright (C) 1995 Andreas Busse - * - * $Id: gdb-stub.c,v 1.6 1999/05/01 22:40:35 ralf Exp $ */ /* @@ -64,7 +62,7 @@ * Host: Reply: * $m0,10#2a +$00010203040506070809101112131415#42 * - * + * * ============== * MORE EXAMPLES: * ============== @@ -74,11 +72,11 @@ * going. In this scenario the host machine was a PC and the * target platform was a Galileo EVB64120A MIPS evaluation * board. - * + * * Step 1: * First download gdb-5.0.tar.gz from the internet. * and then build/install the package. - * + * * Example: * $ tar zxf gdb-5.0.tar.gz * $ cd gdb-5.0 @@ -87,40 +85,39 @@ * $ install * $ which mips-linux-elf-gdb * /usr/local/bin/mips-linux-elf-gdb - * + * * Step 2: * Configure linux for remote debugging and build it. - * + * * Example: * $ cd ~/linux * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging> * $ make dep; make vmlinux - * + * * Step 3: * Download the kernel to the remote target and start - * the kernel running. It will promptly halt and wait + * the kernel running. It will promptly halt and wait * for the host gdb session to connect. It does this - * since the "Kernel Hacking" option has defined - * CONFIG_REMOTE_DEBUG which in turn enables your calls + * since the "Kernel Hacking" option has defined + * CONFIG_KGDB which in turn enables your calls * to: * set_debug_traps(); * breakpoint(); - * + * * Step 4: * Start the gdb session on the host. - * + * * Example: * $ mips-linux-elf-gdb vmlinux * (gdb) set remotebaud 115200 * (gdb) target remote /dev/ttyS1 - * ...at this point you are connected to + * ...at this point you are connected to * the remote target and can use gdb - * in the normal fasion. Setting + * in the normal fasion. Setting * breakpoints, single stepping, * printing variables, etc. 
- * */ - +#include <linux/config.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/signal.h> @@ -128,6 +125,8 @@ #include <linux/mm.h> #include <linux/console.h> #include <linux/init.h> +#include <linux/slab.h> +#include <linux/reboot.h> #include <asm/asm.h> #include <asm/mipsregs.h> @@ -176,8 +175,8 @@ static const char hexchars[]="0123456789abcdef"; /* Used to prevent crashes in memory access. Note that they'll crash anyway if we haven't set up fault handlers yet... */ -int kgdb_read_byte(unsigned *address, unsigned *dest); -int kgdb_write_byte(unsigned val, unsigned *dest); +int kgdb_read_byte(unsigned char *address, unsigned char *dest); +int kgdb_write_byte(unsigned char val, unsigned char *dest); /* * Convert ch from a hex digit to an int @@ -214,7 +213,7 @@ static void getpacket(char *buffer) checksum = 0; xmitcsum = -1; count = 0; - + /* * now, read until a # or end of buffer is found */ @@ -376,10 +375,10 @@ void set_debug_traps(void) unsigned long flags; unsigned char c; - save_and_cli(flags); + local_irq_save(flags); for (ht = hard_trap_info; ht->tt && ht->signo; ht++) saved_vectors[ht->tt] = set_except_vector(ht->tt, trap_low); - + putDebugChar('+'); /* 'hello world' */ /* * In case GDB is started before us, ack any packets @@ -392,7 +391,7 @@ void set_debug_traps(void) putDebugChar('+'); /* ack it */ initialized = 1; - restore_flags(flags); + local_irq_restore(flags); } /* @@ -548,7 +547,7 @@ static void single_step(struct gdb_regs *regs) targ += 4 + (insn.i_format.simmediate << 2); break; } - + if (is_branch) { i = 0; if (is_cond && targ != (regs->cp0_epc + 8)) { @@ -568,7 +567,7 @@ static void single_step(struct gdb_regs *regs) /* * If asynchronously interrupted by gdb, then we need to set a breakpoint - * at the interrupted instruction so that we wind up stopped with a + * at the interrupted instruction so that we wind up stopped with a * reasonable stack frame. */ static struct gdb_bp_save async_bp; @@ -578,7 +577,7 @@ void set_async_breakpoint(unsigned int epc) async_bp.addr = epc; async_bp.val = *(unsigned *)epc; *(unsigned *)epc = BP; - flush_cache_all(); + __flush_cache_all(); } @@ -596,31 +595,11 @@ void handle_exception (struct gdb_regs *regs) char *ptr; unsigned long *stack; -#if 0 - printk("in handle_exception()\n"); - show_gdbregs(regs); -#endif - - /* - * First check trap type. If this is CPU_UNUSABLE and CPU_ID is 1, - * the simply switch the FPU on and return since this is no error - * condition. kernel/traps.c does the same. - * FIXME: This doesn't work yet, so we don't catch CPU_UNUSABLE - * traps for now. 
- */ - trap = (regs->cp0_cause & 0x7c) >> 2; -/* printk("trap=%d\n",trap); */ - if (trap == 11) { - if (((regs->cp0_cause >> CAUSEB_CE) & 3) == 1) { - regs->cp0_status |= ST0_CU1; - return; - } - } - /* * If we're in breakpoint() increment the PC */ - if (trap == 9 && regs->cp0_epc == (unsigned long)breakinst) + trap = (regs->cp0_cause & 0x7c) >> 2; + if (trap == 9 && regs->cp0_epc == (unsigned long)breakinst) regs->cp0_epc += 4; /* @@ -630,7 +609,7 @@ void handle_exception (struct gdb_regs *regs) if (step_bp[0].addr) { *(unsigned *)step_bp[0].addr = step_bp[0].val; step_bp[0].addr = 0; - + if (step_bp[1].addr) { *(unsigned *)step_bp[1].addr = step_bp[1].val; step_bp[1].addr = 0; @@ -708,6 +687,11 @@ void handle_exception (struct gdb_regs *regs) output_buffer[3] = 0; break; + case 'D': + /* detach; let CPU run */ + putpacket(output_buffer); + return; + case 'd': /* toggle debug flag */ break; @@ -724,29 +708,24 @@ void handle_exception (struct gdb_regs *regs) ptr = mem2hex((char *)®s->frame_ptr, ptr, 2*4, 0); /* frp */ ptr = mem2hex((char *)®s->cp0_index, ptr, 16*4, 0); /* cp0 */ break; - + /* * set the value of the CPU registers - return OK - * FIXME: Needs to be written */ case 'G': { -#if 0 - unsigned long *newsp, psr; - ptr = &input_buffer[1]; - hex2mem(ptr, (char *)registers, 16 * 4, 0); /* G & O regs */ - - /* - * See if the stack pointer has moved. If so, then copy the - * saved locals and ins to the new location. - */ - - newsp = (unsigned long *)registers[SP]; - if (sp != newsp) - sp = memcpy(newsp, sp, 16 * 4); - -#endif + hex2mem(ptr, (char *)®s->reg0, 32*4, 0); + ptr += 32*8; + hex2mem(ptr, (char *)®s->cp0_status, 6*4, 0); + ptr += 6*8; + hex2mem(ptr, (char *)®s->fpr0, 32*4, 0); + ptr += 32*8; + hex2mem(ptr, (char *)®s->cp1_fsr, 2*4, 0); + ptr += 2*8; + hex2mem(ptr, (char *)®s->frame_ptr, 2*4, 0); + ptr += 2*8; + hex2mem(ptr, (char *)®s->cp0_index, 16*4, 0); strcpy(output_buffer,"OK"); } break; @@ -770,7 +749,7 @@ void handle_exception (struct gdb_regs *regs) /* * MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */ - case 'M': + case 'M': ptr = &input_buffer[1]; if (hexToInt(&ptr, &addr) @@ -789,13 +768,13 @@ void handle_exception (struct gdb_regs *regs) /* * cAA..AA Continue at address AA..AA(optional) */ - case 'c': + case 'c': /* try to read optional parameter, pc unchanged if no parm */ ptr = &input_buffer[1]; if (hexToInt(&ptr, &addr)) regs->cp0_epc = addr; - + /* * Need to flush the instruction cache here, as we may * have deposited a breakpoint, and the icache probably @@ -805,26 +784,21 @@ void handle_exception (struct gdb_regs *regs) * NB: We flush both caches, just to be sure... */ - flush_cache_all(); + __flush_cache_all(); return; /* NOTREACHED */ break; /* - * kill the program - */ - case 'k' : - break; /* do nothing */ - - - /* - * Reset the whole machine (FIXME: system dependent) + * kill the program; let us try to restart the machine + * Reset the whole machine. */ + case 'k': case 'r': + machine_restart("kgdb restarts machine"); break; - /* * Step to next instruction */ @@ -834,7 +808,7 @@ void handle_exception (struct gdb_regs *regs) * use breakpoints and continue, instead. 
*/ single_step(regs); - flush_cache_all(); + __flush_cache_all(); return; /* NOTREACHED */ @@ -844,7 +818,7 @@ void handle_exception (struct gdb_regs *regs) */ case 'b': { -#if 0 +#if 0 int baudrate; extern void set_timer_3(); @@ -904,30 +878,43 @@ void breakpoint(void) if (!initialized) return; - __asm__ __volatile__(" - .globl breakinst - .set noreorder - nop -breakinst: break - nop - .set reorder - "); + __asm__ __volatile__( + ".globl breakinst\n\t" + ".set\tnoreorder\n\t" + "nop\n\t" + "breakinst:\tbreak\n\t" + "nop\n\t" + ".set\treorder" + ); } void adel(void) { - __asm__ __volatile__(" - .globl adel - la $8,0x80000001 - lw $9,0($8) - "); + __asm__ __volatile__( + ".globl\tadel\n\t" + "la\t$8,0x80000001\n\t" + "lw\t$9,0($8)\n\t" + ); +} + +/* + * malloc is needed by gdb client in "call func()", even a private one + * will make gdb happy + */ +static void *malloc(size_t size) +{ + return kmalloc(size, GFP_ATOMIC); +} + +static void free(void *where) +{ + kfree(where); } #ifdef CONFIG_GDB_CONSOLE -void gdb_puts(const char *str) +void gdb_putsn(const char *str, int l) { - int l = strlen(str); char outbuf[18]; outbuf[0]='O'; @@ -936,7 +923,7 @@ void gdb_puts(const char *str) int i = (l>8)?8:l; mem2hex((char *)str, &outbuf[1], i, 0); outbuf[(i*2)+1]=0; - putpacket(outbuf); + putpacket(outbuf); str += i; l -= i; } @@ -944,7 +931,7 @@ void gdb_puts(const char *str) static void gdb_console_write(struct console *con, const char *s, unsigned n) { - gdb_puts(s); + gdb_putsn(s, n); } static struct console gdb_console = { @@ -958,5 +945,5 @@ __init void register_gdb_console(void) { register_console(&gdb_console); } - + #endif diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 3714680d6d44..d801659903c9 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -1,6 +1,4 @@ /* - * arch/mips/kernel/head.S - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. @@ -13,550 +11,224 @@ * Further modifications by David S. Miller and Harald Koerfgen * Copyright (C) 1999 Silicon Graphics, Inc. * - * Head.S contains the MIPS exception handler and startup code. - * - ************************************************************************** - * 9 Nov, 2000. - * Added Cache Error exception handler and SBDDP EJTAG debug exception. - * - * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - ************************************************************************** + * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. */ #include <linux/config.h> +#include <linux/init.h> #include <linux/threads.h> #include <asm/asm.h> -#include <asm/cacheops.h> -#include <asm/current.h> #include <asm/offset.h> +#include <asm/pgtable-bits.h> #include <asm/processor.h> #include <asm/regdef.h> #include <asm/cachectl.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> -#include <asm/bootinfo.h> - - .text - /* - * Reserved space for exception handlers. - * Necessary for machines which link their kernels at KSEG0. - * FIXME: Use the initcode feature to get rid of unused handler - * variants. - */ - .fill 0x280 -/* - * This is space for the interrupt handlers. - * After trap_init() they are located at virtual address KSEG0. 
- * - * These handlers much be written in a relocatable manner - * because based upon the cpu type an arbitrary one of the - * following pieces of code will be copied to the KSEG0 - * vector location. - */ - /* TLB refill, EXL == 0, R4xx0, non-R4600 version */ - .set noreorder - .set noat - LEAF(except_vec0_r4000) - .set mips3 -#ifdef CONFIG_SMP - mfc0 k1, CP0_CONTEXT - la k0, current_pgd - srl k1, 23 - sll k1, 2 - addu k1, k0, k1 - lw k1, (k1) -#else - lw k1, current_pgd # get pgd pointer -#endif - mfc0 k0, CP0_BADVADDR # Get faulting address - srl k0, k0, 22 # get pgd only bits - - sll k0, k0, 2 - addu k1, k1, k0 # add in pgd offset - mfc0 k0, CP0_CONTEXT # get context reg - lw k1, (k1) -#if defined(CONFIG_CPU_VR41XX) - srl k0, k0, 3 # get pte offset -#else - srl k0, k0, 1 # get pte offset -#endif - and k0, k0, 0xff8 - addu k1, k1, k0 # add in offset - lw k0, 0(k1) # get even pte - lw k1, 4(k1) # get odd pte - srl k0, k0, 6 # convert to entrylo0 - mtc0 k0, CP0_ENTRYLO0 # load it - srl k1, k1, 6 # convert to entrylo1 - mtc0 k1, CP0_ENTRYLO1 # load it - b 1f - tlbwr # write random tlb entry -1: - nop - eret # return from trap - END(except_vec0_r4000) - - /* TLB refill, EXL == 0, R4600 version */ - LEAF(except_vec0_r4600) - .set mips3 - mfc0 k0, CP0_BADVADDR - srl k0, k0, 22 - lw k1, current_pgd # get pgd pointer - sll k0, k0, 2 - addu k1, k1, k0 - mfc0 k0, CP0_CONTEXT - lw k1, (k1) - srl k0, k0, 1 - and k0, k0, 0xff8 - addu k1, k1, k0 - lw k0, 0(k1) - lw k1, 4(k1) - srl k0, k0, 6 - mtc0 k0, CP0_ENTRYLO0 - srl k1, k1, 6 - mtc0 k1, CP0_ENTRYLO1 - nop - tlbwr - nop - eret - END(except_vec0_r4600) - - /* TLB refill, EXL == 0, R52x0 "Nevada" version */ - /* - * This version has a bug workaround for the Nevada. It seems - * as if under certain circumstances the move from cp0_context - * might produce a bogus result when the mfc0 instruction and - * it's consumer are in a different cacheline or a load instruction, - * probably any memory reference, is between them. This is - * potencially slower than the R4000 version, so we use this - * special version. 
- */ - .set noreorder - .set noat - LEAF(except_vec0_nevada) - .set mips3 - mfc0 k0, CP0_BADVADDR # Get faulting address - srl k0, k0, 22 # get pgd only bits - lw k1, current_pgd # get pgd pointer - sll k0, k0, 2 - addu k1, k1, k0 # add in pgd offset - lw k1, (k1) - mfc0 k0, CP0_CONTEXT # get context reg - srl k0, k0, 1 # get pte offset - and k0, k0, 0xff8 - addu k1, k1, k0 # add in offset - lw k0, 0(k1) # get even pte - lw k1, 4(k1) # get odd pte - srl k0, k0, 6 # convert to entrylo0 - mtc0 k0, CP0_ENTRYLO0 # load it - srl k1, k1, 6 # convert to entrylo1 - mtc0 k1, CP0_ENTRYLO1 # load it - nop # QED specified nops - nop - tlbwr # write random tlb entry - nop # traditional nop - eret # return from trap - END(except_vec0_nevada) - - /* TLB refill, EXL == 0, R4[40]00/R5000 badvaddr hwbug version */ - LEAF(except_vec0_r45k_bvahwbug) - .set mips3 - mfc0 k0, CP0_BADVADDR - srl k0, k0, 22 - lw k1, current_pgd # get pgd pointer - sll k0, k0, 2 - addu k1, k1, k0 - mfc0 k0, CP0_CONTEXT - lw k1, (k1) - srl k0, k0, 1 - and k0, k0, 0xff8 - addu k1, k1, k0 - lw k0, 0(k1) - lw k1, 4(k1) - nop /* XXX */ - tlbp - srl k0, k0, 6 - mtc0 k0, CP0_ENTRYLO0 - srl k1, k1, 6 - mfc0 k0, CP0_INDEX - mtc0 k1, CP0_ENTRYLO1 - bltzl k0, 1f - tlbwr -1: - nop - eret - END(except_vec0_r45k_bvahwbug) - -#ifdef CONFIG_SMP - /* TLB refill, EXL == 0, R4000 MP badvaddr hwbug version */ - LEAF(except_vec0_r4k_mphwbug) - .set mips3 - mfc0 k0, CP0_BADVADDR - srl k0, k0, 22 - lw k1, current_pgd # get pgd pointer - sll k0, k0, 2 - addu k1, k1, k0 - mfc0 k0, CP0_CONTEXT - lw k1, (k1) - srl k0, k0, 1 - and k0, k0, 0xff8 - addu k1, k1, k0 - lw k0, 0(k1) - lw k1, 4(k1) - nop /* XXX */ - tlbp - srl k0, k0, 6 - mtc0 k0, CP0_ENTRYLO0 - srl k1, k1, 6 - mfc0 k0, CP0_INDEX - mtc0 k1, CP0_ENTRYLO1 - bltzl k0, 1f - tlbwr -1: - nop - eret - END(except_vec0_r4k_mphwbug) -#endif - - /* TLB refill, EXL == 0, R4000 UP 250MHZ entrylo[01] hwbug version */ - LEAF(except_vec0_r4k_250MHZhwbug) - .set mips3 - mfc0 k0, CP0_BADVADDR - srl k0, k0, 22 - lw k1, current_pgd # get pgd pointer - sll k0, k0, 2 - addu k1, k1, k0 - mfc0 k0, CP0_CONTEXT - lw k1, (k1) - srl k0, k0, 1 - and k0, k0, 0xff8 - addu k1, k1, k0 - lw k0, 0(k1) - lw k1, 4(k1) - srl k0, k0, 6 - mtc0 zero, CP0_ENTRYLO0 - mtc0 k0, CP0_ENTRYLO0 - srl k1, k1, 6 - mtc0 zero, CP0_ENTRYLO1 - mtc0 k1, CP0_ENTRYLO1 - b 1f - tlbwr -1: - nop - eret - END(except_vec0_r4k_250MHZhwbug) - -#ifdef CONFIG_SMP - /* TLB refill, EXL == 0, R4000 MP 250MHZ entrylo[01]+badvaddr bug version */ - LEAF(except_vec0_r4k_MP250MHZhwbug) - .set mips3 - mfc0 k0, CP0_BADVADDR - srl k0, k0, 22 - lw k1, current_pgd # get pgd pointer - sll k0, k0, 2 - addu k1, k1, k0 - mfc0 k0, CP0_CONTEXT - lw k1, (k1) - srl k0, k0, 1 - and k0, k0, 0xff8 - addu k1, k1, k0 - lw k0, 0(k1) - lw k1, 4(k1) - nop /* XXX */ - tlbp - srl k0, k0, 6 - mtc0 zero, CP0_ENTRYLO0 - mtc0 k0, CP0_ENTRYLO0 - mfc0 k0, CP0_INDEX - srl k1, k1, 6 - mtc0 zero, CP0_ENTRYLO1 - mtc0 k1, CP0_ENTRYLO1 - bltzl k0, 1f - tlbwr -1: - nop - eret - END(except_vec0_r4k_MP250MHZhwbug) -#endif - /* TLB refill, R[23]00 version */ - LEAF(except_vec0_r2300) - .set noat - .set mips1 - mfc0 k0, CP0_BADVADDR - lw k1, current_pgd # get pgd pointer - srl k0, k0, 22 - sll k0, k0, 2 - addu k1, k1, k0 - mfc0 k0, CP0_CONTEXT - lw k1, (k1) - and k0, k0, 0xffc - addu k1, k1, k0 - lw k0, (k1) - nop - mtc0 k0, CP0_ENTRYLO0 - mfc0 k1, CP0_EPC - tlbwr - jr k1 - rfe - END(except_vec0_r2300) - - - /* XTLB refill, EXL == 0, R4xx0 cpus only use this... 
*/ - NESTED(except_vec1_generic, 0, sp) - .set noat - .set mips3 - /* Register saving is delayed as long as we don't know - * which registers really need to be saved. - */ - mfc0 k1, CP0_CONTEXT - dsra k1, 1 - lwu k0, (k1) # May cause another exception - lwu k1, 4(k1) - dsrl k0, 6 # Convert to EntryLo format - dsrl k1, 6 # Convert to EntryLo format - dmtc0 k0, CP0_ENTRYLO0 - dmtc0 k1, CP0_ENTRYLO1 - nop # Needed for R4[04]00 pipeline - tlbwr - nop # Needed for R4[04]00 pipeline - nop - nop - eret - nop /* Workaround for R4000 bug. */ - eret - END(except_vec1_generic) - - /* Cache Error */ - LEAF(except_vec2_generic) - .set noat - .set mips0 - /* - * This is a very bad place to be. Our cache error - * detection has triggered. If we have write-back data - * in the cache, we may not be able to recover. As a - * first-order desperate measure, turn off KSEG0 cacheing. - */ - mfc0 k0,CP0_CONFIG - li k1,~CONF_CM_CMASK - and k0,k0,k1 - ori k0,k0,CONF_CM_UNCACHED - mtc0 k0,CP0_CONFIG - /* Give it a few cycles to sink in... */ - nop - nop - nop - - j cache_parity_error - nop - END(except_vec2_generic) - - /* General exception vector R4000 version. */ - NESTED(except_vec3_r4000, 0, sp) - .set noat - mfc0 k1, CP0_CAUSE - andi k1, k1, 0x7c - li k0, 31<<2 - beq k1, k0, handle_vced - li k0, 14<<2 - beq k1, k0, handle_vcei - la k0, exception_handlers - addu k0, k0, k1 - lw k0, (k0) - nop - jr k0 - nop - -/* - * Big shit, we now may have two dirty primary cache lines for the same - * physical address. We can savely invalidate the line pointed to by - * c0_badvaddr because after return from this exception handler the load / - * store will be re-executed. - */ - .set mips3 -handle_vced: - mfc0 k0, CP0_BADVADDR - li k1, -4 - and k0, k1 - mtc0 zero, CP0_TAGLO - cache Index_Store_Tag_D,(k0) - cache Hit_Writeback_Inv_SD,(k0) -#ifdef CONFIG_PROC_FS - lui k0, %hi(vced_count) - lw k1, %lo(vced_count)(k0) - addiu k1, 1 - sw k1, %lo(vced_count)(k0) -#endif - eret - -handle_vcei: - mfc0 k0, CP0_BADVADDR - cache Hit_Writeback_Inv_SD,(k0) # also cleans pi -#ifdef CONFIG_PROC_FS - lui k0, %hi(vcei_count) - lw k1, %lo(vcei_count)(k0) - addiu k1, 1 - sw k1, %lo(vcei_count)(k0) -#endif - eret - - END(except_vec3_r4000) - .set at - - /* General exception vector. */ - NESTED(except_vec3_generic, 0, sp) - .set noat - .set mips0 - mfc0 k1, CP0_CAUSE - la k0, exception_handlers - andi k1, k1, 0x7c - addu k0, k0, k1 - lw k0, (k0) - nop - jr k0 - nop - END(except_vec3_generic) - .set at - - /* - * Special interrupt vector for embedded MIPS. This is a - * dedicated interrupt vector which reduces interrupt processing - * overhead. The jump instruction will be inserted here at - * initialization time. This handler may only be 8 bytes in size! - */ - NESTED(except_vec4, 0, sp) -1: j 1b /* Dummy, will be replaced */ - nop - END(except_vec4) - - /* - * SBDDP EJTAG debug exception handler. - * The EJTAG debug exception entry point is 0xbfc00480, which - * normally is in the boot PROM, so the boot PROM must do a - * unconditional jump to this vector. - */ - NESTED(except_vec_ejtag_debug, 0, sp) - j ejtag_debug_handler - nop - END(except_vec_ejtag_debug) - - /* - * EJTAG debug exception handler. - */ - NESTED(ejtag_debug_handler, PT_SIZE, sp) - .set noat - .set noreorder - SAVE_ALL - PRINT("SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); - mfc0 k0, $23 # Get EJTAG Debug register. - mfc0 k1, $24 # Get DEPC register. - bgez k0, 1f - addiu k1, k1, 4 # SBDDP inst. in delay slot. 
- addiu k1, k1, 4 -1: mtc0 k1, $24 - RESTORE_ALL - .word 0x4200001f # deret, return EJTAG debug exception. - nop - .set at - END(ejtag_debug_handler) - - -/* - * Kernel entry point - */ -NESTED(kernel_entry, 16, sp) - .set noreorder - /* The following two symbols are used for kernel profiling. */ - EXPORT(stext) - EXPORT(_stext) - - /* - * Stack for kernel and init, current variable - */ - la $28, init_task_union - addiu t0, $28, KERNEL_STACK_SIZE-32 - subu sp, t0, 4*SZREG - - sw t0, kernelsp - /* The firmware/bootloader passes argc/argp/envp - * to us as arguments. But clear bss first because - * the romvec and other important info is stored there - * by prom_init(). - */ - la t0, _edata - sw zero, (t0) - la t1, (_end - 4) + .text + /* + * Reserved space for exception handlers. + * Necessary for machines which link their kernels at KSEG0. + */ + .fill 0x400 + + /* The following two symbols are used for kernel profiling. */ + EXPORT(stext) + EXPORT(_stext) + + __INIT + + /* Cache Error */ + LEAF(except_vec2_generic) + .set noreorder + .set noat + .set mips0 + /* + * This is a very bad place to be. Our cache error + * detection has triggered. If we have write-back data + * in the cache, we may not be able to recover. As a + * first-order desperate measure, turn off KSEG0 cacheing. + */ + mfc0 k0,CP0_CONFIG + li k1,~CONF_CM_CMASK + and k0,k0,k1 + ori k0,k0,CONF_CM_UNCACHED + mtc0 k0,CP0_CONFIG + /* Give it a few cycles to sink in... */ + nop + nop + nop + + j cache_parity_error + nop + END(except_vec2_generic) + + .set at + + /* + * Special interrupt vector for embedded MIPS. This is a + * dedicated interrupt vector which reduces interrupt processing + * overhead. The jump instruction will be inserted here at + * initialization time. This handler may only be 8 bytes in + * size! + */ + NESTED(except_vec4, 0, sp) +1: j 1b /* Dummy, will be replaced */ + nop + END(except_vec4) + + /* + * EJTAG debug exception handler. + * The EJTAG debug exception entry point is 0xbfc00480, which + * normally is in the boot PROM, so the boot PROM must do a + * unconditional jump to this vector. + */ + NESTED(except_vec_ejtag_debug, 0, sp) + j ejtag_debug_handler + nop + END(except_vec_ejtag_debug) + + __FINIT + + /* + * EJTAG debug exception handler. + */ + NESTED(ejtag_debug_handler, PT_SIZE, sp) + .set noat + .set noreorder + mtc0 k0, CP0_DESAVE + mfc0 k0, CP0_DEBUG + + sll k0, k0, 30 # Check for SDBBP. + bgez k0, ejtag_return + + la k0, ejtag_debug_buffer + sw k1, 0(k0) + SAVE_ALL + jal ejtag_exception_handler + move a0, sp + RESTORE_ALL + la k0, ejtag_debug_buffer + lw k1, 0(k0) + +ejtag_return: + mfc0 k0, CP0_DESAVE + .set mips32 + deret + .set mips0 + nop + .set at + END(ejtag_debug_handler) + + __INIT + + /* + * NMI debug exception handler for MIPS reference boards. + * The NMI debug exception entry point is 0xbfc00000, which + * normally is in the boot PROM, so the boot PROM must do a + * unconditional jump to this vector. 
+ */ + NESTED(except_vec_nmi, 0, sp) + j nmi_handler + nop + END(except_vec_nmi) + + __FINIT + + NESTED(nmi_handler, PT_SIZE, sp) + .set noat + .set noreorder + .set mips3 + SAVE_ALL + jal nmi_exception_handler + move a0, sp + RESTORE_ALL + eret + .set at + .set mips0 + END(nmi_handler) + + __INIT + + /* + * Kernel entry point + */ + NESTED(kernel_entry, 16, sp) + .set noreorder + + /* + * Stack for kernel and init, current variable + */ + la $28, init_thread_union + addiu t0, $28, KERNEL_STACK_SIZE-32 + subu sp, t0, 4*SZREG + sw t0, kernelsp + + /* The firmware/bootloader passes argc/argp/envp + * to us as arguments. But clear bss first because + * the romvec and other important info is stored there + * by prom_init(). + */ + la t0, __bss_start + sw zero, (t0) + la t1, __bss_stop - 4 1: - addiu t0, 4 - bne t0, t1, 1b - sw zero, (t0) + addiu t0, 4 + bne t0, t1, 1b + sw zero, (t0) - jal init_arch - nop - END(kernel_entry) + jal init_arch + nop + END(kernel_entry) #ifdef CONFIG_SMP /* - * SMP slave cpus entry point. Board specific code - * for bootstrap calls this function after setting up - * the stack and gp registers. - */ - LEAF(smp_bootstrap) - .set push - .set noreorder - mtc0 zero, CP0_WIRED - CLI - mfc0 t0, CP0_STATUS - li t1, ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_BEV); - and t0, t1 - or t0, (ST0_CU0|ST0_KX|ST0_SX|ST0_FR); - addiu a0, zero, 0 - jal start_secondary - mtc0 t0, CP0_STATUS - .set pop - END(smp_bootstrap) -#endif - -/* - * This buffer is reserved for the use of the cache error handler. + * SMP slave cpus entry point. Board specific code for bootstrap calls this + * function after setting up the stack and gp registers. */ - .data - EXPORT(cache_error_buffer) - .fill 32*4,1,0 - -#ifndef CONFIG_SMP -EXPORT(kernelsp) - PTR 0 -EXPORT(current_pgd) - PTR 0 -#else - /* There's almost certainly a better way to do this with the macros...*/ - .globl kernelsp - .comm kernelsp, NR_CPUS * 8, 8 - .globl current_pgd - .comm current_pgd, NR_CPUS * 8, 8 + LEAF(smp_bootstrap) + .set push + .set noreorder + mtc0 zero, CP0_WIRED + CLI + mfc0 t0, CP0_STATUS + li t1, ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX) + and t0, t1 + or t0, (ST0_CU0); + jal start_secondary + mtc0 t0, CP0_STATUS + .set pop + END(smp_bootstrap) #endif - .text - .org 0x1000 -EXPORT(swapper_pg_dir) - - .org 0x2000 -EXPORT(empty_bad_page) - .org 0x3000 -EXPORT(empty_bad_page_table) + __FINIT - .org 0x4000 -EXPORT(invalid_pte_table) - - .org 0x5000 -/* XXX This label is required to keep GAS trying to be too clever ... - Bug? */ -dummy: -/* - * Align to 8kb boundary for init_task_union which follows in the - * .text segment. - */ - .align 13 + /* + * This buffer is reserved for the use of the EJTAG debug + * handler. + */ + .data + EXPORT(ejtag_debug_buffer) + .fill 4 + + .comm kernelsp, NR_CPUS * 8, 8 + .comm pgd_current, NR_CPUS * 8, 8 + + .macro page name, order=0 + .globl \name +\name: .size \name, (_PAGE_SIZE << \order) + .org . 
+ (_PAGE_SIZE << \order) + .type \name, @object + .endm + + .data + .align PAGE_SHIFT + + page swapper_pg_dir, _PGD_ORDER + page empty_bad_page, 0 + page empty_bad_page_table, 0 + page invalid_pte_table, 0 diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 8b75f358a4cc..da1722afe716 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -11,11 +11,12 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> -#include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/spinlock.h> +#include <linux/sysdev.h> +#include <asm/i8259.h> #include <asm/io.h> void enable_8259A_irq(unsigned int irq); @@ -30,11 +31,12 @@ void disable_8259A_irq(unsigned int irq); * moves to arch independent land */ -spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED; static void end_8259A_irq (unsigned int irq) { - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) && + irq_desc[irq].action) enable_8259A_irq(irq); } @@ -43,7 +45,7 @@ static void end_8259A_irq (unsigned int irq) void mask_and_ack_8259A(unsigned int); static unsigned int startup_8259A_irq(unsigned int irq) -{ +{ enable_8259A_irq(irq); return 0; /* never anything pending */ @@ -69,9 +71,8 @@ static struct hw_interrupt_type i8259A_irq_type = { */ static unsigned int cached_irq_mask = 0xffff; -#define __byte(x,y) (((unsigned char *)&(y))[x]) -#define cached_21 (__byte(0,cached_irq_mask)) -#define cached_A1 (__byte(1,cached_irq_mask)) +#define cached_21 (cached_irq_mask) +#define cached_A1 (cached_irq_mask >> 8) void disable_8259A_irq(unsigned int irq) { @@ -211,7 +212,7 @@ spurious_8259A_irq: printk("spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } - irq_err_count++; + atomic_inc(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is @@ -221,6 +222,32 @@ spurious_8259A_irq: } } +static int i8259A_resume(struct sys_device *dev) +{ + init_8259A(0); + return 0; +} + +static struct sysdev_class i8259_sysdev_class = { + set_kset_name("i8259"), + .resume = i8259A_resume, +}; + +static struct sys_device device_i8259A = { + .id = 0, + .cls = &i8259_sysdev_class, +}; + +static int __init i8259A_init_sysfs(void) +{ + int error = sysdev_class_register(&i8259_sysdev_class); + if (!error) + error = sys_device_register(&device_i8259A); + return error; +} + +device_initcall(i8259A_init_sysfs); + void __init init_8259A(int auto_eoi) { unsigned long flags; @@ -234,7 +261,7 @@ void __init init_8259A(int auto_eoi) * outb_p - this has to work on a wide range of PC hardware. 
*/ outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */ - outb_p(0x20 + 0, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */ + outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */ outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */ if (auto_eoi) outb_p(0x03, 0x21); /* master does Auto EOI */ @@ -242,7 +269,7 @@ void __init init_8259A(int auto_eoi) outb_p(0x01, 0x21); /* master expects normal EOI */ outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */ - outb_p(0x20 + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */ + outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */ outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */ outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode is to be investigated) */ diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c index 0cba234e7181..a3540eaa199d 100644 --- a/arch/mips/kernel/init_task.c +++ b/arch/mips/kernel/init_task.c @@ -1,17 +1,19 @@ #include <linux/mm.h> #include <linux/sched.h> #include <linux/init_task.h> +#include <linux/fs.h> #include <asm/uaccess.h> #include <asm/pgtable.h> static struct fs_struct init_fs = INIT_FS; static struct files_struct init_files = INIT_FILES; -static struct signal_struct init_signals = INIT_SIGNALS; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); /* - * Initial task structure. + * Initial thread structure. * * We need to make sure that this is 8192-byte aligned due to the * way process stacks are handled. This is done by making sure @@ -20,6 +22,13 @@ struct mm_struct init_mm = INIT_MM(init_mm); * * The things we do for performance.. */ -union task_union init_task_union - __attribute__((__section__(".text"))) = - { INIT_TASK(init_task_union.task) }; +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); diff --git a/arch/mips/kernel/ioport.c b/arch/mips/kernel/ioport.c deleted file mode 100644 index cc0581038054..000000000000 --- a/arch/mips/kernel/ioport.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * linux/arch/mips/kernel/ioport.c - */ -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/ioport.h> - -/* - * This changes the io permissions bitmap in the current task. - */ -asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on) -{ - return -ENOSYS; -} - -/* - * sys_iopl has to be used when you want to access the IO ports - * beyond the 0x3ff range: to get the full 65536 ports bitmapped - * you'd need 8kB of bitmaps/process, which is a bit excessive. - * - * Here we just change the eflags value on the stack: we allow - * only the super-user to do it. This depends on the stack-layout - * on system-call entry - see also fork() and the signal handling - * code. 
- */ -asmlinkage int sys_iopl(long ebx,long ecx,long edx, - long esi, long edi, long ebp, long eax, long ds, - long es, long fs, long gs, long orig_eax, - long eip,long cs,long eflags,long esp,long ss) -{ - return -ENOSYS; -} diff --git a/arch/mips/kernel/ipc.c b/arch/mips/kernel/ipc.c index 4db570ec88f7..6c242f1ebddf 100644 --- a/arch/mips/kernel/ipc.c +++ b/arch/mips/kernel/ipc.c @@ -45,7 +45,7 @@ asmlinkage int sys_ipc (uint call, int first, int second, } case MSGSND: - return sys_msgsnd (first, (struct msgbuf *) ptr, + return sys_msgsnd (first, (struct msgbuf *) ptr, second, third); case MSGRCV: switch (version) { @@ -53,9 +53,9 @@ asmlinkage int sys_ipc (uint call, int first, int second, struct ipc_kludge tmp; if (!ptr) return -EINVAL; - + if (copy_from_user(&tmp, - (struct ipc_kludge *) ptr, + (struct ipc_kludge *) ptr, sizeof (tmp))) return -EFAULT; return sys_msgrcv (first, tmp.msgp, second, @@ -85,7 +85,7 @@ asmlinkage int sys_ipc (uint call, int first, int second, return -EINVAL; return sys_shmat (first, (char *) ptr, second, (ulong *) third); } - case SHMDT: + case SHMDT: return sys_shmdt ((char *)ptr); case SHMGET: return sys_shmget (first, second, third); @@ -93,6 +93,6 @@ asmlinkage int sys_ipc (uint call, int first, int second, return sys_shmctl (first, second, (struct shmid_ds *) ptr); default: - return -EINVAL; + return -ENOSYS; } } diff --git a/arch/mips/kernel/irix5sys.h b/arch/mips/kernel/irix5sys.h index 2d344c3010e0..b0aa94206c93 100644 --- a/arch/mips/kernel/irix5sys.h +++ b/arch/mips/kernel/irix5sys.h @@ -1,5 +1,4 @@ -/* $Id: irix5sys.h,v 1.2 1998/08/17 10:16:25 ralf Exp $ - * +/* * irix5sys.h: 32-bit IRIX5 ABI system call table. * * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c index 93d4f7e177fa..e6908ad74566 100644 --- a/arch/mips/kernel/irixelf.c +++ b/arch/mips/kernel/irixelf.c @@ -45,7 +45,6 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); static int load_irix_library(struct file *); static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file); -extern int dump_fpu (elf_fpregset_t *); static struct linux_binfmt irix_format = { NULL, THIS_MODULE, load_irix_binary, load_irix_library, @@ -127,7 +126,7 @@ static void set_brk(unsigned long start, unsigned long end) { start = PAGE_ALIGN(start); end = PAGE_ALIGN(end); - if (end <= start) + if (end <= start) return; do_brk(start, end - start); } @@ -157,7 +156,7 @@ unsigned long * create_irix_tables(char * p, int argc, int envc, elf_addr_t *argv; elf_addr_t *envp; elf_addr_t *sp, *csp; - + #ifdef DEBUG_ELF printk("create_irix_tables: p[%p] argc[%d] envc[%d] " "load_addr[%08x] interp_load_addr[%08x]\n", @@ -246,7 +245,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, elf_bss = 0; last_bss = 0; error = load_addr = 0; - + #ifdef DEBUG_ELF print_elfhdr(interp_elf_ex); #endif @@ -267,7 +266,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, return 0xffffffff; } - elf_phdata = (struct elf_phdr *) + elf_phdata = (struct elf_phdr *) kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, GFP_KERNEL); @@ -393,7 +392,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm) return -ENOEXEC; /* First of all, some simple consistency checks */ - if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || + if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || !irix_elf_check_arch(ehp) || !bprm->file->f_op->mmap) { return -ENOEXEC; } @@ 
-557,7 +556,7 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp, } /* - * IRIX maps a page at 0x200000 that holds information about the + * IRIX maps a page at 0x200000 that holds information about the * process and the system, here we map the page and fill the * structure */ @@ -567,20 +566,20 @@ void irix_map_prda_page (void) struct prda *pp; v = do_brk (PRDA_ADDRESS, PAGE_SIZE); - + if (v < 0) return; pp = (struct prda *) v; pp->prda_sys.t_pid = current->pid; - pp->prda_sys.t_prid = read_32bit_cp0_register (CP0_PRID); + pp->prda_sys.t_prid = read_c0_prid(); pp->prda_sys.t_rpid = current->pid; /* We leave the rest set to zero */ } - - + + /* These are the functions used to load ELF style executables and shared * libraries. There is no binary dependent code anywhere else. */ @@ -595,7 +594,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) int retval, has_interp, has_ephdr, size, i; char *elf_interpreter; mm_segment_t old_fs; - + load_addr = 0; has_interp = has_ephdr = 0; elf_ihdr = elf_ephdr = 0; @@ -684,7 +683,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; elf_entry = (unsigned int) elf_ex.e_entry; - + /* Do this so that we can load the interpreter, if need be. We will * change some of these later. */ @@ -723,7 +722,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) set_binfmt(&irix_format); compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; - bprm->p = (unsigned long) + bprm->p = (unsigned long) create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc, (elf_interpreter ? &elf_ex : NULL), load_addr, interp_load_addr, regs, elf_ephdr); @@ -811,30 +810,30 @@ static int load_irix_library(struct file *file) if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || !irix_elf_check_arch(&elf_ex) || !file->f_op->mmap) return -ENOEXEC; - + /* Now read in all of the header information. */ if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) return -ENOEXEC; - - elf_phdata = (struct elf_phdr *) + + elf_phdata = (struct elf_phdr *) kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); if (elf_phdata == NULL) return -ENOMEM; - + retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, sizeof(struct elf_phdr) * elf_ex.e_phnum); - + j = 0; for(i=0; i<elf_ex.e_phnum; i++) if((elf_phdata + i)->p_type == PT_LOAD) j++; - + if(j != 1) { kfree(elf_phdata); return -ENOEXEC; } - + while(elf_phdata->p_type != PT_LOAD) elf_phdata++; - + /* Now use mmap to map the library into memory. */ down_write(¤t->mm->mmap_sem); error = do_mmap(file, @@ -862,7 +861,7 @@ static int load_irix_library(struct file *file) kfree(elf_phdata); return 0; } - + /* Called through irix_syssgi() to map an elf image given an FD, * a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many * phdrs there are in the USER_PHDRP array. 
We return the vaddr the @@ -1025,7 +1024,7 @@ static int writenote(struct memelfnote *men, struct file *file) DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */ DUMP_WRITE(men->data, men->datasz); DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */ - + return 1; end_coredump: @@ -1071,13 +1070,13 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) if (maydump(vma)) { int sz = vma->vm_end-vma->vm_start; - + if (size+sz >= limit) break; else size += sz; } - + segs++; } #ifdef DEBUG @@ -1104,7 +1103,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) elf.e_shentsize = 0; elf.e_shnum = 0; elf.e_shstrndx = 0; - + fs = get_fs(); set_fs(KERNEL_DS); @@ -1129,24 +1128,24 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) prstatus.pr_sigpend = current->pending.signal.sig[0]; prstatus.pr_sighold = current->blocked.sig[0]; psinfo.pr_pid = prstatus.pr_pid = current->pid; - psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid; + psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid; psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp; psinfo.pr_sid = prstatus.pr_sid = current->session; - prstatus.pr_utime.tv_sec = CT_TO_SECS(current->times.tms_utime); - prstatus.pr_utime.tv_usec = CT_TO_USECS(current->times.tms_utime); - prstatus.pr_stime.tv_sec = CT_TO_SECS(current->times.tms_stime); - prstatus.pr_stime.tv_usec = CT_TO_USECS(current->times.tms_stime); - prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->times.tms_cutime); - prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->times.tms_cutime); - prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->times.tms_cstime); - prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->times.tms_cstime); + prstatus.pr_utime.tv_sec = CT_TO_SECS(current->utime); + prstatus.pr_utime.tv_usec = CT_TO_USECS(current->utime); + prstatus.pr_stime.tv_sec = CT_TO_SECS(current->stime); + prstatus.pr_stime.tv_usec = CT_TO_USECS(current->stime); + prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->cutime); + prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->cutime); + prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->cstime); + prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->cstime); if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) { printk("sizeof(elf_gregset_t) (%d) != sizeof(struct pt_regs) " "(%d)\n", sizeof(elf_gregset_t), sizeof(struct pt_regs)); } else { *(struct pt_regs *)&prstatus.pr_reg = *regs; } - + notes[1].name = "CORE"; notes[1].type = NT_PRPSINFO; notes[1].datasz = sizeof(psinfo); @@ -1155,7 +1154,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; - psinfo.pr_nice = current->nice; + psinfo.pr_nice = task_nice(current); psinfo.pr_flag = current->flags; psinfo.pr_uid = current->uid; psinfo.pr_gid = current->gid; @@ -1163,7 +1162,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) int i, len; set_fs(fs); - + len = current->mm->arg_end - current->mm->arg_start; len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len; copy_from_user(&psinfo.pr_psargs, @@ -1183,7 +1182,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) notes[2].data = current; /* Try to dump the FPU. 
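The `roundup(..., 4)` seeks in `writenote()` above implement the ELF note format: each note record is a fixed header followed by its name and its payload, both padded to 4-byte boundaries. A sketch of the matching size computation, assumed to be what the `notesize()` helper used later in this function does (compare `fs/binfmt_elf.c` of the same period):

```c
#include <linux/elf.h>
#include <linux/string.h>

/* Sketch: bytes one core-dump note occupies, padding included. */
static int sketch_notesize(struct memelfnote *en)
{
	int sz = sizeof(struct elf_note);	/* namesz, descsz, type */

	sz += roundup(strlen(en->name) + 1, 4);	/* name + NUL, padded */
	sz += roundup(en->datasz, 4);		/* payload, padded */
	return sz;
}
```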
*/ - prstatus.pr_fpvalid = dump_fpu (&fpu); + prstatus.pr_fpvalid = dump_fpu (regs, &fpu); if (!prstatus.pr_fpvalid) { numnote--; } else { @@ -1200,7 +1199,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) for(i = 0; i < numnote; i++) sz += notesize(¬es[i]); - + phdr.p_type = PT_NOTE; phdr.p_offset = offset; phdr.p_vaddr = 0; @@ -1216,7 +1215,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) /* Page-align dumped data. */ dataoff = offset = roundup(offset, PAGE_SIZE); - + /* Write program headers for segments dump. */ for(vma = current->mm->mmap, i = 0; i < segs && vma != NULL; vma = vma->vm_next) { @@ -1226,7 +1225,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) i++; sz = vma->vm_end - vma->vm_start; - + phdr.p_type = PT_LOAD; phdr.p_offset = offset; phdr.p_vaddr = vma->vm_start; @@ -1245,17 +1244,17 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) for(i = 0; i < numnote; i++) if (!writenote(¬es[i], file)) goto end_coredump; - + set_fs(fs); DUMP_SEEK(dataoff); - + for(i = 0, vma = current->mm->mmap; i < segs && vma != NULL; vma = vma->vm_next) { unsigned long addr = vma->vm_start; unsigned long len = vma->vm_end - vma->vm_start; - + if (!maydump(vma)) continue; i++; diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c index 9028a27be9c2..eea2582451e4 100644 --- a/arch/mips/kernel/irixinv.c +++ b/arch/mips/kernel/irixinv.c @@ -1,11 +1,9 @@ /* * Support the inventory interface for IRIX binaries * This is invoked before the mm layer is working, so we do not - * use the linked lists for the inventory yet. + * use the linked lists for the inventory yet. * * Miguel de Icaza, 1997. - * - * $Id: irixinv.c,v 1.3 1998/04/05 11:23:51 ralf Exp $ */ #include <linux/mm.h> #include <linux/init.h> @@ -18,14 +16,13 @@ int inventory_items = 0; static inventory_t inventory [MAX_INVENTORY]; -void -add_to_inventory (int class, int type, int controller, int unit, int state) +void add_to_inventory (int class, int type, int controller, int unit, int state) { inventory_t *ni = &inventory [inventory_items]; if (inventory_items == MAX_INVENTORY) return; - + ni->inv_class = class; ni->inv_type = type; ni->inv_controller = controller; @@ -35,8 +32,7 @@ add_to_inventory (int class, int type, int controller, int unit, int state) inventory_items++; } -int -dump_inventory_to_user (void *userbuf, int size) +int dump_inventory_to_user (void *userbuf, int size) { inventory_t *inv = &inventory [0]; inventory_t *user = userbuf; @@ -53,13 +49,13 @@ dump_inventory_to_user (void *userbuf, int size) return inventory_items * sizeof (inventory_t); } -void __init init_inventory (void) +static int __init init_inventory(void) { - /* gross hack while we put the right bits all over the kernel + /* + * gross hack while we put the right bits all over the kernel * most likely this will not let just anyone run the X server * until we put the right values all over the place */ - add_to_inventory (10, 3, 0, 0, 16400); add_to_inventory (1, 1, 150, -1, 12); add_to_inventory (1, 3, 0, 0, 8976); @@ -78,4 +74,8 @@ void __init init_inventory (void) add_to_inventory (2, 2, 0, 2, 0); add_to_inventory (2, 2, 0, 1, 0); add_to_inventory (7, 14, 0, 0, 6); + + return 0; } + +module_init(init_inventory); diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c index 1ad9ad3399c8..f458c392d8fe 100644 --- a/arch/mips/kernel/irixioctl.c +++ b/arch/mips/kernel/irixioctl.c @@ -10,6 +10,7 @@ 
#include <linux/mm.h> #include <linux/smp.h> #include <linux/smp_lock.h> +#include <linux/sockios.h> #include <linux/tty.h> #include <linux/file.h> diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c index 7d68e0e659fc..e6713367ce67 100644 --- a/arch/mips/kernel/irixsig.c +++ b/arch/mips/kernel/irixsig.c @@ -17,7 +17,7 @@ #include <asm/ptrace.h> #include <asm/uaccess.h> -extern asmlinkage void syscall_trace(void); +extern asmlinkage void do_syscall_trace(void); #undef DEBUG_SIG @@ -117,7 +117,8 @@ static void setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs, regs->regs[5] = 0; /* XXX sigcode XXX */ regs->regs[6] = regs->regs[29] = sp; regs->regs[7] = (unsigned long) ka->sa.sa_handler; - regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa.sa_restorer; + regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer; + return; segv_and_exit: @@ -134,27 +135,11 @@ setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, do_exit(SIGSEGV); } -static inline void handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +static inline void handle_signal(unsigned long sig, siginfo_t *info, + sigset_t *oldset, struct pt_regs * regs) { - if (ka->sa.sa_flags & SA_SIGINFO) - setup_irix_rt_frame(ka, regs, sig, oldset, info); - else - setup_irix_frame(ka, regs, sig, oldset); + struct k_sigaction *ka = ¤t->sighand->action[sig-1]; - if (ka->sa.sa_flags & SA_ONESHOT) - ka->sa.sa_handler = SIG_DFL; - if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sigmask_lock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); - sigaddset(¤t->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sigmask_lock); - } -} - -static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) -{ switch(regs->regs[0]) { case ERESTARTNOHAND: regs->regs[2] = EINTR; @@ -170,111 +155,34 @@ static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) } regs->regs[0] = 0; /* Don't deal with this again. */ + + if (ka->sa.sa_flags & SA_SIGINFO) + setup_irix_rt_frame(ka, regs, sig, oldset, info); + else + setup_irix_frame(ka, regs, sig, oldset); + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + if (!(ka->sa.sa_flags & SA_NODEFER)) { + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigaddset(¤t->blocked,sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + } } asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs) { - struct k_sigaction *ka; siginfo_t info; + int signr; if (!oldset) oldset = ¤t->blocked; - for (;;) { - unsigned long signr; - - spin_lock_irq(¤t->sigmask_lock); - signr = dequeue_signal(current, ¤t->blocked, &info); - spin_unlock_irq(¤t->sigmask_lock); - - if (!signr) - break; - - if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { - /* Let the debugger run. */ - current->exit_code = signr; - current->state = TASK_STOPPED; - notify_parent(current, SIGCHLD); - schedule(); - - /* We're back. Did the debugger cancel the sig? */ - if (!(signr = current->exit_code)) - continue; - current->exit_code = 0; - - /* The debugger continued. Ignore SIGSTOP. */ - if (signr == SIGSTOP) - continue; - - /* Update the siginfo structure. Is this good? 
*/ - if (signr != info.si_signo) { - info.si_signo = signr; - info.si_errno = 0; - info.si_code = SI_USER; - info.si_pid = current->p_pptr->pid; - info.si_uid = current->p_pptr->uid; - } - - /* If the (new) signal is now blocked, requeue it. */ - if (sigismember(¤t->blocked, signr)) { - send_sig_info(signr, &info, current); - continue; - } - } - - ka = ¤t->sig->action[signr-1]; - if (ka->sa.sa_handler == SIG_IGN) { - if (signr != SIGCHLD) - continue; - /* Check for SIGCHLD: it's special. */ - while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) - /* nothing */; - continue; - } - - if (ka->sa.sa_handler == SIG_DFL) { - int exit_code = signr; - - /* Init gets no signals it doesn't want. */ - if (current->pid == 1) - continue; - - switch (signr) { - case SIGCONT: case SIGCHLD: case SIGWINCH: - continue; - - case SIGTSTP: case SIGTTIN: case SIGTTOU: - if (is_orphaned_pgrp(current->pgrp)) - continue; - /* FALLTHRU */ - - case SIGSTOP: - current->state = TASK_STOPPED; - current->exit_code = signr; - if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) - notify_parent(current, SIGCHLD); - schedule(); - continue; - - case SIGQUIT: case SIGILL: case SIGTRAP: - case SIGABRT: case SIGFPE: case SIGSEGV: - if (do_coredump(signr, regs)) - exit_code |= 0x80; - /* FALLTHRU */ - - default: - sigaddset(¤t->pending.signal, signr); - recalc_sigpending(); - current->flags |= PF_SIGNALED; - do_exit(exit_code); - /* NOTREACHED */ - } - } - - if (regs->regs[0]) - syscall_restart(regs, ka); - /* Whee! Actually deliver the signal. */ - handle_signal(signr, ka, &info, oldset, regs); + signr = get_signal_to_deliver(&info, regs, NULL); + if (signr > 0) { + handle_signal(signr, &info, oldset, regs); return 1; } @@ -343,19 +251,19 @@ irix_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(¤t->sigmask_lock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = blocked; recalc_sigpending(); - spin_unlock_irq(¤t->sigmask_lock); + spin_unlock_irq(¤t->sighand->siglock); /* * Don't let your children do this ... */ - if (current->ptrace & PT_TRACESYS) - syscall_trace(); + if (current_thread_info()->flags & TIF_SYSCALL_TRACE) + do_syscall_trace(); __asm__ __volatile__( "move\t$29,%0\n\t" - "j\tret_from_sys_call" + "j\tsyscall_exit" :/* no outputs */ :"r" (®s)); /* Unreached */ @@ -380,7 +288,7 @@ static inline void dump_sigact_irix5(struct sigact_irix5 *p) } #endif -asmlinkage int +asmlinkage int irix_sigaction(int sig, const struct sigaction *act, struct sigaction *oact, void *trampoline) { @@ -408,7 +316,7 @@ irix_sigaction(int sig, const struct sigaction *act, * value for all invocations of sigaction. Will have to * investigate. POSIX POSIX, die die die... 
*/ - new_ka.sa.sa_restorer = trampoline; + new_ka.sa_restorer = trampoline; } /* XXX Implement SIG_SETMASK32 for IRIX compatibility */ @@ -443,7 +351,7 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old) __copy_from_user(&newbits, new, sizeof(unsigned long)*4); sigdelsetmask(&newbits, ~_BLOCKABLE); - spin_lock_irq(¤t->sigmask_lock); + spin_lock_irq(¤t->sighand->siglock); oldbits = current->blocked; switch(how) { @@ -466,7 +374,7 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old) return -EINVAL; } recalc_sigpending(); - spin_unlock_irq(¤t->sigmask_lock); + spin_unlock_irq(¤t->sighand->siglock); } if(old) { error = verify_area(VERIFY_WRITE, old, sizeof(*old)); @@ -487,11 +395,11 @@ asmlinkage int irix_sigsuspend(struct pt_regs *regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sigmask_lock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sigmask_lock); + spin_unlock_irq(¤t->sighand->siglock); regs->regs[2] = -EINTR; while (1) { @@ -640,7 +548,9 @@ asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info, { int flag, retval; DECLARE_WAITQUEUE(wait, current); + struct task_struct *tsk; struct task_struct *p; + struct list_head *_p; if (!info) { retval = -EINVAL; @@ -667,7 +577,9 @@ repeat: flag = 0; current->state = TASK_INTERRUPTIBLE; read_lock(&tasklist_lock); - for (p = current->p_cptr; p; p = p->p_osptr) { + tsk = current; + list_for_each(_p,&tsk->children) { + p = list_entry(_p,struct task_struct,sibling); if ((type == P_PID) && p->pid != pid) continue; if ((type == P_PGID) && p->pgrp != pid) @@ -676,50 +588,63 @@ repeat: continue; flag = 1; switch (p->state) { - case TASK_STOPPED: - if (!p->exit_code) - continue; - if (!(options & (W_TRAPPED|W_STOPPED)) && - !(p->ptrace & PT_PTRACED)) - continue; - if (ru != NULL) - getrusage(p, RUSAGE_BOTH, ru); - __put_user(SIGCHLD, &info->sig); - __put_user(0, &info->code); - __put_user(p->pid, &info->stuff.procinfo.pid); - __put_user((p->exit_code >> 8) & 0xff, + case TASK_STOPPED: + if (!p->exit_code) + continue; + if (!(options & (W_TRAPPED|W_STOPPED)) && + !(p->ptrace & PT_PTRACED)) + continue; + read_unlock(&tasklist_lock); + + /* move to end of parent's list to avoid starvation */ + write_lock_irq(&tasklist_lock); + remove_parent(p); + add_parent(p, p->parent); + write_unlock_irq(&tasklist_lock); + retval = ru ? 
getrusage(p, RUSAGE_BOTH, ru) : 0; + if (!retval && ru) { + retval |= __put_user(SIGCHLD, &info->sig); + retval |= __put_user(0, &info->code); + retval |= __put_user(p->pid, &info->stuff.procinfo.pid); + retval |= __put_user((p->exit_code >> 8) & 0xff, &info->stuff.procinfo.procdata.child.status); - __put_user(p->times.tms_utime, &info->stuff.procinfo.procdata.child.utime); - __put_user(p->times.tms_stime, &info->stuff.procinfo.procdata.child.stime); + retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime); + retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime); + } + if (!retval) { p->exit_code = 0; - retval = 0; - goto end_waitsys; - case TASK_ZOMBIE: - current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime; - current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime; - if (ru != NULL) - getrusage(p, RUSAGE_BOTH, ru); - __put_user(SIGCHLD, &info->sig); - __put_user(1, &info->code); /* CLD_EXITED */ - __put_user(p->pid, &info->stuff.procinfo.pid); - __put_user((p->exit_code >> 8) & 0xff, - &info->stuff.procinfo.procdata.child.status); - __put_user(p->times.tms_utime, - &info->stuff.procinfo.procdata.child.utime); - __put_user(p->times.tms_stime, - &info->stuff.procinfo.procdata.child.stime); - retval = 0; - if (p->p_opptr != p->p_pptr) { - REMOVE_LINKS(p); - p->p_pptr = p->p_opptr; - SET_LINKS(p); - notify_parent(p, SIGCHLD); - } else - release_task(p); - goto end_waitsys; - default: - continue; + } + goto end_waitsys; + + case TASK_ZOMBIE: + current->cutime += p->utime + p->cutime; + current->cstime += p->stime + p->cstime; + if (ru != NULL) + getrusage(p, RUSAGE_BOTH, ru); + __put_user(SIGCHLD, &info->sig); + __put_user(1, &info->code); /* CLD_EXITED */ + __put_user(p->pid, &info->stuff.procinfo.pid); + __put_user((p->exit_code >> 8) & 0xff, + &info->stuff.procinfo.procdata.child.status); + __put_user(p->utime, + &info->stuff.procinfo.procdata.child.utime); + __put_user(p->stime, + &info->stuff.procinfo.procdata.child.stime); + retval = 0; + if (p->real_parent != p->parent) { + write_lock_irq(&tasklist_lock); + remove_parent(p); + p->parent = p->real_parent; + add_parent(p, p->parent); + do_notify_parent(p, SIGCHLD); + write_unlock_irq(&tasklist_lock); + } else + release_task(p); + goto end_waitsys; + default: + continue; } + tsk = next_thread(tsk); } read_unlock(&tasklist_lock); if (flag) { diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index a40469ff4790..3cd759d24bf0 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -8,18 +8,24 @@ * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994 - 2000 Ralf Baechle */ +#include <linux/config.h> #include <linux/kernel.h> -#include <linux/irq.h> +#include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <linux/module.h> +#include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/kallsyms.h> +#include <asm/atomic.h> #include <asm/system.h> +#include <asm/uaccess.h> /* * Controller mappings for all interrupt sources: @@ -31,11 +37,14 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { } }; +static void register_irq_proc (unsigned int irq); + /* * Special irq handlers. 
*/ -void no_action(int cpl, void *dev_id, struct pt_regs *regs) { } +irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) +{ return IRQ_NONE; } /* * Generic no controller code @@ -68,7 +77,7 @@ struct hw_interrupt_type no_irq_type = { end_none }; -volatile unsigned long irq_err_count, spurious_count; +atomic_t irq_err_count; /* * Generic, controller-independent functions: @@ -76,35 +85,53 @@ volatile unsigned long irq_err_count, spurious_count; int show_interrupts(struct seq_file *p, void *v) { + int i, j; struct irqaction * action; unsigned long flags; - int i; - - seq_puts(p, " "); - for (i=0; i < 1 /*smp_num_cpus*/; i++) - seq_printf(p, "CPU%d ", i); + + seq_printf(p, " "); + for (j=0; j<NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); for (i = 0 ; i < NR_IRQS ; i++) { spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) - goto unlock; + goto skip; seq_printf(p, "%3d: ",i); +#ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); +#else + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); +#endif seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) seq_printf(p, ", %s", action->name); + seq_putc(p, '\n'); -unlock: +skip: spin_unlock_irqrestore(&irq_desc[i].lock, flags); } - seq_printf(p, "ERR: %10lu\n", irq_err_count); + seq_putc(p, '\n'); + seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); + return 0; } +#ifdef CONFIG_SMP +inline void synchronize_irq(unsigned int irq) +{ + while (irq_desc[irq].status & IRQ_INPROGRESS) + cpu_relax(); +} +#endif + /* * This should really return information about whether * we should do bottom half handling etc. 
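The `synchronize_irq()` added above spins until `IRQ_INPROGRESS` clears, with `cpu_relax()` in the loop. What it protects against is a teardown race; the interleaving below is hypothetical but representative:

```c
/*
 * Without synchronize_irq(), dev_id can be freed while a handler is
 * still running on another CPU:
 *
 *   CPU0                          CPU1
 *   ----                          ----
 *                                 do_IRQ() -> handler(irq, dev, regs)
 *   free_irq(irq, dev);
 *   kfree(dev);                   ...handler still dereferences dev
 *
 * Spinning until IRQ_INPROGRESS drops closes that window.  It is also
 * why free_irq() may no longer be called from interrupt context: the
 * handler of the IRQ being freed would wait on itself forever.
 */
```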
Right now we @@ -114,37 +141,112 @@ unlock: */ int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action) { - int status; - int cpu = smp_processor_id(); - - irq_enter(cpu, irq); - - status = 1; /* Force the "do bottom halves" bit */ + int status = 1; /* Force the "do bottom halves" bit */ + int retval = 0; if (!(action->flags & SA_INTERRUPT)) local_irq_enable(); do { status |= action->flags; - action->handler(irq, action->dev_id, regs); + retval |= action->handler(irq, action->dev_id, regs); action = action->next; } while (action); if (status & SA_SAMPLE_RANDOM) add_interrupt_randomness(irq); local_irq_disable(); - irq_exit(cpu, irq); + return retval; +} + +static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret) +{ + struct irqaction *action; + + if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { + printk(KERN_ERR "irq event %d: bogus return value %x\n", + irq, action_ret); + } else { + printk(KERN_ERR "irq %d: nobody cared!\n", irq); + } + dump_stack(); + printk(KERN_ERR "handlers:\n"); + action = desc->action; + do { + printk(KERN_ERR "[<%p>]", action->handler); + print_symbol(" (%s)", + (unsigned long)action->handler); + printk("\n"); + action = action->next; + } while (action); +} + +static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret) +{ + static int count = 100; + + if (count) { + count--; + __report_bad_irq(irq, desc, action_ret); + } +} + +static int noirqdebug; + +static int __init noirqdebug_setup(char *str) +{ + noirqdebug = 1; + printk("IRQ lockup detection disabled\n"); + return 1; +} + +__setup("noirqdebug", noirqdebug_setup); + +/* + * If 99,900 of the previous 100,000 interrupts have not been handled then + * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to + * turn the IRQ off. + * + * (The other 100-of-100,000 interrupts may have been a correctly-functioning + * device sharing an IRQ with the failing one) + * + * Called under desc->lock + */ +static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret) +{ + if (action_ret != IRQ_HANDLED) { + desc->irqs_unhandled++; + if (action_ret != IRQ_NONE) + report_bad_irq(irq, desc, action_ret); + } + + desc->irq_count++; + if (desc->irq_count < 100000) + return; - return status; + desc->irq_count = 0; + if (desc->irqs_unhandled > 99900) { + /* + * The interrupt is stuck + */ + __report_bad_irq(irq, desc, action_ret); + /* + * Now kill the IRQ + */ + printk(KERN_EMERG "Disabling IRQ #%d\n", irq); + desc->status |= IRQ_DISABLED; + desc->handler->disable(irq); + } + desc->irqs_unhandled = 0; } /* * Generic enable/disable code: this just calls * down into the PIC-specific version for the actual * hardware disable after having gotten the irq - * controller lock. + * controller lock. */ - + /** * disable_irq_nosync - disable an irq without waiting * @irq: Interrupt to disable @@ -155,7 +257,7 @@ int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * * * This function may be called from IRQ context. */ - + void inline disable_irq_nosync(unsigned int irq) { irq_desc_t *desc = irq_desc + irq; @@ -181,16 +283,11 @@ void inline disable_irq_nosync(unsigned int irq) * * This function may be called - with care - from IRQ context. 
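The `note_interrupt()` heuristic above only works if handlers report honestly through the new `irqreturn_t` convention that `handle_IRQ_event()` now accumulates. A hypothetical shared-line handler (the `mydev` names and register layout are invented for illustration):

```c
#include <linux/interrupt.h>
#include <asm/io.h>

#define MYDEV_STATUS		0x00	/* hypothetical register offset */
#define MYDEV_IRQ_PENDING	0x01	/* hypothetical "we interrupted" bit */

struct mydev {
	unsigned long regs;		/* hypothetical MMIO base */
};

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;

	/* On a shared line, first check whether it was really us. */
	if (!(readl(dev->regs + MYDEV_STATUS) & MYDEV_IRQ_PENDING))
		return IRQ_NONE;

	/* ...acknowledge and service the device here... */
	return IRQ_HANDLED;
}
```

Such a handler would be registered with the updated prototype, e.g. `request_irq(irq, mydev_interrupt, SA_SHIRQ, "mydev", dev)`. A handler that unconditionally returns `IRQ_HANDLED` defeats both the bogus-return check and the stuck-IRQ shutdown.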
*/ - + void disable_irq(unsigned int irq) { disable_irq_nosync(irq); - - if (!local_irq_count(smp_processor_id())) { - do { - barrier(); - } while (irq_desc[irq].status & IRQ_INPROGRESS); - } + synchronize_irq(irq); } /** @@ -202,7 +299,7 @@ void disable_irq(unsigned int irq) * * This function may be called from IRQ context. */ - + void enable_irq(unsigned int irq) { irq_desc_t *desc = irq_desc + irq; @@ -237,7 +334,7 @@ void enable_irq(unsigned int irq) */ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs) { - /* + /* * We ack quickly, we don't want the irq controller * thinking we're snobs just because some other CPU has * disabled global interrupts (we have already done the @@ -252,6 +349,7 @@ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs) struct irqaction * action; unsigned int status; + irq_enter(); kstat_cpu(cpu).irqs[irq]++; spin_lock(&desc->lock); desc->handler->ack(irq); @@ -267,7 +365,7 @@ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs) * use the action we have. */ action = NULL; - if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) { action = desc->action; status &= ~IRQ_PENDING; /* we commit to handling */ status |= IRQ_INPROGRESS; /* we are handling it */ @@ -280,7 +378,7 @@ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs) a different instance of this same irq, the other processor will take care of it. */ - if (!action) + if (unlikely(!action)) goto out; /* @@ -294,15 +392,19 @@ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs) * SMP environment. */ for (;;) { + irqreturn_t action_ret; + spin_unlock(&desc->lock); - handle_IRQ_event(irq, regs, action); + action_ret = handle_IRQ_event(irq, ®s, action); spin_lock(&desc->lock); - - if (!(desc->status & IRQ_PENDING)) + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + if (likely(!(desc->status & IRQ_PENDING))) break; desc->status &= ~IRQ_PENDING; } desc->status &= ~IRQ_INPROGRESS; + out: /* * The ->end() handler has to deal with interrupts which got @@ -311,8 +413,8 @@ out: desc->handler->end(irq); spin_unlock(&desc->lock); - if (softirq_pending(cpu)) - do_softirq(); + irq_exit(); + return 1; } @@ -327,7 +429,7 @@ out: * This call allocates interrupt resources and enables the * interrupt line and IRQ handling. From the point this * call is made your handler function may be invoked. Since - * your handler function must clear any interrupt the board + * your handler function must clear any interrupt the board * raises, you must take care both to initialise your hardware * and to set up the interrupt handler in the right order. * @@ -347,10 +449,10 @@ out: * SA_SAMPLE_RANDOM The interrupt can be used for entropy * */ - -int request_irq(unsigned int irq, - void (*handler)(int, void *, struct pt_regs *), - unsigned long irqflags, + +int request_irq(unsigned int irq, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char * devname, void *dev_id) { @@ -376,7 +478,7 @@ int request_irq(unsigned int irq, return -EINVAL; action = (struct irqaction *) - kmalloc(sizeof(struct irqaction), GFP_KERNEL); + kmalloc(sizeof(struct irqaction), GFP_ATOMIC); if (!action) return -ENOMEM; @@ -405,12 +507,9 @@ int request_irq(unsigned int irq, * does not return until any executing interrupts for this IRQ * have completed. * - * This function may be called from interrupt context. - * - * Bugs: Attempting to free an irq in a handler for the same irq hangs - * the machine. 
+ * This function must not be called from interrupt context. */ - + void free_irq(unsigned int irq, void *dev_id) { irq_desc_t *desc; @@ -439,11 +538,8 @@ void free_irq(unsigned int irq, void *dev_id) } spin_unlock_irqrestore(&desc->lock,flags); -#ifdef CONFIG_SMP /* Wait to make sure it's not being used on another CPU */ - while (desc->status & IRQ_INPROGRESS) - barrier(); -#endif + synchronize_irq(irq); kfree(action); return; } @@ -471,7 +567,7 @@ static DECLARE_MUTEX(probe_sem); * and a mask of potential interrupt lines is returned. * */ - + unsigned long probe_irq_on(void) { unsigned int i; @@ -480,22 +576,22 @@ unsigned long probe_irq_on(void) unsigned long delay; down(&probe_sem); - /* + /* * something may have generated an irq long ago and we want to - * flush such a longstanding irq before considering it as spurious. + * flush such a longstanding irq before considering it as spurious. */ for (i = NR_IRQS-1; i > 0; i--) { desc = irq_desc + i; spin_lock_irq(&desc->lock); - if (!irq_desc[i].action) + if (!irq_desc[i].action) irq_desc[i].handler->startup(i); spin_unlock_irq(&desc->lock); } /* Wait for longstanding interrupts to trigger. */ for (delay = jiffies + HZ/50; time_after(delay, jiffies); ) - /* about 20ms delay */ synchronize_irq(); + /* about 20ms delay */ barrier(); /* * enable any unassigned irqs @@ -518,7 +614,7 @@ unsigned long probe_irq_on(void) * Wait for spurious interrupts to trigger */ for (delay = jiffies + HZ/10; time_after(delay, jiffies); ) - /* about 100ms delay */ synchronize_irq(); + /* about 100ms delay */ barrier(); /* * Now filter out any obviously spurious interrupts @@ -550,7 +646,7 @@ unsigned long probe_irq_on(void) * Return a mask of triggered interrupts (this * can handle only legacy ISA interrupts). */ - + /** * probe_irq_mask - scan a bitmap of interrupt lines * @val: mask of interrupts to consider @@ -612,7 +708,7 @@ unsigned int probe_irq_mask(unsigned long val) * nothing prevents two IRQ probe callers from overlapping. The * results of this are non-optimal. */ - + int probe_irq_off(unsigned long val) { int i, irq_found, nr_irqs; @@ -693,12 +789,12 @@ int setup_irq(unsigned int irq, struct irqaction * new) if (!shared) { desc->depth = 0; - desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING); + desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS); desc->handler->startup(irq); } spin_unlock_irqrestore(&desc->lock,flags); - /* register_irq_proc(irq); */ + register_irq_proc(irq); return 0; } @@ -713,3 +809,176 @@ void __init init_generic_irq(void) irq_desc[i].handler = &no_irq_type; } } + +EXPORT_SYMBOL(disable_irq_nosync); +EXPORT_SYMBOL(disable_irq); +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(probe_irq_mask); + +static struct proc_dir_entry * root_irq_dir; +static struct proc_dir_entry * irq_dir [NR_IRQS]; + +#define HEX_DIGITS 8 + +static unsigned int parse_hex_value (const char *buffer, + unsigned long count, unsigned long *ret) +{ + unsigned char hexnum [HEX_DIGITS]; + unsigned long value; + int i; + + if (!count) + return -EINVAL; + if (count > HEX_DIGITS) + count = HEX_DIGITS; + if (copy_from_user(hexnum, buffer, count)) + return -EFAULT; + + /* + * Parse the first 8 characters as a hex string, any non-hex char + * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same. + */ + value = 0; + + for (i = 0; i < count; i++) { + unsigned int c = hexnum[i]; + + switch (c) { + case '0' ... '9': c -= '0'; break; + case 'a' ... 'f': c -= 'a'-10; break; + case 'A' ... 
'F': c -= 'A'-10; break; + default: + goto out; + } + value = (value << 4) | c; + } +out: + *ret = value; + return 0; +} + +#ifdef CONFIG_SMP + +static struct proc_dir_entry * smp_affinity_entry [NR_IRQS]; + +static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL }; +static int irq_affinity_read_proc (char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + if (count < HEX_DIGITS+1) + return -EINVAL; + return sprintf (page, "%08lx\n", irq_affinity[(long)data]); +} + +static int irq_affinity_write_proc (struct file *file, const char *buffer, + unsigned long count, void *data) +{ + int irq = (long) data, full_count = count, err; + unsigned long new_value; + + if (!irq_desc[irq].handler->set_affinity) + return -EIO; + + err = parse_hex_value(buffer, count, &new_value); + + /* + * Do not allow disabling IRQs completely - it's a too easy + * way to make the system unusable accidentally :-) At least + * one online CPU still has to be targeted. + */ + if (!(new_value & cpu_online_map)) + return -EINVAL; + + irq_affinity[irq] = new_value; + irq_desc[irq].handler->set_affinity(irq, new_value); + + return full_count; +} + +#endif + +static int prof_cpu_mask_read_proc (char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + unsigned long *mask = (unsigned long *) data; + if (count < HEX_DIGITS+1) + return -EINVAL; + return sprintf (page, "%08lx\n", *mask); +} + +static int prof_cpu_mask_write_proc (struct file *file, const char *buffer, + unsigned long count, void *data) +{ + unsigned long *mask = (unsigned long *) data, full_count = count, err; + unsigned long new_value; + + err = parse_hex_value(buffer, count, &new_value); + if (err) + return err; + + *mask = new_value; + return full_count; +} + +#define MAX_NAMELEN 10 + +static void register_irq_proc (unsigned int irq) +{ + char name [MAX_NAMELEN]; + + if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) || + irq_dir[irq]) + return; + + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%d", irq); + + /* create /proc/irq/1234 */ + irq_dir[irq] = proc_mkdir(name, root_irq_dir); + +#ifdef CONFIG_SMP + { + struct proc_dir_entry *entry; + + /* create /proc/irq/1234/smp_affinity */ + entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]); + + if (entry) { + entry->nlink = 1; + entry->data = (void *)(long)irq; + entry->read_proc = irq_affinity_read_proc; + entry->write_proc = irq_affinity_write_proc; + } + + smp_affinity_entry[irq] = entry; + } +#endif +} + +unsigned long prof_cpu_mask = -1; + +void init_irq_proc (void) +{ + struct proc_dir_entry *entry; + int i; + + /* create /proc/irq */ + root_irq_dir = proc_mkdir("irq", 0); + + /* create /proc/irq/prof_cpu_mask */ + entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); + + if (!entry) + return; + + entry->nlink = 1; + entry->data = (void *)&prof_cpu_mask; + entry->read_proc = prof_cpu_mask_read_proc; + entry->write_proc = prof_cpu_mask_write_proc; + + /* + * Create entries for all existing IRQs. + */ + for (i = 0; i < NR_IRQS; i++) + register_irq_proc(i); +} diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c new file mode 100644 index 000000000000..a5953b523a90 --- /dev/null +++ b/arch/mips/kernel/irq_cpu.c @@ -0,0 +1,117 @@ +/* + * Copyright 2001 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * Copyright (C) 2001 Ralf Baechle + * + * This file define the irq handler for MIPS CPU interrupts. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* + * Almost all MIPS CPUs define 8 interrupt sources. They are typically + * level triggered (i.e., cannot be cleared from CPU; must be cleared from + * device). The first two are software interrupts which we don't really + * use or support. The last one is usually the CPU timer interrupt if + * counter register is present or, for CPUs with an external FPU, by + * convention it's the FPU exception interrupt. + * + * Don't even think about using this on SMP. You have been warned. + * + * This file exports one global function: + * void mips_cpu_irq_init(int irq_base); + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> + +#include <asm/irq_cpu.h> +#include <asm/mipsregs.h> +#include <asm/system.h> + +static int mips_cpu_irq_base; + +static inline void unmask_mips_irq(unsigned int irq) +{ + clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); + set_c0_status(0x100 << (irq - mips_cpu_irq_base)); +} + +static inline void mask_mips_irq(unsigned int irq) +{ + clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); +} + +static inline void mips_cpu_irq_enable(unsigned int irq) +{ + unsigned long flags; + + local_irq_save(flags); + unmask_mips_irq(irq); + local_irq_restore(flags); +} + +static void mips_cpu_irq_disable(unsigned int irq) +{ + unsigned long flags; + + local_irq_save(flags); + mask_mips_irq(irq); + local_irq_restore(flags); +} + +static unsigned int mips_cpu_irq_startup(unsigned int irq) +{ + mips_cpu_irq_enable(irq); + + return 0; +} + +#define mips_cpu_irq_shutdown mips_cpu_irq_disable + +/* + * While we ack the interrupt interrupts are disabled and thus we don't need + * to deal with concurrency issues. Same for mips_cpu_irq_end. 
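A sketch of how a board port might consume the one exported function; everything except `mips_cpu_irq_init()` and the generic IRQ calls is invented, and claiming IRQ 7 as the CP0 count/compare timer follows the convention described in the header above:

```c
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include <asm/irq_cpu.h>

static irqreturn_t board_timer_interrupt(int irq, void *dev_id,
					 struct pt_regs *regs)
{
	/* ...acknowledge by rewriting c0_compare, run the tick... */
	return IRQ_HANDLED;
}

void __init board_irq_setup(void)
{
	/* Expose CP0 interrupt pending bits IP0..IP7 as Linux IRQs 0..7. */
	mips_cpu_irq_init(0);

	if (request_irq(7, board_timer_interrupt, SA_INTERRUPT,
			"timer", NULL))
		printk(KERN_ERR "Failed to claim CPU timer interrupt\n");
}
```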
+ */ +static void mips_cpu_irq_ack(unsigned int irq) +{ + /* Only necessary for soft interrupts */ + clear_c0_cause(1 << (irq - mips_cpu_irq_base + 8)); + + mask_mips_irq(irq); +} + +static void mips_cpu_irq_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + unmask_mips_irq(irq); +} + +static hw_irq_controller mips_cpu_irq_controller = { + "MIPS", + mips_cpu_irq_startup, + mips_cpu_irq_shutdown, + mips_cpu_irq_enable, + mips_cpu_irq_disable, + mips_cpu_irq_ack, + mips_cpu_irq_end, + NULL /* no affinity stuff for UP */ +}; + +void __init mips_cpu_irq_init(int irq_base) +{ + int i; + + for (i = irq_base; i < irq_base + 8; i++) { + irq_desc[i].status = IRQ_DISABLED; + irq_desc[i].action = NULL; + irq_desc[i].depth = 1; + irq_desc[i].handler = &mips_cpu_irq_controller; + } + + mips_cpu_irq_base = irq_base; +} diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 3109b7485a8c..a905d8f334fe 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -12,9 +12,9 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> -#include <asm/irq.h> #include <linux/in6.h> #include <linux/pci.h> +#include <linux/tty.h> #include <linux/ide.h> #include <asm/bootinfo.h> @@ -24,7 +24,7 @@ #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/semaphore.h> -#include <asm/sgi/sgihpc.h> +#include <asm/softirq.h> #include <asm/uaccess.h> #ifdef CONFIG_BLK_DEV_FD #include <asm/floppy.h> @@ -41,7 +41,9 @@ extern long __strnlen_user_nocheck_asm(const char *s); extern long __strnlen_user_asm(const char *s); EXPORT_SYMBOL(mips_machtype); +#ifdef CONFIG_EISA EXPORT_SYMBOL(EISA_bus); +#endif /* * String functions @@ -58,11 +60,8 @@ EXPORT_SYMBOL_NOVERS(strncat); EXPORT_SYMBOL_NOVERS(strnlen); EXPORT_SYMBOL_NOVERS(strrchr); EXPORT_SYMBOL_NOVERS(strstr); -EXPORT_SYMBOL_NOVERS(strsep); EXPORT_SYMBOL(_clear_page); -EXPORT_SYMBOL(enable_irq); -EXPORT_SYMBOL(disable_irq); EXPORT_SYMBOL(kernel_thread); /* @@ -77,15 +76,6 @@ EXPORT_SYMBOL_NOVERS(__strlen_user_asm); EXPORT_SYMBOL_NOVERS(__strnlen_user_nocheck_asm); EXPORT_SYMBOL_NOVERS(__strnlen_user_asm); - -/* - * Functions to control caches. - */ -EXPORT_SYMBOL(_flush_page_to_ram); -EXPORT_SYMBOL(_flush_cache_all); -EXPORT_SYMBOL(_dma_cache_wback_inv); -EXPORT_SYMBOL(_dma_cache_inv); - EXPORT_SYMBOL(invalid_pte_table); /* @@ -97,32 +87,11 @@ EXPORT_SYMBOL(__down_trylock); EXPORT_SYMBOL(__up); /* - * Base address of ports for Intel style I/O. - */ -EXPORT_SYMBOL(mips_io_port_base); - -/* - * Architecture specific stuff. - */ -#ifdef CONFIG_MIPS_JAZZ -EXPORT_SYMBOL(vdma_alloc); -EXPORT_SYMBOL(vdma_free); -EXPORT_SYMBOL(vdma_log2phys); -#endif - -#ifdef CONFIG_SGI_IP22 -EXPORT_SYMBOL(hpc3c0); -#endif - -/* * Kernel hacking ... */ #include <asm/branch.h> #include <linux/sched.h> -int register_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31)); -int unregister_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31)); - #ifdef CONFIG_VT EXPORT_SYMBOL(screen_info); #endif @@ -132,4 +101,3 @@ EXPORT_SYMBOL(ide_ops); #endif EXPORT_SYMBOL(get_wchan); -EXPORT_SYMBOL(flush_tlb_page); diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c new file mode 100644 index 000000000000..68d4bf68380d --- /dev/null +++ b/arch/mips/kernel/module.c @@ -0,0 +1,242 @@ +/* Kernel module help for MIPS. + Copyright (C) 2001 Rusty Russell. 
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+struct mips_hi16 {
+	struct mips_hi16 *next;
+	Elf32_Addr *addr;
+	Elf32_Addr value;
+};
+
+static struct mips_hi16 *mips_hi16_list;
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+	if (size == 0)
+		return NULL;
+	return vmalloc(size);
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+	vfree(module_region);
+	/* FIXME: If module_region == mod->init_region, trim exception
+	   table entries. */
+}
+
+/* We don't need anything special. */
+long module_core_size(const Elf32_Ehdr *hdr,
+		      const Elf32_Shdr *sechdrs,
+		      const char *secstrings,
+		      struct module *module)
+{
+	return module->core_size;
+}
+
+long module_init_size(const Elf32_Ehdr *hdr,
+		      const Elf32_Shdr *sechdrs,
+		      const char *secstrings,
+		      struct module *module)
+{
+	return module->init_size;
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+			      Elf_Shdr *sechdrs,
+			      char *secstrings,
+			      struct module *mod)
+{
+	return 0;
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+		   const char *strtab,
+		   unsigned int symindex,
+		   unsigned int relsec,
+		   struct module *me)
+{
+	unsigned int i;
+	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_offset;
+	Elf32_Sym *sym;
+	uint32_t *location;
+	Elf32_Addr v;
+
+	DEBUGP("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_offset
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_offset
+			+ ELF32_R_SYM(rel[i].r_info);
+		if (!sym->st_value) {
+			printk(KERN_WARNING "%s: Unknown symbol %s\n",
+			       me->name, strtab + sym->st_name);
+			return -ENOENT;
+		}
+
+		v = sym->st_value;
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_MIPS_NONE:
+			break;
+
+		case R_MIPS_32:
+			*location += v;
+			break;
+
+		case R_MIPS_26:
+			if (v % 4) {
+				printk(KERN_ERR
+				       "module %s: dangerous relocation\n",
+				       me->name);
+				return -ENOEXEC;
+			}
+			if ((v & 0xf0000000) !=
+			    (((unsigned long)location + 4) & 0xf0000000)) {
+				printk(KERN_ERR
+				       "module %s: relocation overflow\n",
+				       me->name);
+				return -ENOEXEC;
+			}
+			*location = (*location & ~0x03ffffff) |
+				    ((*location + (v >> 2)) & 0x03ffffff);
+			break;
+
+		case R_MIPS_HI16: {
+			struct mips_hi16 *n;
+
+			/*
+			 * We cannot relocate this one now because we don't
+			 * know the value of the carry we need to add.  Save
+			 * the information, and let LO16 do the actual
+			 * relocation.
+			 */
+			n = (struct mips_hi16 *) kmalloc(sizeof *n, GFP_KERNEL);
+			n->addr = location;
+			n->value = v;
+			n->next = mips_hi16_list;
+			mips_hi16_list = n;
+			break;
+		}
+
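The deferred HI16 list exists because `addiu` sign-extends its 16-bit immediate, so the `lui` half of a pair sometimes needs a carry of one. A worked example of the arithmetic the LO16 case below performs, with illustrative values:

```c
/*
 * Relocating a lui/addiu pair to the final address 0x00408000:
 *
 *   lo16 = 0x00408000 & 0xffff = 0x8000
 *   addiu sign-extends 0x8000 to -0x8000, so lui must compensate:
 *   hi16 = ((0x00408000 >> 16) + ((0x00408000 & 0x8000) != 0)) & 0xffff
 *        = (0x0040 + 1) & 0xffff = 0x0041
 *
 *   Check: 0x00410000 + (-0x8000) = 0x00408000.
 */
```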
+		case R_MIPS_LO16: {
+			unsigned long insnlo = *location;
+			Elf32_Addr val, vallo;
+
+			/* Sign extend the addend we extract from the lo insn.  */
+			vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+			if (mips_hi16_list != NULL) {
+				struct mips_hi16 *l;
+
+				l = mips_hi16_list;
+				while (l != NULL) {
+					struct mips_hi16 *next;
+					unsigned long insn;
+
+					/*
+					 * The value for the HI16 had best be
+					 * the same.
+					 */
+					if (v != l->value) {
+						printk(KERN_ERR "module %s: dangerous "
+						       "relocation\n", me->name);
+						return -ENOEXEC;
+					}
+
+					/*
+					 * Do the HI16 relocation.  Note that
+					 * we actually don't need to know
+					 * anything about the LO16 itself,
+					 * except where to find the low 16 bits
+					 * of the addend needed by the LO16.
+					 */
+					insn = *l->addr;
+					val = ((insn & 0xffff) << 16) + vallo;
+					val += v;
+
+					/*
+					 * Account for the sign extension that
+					 * will happen in the low bits.
+					 */
+					val = ((val >> 16) +
+					       ((val & 0x8000) != 0)) & 0xffff;
+
+					insn = (insn & ~0xffff) | val;
+					*l->addr = insn;
+
+					next = l->next;
+					kfree(l);
+					l = next;
+				}
+
+				mips_hi16_list = NULL;
+			}
+
+			/*
+			 * Ok, we're done with the HI16 relocs.  Now deal with
+			 * the LO16.
+			 */
+			val = v + vallo;
+			insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+			*location = insnlo;
+			break;
+		}
+
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
+	       me->name);
+	return -ENOEXEC;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/mips/kernel/offset.c b/arch/mips/kernel/offset.c
new file mode 100644
index 000000000000..fca626775960
--- /dev/null
+++ b/arch/mips/kernel/offset.c
@@ -0,0 +1,207 @@
+/*
+ * offset.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+#define text(t) __asm__("\n@@@" t)
+#define _offset(type, member) (&(((type *)NULL)->member))
+
+#define offset(string, ptr, member) \
+	__asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
+#define constant(string, member) \
+	__asm__("\n@@@" string "%x0" : : "i" (member))
+#define size(string, size) \
+	__asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
+#define linefeed text("")
+
+void output_ptreg_defines(void)
+{
+	text("/* MIPS pt_regs offsets.
*/"); + offset("#define PT_R0 ", struct pt_regs, regs[0]); + offset("#define PT_R1 ", struct pt_regs, regs[1]); + offset("#define PT_R2 ", struct pt_regs, regs[2]); + offset("#define PT_R3 ", struct pt_regs, regs[3]); + offset("#define PT_R4 ", struct pt_regs, regs[4]); + offset("#define PT_R5 ", struct pt_regs, regs[5]); + offset("#define PT_R6 ", struct pt_regs, regs[6]); + offset("#define PT_R7 ", struct pt_regs, regs[7]); + offset("#define PT_R8 ", struct pt_regs, regs[8]); + offset("#define PT_R9 ", struct pt_regs, regs[9]); + offset("#define PT_R10 ", struct pt_regs, regs[10]); + offset("#define PT_R11 ", struct pt_regs, regs[11]); + offset("#define PT_R12 ", struct pt_regs, regs[12]); + offset("#define PT_R13 ", struct pt_regs, regs[13]); + offset("#define PT_R14 ", struct pt_regs, regs[14]); + offset("#define PT_R15 ", struct pt_regs, regs[15]); + offset("#define PT_R16 ", struct pt_regs, regs[16]); + offset("#define PT_R17 ", struct pt_regs, regs[17]); + offset("#define PT_R18 ", struct pt_regs, regs[18]); + offset("#define PT_R19 ", struct pt_regs, regs[19]); + offset("#define PT_R20 ", struct pt_regs, regs[20]); + offset("#define PT_R21 ", struct pt_regs, regs[21]); + offset("#define PT_R22 ", struct pt_regs, regs[22]); + offset("#define PT_R23 ", struct pt_regs, regs[23]); + offset("#define PT_R24 ", struct pt_regs, regs[24]); + offset("#define PT_R25 ", struct pt_regs, regs[25]); + offset("#define PT_R26 ", struct pt_regs, regs[26]); + offset("#define PT_R27 ", struct pt_regs, regs[27]); + offset("#define PT_R28 ", struct pt_regs, regs[28]); + offset("#define PT_R29 ", struct pt_regs, regs[29]); + offset("#define PT_R30 ", struct pt_regs, regs[30]); + offset("#define PT_R31 ", struct pt_regs, regs[31]); + offset("#define PT_LO ", struct pt_regs, lo); + offset("#define PT_HI ", struct pt_regs, hi); + offset("#define PT_EPC ", struct pt_regs, cp0_epc); + offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr); + offset("#define PT_STATUS ", struct pt_regs, cp0_status); + offset("#define PT_CAUSE ", struct pt_regs, cp0_cause); + size("#define PT_SIZE ", struct pt_regs); + linefeed; +} + +void output_task_defines(void) +{ + text("/* MIPS task_struct offsets. */"); + offset("#define TASK_STATE ", struct task_struct, state); + offset("#define TASK_THREAD_INFO ", struct task_struct, thread_info); + offset("#define TASK_FLAGS ", struct task_struct, flags); + offset("#define TASK_MM ", struct task_struct, mm); + offset("#define TASK_PID ", struct task_struct, pid); + size( "#define TASK_STRUCT_SIZE ", struct task_struct); + linefeed; +} + +void output_thread_info_defines(void) +{ + text("/* MIPS thread_info offsets. */"); + offset("#define TI_TASK ", struct thread_info, task); + offset("#define TI_EXEC_DOMAIN ", struct thread_info, exec_domain); + offset("#define TI_FLAGS ", struct thread_info, flags); + offset("#define TI_CPU ", struct thread_info, cpu); + offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count); + offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit); + offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block); + linefeed; +} + +void output_thread_defines(void) +{ + text("/* MIPS specific thread_struct offsets. 
*/"); + offset("#define THREAD_REG16 ", struct task_struct, thread.reg16); + offset("#define THREAD_REG17 ", struct task_struct, thread.reg17); + offset("#define THREAD_REG18 ", struct task_struct, thread.reg18); + offset("#define THREAD_REG19 ", struct task_struct, thread.reg19); + offset("#define THREAD_REG20 ", struct task_struct, thread.reg20); + offset("#define THREAD_REG21 ", struct task_struct, thread.reg21); + offset("#define THREAD_REG22 ", struct task_struct, thread.reg22); + offset("#define THREAD_REG23 ", struct task_struct, thread.reg23); + offset("#define THREAD_REG29 ", struct task_struct, thread.reg29); + offset("#define THREAD_REG30 ", struct task_struct, thread.reg30); + offset("#define THREAD_REG31 ", struct task_struct, thread.reg31); + offset("#define THREAD_STATUS ", struct task_struct, \ + thread.cp0_status); + offset("#define THREAD_FPU ", struct task_struct, thread.fpu); + offset("#define THREAD_BVADDR ", struct task_struct, \ + thread.cp0_badvaddr); + offset("#define THREAD_BUADDR ", struct task_struct, \ + thread.cp0_baduaddr); + offset("#define THREAD_ECODE ", struct task_struct, \ + thread.error_code); + offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no); + offset("#define THREAD_MFLAGS ", struct task_struct, thread.mflags); + offset("#define THREAD_TRAMP ", struct task_struct, \ + thread.irix_trampoline); + offset("#define THREAD_OLDCTX ", struct task_struct, \ + thread.irix_oldctx); + linefeed; +} + +void output_mm_defines(void) +{ + text("/* Linux mm_struct offsets. */"); + offset("#define MM_USERS ", struct mm_struct, mm_users); + offset("#define MM_PGD ", struct mm_struct, pgd); + offset("#define MM_CONTEXT ", struct mm_struct, context); + linefeed; + constant("#define _PAGE_SIZE ", PAGE_SIZE); + constant("#define _PGD_ORDER ", PGD_ORDER); + constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); + linefeed; +} + +void output_sc_defines(void) +{ + text("/* Linux sigcontext offsets. */"); + offset("#define SC_REGS ", struct sigcontext, sc_regs); + offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs); + offset("#define SC_MDHI ", struct sigcontext, sc_mdhi); + offset("#define SC_MDLO ", struct sigcontext, sc_mdlo); + offset("#define SC_PC ", struct sigcontext, sc_pc); + offset("#define SC_STATUS ", struct sigcontext, sc_status); + offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); + offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir); + offset("#define SC_CAUSE ", struct sigcontext, sc_cause); + offset("#define SC_BADVADDR ", struct sigcontext, sc_badvaddr); + linefeed; +} + +void output_signal_defined(void) +{ + text("/* Linux signal numbers. 
*/"); + constant("#define _SIGHUP ", SIGHUP); + constant("#define _SIGINT ", SIGINT); + constant("#define _SIGQUIT ", SIGQUIT); + constant("#define _SIGILL ", SIGILL); + constant("#define _SIGTRAP ", SIGTRAP); + constant("#define _SIGIOT ", SIGIOT); + constant("#define _SIGABRT ", SIGABRT); + constant("#define _SIGEMT ", SIGEMT); + constant("#define _SIGFPE ", SIGFPE); + constant("#define _SIGKILL ", SIGKILL); + constant("#define _SIGBUS ", SIGBUS); + constant("#define _SIGSEGV ", SIGSEGV); + constant("#define _SIGSYS ", SIGSYS); + constant("#define _SIGPIPE ", SIGPIPE); + constant("#define _SIGALRM ", SIGALRM); + constant("#define _SIGTERM ", SIGTERM); + constant("#define _SIGUSR1 ", SIGUSR1); + constant("#define _SIGUSR2 ", SIGUSR2); + constant("#define _SIGCHLD ", SIGCHLD); + constant("#define _SIGPWR ", SIGPWR); + constant("#define _SIGWINCH ", SIGWINCH); + constant("#define _SIGURG ", SIGURG); + constant("#define _SIGIO ", SIGIO); + constant("#define _SIGSTOP ", SIGSTOP); + constant("#define _SIGTSTP ", SIGTSTP); + constant("#define _SIGCONT ", SIGCONT); + constant("#define _SIGTTIN ", SIGTTIN); + constant("#define _SIGTTOU ", SIGTTOU); + constant("#define _SIGVTALRM ", SIGVTALRM); + constant("#define _SIGPROF ", SIGPROF); + constant("#define _SIGXCPU ", SIGXCPU); + constant("#define _SIGXFSZ ", SIGXFSZ); + linefeed; +} + +void output_irq_cpustat_t_defines(void) +{ + text("/* Linux irq_cpustat_t offsets. */"); + offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending); + offset("#define IC_SYSCALL_COUNT ", irq_cpustat_t, __syscall_count); + offset("#define IC_KSOFTIRQD_TASK ", irq_cpustat_t, __ksoftirqd_task); + size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t); + linefeed; +} diff --git a/arch/mips/kernel/old-irq.c b/arch/mips/kernel/old-irq.c deleted file mode 100644 index 9dedb01a92e5..000000000000 --- a/arch/mips/kernel/old-irq.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Code to handle x86 style IRQs plus some generic interrupt stuff. - * - * Copyright (C) 1992 Linus Torvalds - * Copyright (C) 1994 - 2001 Ralf Baechle - * - * Old rotten IRQ code. To be killed as soon as everybody had converted or - * in 2.5.0, whatever comes first. - */ -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/kernel_stat.h> -#include <linux/module.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/types.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/timex.h> -#include <linux/slab.h> -#include <linux/random.h> -#include <linux/seq_file.h> - -#include <asm/bitops.h> -#include <asm/bootinfo.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/mipsregs.h> -#include <asm/system.h> -#include <asm/nile4.h> - -/* - * The board specific setup routine sets irq_setup to point to a board - * specific setup routine. - */ -void (*irq_setup)(void); - -/* - * Linux has a controller-independent x86 interrupt architecture. - * every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the apropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. 
- * - * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, - * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. - * (IO-APICs assumed to be messaging to Pentium local-APICs) - * - * the code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic. - */ - -/* - * This contains the irq mask for both 8259A irq controllers, it's an - * int so we can deal with the third PIC in some systems like the RM300. - * (XXX This is broken for big endian.) - */ -static unsigned int cached_irq_mask = 0xffff; - -#define __byte(x,y) (((unsigned char *)&(y))[x]) -#define __word(x,y) (((unsigned short *)&(y))[x]) -#define __long(x,y) (((unsigned int *)&(y))[x]) - -#define cached_21 (__byte(0,cached_irq_mask)) -#define cached_A1 (__byte(1,cached_irq_mask)) - -unsigned long spurious_count = 0; - -/* - * (un)mask_irq, disable_irq() and enable_irq() only handle (E)ISA and - * PCI devices. Other onboard hardware needs specific routines. - */ -static inline void mask_irq(unsigned int irq) -{ - cached_irq_mask |= 1 << irq; - if (irq & 8) { - outb(cached_A1, 0xa1); - } else { - outb(cached_21, 0x21); - } -} - -static inline void unmask_irq(unsigned int irq) -{ - cached_irq_mask &= ~(1 << irq); - if (irq & 8) { - outb(cached_A1, 0xa1); - } else { - outb(cached_21, 0x21); - } -} - -void i8259_disable_irq(unsigned int irq_nr) -{ - unsigned long flags; - - save_and_cli(flags); - mask_irq(irq_nr); - restore_flags(flags); -} - -void i8259_enable_irq(unsigned int irq_nr) -{ - unsigned long flags; - save_and_cli(flags); - unmask_irq(irq_nr); - restore_flags(flags); -} - -static struct irqaction *irq_action[NR_IRQS] = { - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL -}; - -int show_interrupts(struct seq_file *p, void *v) -{ - int i; - struct irqaction * action; - unsigned long flags; - - for (i = 0 ; i < 32 ; i++) { - local_irq_save(flags); - action = irq_action[i]; - if (!action) - goto skip; - seq_printf(p, "%2d: %8d %c %s", - i, kstat_cpu(0).irqs[i], - (action->flags & SA_INTERRUPT) ? '+' : ' ', - action->name); - for (action=action->next; action; action = action->next) { - seq_printf(p, ",%s %s", - (action->flags & SA_INTERRUPT) ? 
" +" : "", - action->name); - } - seq_putc(p, '\n'); -skip: - local_irq_restore(flags); - } - return 0; -} - -static inline void i8259_mask_and_ack_irq(int irq) -{ - cached_irq_mask |= 1 << irq; - - if (irq & 8) { - inb(0xa1); - outb(cached_A1, 0xa1); - outb(0x62, 0x20); /* Specific EOI to cascade */ - outb(0x20, 0xa0); - } else { - inb(0x21); - outb(cached_21, 0x21); - outb(0x20, 0x20); - } -} - -asmlinkage void i8259_do_irq(int irq, struct pt_regs *regs) -{ - struct irqaction *action; - int do_random, cpu; - - cpu = smp_processor_id(); - irq_enter(cpu, irq); - - if (irq >= 16) - goto out; - - i8259_mask_and_ack_irq(irq); - - kstat_cpu(cpu).irqs[irq]++; - - action = *(irq + irq_action); - if (!action) - goto out; - - if (!(action->flags & SA_INTERRUPT)) - local_irq_enable(); - action = *(irq + irq_action); - do_random = 0; - do { - do_random |= action->flags; - action->handler(irq, action->dev_id, regs); - action = action->next; - } while (action); - if (do_random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - local_irq_disable(); - unmask_irq (irq); - -out: - irq_exit(cpu, irq); -} - -/* - * do_IRQ handles IRQ's that have been installed without the - * SA_INTERRUPT flag: it uses the full signal-handling return - * and runs with other interrupts enabled. All relatively slow - * IRQ's should use this format: notably the keyboard/timer - * routines. - */ -asmlinkage void do_IRQ(int irq, struct pt_regs * regs) -{ - struct irqaction *action; - int do_random, cpu; - - cpu = smp_processor_id(); - irq_enter(cpu, irq); - kstat_cpu(cpu).irqs[irq]++; - - action = *(irq + irq_action); - if (action) { - if (!(action->flags & SA_INTERRUPT)) - local_irq_enable(); - action = *(irq + irq_action); - do_random = 0; - do { - do_random |= action->flags; - action->handler(irq, action->dev_id, regs); - action = action->next; - } while (action); - if (do_random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - local_irq_disable(); - } - irq_exit(cpu, irq); - - if (softirq_pending(cpu)) - do_softirq(); - - /* unmasking and bottom half handling is done magically for us. */ -} - -int i8259_setup_irq(int irq, struct irqaction * new) -{ - int shared = 0; - struct irqaction *old, **p; - unsigned long flags; - - p = irq_action + irq; - if ((old = *p) != NULL) { - /* Can't share interrupts unless both agree to */ - if (!(old->flags & new->flags & SA_SHIRQ)) - return -EBUSY; - - /* Can't share interrupts unless both are same type */ - if ((old->flags ^ new->flags) & SA_INTERRUPT) - return -EBUSY; - - /* add new interrupt at end of irq queue */ - do { - p = &old->next; - old = *p; - } while (old); - shared = 1; - } - - if (new->flags & SA_SAMPLE_RANDOM) - rand_initialize_irq(irq); - - save_and_cli(flags); - *p = new; - - if (!shared) { - if (is_i8259_irq(irq)) - unmask_irq(irq); -#if (defined(CONFIG_DDB5074) || defined(CONFIG_DDB5476)) - else - nile4_enable_irq(irq_to_nile4(irq)); -#endif - } - restore_flags(flags); - return 0; -} - -/* - * Request_interrupt and free_interrupt ``sort of'' handle interrupts of - * non i8259 devices. They will have to be replaced by architecture - * specific variants. For now we still use this as broken as it is because - * it used to work ... 
- */ -int request_irq(unsigned int irq, - void (*handler)(int, void *, struct pt_regs *), - unsigned long irqflags, const char * devname, void *dev_id) -{ - int retval; - struct irqaction * action; - - if (irq >= 32) - return -EINVAL; - if (!handler) - return -EINVAL; - - action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); - if (!action) - return -ENOMEM; - - action->handler = handler; - action->flags = irqflags; - action->mask = 0; - action->name = devname; - action->next = NULL; - action->dev_id = dev_id; - - retval = i8259_setup_irq(irq, action); - - if (retval) - kfree(action); - return retval; -} - -void free_irq(unsigned int irq, void *dev_id) -{ - struct irqaction * action, **p; - unsigned long flags; - - if (irq > 31) { - printk("Trying to free IRQ%d\n",irq); - return; - } - for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) { - if (action->dev_id != dev_id) - continue; - - /* Found it - now free it */ - save_and_cli(flags); - *p = action->next; - if (!irq[irq_action]) - mask_irq(irq); - restore_flags(flags); - kfree(action); - return; - } - printk("Trying to free free IRQ%d\n",irq); -} - -unsigned long probe_irq_on (void) -{ - unsigned int i, irqs = 0; - unsigned long delay; - - /* first, enable any unassigned (E)ISA irqs */ - for (i = 15; i > 0; i--) { - if (!irq_action[i]) { - i8259_enable_irq(i); - irqs |= (1 << i); - } - } - - /* wait for spurious interrupts to mask themselves out again */ - for (delay = jiffies + HZ/10; time_before(jiffies, delay); ) - /* about 100ms delay */; - - /* now filter out any obviously spurious interrupts */ - return irqs & ~cached_irq_mask; -} - -int probe_irq_off (unsigned long irqs) -{ - unsigned int i; - -#ifdef DEBUG - printk("probe_irq_off: irqs=0x%04x irqmask=0x%04x\n", irqs, irqmask); -#endif - irqs &= cached_irq_mask; - if (!irqs) - return 0; - i = ffz(~irqs); - if (irqs != (irqs & (1 << i))) - i = -i; - return i; -} - -void __init i8259_init(void) -{ - /* Init master interrupt controller */ - outb(0x11, 0x20); /* Start init sequence */ - outb(0x00, 0x21); /* Vector base */ - outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */ - outb(0x01, 0x21); /* Select 8086 mode */ - outb(0xff, 0x21); /* Mask all */ - - /* Init slave interrupt controller */ - outb(0x11, 0xa0); /* Start init sequence */ - outb(0x08, 0xa1); /* Vector base */ - outb(0x02, 0xa1); /* edge triggered, Cascade (slave) on IRQ2 */ - outb(0x01, 0xa1); /* Select 8086 mode */ - outb(0xff, 0xa1); /* Mask all */ - - outb(cached_A1, 0xa1); - outb(cached_21, 0x21); -} - -void __init init_IRQ(void) -{ - /* i8259_init(); */ - irq_setup(); -} diff --git a/arch/mips/kernel/old-time.c b/arch/mips/kernel/old-time.c deleted file mode 100644 index 4793acd60c19..000000000000 --- a/arch/mips/kernel/old-time.c +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * Copyright (C) 1996 - 2000 Ralf Baechle - * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - * - * Don't use. Deprecated. Dead meat. 
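The request_irq()/free_irq() pair and the probe_irq_on()/probe_irq_off() autoprobe protocol deleted above were used by drivers roughly as follows: arm all unclaimed lines, provoke the device into interrupting, then ask which line fired. A driver-side sketch under 2.4-era assumptions; demo_trigger_interrupt() is a hypothetical routine that makes the hardware assert its IRQ, and header locations are approximate for the era:

#include <linux/interrupt.h>
#include <linux/delay.h>

static int demo_probe_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* unmask all unclaimed (E)ISA lines */
	demo_trigger_interrupt();	/* hypothetical: poke the card */
	udelay(100);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* >0: the line; 0: none; <0: ambiguous */

	return irq;
}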
- */ -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/kernel_stat.h> -#include <linux/bcd.h> - -#include <asm/bootinfo.h> -#include <asm/cpu.h> -#include <asm/mipsregs.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/ddb5074.h> - -#include <linux/mc146818rtc.h> -#include <linux/timex.h> - -extern volatile unsigned long wall_jiffies; -unsigned long r4k_interval; -extern rwlock_t xtime_lock; - -/* - * Change this if you have some constant time drift - */ -/* This is the value for the PC-style PICs. */ -/* #define USECS_PER_JIFFY (1000020/HZ) */ - -/* This is for machines which generate the exact clock. */ -#define USECS_PER_JIFFY (1000000/HZ) - -/* Cycle counter value at the previous timer interrupt.. */ - -static unsigned int timerhi, timerlo; - -/* - * On MIPS only R4000 and better have a cycle counter. - * - * FIXME: Does playing with the RP bit in c0_status interfere with this code? - */ -static unsigned long do_fast_gettimeoffset(void) -{ - u32 count; - unsigned long res, tmp; - - /* Last jiffy when do_fast_gettimeoffset() was called. */ - static unsigned long last_jiffies; - unsigned long quotient; - - /* - * Cached "1/(clocks per usec)*2^32" value. - * It has to be recalculated once each jiffy. - */ - static unsigned long cached_quotient; - - tmp = jiffies; - - quotient = cached_quotient; - - if (tmp && last_jiffies != tmp) { - last_jiffies = tmp; - __asm__(".set\tnoreorder\n\t" - ".set\tnoat\n\t" - ".set\tmips3\n\t" - "lwu\t%0,%2\n\t" - "dsll32\t$1,%1,0\n\t" - "or\t$1,$1,%0\n\t" - "ddivu\t$0,$1,%3\n\t" - "mflo\t$1\n\t" - "dsll32\t%0,%4,0\n\t" - "nop\n\t" - "ddivu\t$0,%0,$1\n\t" - "mflo\t%0\n\t" - ".set\tmips0\n\t" - ".set\tat\n\t" - ".set\treorder" - :"=&r" (quotient) - :"r" (timerhi), - "m" (timerlo), - "r" (tmp), - "r" (USECS_PER_JIFFY) - :"$1"); - cached_quotient = quotient; - } - - /* Get last timer tick in absolute kernel time */ - count = read_32bit_cp0_register(CP0_COUNT); - - /* .. relative to previous jiffy (32 bits is enough) */ - count -= timerlo; - - __asm__("multu\t%1,%2\n\t" - "mfhi\t%0" - :"=r" (res) - :"r" (count), - "r" (quotient)); - - /* - * Due to possible jiffies inconsistencies, we need to check - * the result so that we'll get a timer that is monotonic. - */ - if (res >= USECS_PER_JIFFY) - res = USECS_PER_JIFFY-1; - - return res; -} - -/* This function must be called with interrupts disabled - * It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs - * - * However, the pc-audio speaker driver changes the divisor so that - * it gets interrupted rather more often - it loads 64 into the - * counter rather than 11932! This has an adverse impact on - * do_gettimeoffset() -- it stops working! What is also not - * good is that the interval that our timer function gets called - * is no longer 10.0002 ms, but 9.9767 ms. To get around this - * would require using a different timing source. Maybe someone - * could use the RTC - I know that this can interrupt at frequencies - * ranging from 8192Hz to 2Hz. If I had the energy, I'd somehow fix - * it so that at startup, the timer code in sched.c would select - * using either the RTC or the 8253 timer. The decision would be - * based on whether there was any other device around that needed - * to trample on the 8253. 
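The 64-bit assembly in do_fast_gettimeoffset() above computes a cached 32.32 fixed-point factor, (USECS_PER_JIFFY << 32) / cycles_per_jiffy, so that each call needs only one 32x32 multiply-high. The same arithmetic in portable C (a sketch; the original uses hand-written asm because a 64-bit integer type could not be relied on there):

#include <stdint.h>

static uint32_t usec_per_cycle_q32;	/* recomputed once per jiffy */

static void recompute_quotient(uint64_t cycles_since_boot,
			       uint64_t jiffies_since_boot,
			       uint32_t usecs_per_jiffy)
{
	uint64_t cycles_per_jiffy = cycles_since_boot / jiffies_since_boot;

	usec_per_cycle_q32 = (uint32_t)
		(((uint64_t)usecs_per_jiffy << 32) / cycles_per_jiffy);
}

/* One multiply-high per call; exactly what the "multu; mfhi" pair does. */
static uint32_t cycles_to_usec(uint32_t cycle_delta)
{
	return (uint32_t)(((uint64_t)cycle_delta * usec_per_cycle_q32) >> 32);
}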
I'd set up the RTC to interrupt at 1024 Hz, - * and then do some jiggery to have a version of do_timer that - * advanced the clock by 1/1024 s. Every time that reached over 1/100 - * of a second, then do all the old code. If the time was kept correct - * then do_gettimeoffset could just return 0 - there is no low order - * divider that can be accessed. - * - * Ideally, you would be able to use the RTC for the speaker driver, - * but it appears that the speaker driver really needs interrupt more - * often than every 120 us or so. - * - * Anyway, this needs more thought.... pjsg (1993-08-28) - * - * If you are really that interested, you should be reading - * comp.protocols.time.ntp! - */ - -#define TICK_SIZE tick - -static unsigned long do_slow_gettimeoffset(void) -{ - int count; - - static int count_p = LATCH; /* for the first call after boot */ - static unsigned long jiffies_p; - - /* - * cache volatile jiffies temporarily; we have IRQs turned off. - */ - unsigned long jiffies_t; - - /* timer count may underflow right here */ - outb_p(0x00, 0x43); /* latch the count ASAP */ - - count = inb_p(0x40); /* read the latched count */ - - /* - * We do this guaranteed double memory access instead of a _p - * postfix in the previous port access. Wheee, hackady hack - */ - jiffies_t = jiffies; - - count |= inb_p(0x40) << 8; - - /* - * avoiding timer inconsistencies (they are rare, but they happen)... - * there are two kinds of problems that must be avoided here: - * 1. the timer counter underflows - * 2. hardware problem with the timer, not giving us continuous time, - * the counter does small "jumps" upwards on some Pentium systems, - * (see c't 95/10 page 335 for Neptun bug.) - */ - - if( jiffies_t == jiffies_p ) { - if( count > count_p ) { - /* the nutcase */ - - outb_p(0x0A, 0x20); - - /* assumption about timer being IRQ1 */ - if (inb(0x20) & 0x01) { - /* - * We cannot detect lost timer interrupts ... - * well, that's why we call them lost, don't we? :) - * [hmm, on the Pentium and Alpha we can ... sort of] - */ - count -= LATCH; - } else { - printk("do_slow_gettimeoffset(): hardware timer problem?\n"); - } - } - } else - jiffies_p = jiffies_t; - - count_p = count; - - count = ((LATCH-1) - count) * TICK_SIZE; - count = (count + LATCH/2) / LATCH; - - return count; -} - -static unsigned long (*do_gettimeoffset)(void) = do_slow_gettimeoffset; - -/* - * This version of gettimeofday has near microsecond resolution. - */ -void do_gettimeofday(struct timeval *tv) -{ - unsigned long flags; - - read_lock_irqsave (&xtime_lock, flags); - *tv = xtime; - tv->tv_usec += do_gettimeoffset(); - - /* - * xtime is atomically updated in timer_bh. jiffies - wall_jiffies - * is nonzero if the timer bottom half hasnt executed yet. - */ - if (jiffies - wall_jiffies) - tv->tv_usec += USECS_PER_JIFFY; - - read_unlock_irqrestore (&xtime_lock, flags); - - if (tv->tv_usec >= 1000000) { - tv->tv_usec -= 1000000; - tv->tv_sec++; - } -} - -void do_settimeofday(struct timeval *tv) -{ - write_lock_irq (&xtime_lock); - - /* This is revolting. We need to set the xtime.tv_usec - * correctly. However, the value in this location is - * is value at the last tick. - * Discover what correction gettimeofday - * would have done, and then undo it! 
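The closing arithmetic of do_slow_gettimeoffset() above turns a latched 8254 down-counter value into microseconds: the counter runs from LATCH down to 0 once per tick, so the elapsed fraction of the current tick is ((LATCH-1) - count) / LATCH, scaled to the tick length and rounded. Spelled out with assumed PC-style constants:

#define PIT_HZ		1193182				/* 8254 input clock */
#define DEMO_HZ		100
#define DEMO_LATCH	((PIT_HZ + DEMO_HZ / 2) / DEMO_HZ)	/* 11932 */
#define DEMO_TICK_USEC	(1000000 / DEMO_HZ)		/* 10000 */

static unsigned long pit_count_to_usec(int count)
{
	unsigned long elapsed = (DEMO_LATCH - 1) - count;

	return (elapsed * DEMO_TICK_USEC + DEMO_LATCH / 2) / DEMO_LATCH;
}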
- */ - tv->tv_usec -= do_gettimeoffset(); - - if (tv->tv_usec < 0) { - tv->tv_usec += 1000000; - tv->tv_sec--; - } - - xtime = *tv; - time_adjust = 0; /* stop active adjtime() */ - time_status |= STA_UNSYNC; - time_maxerror = NTP_PHASE_LIMIT; - time_esterror = NTP_PHASE_LIMIT; - - write_unlock_irq (&xtime_lock); -} - -/* - * In order to set the CMOS clock precisely, set_rtc_mmss has to be - * called 500 ms after the second nowtime has started, because when - * nowtime is written into the registers of the CMOS clock, it will - * jump to the next second precisely 500 ms later. Check the Motorola - * MC146818A or Dallas DS12887 data sheet for details. - * - * BUG: This routine does not handle hour overflow properly; it just - * sets the minutes. Usually you won't notice until after reboot! - */ -static int set_rtc_mmss(unsigned long nowtime) -{ - int retval = 0; - int real_seconds, real_minutes, cmos_minutes; - unsigned char save_control, save_freq_select; - - save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - cmos_minutes = CMOS_READ(RTC_MINUTES); - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - BCD_TO_BIN(cmos_minutes); - - /* - * since we're only adjusting minutes and seconds, - * don't interfere with hour overflow. This avoids - * messing with unknown time zones but requires your - * RTC not to be off by more than 15 minutes - */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) - real_minutes += 30; /* correct for half hour time zone */ - real_minutes %= 60; - - if (abs(real_minutes - cmos_minutes) < 30) { - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(real_seconds); - BIN_TO_BCD(real_minutes); - } - CMOS_WRITE(real_seconds,RTC_SECONDS); - CMOS_WRITE(real_minutes,RTC_MINUTES); - } else { - printk(KERN_WARNING - "set_rtc_mmss: can't update from %d to %d\n", - cmos_minutes, real_minutes); - retval = -1; - } - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... -- Markus Kuhn - */ - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - - return retval; -} - -/* last time the cmos clock got updated */ -static long last_rtc_update; - -/* - * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick - */ -static void inline -timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) -{ -#ifdef CONFIG_DDB5074 - static unsigned cnt, period, dist; - - if (cnt == 0 || cnt == dist) - ddb5074_led_d2(1); - else if (cnt == 7 || cnt == dist+7) - ddb5074_led_d2(0); - - if (++cnt > period) { - cnt = 0; - /* The hyperbolic function below modifies the heartbeat period - * length in dependency of the current (5min) load. It goes - * through the points f(0)=126, f(1)=86, f(5)=51, - * f(inf)->30. 
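set_rtc_mmss() above converts between binary and BCD because MC146818-style RTCs normally store, say, minute 59 as 0x59. The <linux/bcd.h> helpers it calls amount to these era-accurate one-liners, shown for reference:

#define BCD_TO_BIN(val)	((val) = ((val) & 15) + ((val) >> 4) * 10)
#define BIN_TO_BCD(val)	((val) = (((val) / 10) << 4) + (val) % 10)

So BCD_TO_BIN turns 0x59 into 59 in place, and BIN_TO_BCD reverses it.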
*/ - period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30; - dist = period / 4; - } -#endif - if(!user_mode(regs)) { - if (prof_buffer && current->pid) { - extern int _stext; - unsigned long pc = regs->cp0_epc; - - pc -= (unsigned long) &_stext; - pc >>= prof_shift; - /* - * Dont ignore out-of-bounds pc values silently, - * put them into the last histogram slot, so if - * present, they will show up as a sharp peak. - */ - if (pc > prof_len-1) - pc = prof_len-1; - atomic_inc((atomic_t *)&prof_buffer[pc]); - } - } - do_timer(regs); - - /* - * If we have an externally synchronized Linux clock, then update - * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be - * called as close as possible to 500 ms before the new second starts. - */ - read_lock (&xtime_lock); - if ((time_status & STA_UNSYNC) == 0 && - xtime.tv_sec > last_rtc_update + 660 && - xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 && - xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) { - if (set_rtc_mmss(xtime.tv_sec) == 0) - last_rtc_update = xtime.tv_sec; - else - last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */ - } - /* As we return to user mode fire off the other CPU schedulers.. this is - basically because we don't yet share IRQ's around. This message is - rigged to be safe on the 386 - basically it's a hack, so don't look - closely for now.. */ - /*smp_message_pass(MSG_ALL_BUT_SELF, MSG_RESCHEDULE, 0L, 0); */ - read_unlock (&xtime_lock); -} - -static inline void -r4k_timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) -{ - unsigned int count; - - /* - * The cycle counter is only 32 bit which is good for about - * a minute at current count rates of upto 150MHz or so. - */ - count = read_32bit_cp0_register(CP0_COUNT); - timerhi += (count < timerlo); /* Wrap around */ - timerlo = count; - -#ifdef CONFIG_SGI_IP22 - /* Since we don't get anything but r4k timer interrupts, we need to - * set this up so that we'll get one next time. Fortunately since we - * have timerhi/timerlo, we don't care so much if we miss one. So - * we need only ask for the next in r4k_interval counts. On other - * archs we have a real timer, so we don't want this. - */ - write_32bit_cp0_register (CP0_COMPARE, - (unsigned long) (count + r4k_interval)); - kstat_cpu(0).irqs[irq]++; -#endif - - timer_interrupt(irq, dev_id, regs); - - if (!jiffies) - { - /* - * If jiffies has overflowed in this timer_interrupt we must - * update the timer[hi]/[lo] to make do_fast_gettimeoffset() - * quotient calc still valid. -arca - */ - timerhi = timerlo = 0; - } -} - -void indy_r4k_timer_interrupt (struct pt_regs *regs) -{ - static const int INDY_R4K_TIMER_IRQ = 7; - int cpu = smp_processor_id(); - - r4k_timer_interrupt (INDY_R4K_TIMER_IRQ, NULL, regs); - - if (softirq_pending(cpu)) - do_softirq(); -} - -struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, - "timer", NULL, NULL}; - - -void (*board_time_init)(struct irqaction *irq); - -void __init time_init(void) -{ - unsigned int epoch = 0, year, mon, day, hour, min, sec; - int i; - - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - /* read RTC exactly on falling edge of update flag */ - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... 
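r4k_timer_interrupt() above widens the 32-bit CP0 count register into the timerhi/timerlo pair. Because the counter is sampled at least once per wrap period, a wrap shows up as the new reading being smaller than the previous one; the idiom in isolation:

#include <stdint.h>

static uint32_t hi_demo, lo_demo;

static void note_cycle_count(uint32_t count)
{
	hi_demo += (count < lo_demo);	/* carry on 32-bit wrap-around */
	lo_demo = count;
}

static uint64_t cycles_since_boot(void)
{
	return ((uint64_t)hi_demo << 32) | lo_demo;
}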
*/ - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - do { /* Isn't this overkill ? UIP above should guarantee consistency */ - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - } while (sec != CMOS_READ(RTC_SECONDS)); - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - { - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); - } - - /* Attempt to guess the epoch. This is the same heuristic as in rtc.c so - no stupid things will happen to timekeeping. Who knows, maybe Ultrix - also uses 1952 as epoch ... */ - if (year > 10 && year < 44) { - epoch = 1980; - } else if (year < 96) { - epoch = 1952; - } - year += epoch; - - write_lock_irq (&xtime_lock); - xtime.tv_sec = mktime(year, mon, day, hour, min, sec); - xtime.tv_usec = 0; - write_unlock_irq (&xtime_lock); - - if (mips_cpu.options & MIPS_CPU_COUNTER) { - write_32bit_cp0_register(CP0_COUNT, 0); - do_gettimeoffset = do_fast_gettimeoffset; - irq0.handler = r4k_timer_interrupt; - } - - board_time_init(&irq0); -} diff --git a/arch/mips/kernel/pci-dma.c b/arch/mips/kernel/pci-dma.c index 542d529b84ac..a51b59048876 100644 --- a/arch/mips/kernel/pci-dma.c +++ b/arch/mips/kernel/pci-dma.c @@ -10,6 +10,7 @@ #include <linux/config.h> #include <linux/types.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/string.h> #include <linux/pci.h> @@ -20,18 +21,23 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, { void *ret; int gfp = GFP_ATOMIC; + struct pci_bus *bus = NULL; +#ifdef CONFIG_ISA if (hwdev == NULL || hwdev->dma_mask != 0xffffffff) gfp |= GFP_DMA; +#endif ret = (void *) __get_free_pages(gfp, get_order(size)); if (ret != NULL) { memset(ret, 0, size); -#ifndef CONFIG_COHERENT_IO + if (hwdev) + bus = hwdev->bus; + *dma_handle = bus_to_baddr(bus, __pa(ret)); +#ifdef CONFIG_NONCOHERENT_IO dma_cache_wback_inv((unsigned long) ret, size); - ret = KSEG1ADDR(ret); + ret = UNCAC_ADDR(ret); #endif - *dma_handle = virt_to_bus(ret); } return ret; @@ -42,8 +48,11 @@ void pci_free_consistent(struct pci_dev *hwdev, size_t size, { unsigned long addr = (unsigned long) vaddr; -#ifndef CONFIG_COHERENT_IO - addr = KSEG0ADDR(addr); +#ifdef CONFIG_NONCOHERENT_IO + addr = CAC_ADDR(addr); #endif free_pages(addr, get_order(size)); } + +EXPORT_SYMBOL(pci_alloc_consistent); +EXPORT_SYMBOL(pci_free_consistent); diff --git a/arch/mips/kernel/pci.c b/arch/mips/kernel/pci.c deleted file mode 100644 index b7b72fd1a267..000000000000 --- a/arch/mips/kernel/pci.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2001 MontaVista Software Inc. - * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net - * - * Modified to be mips generic, ppopov@mvista.com - * arch/mips/kernel/pci.c - * Common MIPS PCI routines. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -/* - * This file contains common PCI routines meant to be shared for - * all MIPS machines. - * - * Strategies: - * - * . 
We rely on pci_auto.c file to assign PCI resources (MEM and IO) - * TODO: this should be optional for some machines where they do have - * a real "pcibios" that does resource assignment. - * - * . We then use pci_scan_bus() to "discover" all the resources for - * later use by Linux. - * - * . We finally reply on a board supplied function, pcibios_fixup_irq(), to - * to assign the interrupts. We may use setup-irq.c under drivers/pci - * later. - * - * . Specifically, we will *NOT* use pci_assign_unassigned_resources(), - * because we assume all PCI devices should have the resources correctly - * assigned and recorded. - * - * Limitations: - * - * . We "collapse" all IO and MEM spaces in sub-buses under a top-level bus - * into a contiguous range. - * - * . In the case of Memory space, the rnage is 1:1 mapping with CPU physical - * address space. - * - * . In the case of IO space, it starts from 0, and the beginning address - * is mapped to KSEG0ADDR(mips_io_port) in the CPU physical address. - * - * . These are the current MIPS limitations (by ioremap, etc). In the - * future, we may remove them. - * - * Credits: - * Most of the code are derived from the pci routines from PPC and Alpha, - * which were mostly writtne by - * Cort Dougan, cort@fsmlabs.com - * Matt Porter, mporter@mvista.com - * Dave Rusling david.rusling@reo.mts.dec.com - * David Mosberger davidm@cs.arizona.edu - */ -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/pci.h> - -#include <asm/pci_channel.h> - -extern void pcibios_fixup(void); -extern void pcibios_fixup_irqs(void); - -struct pci_fixup pcibios_fixups[] = { - { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources }, - { 0 } -}; - -extern int pciauto_assign_resources(int busno, struct pci_channel * hose); - -void __init pcibios_init(void) -{ - struct pci_channel *p; - struct pci_bus *bus; - int busno; - -#ifdef CONFIG_PCI_AUTO - /* assign resources */ - busno=0; - for (p= mips_pci_channels; p->pci_ops != NULL; p++) { - busno = pciauto_assign_resources(busno, p) + 1; - } -#endif - - /* scan the buses */ - busno = 0; - for (p= mips_pci_channels; p->pci_ops != NULL; p++) { - bus = pci_scan_bus(busno, p->pci_ops, p); - busno = bus->subordinate+1; - } - - /* machine dependent fixups */ - pcibios_fixup(); - /* fixup irqs (board specific routines) */ - pcibios_fixup_irqs(); -} - -int pcibios_enable_device(struct pci_dev *dev) -{ - /* pciauto_assign_resources() will enable all devices found */ - return 0; -} - -unsigned long __init pci_bridge_check_io(struct pci_dev *bridge) -{ - u16 io; - - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) - return IORESOURCE_IO; - printk(KERN_WARNING "PCI: bridge %s does not support I/O forwarding!\n", - bridge->name); - return 0; -} - -void __init pcibios_fixup_bus(struct pci_bus *bus) -{ - /* Propogate hose info into the subordinate devices. */ - - struct pci_channel *hose = bus->sysdata; - struct pci_dev *dev = bus->self; - - if (!dev) { - /* Root bus */ - bus->resource[0] = hose->io_resource; - bus->resource[1] = hose->mem_resource; - } else { - /* This is a bridge. 
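For context on the pci-dma.c hunk further up: on 32-bit MIPS, KSEG0 (cached) and KSEG1 (uncached) are fixed 512 MB windows onto the same physical memory, so converting a buffer between its cached and uncached views (what KSEG1ADDR()/UNCAC_ADDR() and KSEG0ADDR()/CAC_ADDR() do) is pure address arithmetic, not a page-table operation. A sketch assuming the classic KSEG layout:

#define DEMO_KSEG0_BASE	0x80000000UL	/* cached window */
#define DEMO_KSEG1_BASE	0xa0000000UL	/* uncached window */
#define DEMO_PHYS_MASK	0x1fffffffUL	/* 512 MB of physical address */

static unsigned long uncached_alias(unsigned long kseg0_vaddr)
{
	return (kseg0_vaddr & DEMO_PHYS_MASK) | DEMO_KSEG1_BASE;
}

static unsigned long cached_alias(unsigned long kseg1_vaddr)
{
	return (kseg1_vaddr & DEMO_PHYS_MASK) | DEMO_KSEG0_BASE;
}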
Do not care how it's initialized, - just link its resources to the bus ones */ - int i; - - for(i=0; i<3; i++) { - bus->resource[i] = - &dev->resource[PCI_BRIDGE_RESOURCES+i]; - bus->resource[i]->name = bus->name; - } - bus->resource[0]->flags |= pci_bridge_check_io(dev); - bus->resource[1]->flags |= IORESOURCE_MEM; - /* For now, propagate hose limits to the bus; - we'll adjust them later. */ - bus->resource[0]->end = hose->io_resource->end; - bus->resource[1]->end = hose->mem_resource->end; - /* Turn off downstream PF memory address range by default */ - bus->resource[2]->start = 1024*1024; - bus->resource[2]->end = bus->resource[2]->start - 1; - } -} - -char *pcibios_setup(char *str) -{ - return str; -} - -void -pcibios_align_resource(void *data, struct resource *res, - unsigned long size, unsigned long align) -{ - /* this should not be called */ -} diff --git a/arch/mips/kernel/pci_auto.c b/arch/mips/kernel/pci_auto.c deleted file mode 100644 index af9c587a7ad3..000000000000 --- a/arch/mips/kernel/pci_auto.c +++ /dev/null @@ -1,323 +0,0 @@ -/* - * PCI autoconfiguration library - * - * Author: Matt Porter <mporter@mvista.com> - * - * Copyright 2000, 2001 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -/* - * Modified for MIPS by Jun Sun, jsun@mvista.com - * - * . Simplify the interface between pci_auto and the rest: a single function. - * . Assign resources from low address to upper address. - * . change most int to u32. - * - * Further modified to include it as mips generic code, ppopov@mvista.com. - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/pci.h> - -#include <asm/pci_channel.h> - -#define DEBUG -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif - -/* These are used for config access before all the PCI probing has been done. 
*/ -int early_read_config_byte(struct pci_channel *hose, int bus, int dev_fn, int where, u8 *val); -int early_read_config_word(struct pci_channel *hose, int bus, int dev_fn, int where, u16 *val); -int early_read_config_dword(struct pci_channel *hose, int bus, int dev_fn, int where, u32 *val); -int early_write_config_byte(struct pci_channel *hose, int bus, int dev_fn, int where, u8 val); -int early_write_config_word(struct pci_channel *hose, int bus, int dev_fn, int where, u16 val); -int early_write_config_dword(struct pci_channel *hose, int bus, int dev_fn, int where, u32 val); - -static u32 pciauto_lower_iospc; -static u32 pciauto_upper_iospc; - -static u32 pciauto_lower_memspc; -static u32 pciauto_upper_memspc; - -void __init -pciauto_setup_bars(struct pci_channel *hose, - int current_bus, - int pci_devfn) -{ - u32 bar_response, bar_size, bar_value; - u32 bar, addr_mask, bar_nr = 0; - u32 * upper_limit; - u32 * lower_limit; - int found_mem64 = 0; - - DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", - current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) ); - - for (bar = PCI_BASE_ADDRESS_0; bar <= PCI_BASE_ADDRESS_5; bar+=4) { - /* Tickle the BAR and get the response */ - early_write_config_dword(hose, - current_bus, - pci_devfn, - bar, - 0xffffffff); - early_read_config_dword(hose, - current_bus, - pci_devfn, - bar, - &bar_response); - - /* If BAR is not implemented go to the next BAR */ - if (!bar_response) - continue; - - /* Check the BAR type and set our address mask */ - if (bar_response & PCI_BASE_ADDRESS_SPACE) { - addr_mask = PCI_BASE_ADDRESS_IO_MASK; - upper_limit = &pciauto_upper_iospc; - lower_limit = &pciauto_lower_iospc; - DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr); - } else { - if ((bar_response & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == - PCI_BASE_ADDRESS_MEM_TYPE_64) - found_mem64 = 1; - - addr_mask = PCI_BASE_ADDRESS_MEM_MASK; - upper_limit = &pciauto_upper_memspc; - lower_limit = &pciauto_lower_memspc; - DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr); - } - - /* Calculate requested size */ - bar_size = ~(bar_response & addr_mask) + 1; - - /* Allocate a base address */ - bar_value = ((*lower_limit - 1) & ~(bar_size - 1)) + bar_size; - - /* Write it out and update our limit */ - early_write_config_dword(hose, current_bus, pci_devfn, - bar, bar_value); - - *lower_limit = bar_value + bar_size; - - /* - * If we are a 64-bit decoder then increment to the - * upper 32 bits of the bar and force it to locate - * in the lower 4GB of memory. 
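pciauto_setup_bars() above uses the standard BAR sizing handshake: write all-ones, read the value back, mask off the type bits, and the two's complement of what remains is the decoded size. The decode step in isolation, with the architected PCI mask values spelled out:

#include <stdint.h>

static uint32_t bar_size(uint32_t bar_response, int is_io)
{
	uint32_t addr_mask = is_io
		? 0xfffffffcu	/* PCI_BASE_ADDRESS_IO_MASK  */
		: 0xfffffff0u;	/* PCI_BASE_ADDRESS_MEM_MASK */

	return ~(bar_response & addr_mask) + 1;
}

A readback of 0xffff0000 for a memory BAR, for example, decodes to a 64 KB region.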
- */ - if (found_mem64) { - bar += 4; - early_write_config_dword(hose, - current_bus, - pci_devfn, - bar, - 0x00000000); - } - - bar_nr++; - - DBG("size=0x%x, address=0x%x\n", - bar_size, bar_value); - } - -} - -void __init -pciauto_prescan_setup_bridge(struct pci_channel *hose, - int current_bus, - int pci_devfn, - int sub_bus) -{ - int cmdstat; - - /* Configure bus number registers */ - early_write_config_byte(hose, current_bus, pci_devfn, - PCI_PRIMARY_BUS, current_bus); - early_write_config_byte(hose, current_bus, pci_devfn, - PCI_SECONDARY_BUS, sub_bus + 1); - early_write_config_byte(hose, current_bus, pci_devfn, - PCI_SUBORDINATE_BUS, 0xff); - - /* Round memory allocator to 1MB boundary */ - pciauto_upper_memspc &= ~(0x100000 - 1); - - /* Round I/O allocator to 4KB boundary */ - pciauto_upper_iospc &= ~(0x1000 - 1); - - /* Set up memory and I/O filter limits, assume 32-bit I/O space */ - early_write_config_word(hose, current_bus, pci_devfn, PCI_MEMORY_LIMIT, - ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16); - early_write_config_byte(hose, current_bus, pci_devfn, PCI_IO_LIMIT, - ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8); - early_write_config_word(hose, current_bus, pci_devfn, - PCI_IO_LIMIT_UPPER16, - ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16); - - /* We don't support prefetchable memory for now, so disable */ - early_write_config_word(hose, current_bus, pci_devfn, - PCI_PREF_MEMORY_BASE, 0x1000); - early_write_config_word(hose, current_bus, pci_devfn, - PCI_PREF_MEMORY_LIMIT, 0x1000); - - /* Enable memory and I/O accesses, enable bus master */ - early_read_config_dword(hose, current_bus, pci_devfn, PCI_COMMAND, - &cmdstat); - early_write_config_dword(hose, current_bus, pci_devfn, PCI_COMMAND, - cmdstat | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER); -} - -void __init -pciauto_postscan_setup_bridge(struct pci_channel *hose, - int current_bus, - int pci_devfn, - int sub_bus) -{ - /* Configure bus number registers */ - early_write_config_byte(hose, current_bus, pci_devfn, - PCI_SUBORDINATE_BUS, sub_bus); - - /* Round memory allocator to 1MB boundary */ - pciauto_upper_memspc &= ~(0x100000 - 1); - early_write_config_word(hose, current_bus, pci_devfn, PCI_MEMORY_BASE, - pciauto_upper_memspc >> 16); - - /* Round I/O allocator to 4KB boundary */ - pciauto_upper_iospc &= ~(0x1000 - 1); - early_write_config_byte(hose, current_bus, pci_devfn, PCI_IO_BASE, - (pciauto_upper_iospc & 0x0000f000) >> 8); - early_write_config_word(hose, current_bus, pci_devfn, - PCI_IO_BASE_UPPER16, pciauto_upper_iospc >> 16); -} - -#define PCIAUTO_IDE_MODE_MASK 0x05 - -int __init -pciauto_bus_scan(struct pci_channel *hose, int current_bus) -{ - int sub_bus; - u32 pci_devfn, pci_class, cmdstat, found_multi=0; - unsigned short vid; - unsigned char header_type; - int devfn_start = 0; - int devfn_stop = 0xff; - - sub_bus = current_bus; - - if (hose->first_devfn) - devfn_start = hose->first_devfn; - if (hose->last_devfn) - devfn_stop = hose->last_devfn; - - for (pci_devfn=devfn_start; pci_devfn<devfn_stop; pci_devfn++) { - - if (PCI_FUNC(pci_devfn) && !found_multi) - continue; - - early_read_config_byte(hose, current_bus, pci_devfn, - PCI_HEADER_TYPE, &header_type); - - if (!PCI_FUNC(pci_devfn)) - found_multi = header_type & 0x80; - - early_read_config_word(hose, current_bus, pci_devfn, - PCI_VENDOR_ID, &vid); - - if (vid == 0xffff) continue; - - early_read_config_dword(hose, current_bus, pci_devfn, - PCI_CLASS_REVISION, &pci_class); - if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) { - DBG("PCI 
Autoconfig: Found P2P bridge, device %d\n", PCI_SLOT(pci_devfn)); - pciauto_prescan_setup_bridge(hose, current_bus, - pci_devfn, sub_bus); - sub_bus = pciauto_bus_scan(hose, sub_bus+1); - pciauto_postscan_setup_bridge(hose, current_bus, - pci_devfn, sub_bus); - - } else if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) { - - unsigned char prg_iface; - - early_read_config_byte(hose, current_bus, pci_devfn, - PCI_CLASS_PROG, &prg_iface); - if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) { - DBG("PCI Autoconfig: Skipping legacy mode IDE controller\n"); - continue; - } - } - - /* - * Found a peripheral, enable some standard - * settings - */ - early_read_config_dword(hose, current_bus, pci_devfn, - PCI_COMMAND, &cmdstat); - early_write_config_dword(hose, current_bus, pci_devfn, - PCI_COMMAND, cmdstat | PCI_COMMAND_IO | - PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER); - early_write_config_byte(hose, current_bus, pci_devfn, - PCI_LATENCY_TIMER, 0x80); - - /* Allocate PCI I/O and/or memory space */ - pciauto_setup_bars(hose, current_bus, pci_devfn); - } - return sub_bus; -} - -int __init -pciauto_assign_resources(int busno, struct pci_channel *hose) -{ - /* setup resource limits */ - pciauto_lower_iospc = hose->io_resource->start; - pciauto_upper_iospc = hose->io_resource->end + 1; - pciauto_lower_memspc = hose->mem_resource->start; - pciauto_upper_memspc = hose->mem_resource->end + 1; - - return pciauto_bus_scan(hose, busno); -} - - -/* - * These functions are used early on before PCI scanning is done - * and all of the pci_dev and pci_bus structures have been created. - */ -static struct pci_dev *fake_pci_dev(struct pci_channel *hose, int busnr, - int devfn) -{ - static struct pci_dev dev; - static struct pci_bus bus; - - dev.bus = &bus; - dev.sysdata = hose; - dev.devfn = devfn; - bus.number = busnr; - bus.ops = hose->pci_ops; - - return &dev; -} - -#define EARLY_PCI_OP(rw, size, type) \ -int early_##rw##_config_##size(struct pci_channel *hose, int bus, \ - int devfn, int offset, type value) \ -{ \ - return pci_##rw##_config_##size(fake_pci_dev(hose, bus, devfn), \ - offset, value); \ -} - -EARLY_PCI_OP(read, byte, u8 *) -EARLY_PCI_OP(read, word, u16 *) -EARLY_PCI_OP(read, dword, u32 *) -EARLY_PCI_OP(write, byte, u8) -EARLY_PCI_OP(write, word, u16) -EARLY_PCI_OP(write, dword, u32) diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index bb7f399d9cf8..6d1d5d8baa03 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -8,102 +8,136 @@ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/seq_file.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/watch.h> -extern unsigned long unaligned_instructions; unsigned int vced_count, vcei_count; -#ifndef CONFIG_CPU_HAS_LLSC -unsigned long ll_ops, sc_ops; -#endif -/* - * BUFFER is PAGE_SIZE bytes long. - * - * Currently /proc/cpuinfo is being abused to print data about the - * number of date/instruction cacheflushes. 
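The EARLY_PCI_OP() generator that closes pci_auto.c above is plain ## token pasting: one macro expansion per accessor, each wrapping the ordinary pci_*_config_*() call around a fake pci_dev. The pasting mechanism in miniature, as a hypothetical standalone demo rather than the kernel macro:

#include <stdio.h>

#define DEMO_OP(rw, size, type)                                 \
	int early_##rw##_config_##size(int bus, int devfn,      \
				       int where, type value)   \
	{                                                       \
		(void)value;                                    \
		return printf(#rw "_" #size ": %02x:%02x+%d\n", \
			      bus, devfn, where) < 0;           \
	}

DEMO_OP(read,  byte,  unsigned char *)
DEMO_OP(write, dword, unsigned int)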
- */ -int get_cpuinfo(char *buffer) +static const char *cpu_name[] = { + [CPU_UNKNOWN] "unknown", + [CPU_R2000] "R2000", + [CPU_R3000] "R3000", + [CPU_R3000A] "R3000A", + [CPU_R3041] "R3041", + [CPU_R3051] "R3051", + [CPU_R3052] "R3052", + [CPU_R3081] "R3081", + [CPU_R3081E] "R3081E", + [CPU_R4000PC] "R4000PC", + [CPU_R4000SC] "R4000SC", + [CPU_R4000MC] "R4000MC", + [CPU_R4200] "R4200", + [CPU_R4400PC] "R4400PC", + [CPU_R4400SC] "R4400SC", + [CPU_R4400MC] "R4400MC", + [CPU_R4600] "R4600", + [CPU_R6000] "R6000", + [CPU_R6000A] "R6000A", + [CPU_R8000] "R8000", + [CPU_R10000] "R10000", + [CPU_R4300] "R4300", + [CPU_R4650] "R4650", + [CPU_R4700] "R4700", + [CPU_R5000] "R5000", + [CPU_R5000A] "R5000A", + [CPU_R4640] "R4640", + [CPU_NEVADA] "Nevada", + [CPU_RM7000] "RM7000", + [CPU_R5432] "R5432", + [CPU_4KC] "MIPS 4Kc", + [CPU_5KC] "MIPS 5Kc", + [CPU_R4310] "R4310", + [CPU_SB1] "SiByte SB1", + [CPU_TX3912] "TX3912", + [CPU_TX3922] "TX3922", + [CPU_TX3927] "TX3927", + [CPU_AU1000] "Au1000", + [CPU_AU1500] "Au1500", + [CPU_4KEC] "MIPS 4KEc", + [CPU_4KSC] "MIPS 4KSc", + [CPU_VR41XX] "NEC Vr41xx", + [CPU_R5500] "R5500", + [CPU_TX49XX] "TX49xx", + [CPU_20KC] "MIPS 20Kc", + [CPU_VR4111] "NEC VR4111", + [CPU_VR4121] "NEC VR4121", + [CPU_VR4122] "NEC VR4122", + [CPU_VR4131] "NEC VR4131", + [CPU_VR4181] "NEC VR4181", + [CPU_VR4181A] "NEC VR4181A", + [CPU_SR71000] "Sandcraft SR71000" +}; + + +static int show_cpuinfo(struct seq_file *m, void *v) { + unsigned int version = current_cpu_data.processor_id; + unsigned int fp_vers = current_cpu_data.fpu_id; + unsigned long n = (unsigned long) v - 1; char fmt [64]; - const char *cpu_name[] = CPU_NAMES; - const char *mach_group_names[] = GROUP_NAMES; - const char *mach_unknown_names[] = GROUP_UNKNOWN_NAMES; - const char *mach_jazz_names[] = GROUP_JAZZ_NAMES; - const char *mach_dec_names[] = GROUP_DEC_NAMES; - const char *mach_arc_names[] = GROUP_ARC_NAMES; - const char *mach_sni_rm_names[] = GROUP_SNI_RM_NAMES; - const char *mach_acn_names[] = GROUP_ACN_NAMES; - const char *mach_sgi_names[] = GROUP_SGI_NAMES; - const char *mach_cobalt_names[] = GROUP_COBALT_NAMES; - const char *mach_nec_ddb_names[] = GROUP_NEC_DDB_NAMES; - const char *mach_baget_names[] = GROUP_BAGET_NAMES; - const char *mach_cosine_names[] = GROUP_COSINE_NAMES; - const char *mach_galileo_names[] = GROUP_GALILEO_NAMES; - const char *mach_momenco_names[] = GROUP_MOMENCO_NAMES; - const char *mach_ite_names[] = GROUP_ITE_NAMES; - const char *mach_philips_names[] = GROUP_PHILIPS_NAMES; - const char *mach_globespan_names[] = GROUP_GLOBESPAN_NAMES; - const char *mach_sibyte_names[] = GROUP_SIBYTE_NAMES; - const char *mach_toshiba_names[] = GROUP_TOSHIBA_NAMES; - const char *mach_alchemy_names[] = GROUP_ALCHEMY_NAMES; - const char **mach_group_to_name[] = { mach_unknown_names, - mach_jazz_names, mach_dec_names, mach_arc_names, - mach_sni_rm_names, mach_acn_names, mach_sgi_names, - mach_cobalt_names, mach_nec_ddb_names, mach_baget_names, - mach_cosine_names, mach_galileo_names, mach_momenco_names, - mach_ite_names, mach_philips_names, mach_globespan_names, - mach_sibyte_names, mach_toshiba_names, mach_alchemy_names}; - unsigned int version = read_32bit_cp0_register(CP0_PRID); - int len; - - len = sprintf(buffer, "cpu\t\t\t: MIPS\n"); - len += sprintf(buffer + len, "cpu model\t\t: %s V%d.%d\n", - cpu_name[mips_cpu.cputype <= CPU_LAST ? 
- mips_cpu.cputype : CPU_UNKNOWN], - (version >> 4) & 0x0f, version & 0x0f); - len += sprintf(buffer + len, "system type\t\t: %s %s\n", - mach_group_names[mips_machgroup], - mach_group_to_name[mips_machgroup][mips_machtype]); - len += sprintf(buffer + len, "BogoMIPS\t\t: %lu.%02lu\n", - loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ)) % 100); -#if defined (__MIPSEB__) - len += sprintf(buffer + len, "byteorder\t\t: big endian\n"); -#endif -#if defined (__MIPSEL__) - len += sprintf(buffer + len, "byteorder\t\t: little endian\n"); +#ifdef CONFIG_SMP + if (!CPUMASK_TSTB(cpu_online_map, n)) + return 0; #endif - len += sprintf(buffer + len, "unaligned accesses\t: %lu\n", - unaligned_instructions); - len += sprintf(buffer + len, "wait instruction\t: %s\n", - cpu_wait ? "yes" : "no"); - len += sprintf(buffer + len, "microsecond timers\t: %s\n", - (mips_cpu.options & MIPS_CPU_COUNTER) ? "yes" : "no"); - len += sprintf(buffer + len, "extra interrupt vector\t: %s\n", - (mips_cpu.options & MIPS_CPU_DIVEC) ? "yes" : "no"); - len += sprintf(buffer + len, "hardware watchpoint\t: %s\n", - watch_available ? "yes" : "no"); + + /* + * For the first processor also print the system type + */ + if (n == 0) + seq_printf(m, "system type\t\t: %s\n", get_system_type()); + + seq_printf(m, "processor\t\t: %ld\n", n); + sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", + cpu_has_fpu ? " FPU V%d.%d" : ""); + seq_printf(m, fmt, cpu_name[current_cpu_data.cputype <= CPU_LAST ? + current_cpu_data.cputype : CPU_UNKNOWN], + (version >> 4) & 0x0f, version & 0x0f, + (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); + seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n", + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); + seq_printf(m, "microsecond timers\t: %s\n", + cpu_has_counter ? "yes" : "no"); + seq_printf(m, "tlb_entries\t\t: %d\n", current_cpu_data.tlbsize); + seq_printf(m, "extra interrupt vector\t: %s\n", + cpu_has_divec ? "yes" : "no"); + seq_printf(m, "hardware watchpoint\t: %s\n", + cpu_has_watch ? "yes" : "no"); sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", - (mips_cpu.options & MIPS_CPU_VCE) ? "%d" : "not available"); - len += sprintf(buffer + len, fmt, 'D', vced_count); - len += sprintf(buffer + len, fmt, 'I', vcei_count); + cpu_has_vce ? "%d" : "not available"); + seq_printf(m, fmt, 'D', vced_count); + seq_printf(m, fmt, 'I', vcei_count); -#ifndef CONFIG_CPU_HAS_LLSC - len += sprintf(buffer + len, "ll emulations\t\t: %lu\n", - ll_ops); - len += sprintf(buffer + len, "sc emulations\t\t: %lu\n", - sc_ops); -#endif - return len; + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + unsigned long i = *pos; + + return i < NR_CPUS ? (void *) (i + 1) : NULL; } -void init_irq_proc(void) +static void *c_next(struct seq_file *m, void *v, loff_t *pos) { - /* Nothing, for now. */ + ++*pos; + return c_start(m, pos); } + +static void c_stop(struct seq_file *m, void *v) +{ +} + +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 2511f9d4f630..9720809e9978 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -6,86 +6,95 @@ * Copyright (C) 1994 - 2000 by Ralf Baechle and others. * Copyright (C) 1999 Silicon Graphics, Inc. 
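One subtlety in the seq_file iterator above: seq_file treats a NULL iterator value as end-of-sequence, so CPU 0 cannot travel as (void *)0. c_start() therefore hands out cpu + 1 as the cookie, and show_cpuinfo() subtracts the bias back off with "n = (unsigned long) v - 1". The idiom in two lines:

static void *cpu_to_cookie(unsigned long cpu)  { return (void *)(cpu + 1); }
static unsigned long cookie_to_cpu(void *v)    { return (unsigned long)v - 1; }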
*/ -#include <linux/config.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> -#include <linux/ptrace.h> +#include <linux/personality.h> #include <linux/slab.h> #include <linux/mman.h> #include <linux/sys.h> #include <linux/user.h> #include <linux/a.out.h> +#include <linux/init.h> +#include <linux/completion.h> #include <asm/bootinfo.h> #include <asm/cpu.h> +#include <asm/fpu.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/mipsregs.h> #include <asm/processor.h> -#include <asm/stackframe.h> +#include <asm/ptrace.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/elf.h> #include <asm/isadep.h> +#include <asm/inst.h> -void cpu_idle(void) +/* + * We use this if we don't have any better idle routine.. + * (This to kill: kernel/platform.c. + */ +void default_idle (void) +{ +} + +/* + * The idle thread. There's no useful work to be done, so just try to conserve + * power and have a low exit latency (ie sit in a loop waiting for somebody to + * say that they'd like to reschedule) + */ +ATTRIB_NORET void cpu_idle(void) { /* endless idle loop with no priority at all */ - current->nice = 20; - init_idle(); while (1) { while (!need_resched()) if (cpu_wait) (*cpu_wait)(); schedule(); - check_pgt_cache(); } } -struct task_struct *last_task_used_math = NULL; - asmlinkage void ret_from_fork(void); +void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) +{ + regs->cp0_status &= ~(ST0_CU0|ST0_KSU|ST0_CU1); + regs->cp0_status |= KU_USER; + current->used_math = 0; + loose_fpu(); + regs->cp0_epc = pc; + regs->regs[29] = sp; + current_thread_info()->addr_limit = USER_DS; +} + void exit_thread(void) { - /* Forget lazy fpu state */ - if (last_task_used_math == current) { - set_cp0_status(ST0_CU1); - __asm__ __volatile__("cfc1\t$0,$31"); - last_task_used_math = NULL; - } } void flush_thread(void) { - /* Forget lazy fpu state */ - if (last_task_used_math == current) { - set_cp0_status(ST0_CU1); - __asm__ __volatile__("cfc1\t$0,$31"); - last_task_used_math = NULL; - } } int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct * p, struct pt_regs * regs) { + struct thread_info *ti = p->thread_info; struct pt_regs * childregs; long childksp; - extern void save_fp(void*); - childksp = (unsigned long)p + KERNEL_STACK_SIZE - 32; + childksp = (unsigned long)ti + KERNEL_STACK_SIZE - 32; + + if (is_fpu_owner()) { + save_fp(p); + } - if (last_task_used_math == current) - if (mips_cpu.options & MIPS_CPU_FPU) { - set_cp0_status(ST0_CU1); - save_fp(p); - } /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; *childregs = *regs; @@ -101,12 +110,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, regs->regs[3] = 0; } if (childregs->cp0_status & ST0_CU0) { - childregs->regs[28] = (unsigned long) p; + childregs->regs[28] = (unsigned long) ti; childregs->regs[29] = childksp; - p->thread.current_ds = KERNEL_DS; + ti->addr_limit = KERNEL_DS; } else { childregs->regs[29] = usp; - p->thread.current_ds = USER_DS; + ti->addr_limit = USER_DS; } p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; @@ -115,9 +124,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu. 
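copy_thread() above implements lazy FPU handover through the coprocessor-usable bits of the status register: clearing ST0_CU1 in the child's saved cp0_status makes its first floating-point instruction trap with a coprocessor-unusable exception, and only then is FPU state given to the new owner. The bit manipulation in isolation, with the architected MIPS status-register values:

#define DEMO_ST0_CU1	0x20000000	/* coprocessor 1 (FPU) usable */
#define DEMO_ST0_CU2	0x40000000	/* coprocessor 2 usable */

static unsigned long child_cp0_status(unsigned long parent_status)
{
	/* New task starts without FPU access; its first FP use will trap. */
	return parent_status & ~(DEMO_ST0_CU2 | DEMO_ST0_CU1);
}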
*/ - p->thread.cp0_status = read_32bit_cp0_register(CP0_STATUS) & + p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1|KU_MASK); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); + p->set_child_tid = p->clear_child_tid = NULL; return 0; } @@ -125,29 +135,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, /* Fill in the fpu structure for a core dump.. */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) { - /* We actually store the FPU info in the task->thread - * area. - */ - if(regs->cp0_status & ST0_CU1) { - memcpy(r, ¤t->thread.fpu, sizeof(current->thread.fpu)); - return 1; - } - return 0; /* Task didn't use the fpu at all. */ -} - -/* Fill in the user structure for a core dump.. */ -void dump_thread(struct pt_regs *regs, struct user *dump) -{ - dump->magic = CMAGIC; - dump->start_code = current->mm->start_code; - dump->start_data = current->mm->start_data; - dump->start_stack = regs->regs[29] & ~(PAGE_SIZE - 1); - dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT; - dump->u_dsize = (current->mm->brk + (PAGE_SIZE - 1) - dump->start_data) >> PAGE_SHIFT; - dump->u_ssize = - (current->mm->start_stack - dump->start_stack + PAGE_SIZE - 1) >> PAGE_SHIFT; - memcpy(&dump->regs[0], regs, sizeof(struct pt_regs)); - memcpy(&dump->regs[EF_SIZE/4], ¤t->thread.fpu, sizeof(current->thread.fpu)); + memcpy(r, ¤t->thread.fpu, sizeof(current->thread.fpu)); + return 1; } /* @@ -158,35 +147,107 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) long retval; __asm__ __volatile__( - ".set noreorder \n" - " move $6,$sp \n" - " move $4,%5 \n" - " li $2,%1 \n" - " syscall \n" - " beq $6,$sp,1f \n" - " subu $sp,32 \n" /* delay slot */ - " jalr %4 \n" - " move $4,%3 \n" /* delay slot */ - " move $4,$2 \n" - " li $2,%2 \n" - " syscall \n" - "1: addiu $sp,32 \n" - " move %0,$2 \n" - ".set reorder" - :"=r" (retval) - :"i" (__NR_clone), "i" (__NR_exit), - "r" (arg), "r" (fn), - "r" (flags | CLONE_VM | CLONE_UNTRACED) + " .set noreorder \n" + " move $6, $sp \n" + " move $4, %5 \n" + " li $2, %1 \n" + " syscall \n" + " beq $6, $sp, 1f \n" + " subu $sp, 32 \n" + " jalr %4 \n" + " move $4, %3 \n" + " move $4, $2 \n" + " li $2, %2 \n" + " syscall \n" + "1: addiu $sp, 32 \n" + " move %0, $2 \n" + " .set reorder" + : "=r" (retval) + : "i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn), + "r" (flags | CLONE_VM | CLONE_UNTRACED) /* * The called subroutine might have destroyed any of the * at, result, argument or temporary registers ... */ - :"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", - "$9","$10","$11","$12","$13","$14","$15","$24","$25"); + : "$2", "$3", "$4", "$5", "$6", "$7", "$8", + "$9","$10","$11","$12","$13","$14","$15","$24","$25", "$31"); return retval; } +struct mips_frame_info { + int frame_offset; + int pc_offset; +}; +static struct mips_frame_info schedule_frame; +static struct mips_frame_info schedule_timeout_frame; +static struct mips_frame_info sleep_on_frame; +static struct mips_frame_info sleep_on_timeout_frame; +static struct mips_frame_info wait_for_completion_frame; +static int mips_frame_info_initialized; +static int __init get_frame_info(struct mips_frame_info *info, void *func) +{ + int i; + union mips_instruction *ip = (union mips_instruction *)func; + info->pc_offset = -1; + info->frame_offset = -1; + for (i = 0; i < 128; i++, ip++) { + /* if jal, jalr, jr, stop. 
*/ + if (ip->j_format.opcode == jal_op || + (ip->r_format.opcode == spec_op && + (ip->r_format.func == jalr_op || + ip->r_format.func == jr_op))) + break; + if (ip->i_format.opcode == sw_op && + ip->i_format.rs == 29) { + /* sw $ra, offset($sp) */ + if (ip->i_format.rt == 31) { + if (info->pc_offset != -1) + break; + info->pc_offset = + ip->i_format.simmediate / sizeof(long); + } + /* sw $s8, offset($sp) */ + if (ip->i_format.rt == 30) { + if (info->frame_offset != -1) + break; + info->frame_offset = + ip->i_format.simmediate / sizeof(long); + } + } + } + if (info->pc_offset == -1 || info->frame_offset == -1) { + printk("Can't analyze prologue code at %p\n", func); + info->pc_offset = -1; + info->frame_offset = -1; + return -1; + } + + return 0; +} +void __init frame_info_init(void) +{ + mips_frame_info_initialized = + !get_frame_info(&schedule_frame, schedule) && + !get_frame_info(&schedule_timeout_frame, schedule_timeout) && + !get_frame_info(&sleep_on_frame, sleep_on) && + !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) && + !get_frame_info(&wait_for_completion_frame, wait_for_completion); +} + +unsigned long thread_saved_pc(struct thread_struct *t) +{ + extern void ret_from_fork(void); + + /* New born processes are a special case */ + if (t->reg31 == (unsigned long) ret_from_fork) + return t->reg31; + + if (schedule_frame.pc_offset < 0) + return 0; + return ((unsigned long *)t->reg29)[schedule_frame.pc_offset]; +} + /* * These bracket the sleeping functions.. */ @@ -195,7 +256,7 @@ extern void scheduling_functions_end_here(void); #define first_sched ((unsigned long) scheduling_functions_start_here) #define last_sched ((unsigned long) scheduling_functions_end_here) -/* get_wchan - a maintenance nightmare ... */ +/* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */ unsigned long get_wchan(struct task_struct *p) { unsigned long frame, pc; @@ -203,6 +264,8 @@ unsigned long get_wchan(struct task_struct *p) if (!p || p == current || p->state == TASK_RUNNING) return 0; + if (!mips_frame_info_initialized) + return 0; pc = thread_saved_pc(&p->thread); if (pc < first_sched || pc >= last_sched) { return pc; @@ -216,26 +279,33 @@ unsigned long get_wchan(struct task_struct *p) goto schedule_timeout_caller; if (pc >= (unsigned long)interruptible_sleep_on) goto schedule_caller; + if (pc >= (unsigned long)wait_for_completion) + goto schedule_caller; goto schedule_timeout_caller; schedule_caller: - frame = ((unsigned long *)p->thread.reg30)[9]; - pc = ((unsigned long *)frame)[11]; + frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; + if (pc >= (unsigned long) sleep_on) + pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset]; + else + pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset]; return pc; schedule_timeout_caller: - /* Must be schedule_timeout ... */ - pc = ((unsigned long *)p->thread.reg30)[10]; - frame = ((unsigned long *)p->thread.reg30)[9]; + /* + * The schedule_timeout frame + */ + frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; - /* The schedule_timeout frame ... 
*/ - pc = ((unsigned long *)frame)[14]; - frame = ((unsigned long *)frame)[13]; + /* + * frame now points to sleep_on_timeout's frame + */ + pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset]; if (pc >= first_sched && pc < last_sched) { - /* schedule_timeout called by interruptible_sleep_on_timeout */ - pc = ((unsigned long *)frame)[11]; - frame = ((unsigned long *)frame)[10]; + /* schedule_timeout called by [interruptible_]sleep_on_timeout */ + frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset]; + pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset]; } return pc; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 90601f455433..c76a4351d533 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -11,6 +11,7 @@ * Copyright (C) 1999 MIPS Technologies, Inc. */ #include <linux/config.h> +#include <linux/compiler.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> @@ -19,8 +20,8 @@ #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/user.h> +#include <linux/security.h> -#include <asm/fp.h> #include <asm/mipsregs.h> #include <asm/pgtable.h> #include <asm/page.h> @@ -28,6 +29,7 @@ #include <asm/uaccess.h> #include <asm/bootinfo.h> #include <asm/cpu.h> +#include <asm/fpu.h> /* * Called by kernel/ptrace.c when detaching.. @@ -42,27 +44,27 @@ void ptrace_disable(struct task_struct *child) asmlinkage int sys_ptrace(long request, long pid, long addr, long data) { struct task_struct *child; - int res; - extern void save_fp(struct task_struct *); + int ret; - lock_kernel(); #if 0 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n", (int) request, (int) pid, (unsigned long) addr, (unsigned long) data); #endif + lock_kernel(); + ret = -EPERM; if (request == PTRACE_TRACEME) { /* are we already being traced? */ - if (current->ptrace & PT_PTRACED) { - res = -EPERM; + if (current->ptrace & PT_PTRACED) + goto out; + if ((ret = security_ptrace(current->parent, current))) goto out; - } /* set the ptrace bit in the process flags. */ current->ptrace |= PT_PTRACED; - res = 0; + ret = 0; goto out; } - res = -ESRCH; + ret = -ESRCH; read_lock(&tasklist_lock); child = find_task_by_pid(pid); if (child) @@ -71,36 +73,31 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) if (!child) goto out; - res = -EPERM; + ret = -EPERM; if (pid == 1) /* you may not mess with init */ - goto out; + goto out_tsk; if (request == PTRACE_ATTACH) { - res = ptrace_attach(child); - goto out_tsk; - } - res = -ESRCH; - if (!(child->ptrace & PT_PTRACED)) + ret = ptrace_attach(child); goto out_tsk; - if (child->state != TASK_STOPPED) { - if (request != PTRACE_KILL) - goto out_tsk; } - if (child->p_pptr != current) + + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret < 0) goto out_tsk; + switch (request) { - case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { unsigned long tmp; int copied; copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - res = -EIO; + ret = -EIO; if (copied != sizeof(tmp)) break; - res = put_user(tmp,(unsigned long *) data); - - goto out; + ret = put_user(tmp,(unsigned long *) data); + break; } /* Read the word at location addr in the USER area. 
*/ @@ -108,7 +105,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) struct pt_regs *regs; unsigned long tmp; - regs = (struct pt_regs *) ((unsigned long) child + + regs = (struct pt_regs *) ((unsigned long) child->thread_info + KERNEL_STACK_SIZE - 32 - sizeof(struct pt_regs)); tmp = 0; /* Default return value. */ @@ -118,30 +115,12 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) break; case FPR_BASE ... FPR_BASE + 31: if (child->used_math) { - unsigned long long *fregs - = (unsigned long long *) - &child->thread.fpu.hard.fp_regs[0]; - if(!(mips_cpu.options & MIPS_CPU_FPU)) { - fregs = (unsigned long long *) - child->thread.fpu.soft.regs; - } else - if (last_task_used_math == child) { - enable_cp1(); - save_fp(child); - disable_cp1(); - last_task_used_math = NULL; - regs->cp0_status &= ~ST0_CU1; - } + unsigned long long *fregs = get_fpu_regs(child); /* * The odd registers are actually the high * order bits of the values stored in the even * registers - unless we're using r2k_switch.S. */ -#ifdef CONFIG_CPU_R3000 - if (mips_cpu.options & MIPS_CPU_FPU) - tmp = *(unsigned long *)(fregs + addr); - else -#endif if (addr & 1) tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); else @@ -166,7 +145,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) tmp = regs->lo; break; case FPC_CSR: - if (!(mips_cpu.options & MIPS_CPU_FPU)) + if (!cpu_has_fpu) tmp = child->thread.fpu.soft.sr; else tmp = child->thread.fpu.hard.control; @@ -174,34 +153,37 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) case FPC_EIR: { /* implementation / version register */ unsigned int flags; - local_save_flags(flags); - enable_cp1(); + if (!cpu_has_fpu) + break; + + flags = read_c0_status(); + __enable_fpu(); __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); - local_irq_restore(flags); + write_c0_status(flags); break; } default: tmp = 0; - res = -EIO; - goto out; + ret = -EIO; + goto out_tsk; } - res = put_user(tmp, (unsigned long *) data); - goto out; + ret = put_user(tmp, (unsigned long *) data); + break; } case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA: - res = 0; + ret = 0; if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) break; - res = -EIO; - goto out; + ret = -EIO; + break; case PTRACE_POKEUSR: { struct pt_regs *regs; - res = 0; - regs = (struct pt_regs *) ((unsigned long) child + + ret = 0; + regs = (struct pt_regs *) ((unsigned long) child->thread_info + KERNEL_STACK_SIZE - 32 - sizeof(struct pt_regs)); switch (addr) { @@ -210,21 +192,8 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) break; case FPR_BASE ... FPR_BASE + 31: { unsigned long long *fregs; - fregs = (unsigned long long *)&child->thread.fpu.hard.fp_regs[0]; - if (child->used_math) { - if (last_task_used_math == child) { - if(!(mips_cpu.options & MIPS_CPU_FPU)) { - fregs = (unsigned long long *) - child->thread.fpu.soft.regs; - } else { - enable_cp1(); - save_fp(child); - disable_cp1(); - last_task_used_math = NULL; - regs->cp0_status &= ~ST0_CU1; - } - } - } else { + fregs = (unsigned long long *)get_fpu_regs(child); + if (!child->used_math) { /* FP not yet used */ memset(&child->thread.fpu.hard, ~0, sizeof(child->thread.fpu.hard)); @@ -235,11 +204,6 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) * of the values stored in the even registers - unless * we're using r2k_switch.S. 
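For the FPR cases above, get_fpu_regs() hands back the thread's FP save area as 64-bit slots; in 32-bit FP mode an odd single-precision register lives in the high half of its even partner's slot, which is exactly what the fregs[(addr & ~1) - FPR_BASE] indexing undoes. A small standalone C sketch of the same unpacking, with a hypothetical read_fpr() helper and addr already rebased to 0..31:

/* Unpacking of the 32-bit-mode FP register file read by PTRACE_PEEKUSR.
 * Standalone sketch; names are illustrative, not the kernel's. */
#include <stdio.h>
#include <stdint.h>

static uint64_t fregs[32];         /* one 64-bit slot per even register */

static uint32_t read_fpr(int addr) /* addr = 0..31 */
{
    if (addr & 1)                  /* odd reg: high half of the even slot */
        return (uint32_t)(fregs[addr & ~1] >> 32);
    return (uint32_t)fregs[addr];  /* even reg: low half */
}

int main(void)
{
    fregs[0] = 0x1122334455667788ull;  /* the $f1:$f0 pair */
    printf("$f0=%08x $f1=%08x\n",
           (unsigned)read_fpr(0), (unsigned)read_fpr(1));
    return 0;
}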
*/ -#ifdef CONFIG_CPU_R3000 - if (mips_cpu.options & MIPS_CPU_FPU) - *(unsigned long *)(fregs + addr) = data; - else -#endif if (addr & 1) { fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; @@ -259,14 +223,14 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) regs->lo = data; break; case FPC_CSR: - if (!(mips_cpu.options & MIPS_CPU_FPU)) + if (!cpu_has_fpu) child->thread.fpu.soft.sr = data; else child->thread.fpu.hard.control = data; break; default: /* The rest are not allowed. */ - res = -EIO; + ret = -EIO; break; } break; @@ -274,26 +238,28 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ - res = -EIO; + ret = -EIO; if ((unsigned long) data > _NSIG) break; - if (request == PTRACE_SYSCALL) - child->ptrace |= PT_TRACESYS; - else - child->ptrace &= ~PT_TRACESYS; + if (request == PTRACE_SYSCALL) { + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + } + else { + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + } child->exit_code = data; wake_up_process(child); - res = 0; + ret = 0; break; } /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to * exit. */ case PTRACE_KILL: - res = 0; + ret = 0; if (child->state == TASK_ZOMBIE) /* already dead */ break; child->exit_code = SIGKILL; @@ -301,32 +267,40 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) break; case PTRACE_DETACH: /* detach a process that was attached. */ - res = ptrace_detach(child, data); + ret = ptrace_detach(child, data); break; default: - res = ptrace_request(child, request, addr, data); - goto out; + ret = ptrace_request(child, request, addr, data); + break; } out_tsk: - free_task_struct(child); + put_task_struct(child); out: unlock_kernel(); - return res; + return ret; } -asmlinkage void syscall_trace(void) +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage void do_syscall_trace(void) { - if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) - != (PT_PTRACED|PT_TRACESYS)) + if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; + if (!(current->ptrace & PT_PTRACED)) + return; + /* The 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0); + preempt_disable(); current->state = TASK_STOPPED; notify_parent(current, SIGCHLD); schedule(); + preempt_enable(); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the diff --git a/arch/mips/kernel/r2300_misc.S b/arch/mips/kernel/r2300_misc.S deleted file mode 100644 index 8ef3683f983d..000000000000 --- a/arch/mips/kernel/r2300_misc.S +++ /dev/null @@ -1,199 +0,0 @@ -/* $Id: r2300_misc.S,v 1.8 1999/12/08 22:05:10 harald Exp $ - * misc.S: Misc. exception handling code for R3000/R2000. - * - * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse - * - * Multi-CPU abstraction reworking: - * Copyright (C) 1996 David S. 
Miller (dm@engr.sgi.com) - * - * Further modifications to make this work: - * Copyright (c) 1998 Harald Koerfgen - * Copyright (c) 1998, 1999 Gleb Raiko & Vladimir Roganov - */ -#include <asm/asm.h> -#include <asm/current.h> -#include <asm/bootinfo.h> -#include <asm/cachectl.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/processor.h> -#include <asm/regdef.h> -#include <asm/segment.h> -#include <asm/stackframe.h> - - .text - .set mips1 - .set noreorder - -#undef NOTLB_OPTIMIZE /* If you are paranoid, define this. */ - - /* ABUSE of CPP macros 101. */ - - /* After this macro runs, the pte faulted on is - * in register PTE, a ptr into the table in which - * the pte belongs is in PTR. - */ -#define LOAD_PTE(pte, ptr) \ - mfc0 pte, CP0_BADVADDR; \ - lw ptr, current_pgd; \ - srl pte, pte, 22; \ - sll pte, pte, 2; \ - addu ptr, ptr, pte; \ - mfc0 pte, CP0_CONTEXT; \ - lw ptr, (ptr); \ - andi pte, pte, 0xffc; \ - addu ptr, ptr, pte; \ - lw pte, (ptr); \ - nop; - - /* This places the even/odd pte pair in the page - * table at PTR into ENTRYLO0 and ENTRYLO1 using - * TMP as a scratch register. - */ -#define PTE_RELOAD(ptr) \ - lw ptr, (ptr) ; \ - nop ; \ - mtc0 ptr, CP0_ENTRYLO0; \ - nop; - -#define DO_FAULT(write) \ - .set noat; \ - .set macro; \ - SAVE_ALL; \ - mfc0 a2, CP0_BADVADDR; \ - STI; \ - .set at; \ - move a0, sp; \ - jal do_page_fault; \ - li a1, write; \ - j ret_from_sys_call; \ - nop; \ - .set noat; \ - .set nomacro; - - /* Check is PTE is present, if not then jump to LABEL. - * PTR points to the page table where this PTE is located, - * when the macro is done executing PTE will be restored - * with its original value. - */ -#define PTE_PRESENT(pte, ptr, label) \ - andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ - xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ - bnez pte, label; \ - .set push; \ - .set reorder; \ - lw pte, (ptr); \ - .set pop; - - /* Make PTE valid, store result in PTR. */ -#define PTE_MAKEVALID(pte, ptr) \ - ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \ - sw pte, (ptr); - - /* Check if PTE can be written to, if not branch to LABEL. - * Regardless restore PTE with value from PTR when done. - */ -#define PTE_WRITABLE(pte, ptr, label) \ - andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ - xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ - bnez pte, label; \ - .set push; \ - .set reorder; \ - lw pte, (ptr); \ - .set pop; - - - /* Make PTE writable, update software status bits as well, - * then store at PTR. - */ -#define PTE_MAKEWRITE(pte, ptr) \ - ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \ - _PAGE_VALID | _PAGE_DIRTY); \ - sw pte, (ptr); - -/* - * The index register may have the probe fail bit set, - * because we would trap on access kseg2, i.e. without refill. - */ -#define TLB_WRITE(reg) \ - mfc0 reg, CP0_INDEX; \ - nop; \ - bltz reg, 1f; \ - nop; \ - tlbwi; \ - j 2f; \ - nop; \ -1: tlbwr; \ -2: - -#define RET(reg) \ - mfc0 reg, CP0_EPC; \ - nop; \ - jr reg; \ - rfe - - .set noreorder - - .align 5 -NESTED(handle_tlbl, PT_SIZE, sp) - .set noat - -#ifndef NOTLB_OPTIMIZE - /* Test present bit in entry. 
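The LOAD_PTE macro deleted above walks a two-level page table entirely from CP0 state: BadVAddr supplies the page-directory index in its top ten bits, and the hardware-maintained CONTEXT register, whose low bits track the faulting virtual page number shifted left by two, supplies the byte offset of the pte. The same walk in plain C, as a standalone sketch with made-up table sizes rather than kernel code:

/* C rendering of the r2300 LOAD_PTE walk. Hypothetical standalone model. */
#include <stdio.h>
#include <stdint.h>

static uint32_t pte_page[1024];    /* one page-table page */
static uint32_t *pgd[1024];        /* page directory */

static uint32_t load_pte(uint32_t badvaddr, uint32_t context)
{
    uint32_t *pgtable = pgd[badvaddr >> 22];  /* srl 22; sll 2; addu */
    return pgtable[(context & 0xffc) >> 2];   /* andi 0xffc; addu; lw */
}

int main(void)
{
    uint32_t badvaddr = 0x00401234;           /* fault in pgd slot 1 */
    pgd[badvaddr >> 22] = pte_page;
    pte_page[1] = 0xdeadbeef;                 /* pte for VPN 0x401 */
    printf("pte = %08x\n", (unsigned)load_pte(badvaddr, 0x401 << 2));
    return 0;
}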
*/ - LOAD_PTE(k0, k1) - tlbp - PTE_PRESENT(k0, k1, nopage_tlbl) - PTE_MAKEVALID(k0, k1) - PTE_RELOAD(k1) - TLB_WRITE(k0) - RET(k0) -nopage_tlbl: -#endif - - DO_FAULT(0) -END(handle_tlbl) - -NESTED(handle_tlbs, PT_SIZE, sp) - .set noat - -#ifndef NOTLB_OPTIMIZE - LOAD_PTE(k0, k1) - tlbp # find faulting entry - PTE_WRITABLE(k0, k1, nopage_tlbs) - PTE_MAKEWRITE(k0, k1) - PTE_RELOAD(k1) - TLB_WRITE(k0) - RET(k0) -nopage_tlbs: -#endif - - DO_FAULT(1) -END(handle_tlbs) - - .align 5 -NESTED(handle_mod, PT_SIZE, sp) - .set noat -#ifndef NOTLB_OPTIMIZE - LOAD_PTE(k0, k1) - tlbp # find faulting entry - andi k0, k0, _PAGE_WRITE - beqz k0, nowrite_mod - .set push - .set reorder - lw k0, (k1) - .set pop - - /* Present and writable bits set, set accessed and dirty bits. */ - PTE_MAKEWRITE(k0, k1) - - /* Now reload the entry into the tlb. */ - PTE_RELOAD(k1) - tlbwi - RET(k0) -#endif - -nowrite_mod: - DO_FAULT(1) -END(handle_mod) diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index aefdff8645e7..3fa8298329c2 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -10,18 +10,17 @@ * Further modifications to make this work: * Copyright (c) 1998-2000 Harald Koerfgen */ +#include <linux/config.h> #include <asm/asm.h> -#include <asm/bootinfo.h> #include <asm/cachectl.h> -#include <asm/current.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/offset.h> #include <asm/page.h> -#include <asm/pgtable.h> #include <asm/processor.h> #include <asm/regdef.h> #include <asm/stackframe.h> +#include <asm/thread_info.h> #include <asm/asmmacro.h> @@ -29,8 +28,24 @@ .align 5 /* - * task_struct *resume(task_struct *prev, - * task_struct *next) + * Offset to the current process status flags, the first 32 bytes of the + * stack are not used. + */ +#define ST_OFF (KERNEL_STACK_SIZE - 32 - PT_SIZE + PT_STATUS) + +/* + * FPU context is saved iff the process has used it's FPU in the current + * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user + * space STATUS register should be 0, so that a process *always* starts its + * userland with FPU disabled after each context switch. + * + * FPU will be enabled as soon as the process accesses FPU again, through + * do_cpu() trap. + */ + +/* + * task_struct *resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) ) */ LEAF(resume) #ifndef CONFIG_CPU_HAS_LLSC @@ -41,84 +56,81 @@ LEAF(resume) CPU_SAVE_NONSCRATCH(a0) sw ra, THREAD_REG31(a0) + /* + * check if we need to save FPU registers + */ + lw t3, TASK_THREAD_INFO(a0) + lw t0, TI_FLAGS(t3) + li t1, TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 + + and t0, t0, t1 + sw t0, TI_FLAGS(t3) + + /* + * clear saved user stack CU1 bit + */ + lw t0, ST_OFF(t3) + li t1, ~ST0_CU1 + and t0, t0, t1 + sw t0, ST_OFF(t3) + + FPU_SAVE_SINGLE(a0, t0) # clobbers t0 + +1: /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ - move $28, a1 - CPU_RESTORE_NONSCRATCH($28) - addiu t0, $28, KERNEL_STACK_SIZE-32 - sw t0, kernelsp + move $28, a2 + CPU_RESTORE_NONSCRATCH(a1) + + addiu t1, $28, KERNEL_STACK_SIZE-32 + sw t1, kernelsp + mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff00 and t1, a3 - lw a2, THREAD_STATUS($28) + lw a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS - .set noreorder + move v0, a0 jr ra - move v0, a0 - .set reorder END(resume) /* - * Do lazy fpu context switch. 
Saves FPU context to the process in a0 - * and loads the new context of the current process. - */ - -#define ST_OFF (KERNEL_STACK_SIZE - 32 - PT_SIZE + PT_STATUS) - -LEAF(lazy_fpu_switch) - mfc0 t0, CP0_STATUS # enable cp1 - li t3, 0x20000000 - or t0, t3 - mtc0 t0, CP0_STATUS - - .set noreorder - beqz a0, 2f # Save floating point state - nor t3, zero, t3 - .set reorder - lw t1, ST_OFF(a0) # last thread loses fpu - and t1, t3 - sw t1, ST_OFF(a0) - FPU_SAVE_SINGLE(a0, t1) # clobbers t1 - -2: - FPU_RESTORE_SINGLE($28, t0) # clobbers t0 - jr ra - END(lazy_fpu_switch) - -/* * Save a thread's fp context. */ -LEAF(save_fp) +LEAF(_save_fp) FPU_SAVE_SINGLE(a0, t1) # clobbers t1 jr ra - END(save_fp) + END(_save_fp) /* * Restore a thread's fp context. */ -LEAF(restore_fp) +LEAF(_restore_fp) FPU_RESTORE_SINGLE(a0, t1) # clobbers t1 jr ra - END(restore_fp) + END(_restore_fp) /* * Load the FPU with signalling NANS. This bit pattern we're using has * the property that no matter whether considered as single or as double - * precission represents signaling NANS. + * precision represents signaling NANS. * * We initialize fcr31 to rounding to nearest, no exceptions. */ #define FPU_DEFAULT 0x00000000 -LEAF(init_fpu) +LEAF(_init_fpu) mfc0 t0, CP0_STATUS - li t1, 0x20000000 + li t1, ST0_CU1 or t0, t1 mtc0 t0, CP0_STATUS @@ -158,8 +170,6 @@ LEAF(init_fpu) mtc1 t0, $f28 mtc1 t0, $f29 mtc1 t0, $f30 - .set noreorder + mtc1 t0, $f31 jr ra - mtc1 t0, $f31 - .set reorder - END(init_fpu) + END(_init_fpu) diff --git a/arch/mips/kernel/r4k_misc.S b/arch/mips/kernel/r4k_misc.S deleted file mode 100644 index 510b08ca9d16..000000000000 --- a/arch/mips/kernel/r4k_misc.S +++ /dev/null @@ -1,236 +0,0 @@ -/* $Id: r4k_misc.S,v 1.8 1999/10/09 00:00:58 ralf Exp $ - * - * r4k_misc.S: Misc. exception handling code for r4k. - * - * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse - * - * Multi-cpu abstraction and reworking: - * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) - */ -/************************************************************************** - * 14 Nov, 2000. - * Made support for MIPS32 CPUs. - * - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - *************************************************************************/ -#include <asm/asm.h> -#include <asm/current.h> -#include <asm/offset.h> -#include <asm/bootinfo.h> -#include <asm/cachectl.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/processor.h> -#include <asm/regdef.h> -#include <asm/stackframe.h> - -#undef NOTLB_OPTIMIZE /* If you are paranoid, define this. */ - - /* ABUSE of CPP macros 101. */ - - /* After this macro runs, the pte faulted on is - * in register PTE, a ptr into the table in which - * the pte belongs is in PTR. - */ - -#ifdef CONFIG_SMP -#define GET_PGD(scratch, ptr) \ - mfc0 ptr, CP0_CONTEXT; \ - la scratch, current_pgd;\ - srl ptr, 23; \ - sll ptr, 2; \ - addu ptr, scratch, ptr; \ - lw ptr, (ptr); -#else -#define GET_PGD(scratch, ptr) \ - lw ptr, current_pgd; -#endif - - -#define LOAD_PTE(pte, ptr) \ - GET_PGD(pte, ptr) \ - mfc0 pte, CP0_BADVADDR; \ - srl pte, pte, 22; \ - sll pte, pte, 2; \ - addu ptr, ptr, pte; \ - mfc0 pte, CP0_BADVADDR; \ - lw ptr, (ptr); \ - srl pte, pte, 10; \ - and pte, pte, 0xffc; \ - addu ptr, ptr, pte; \ - lw pte, (ptr); - - /* This places the even/odd pte pair in the page - * table at PTR into ENTRYLO0 and ENTRYLO1 using - * TMP as a scratch register. 
- */ -#define PTE_RELOAD(ptr, tmp) \ - ori ptr, ptr, 0x4; \ - xori ptr, ptr, 0x4; \ - lw tmp, 4(ptr); \ - lw ptr, 0(ptr); \ - srl tmp, tmp, 6; \ - mtc0 tmp, CP0_ENTRYLO1; \ - srl ptr, ptr, 6; \ - mtc0 ptr, CP0_ENTRYLO0; - -#define DO_FAULT(write) \ - .set noat; \ - SAVE_ALL; \ - mfc0 a2, CP0_BADVADDR; \ - STI; \ - .set at; \ - move a0, sp; \ - jal do_page_fault; \ - li a1, write; \ - j ret_from_sys_call; \ - nop; \ - .set noat; - - /* Check is PTE is present, if not then jump to LABEL. - * PTR points to the page table where this PTE is located, - * when the macro is done executing PTE will be restored - * with its original value. - */ -#define PTE_PRESENT(pte, ptr, label) \ - andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ - xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ - bnez pte, label; \ - lw pte, (ptr); - - /* Make PTE valid, store result in PTR. */ -#define PTE_MAKEVALID(pte, ptr) \ - ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \ - sw pte, (ptr); - - /* Check if PTE can be written to, if not branch to LABEL. - * Regardless restore PTE with value from PTR when done. - */ -#define PTE_WRITABLE(pte, ptr, label) \ - andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ - xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ - bnez pte, label; \ - lw pte, (ptr); - - /* Make PTE writable, update software status bits as well, - * then store at PTR. - */ -#define PTE_MAKEWRITE(pte, ptr) \ - ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \ - _PAGE_VALID | _PAGE_DIRTY); \ - sw pte, (ptr); - - .set noreorder - -/* - * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: - * 2. A timing hazard exists for the TLBP instruction. - * - * stalling_instruction - * TLBP - * - * The JTLB is being read for the TLBP throughout the stall generated by the - * previous instruction. This is not really correct as the stalling instruction - * can modify the address used to access the JTLB. The failure symptom is that - * the TLBP instruction will use an address created for the stalling instruction - * and not the address held in C0_ENHI and thus report the wrong results. - * - * The software work-around is to not allow the instruction preceding the TLBP - * to stall - make it an NOP or some other instruction guaranteed not to stall. - * - * Errata 2 will not be fixed. This errata is also on the R5000. - * - * As if we MIPS hackers wouldn't know how to nop pipelines happy ... - */ -#define R5K_HAZARD nop - - /* - * Note for many R4k variants tlb probes cannot be executed out - * of the instruction cache else you get bogus results. - */ - .align 5 - NESTED(handle_tlbl, PT_SIZE, sp) - .set noat -invalid_tlbl: -#ifndef NOTLB_OPTIMIZE - /* Test present bit in entry. 
*/ - LOAD_PTE(k0, k1) - R5K_HAZARD - tlbp - PTE_PRESENT(k0, k1, nopage_tlbl) - PTE_MAKEVALID(k0, k1) - PTE_RELOAD(k1, k0) - nop - b 1f - tlbwi -1: - nop - .set mips3 - eret - .set mips0 -#endif - -nopage_tlbl: - DO_FAULT(0) - END(handle_tlbl) - - .align 5 - NESTED(handle_tlbs, PT_SIZE, sp) - .set noat -#ifndef NOTLB_OPTIMIZE - LOAD_PTE(k0, k1) - R5K_HAZARD - tlbp # find faulting entry - PTE_WRITABLE(k0, k1, nopage_tlbs) - PTE_MAKEWRITE(k0, k1) - PTE_RELOAD(k1, k0) - nop - b 1f - tlbwi -1: - nop - .set mips3 - eret - .set mips0 -#endif - -nopage_tlbs: - DO_FAULT(1) - END(handle_tlbs) - - .align 5 - NESTED(handle_mod, PT_SIZE, sp) - .set noat -#ifndef NOTLB_OPTIMIZE - LOAD_PTE(k0, k1) - R5K_HAZARD - tlbp # find faulting entry - andi k0, k0, _PAGE_WRITE - beqz k0, nowrite_mod - lw k0, (k1) - - /* Present and writable bits set, set accessed and dirty bits. */ - PTE_MAKEWRITE(k0, k1) -#if 0 - ori k0, k0, (_PAGE_ACCESSED | _PAGE_DIRTY) - sw k0, (k1) -#endif - - /* Now reload the entry into the tlb. */ - PTE_RELOAD(k1, k0) - nop - b 1f - tlbwi -1: - nop - .set mips3 - eret - .set mips0 -#endif - -nowrite_mod: - DO_FAULT(1) - END(handle_mod) diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 18d5ae71abe3..67609f0710a8 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1994, 1995, 1996, 1998, 1999 by Ralf Baechle + * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002 by Ralf Baechle * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) * Copyright (C) 1994, 1995, 1996, by Andreas Busse * Copyright (C) 1999 Silicon Graphics, Inc. @@ -12,24 +12,35 @@ */ #include <linux/config.h> #include <asm/asm.h> -#include <asm/bootinfo.h> #include <asm/cachectl.h> -#include <asm/current.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/offset.h> #include <asm/page.h> -#include <asm/pgtable.h> +#include <asm/pgtable-bits.h> #include <asm/processor.h> #include <asm/regdef.h> #include <asm/stackframe.h> +#include <asm/thread_info.h> #include <asm/asmmacro.h> +#define ST_OFF (KERNEL_STACK_SIZE - 32 - PT_SIZE + PT_STATUS) + +/* + * FPU context is saved iff the process has used it's FPU in the current + * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user + * space STATUS register should be 0, so that a process *always* starts its + * userland with FPU disabled after each context switch. + * + * FPU will be enabled as soon as the process accesses FPU again, through + * do_cpu() trap. + */ + /* - * task_struct *r4xx0_resume(task_struct *prev, task_struct *next) + * task_struct *r4xx0_resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) */ - .set noreorder .align 5 LEAF(resume) #ifndef CONFIG_CPU_HAS_LLSC @@ -40,79 +51,76 @@ CPU_SAVE_NONSCRATCH(a0) sw ra, THREAD_REG31(a0) + /* + * check if we need to save FPU registers + */ + lw t3, TASK_THREAD_INFO(a0) + lw t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 + + and t0, t0, t1 + sw t0, TI_FLAGS(t3) + + /* + * clear saved user stack CU1 bit + */ + lw t0, ST_OFF(t3) + li t1, ~ST0_CU1 + and t0, t0, t1 + sw t0, ST_OFF(t3) + + FPU_SAVE_DOUBLE(a0, t0) # clobbers t0 + +1: /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. 
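The FPU block added to resume() above is the whole of the new lazy-save policy: write the FP registers back only if TIF_USEDFPU says this time slice actually touched them, clear the flag, and strip CU1 from the STATUS word saved on the kernel stack so the task restarts userland with the FPU disabled. A toy C rendering of that decision; the flag value and all names here are assumptions, not the kernel's:

/* Toy model of the TIF_USEDFPU test performed at context-switch time. */
#include <stdio.h>

#define TIF_USEDFPU  (1u << 5)     /* assumed bit position */
#define ST0_CU1      0x20000000u

struct toy_thread {
    unsigned int ti_flags;         /* thread_info flags */
    unsigned int saved_status;     /* STATUS saved at ST_OFF on the stack */
};

static void fpu_save_double(struct toy_thread *t)
{
    (void)t;                       /* FPU_SAVE_DOUBLE stand-in */
}

static void resume_fpu_part(struct toy_thread *prev)
{
    if (prev->ti_flags & TIF_USEDFPU) {
        prev->ti_flags &= ~TIF_USEDFPU;  /* at most one save per slice */
        prev->saved_status &= ~ST0_CU1;  /* userland restarts FPU-less */
        fpu_save_double(prev);
    }
}

int main(void)
{
    struct toy_thread t = { TIF_USEDFPU, ST0_CU1 };
    resume_fpu_part(&t);
    printf("flags=%#x status=%#x\n", t.ti_flags, t.saved_status);
    return 0;
}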
*/ - move $28, a1 - CPU_RESTORE_NONSCRATCH($28) + move $28, a2 + CPU_RESTORE_NONSCRATCH(a1) + addiu t0, $28, KERNEL_STACK_SIZE-32 -#ifdef CONFIG_SMP +#ifdef CONFIG_SMP mfc0 a3, CP0_CONTEXT la t1, kernelsp srl a3, 23 sll a3, 2 addu t1, a3, t1 - sw t0, (t1) + sw t0, (t1) #else sw t0, kernelsp -#endif +#endif + mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff00 and t1, a3 - lw a2, THREAD_STATUS($28) + lw a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS + move v0, a0 jr ra - move v0, a0 END(resume) /* - * Do lazy fpu context switch. Saves FPU context to the process in a0 - * and loads the new context of the current process. - */ - -#define ST_OFF (KERNEL_STACK_SIZE - 32 - PT_SIZE + PT_STATUS) - -LEAF(lazy_fpu_switch) - mfc0 t0, CP0_STATUS # enable cp1 - li t3, 0x20000000 - or t0, t3 - mtc0 t0, CP0_STATUS - - beqz a0, 2f # Save floating point state - nor t3, zero, t3 - - lw t1, ST_OFF(a0) # last thread loses fpu - and t1, t3 - sw t1, ST_OFF(a0) - - - FPU_SAVE_DOUBLE(a0, t1) # clobbers t1 -2: - - .set reorder - FPU_RESTORE_DOUBLE($28, t0) # clobbers t0 - jr ra - END(lazy_fpu_switch) - -/* * Save a thread's fp context. */ -LEAF(save_fp) +LEAF(_save_fp) FPU_SAVE_DOUBLE(a0, t1) # clobbers t1 jr ra - END(save_fp) + END(_save_fp) /* * Restore a thread's fp context. */ -LEAF(restore_fp) +LEAF(_restore_fp) FPU_RESTORE_DOUBLE(a0, t1) # clobbers t1 jr ra - END(restore_fp) + END(_restore_fp) /* * Load the FPU with signalling NANS. This bit pattern we're using has @@ -124,12 +132,13 @@ LEAF(restore_fp) #define FPU_DEFAULT 0x00000000 -LEAF(init_fpu) +LEAF(_init_fpu) .set mips3 mfc0 t0, CP0_STATUS - li t1, 0x20000000 + li t1, ST0_CU1 or t0, t1 mtc0 t0, CP0_STATUS + FPU_ENABLE_HAZARD li t1, FPU_DEFAULT ctc1 t1, fcr31 @@ -151,9 +160,6 @@ LEAF(init_fpu) dmtc1 t0, $f24 dmtc1 t0, $f26 dmtc1 t0, $f28 - .set noreorder + dmtc1 t0, $f30 jr ra - dmtc1 t0, $f30 - .set reorder - END(init_fpu) - + END(_init_fpu) diff --git a/arch/mips/kernel/scall_o32.S b/arch/mips/kernel/scall_o32.S index 7df9bd8bbbdb..ff27a1bf1eaf 100644 --- a/arch/mips/kernel/scall_o32.S +++ b/arch/mips/kernel/scall_o32.S @@ -3,124 +3,107 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1997, 1998, 1999, 2000 by Ralf Baechle + * Copyright (C) 1997, 1998, 1999, 2000, 2001 by Ralf Baechle + * Copyright (C) 2001 MIPS Technologies, Inc. */ -#include <asm/asm.h> +#include <linux/config.h> #include <linux/errno.h> -#include <asm/current.h> +#include <asm/asm.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/isadep.h> +#include <asm/sysmips.h> #include <asm/unistd.h> - -/* This duplicates the definition from <linux/sched.h> */ -#define PT_TRACESYS 0x00000002 /* tracing system calls */ - -/* This duplicates the definition from <asm/signal.h> */ -#define SIGILL 4 /* Illegal instruction (ANSI). 
*/ +#include <asm/offset.h> /* Highest syscall used of any syscall flavour */ #define MAX_SYSCALL_NO __NR_Linux + __NR_Linux_syscalls .align 5 NESTED(handle_sys, PT_SIZE, sp) - .set noat - SAVE_SOME - STI - .set at + .set noat + SAVE_SOME + STI + .set at - lw t1, PT_EPC(sp) # skip syscall on return + lw t1, PT_EPC(sp) # skip syscall on return - sltiu t0, v0, MAX_SYSCALL_NO + 1 # check syscall number - addiu t1, 4 # skip to next instruction - beqz t0, illegal_syscall - sw t1, PT_EPC(sp) + sltiu t0, v0, MAX_SYSCALL_NO + 1 # check syscall number + addiu t1, 4 # skip to next instruction + beqz t0, illegal_syscall + sw t1, PT_EPC(sp) - /* XXX Put both in one cacheline, should save a bit. */ - sll t0, v0, 2 - lw t2, sys_call_table(t0) # syscall routine - lbu t3, sys_narg_table(v0) # number of arguments - beqz t2, illegal_syscall; + /* XXX Put both in one cacheline, should save a bit. */ + sll t0, v0, 2 + lw t2, sys_call_table(t0) # syscall routine + lbu t3, sys_narg_table(v0) # number of arguments + beqz t2, illegal_syscall; - subu t0, t3, 5 # 5 or more arguments? - sw a3, PT_R26(sp) # save a3 for syscall restarting - bgez t0, stackargs + subu t0, t3, 5 # 5 or more arguments? + sw a3, PT_R26(sp) # save a3 for syscall restarting + bgez t0, stackargs stack_done: - sw a3, PT_R26(sp) # save for syscall restart -#error lw t0, TASK_PTRACE($28) # syscall tracing enabled? - andi t0, PT_TRACESYS - bnez t0, trace_a_syscall - - jalr t2 # Do The Real Thing (TM) - - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sw t0, PT_R7(sp) # set error flag - beqz t0, 1f - - negu v0 # error - sw v0, PT_R0(sp) # set flag for syscall restarting -1: sw v0, PT_R2(sp) # result - -EXPORT(o32_ret_from_sys_call) - mfc0 t0, CP0_STATUS # need_resched and signals atomic test - ori t0, t0, 1 - xori t0, t0, 1 - mtc0 t0, CP0_STATUS - -#error lw t2, TASK_NEED_RESCHED($28) - bnez t2, o32_reschedule -#error lw v0, TASK_SIGPENDING($28) - bnez v0, signal_return -restore_all: - RESTORE_SOME - RESTORE_SP_AND_RET - -/* Put this behind restore_all for the sake of the branch prediction. */ -signal_return: - .type signal_return, @function - - mfc0 t0, CP0_STATUS - ori t0, t0, 1 - mtc0 t0, CP0_STATUS - - move a0, zero - move a1, sp -#error jal do_signal - b restore_all - -o32_reschedule: - SAVE_STATIC - jal schedule - b o32_ret_from_sys_call + sw a3, PT_R26(sp) # save for syscall restart + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + bltz t0, syscall_trace_entry # -> yes + + jalr t2 # Do The Real Thing (TM) + + li t0, -EMAXERRNO - 1 # error? 
+ sltu t0, t0, v0 + sw t0, PT_R7(sp) # set error flag + beqz t0, 1f + + negu v0 # error + sw v0, PT_R0(sp) # set flag for syscall + # restarting +1: sw v0, PT_R2(sp) # result + +EXPORT(o32_syscall_exit) + mfc0 t0, CP0_STATUS # make sure need_resched and + ori t0, t0, 1 # signals dont change between + xori t0, t0, 1 # sampling and return + mtc0 t0, CP0_STATUS + SSNOP; SSNOP; SSNOP + + LONG_L a2, TI_FLAGS($28) # current->work + bnez a2, o32_syscall_exit_work + +o32_restore_all: + RESTORE_SOME + RESTORE_SP_AND_RET + +o32_syscall_exit_work: + SAVE_STATIC + j syscall_exit_work /* ------------------------------------------------------------------------ */ -trace_a_syscall: - SAVE_STATIC - sw t2, PT_R1(sp) -#error jal syscall_trace - lw t2, PT_R1(sp) +syscall_trace_entry: + SAVE_STATIC + sw t2, PT_R1(sp) + jal do_syscall_trace + lw t2, PT_R1(sp) - lw a0, PT_R4(sp) # Restore argument registers - lw a1, PT_R5(sp) - lw a2, PT_R6(sp) - lw a3, PT_R7(sp) - jalr t2 + lw a0, PT_R4(sp) # Restore argument registers + lw a1, PT_R5(sp) + lw a2, PT_R6(sp) + lw a3, PT_R7(sp) + jalr t2 - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sw t0, PT_R7(sp) # set error flag - beqz t0, 1f + li t0, -EMAXERRNO - 1 # error? + sltu t0, t0, v0 + sw t0, PT_R7(sp) # set error flag + beqz t0, 1f - negu v0 # error - sw v0, PT_R0(sp) # set flag for syscall restarting -1: sw v0, PT_R2(sp) # result + negu v0 # error + sw v0, PT_R0(sp) # set flag for syscall + # restarting +1: sw v0, PT_R2(sp) # result -#error jal syscall_trace - j ret_from_sys_call + j syscall_exit /* ------------------------------------------------------------------------ */ @@ -139,7 +122,7 @@ stackargs: bltz t0, bad_stack # -> sp is bad lw t0, PT_R29(sp) # get old user stack pointer - la t1, 3f # copy 1 to 2 arguments + PTR_LA t1, 3f # copy 1 to 2 arguments sll t3, t3, 4 subu t1, t3 jr t1 @@ -153,6 +136,7 @@ stackargs: */ .set push .set noreorder + .set nomacro 1: lw t1, 20(t0) # argument #6 from usp nop sw t1, 20(sp) @@ -161,9 +145,9 @@ stackargs: nop sw t1, 16(sp) nop - .set pop +3: .set pop -3: j stack_done # go back + j stack_done # go back .section __ex_table,"a" PTR 1b,bad_stack @@ -180,7 +164,7 @@ bad_stack: sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) - j ret_from_sys_call + j o32_syscall_exit /* * The system call does not exist in this kernel @@ -190,5 +174,147 @@ illegal_syscall: sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) - j ret_from_sys_call + j o32_syscall_exit END(handle_sys) + + LEAF(mips_atomic_set) + andi v0, a1, 3 # must be word aligned + bnez v0, bad_alignment + + lw v1, TI_ADDR_LIMIT($28) # in legal address range? + addiu a0, a1, 4 + or a0, a0, a1 + and a0, a0, v1 + bltz a0, bad_address + +#ifdef CONFIG_CPU_HAS_LLSC + /* Ok, this is the ll/sc case. World is sane :-) */ +1: ll v0, (a1) + move a0, a2 +2: sc a0, (a1) + beqz a0, 1b + + .section __ex_table,"a" + PTR 1b, bad_stack + PTR 2b, bad_stack + .previous +#else + sw a1, 16(sp) + sw a2, 20(sp) + + move a0, sp + move a2, a1 + li a1, 1 + jal do_page_fault + + lw a1, 16(sp) + lw a2, 20(sp) + + /* + * At this point the page should be readable and writable unless + * there was no more memory available. + */ +1: lw v0, (a1) +2: sw a2, (a1) + + .section __ex_table,"a" + PTR 1b, no_mem + PTR 2b, no_mem + .previous +#endif + + sw v0, PT_R2(sp) # result +1: + + /* Success, so skip usual error handling garbage. */ + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? 
+ bltz t0, 1f + b o32_syscall_exit + +1: SAVE_STATIC + jal do_syscall_trace + li a3, 0 # success + j syscall_exit + +no_mem: li v0, -ENOMEM + jr ra + +bad_address: + li v0, -EFAULT + jr ra + +bad_alignment: + li v0, -EINVAL + jr ra + END(mips_atomic_set) + + LEAF(sys_sysmips) + beq a0, MIPS_ATOMIC_SET, mips_atomic_set + j _sys_sysmips + END(sys_sysmips) + + LEAF(sys_syscall) + lw t0, PT_R29(sp) # user sp + + sltu v0, a0, __NR_Linux + __NR_Linux_syscalls + 1 + beqz v0, enosys + + sll v0, a0, 2 + la v1, sys_syscall + lw t2, sys_call_table(v0) # function pointer + lbu t4, sys_narg_table(a0) # number of arguments + + li v0, -EINVAL + beq t2, v1, out # do not recurse + + beqz t2, enosys # null function pointer? + + andi v0, t0, 0x3 # unaligned stack pointer? + bnez v0, sigsegv + + addu v0, t0, 16 # v0 = usp + 16 + addu t1, v0, 12 # 3 32-bit arguments + lw v1, TI_ADDR_LIMIT($28) + or v0, v0, t1 + and v1, v1, v0 + bltz v1, efault + + move a0, a1 # shift argument registers + move a1, a2 + move a2, a3 + +1: lw a3, 16(t0) +2: lw t3, 20(t0) +3: lw t4, 24(t0) + + .section __ex_table, "a" + .word 1b, efault + .word 2b, efault + .word 3b, efault + .previous + + sw t3, 16(sp) # put into new stackframe + sw t4, 20(sp) + + bnez t4, 1f # zero arguments? + addu a0, sp, 32 # then pass sp in a0 +1: + + sw t3, 16(sp) + sw v1, 20(sp) + jr t2 + /* Unreached */ + +enosys: li v0, -ENOSYS + b out + +sigsegv: + li a0, _SIGSEGV + move a1, $28 + jal force_sig + /* Fall through */ + +efault: li v0, -EFAULT + +out: jr ra + END(sys_syscall) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 9d39a8658f27..809b1b14f3a3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -4,9 +4,10 @@ * for more details. * * Copyright (C) 1995 Linus Torvalds - * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000 Ralf Baechle + * Copyright (C) 1995 Waldorf Electronics + * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001 Ralf Baechle * Copyright (C) 1996 Stoned Elipot - * Copyright (C) 2000 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2002 Maciej W. Rozycki */ #include <linux/config.h> #include <linux/errno.h> @@ -16,10 +17,10 @@ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/unistd.h> -#include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/utsname.h> @@ -29,6 +30,8 @@ #include <linux/initrd.h> #include <linux/ide.h> #include <linux/timex.h> +#include <linux/major.h> +#include <linux/kdev_t.h> #include <linux/root_dev.h> #include <asm/asm.h> @@ -36,23 +39,11 @@ #include <asm/cachectl.h> #include <asm/cpu.h> #include <asm/io.h> -#include <asm/stackframe.h> +#include <asm/ptrace.h> +#include <asm/sections.h> #include <asm/system.h> -#ifdef CONFIG_SGI_IP22 -#include <asm/sgialib.h> -#endif - -struct mips_cpuinfo boot_cpu_data = { 0, NULL, NULL, 0 }; - -/* - * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, - * the implementation of the "wait" feature differs between CPU families. This - * points to the function that implements CPU specific wait. - * The wait instruction stops the pipeline and reduces the power consumption of - * the CPU very much. - */ -void (*cpu_wait)(void) = NULL; +struct cpuinfo_mips cpu_data[NR_CPUS]; /* * There are several bus types available for MIPS machines. "RISC PC" @@ -61,7 +52,9 @@ void (*cpu_wait)(void) = NULL; * boxes ... * This flag is set if a EISA slots are available. 
*/ +#ifdef CONFIG_EISA int EISA_bus = 0; +#endif struct screen_info screen_info; @@ -78,11 +71,6 @@ extern void * __rd_start, * __rd_end; extern struct rtc_ops no_rtc_ops; struct rtc_ops *rtc_ops; -#ifdef CONFIG_PC_KEYB -extern struct kbd_ops no_kbd_ops; -struct kbd_ops *kbd_ops; -#endif - /* * Setup information * @@ -94,333 +82,59 @@ unsigned long mips_machgroup = MACH_GROUP_UNKNOWN; struct boot_mem_map boot_mem_map; unsigned char aux_device_present; -extern char _ftext, _etext, _fdata, _edata, _end; -static char command_line[COMMAND_LINE_SIZE]; - char saved_command_line[COMMAND_LINE_SIZE]; -extern char arcs_cmdline[COMMAND_LINE_SIZE]; +static char command_line[CL_SIZE]; + char saved_command_line[CL_SIZE]; +extern char arcs_cmdline[CL_SIZE]; /* * mips_io_port_base is the begin of the address space to which x86 style * I/O ports are mapped. */ -unsigned long mips_io_port_base; +const unsigned long mips_io_port_base = -1; +EXPORT_SYMBOL(mips_io_port_base); + /* - * isa_slot_offset is the address where E(ISA) busaddress 0 is is mapped + * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped * for the processor. */ unsigned long isa_slot_offset; +EXPORT_SYMBOL(isa_slot_offset); -extern void sgi_sysinit(void); extern void SetUpBootInfo(void); -extern void loadmmu(void); +extern void load_mmu(void); extern asmlinkage void start_kernel(void); extern void prom_init(int, char **, char **, int *); static struct resource code_resource = { "Kernel code" }; static struct resource data_resource = { "Kernel data" }; -/* - * Probe whether cpu has config register by trying to play with - * alternate cache bit and see whether it matters. - * It's used by cpu_probe to distinguish between R3000A and R3081. - */ -static inline int cpu_has_confreg(void) -{ -#ifdef CONFIG_CPU_R3000 - extern unsigned long r3k_cache_size(unsigned long); - unsigned long size1, size2; - unsigned long cfg = read_32bit_cp0_register(CP0_CONF); - - size1 = r3k_cache_size(ST0_ISC); - write_32bit_cp0_register(CP0_CONF, cfg^CONF_AC); - size2 = r3k_cache_size(ST0_ISC); - write_32bit_cp0_register(CP0_CONF, cfg); - return size1 != size2; -#else - return 0; -#endif -} - -/* declaration of the global struct */ -struct mips_cpu mips_cpu = {PRID_IMP_UNKNOWN, CPU_UNKNOWN, 0, 0, 0, - {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}}; - -/* Shortcut for assembler access to mips_cpu.options */ -int *cpuoptions = &mips_cpu.options; - -#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4KTLB \ - | MIPS_CPU_COUNTER | MIPS_CPU_CACHE_CDEX) - -static inline void cpu_probe(void) -{ - -#ifdef CONFIG_CPU_MIPS32 - unsigned long config1; -#endif - - mips_cpu.processor_id = read_32bit_cp0_register(CP0_PRID); - switch (mips_cpu.processor_id & 0xff0000) { - case PRID_COMP_LEGACY: - switch (mips_cpu.processor_id & 0xff00) { - case PRID_IMP_R2000: - mips_cpu.cputype = CPU_R2000; - mips_cpu.isa_level = MIPS_CPU_ISA_I; - mips_cpu.options = MIPS_CPU_TLB; - mips_cpu.tlbsize = 64; - break; - case PRID_IMP_R3000: - if ((mips_cpu.processor_id & 0xff) == PRID_REV_R3000A) - if (cpu_has_confreg()) - mips_cpu.cputype = CPU_R3081E; - else - mips_cpu.cputype = CPU_R3000A; - else - mips_cpu.cputype = CPU_R3000; - mips_cpu.isa_level = MIPS_CPU_ISA_I; - mips_cpu.options = MIPS_CPU_TLB; - mips_cpu.tlbsize = 64; - break; - case PRID_IMP_R4000: - if ((mips_cpu.processor_id & 0xff) == PRID_REV_R4400) - mips_cpu.cputype = CPU_R4400SC; - else - mips_cpu.cputype = CPU_R4000SC; - mips_cpu.isa_level = MIPS_CPU_ISA_III; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU | 
MIPS_CPU_32FPR | - MIPS_CPU_WATCH | MIPS_CPU_VCE; - mips_cpu.tlbsize = 48; - break; - case PRID_IMP_VR41XX: - mips_cpu.cputype = CPU_VR41XX; - mips_cpu.isa_level = MIPS_CPU_ISA_III; - mips_cpu.options = R4K_OPTS; - mips_cpu.tlbsize = 32; - break; - case PRID_IMP_R4600: - mips_cpu.cputype = CPU_R4600; - mips_cpu.isa_level = MIPS_CPU_ISA_III; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU; - mips_cpu.tlbsize = 48; - break; -/* - * This processor doesn't have an MMU, so it's not "real easy" to - * run Linux on it. It is left purely for documentation. - * case PRID_IMP_R4650: - mips_cpu.cputype = CPU_R4650; - mips_cpu.isa_level = MIPS_CPU_ISA_III; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU; - mips_cpu.tlbsize = 48; - break; -*/ - case PRID_IMP_TX39: - mips_cpu.isa_level = MIPS_CPU_ISA_I; - mips_cpu.options = MIPS_CPU_TLB; - - switch (mips_cpu.processor_id & 0xff) { - case PRID_REV_TX3912: - mips_cpu.cputype = CPU_TX3912; - mips_cpu.tlbsize = 32; - break; - case PRID_REV_TX3922: - mips_cpu.cputype = CPU_TX3922; - mips_cpu.tlbsize = 64; - break; - case PRID_REV_TX3927: - mips_cpu.cputype = CPU_TX3927; - mips_cpu.tlbsize = 64; - break; - default: - mips_cpu.cputype = CPU_UNKNOWN; - break; - } - break; - case PRID_IMP_R4700: - mips_cpu.cputype = CPU_R4700; - mips_cpu.isa_level = MIPS_CPU_ISA_III; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR; - mips_cpu.tlbsize = 48; - break; - case PRID_IMP_R5000: - mips_cpu.cputype = CPU_R5000; - mips_cpu.isa_level = MIPS_CPU_ISA_IV; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR; - mips_cpu.tlbsize = 48; - break; - case PRID_IMP_R5432: - mips_cpu.cputype = CPU_R5432; - mips_cpu.isa_level = MIPS_CPU_ISA_IV; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR; - mips_cpu.tlbsize = 48; - break; - case PRID_IMP_NEVADA: - mips_cpu.cputype = CPU_NEVADA; - mips_cpu.isa_level = MIPS_CPU_ISA_IV; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_DIVEC; - mips_cpu.tlbsize = 48; - mips_cpu.icache.ways = 2; - mips_cpu.dcache.ways = 2; - break; - case PRID_IMP_R6000: - mips_cpu.cputype = CPU_R6000; - mips_cpu.isa_level = MIPS_CPU_ISA_II; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_FPU; - mips_cpu.tlbsize = 32; - break; - case PRID_IMP_R6000A: - mips_cpu.cputype = CPU_R6000A; - mips_cpu.isa_level = MIPS_CPU_ISA_II; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_FPU; - mips_cpu.tlbsize = 32; - break; - case PRID_IMP_RM7000: - mips_cpu.cputype = CPU_RM7000; - mips_cpu.isa_level = MIPS_CPU_ISA_IV; - mips_cpu.options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR; - break; - case PRID_IMP_R8000: - mips_cpu.cputype = CPU_R8000; - mips_cpu.isa_level = MIPS_CPU_ISA_IV; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_FPU | MIPS_CPU_32FPR; - mips_cpu.tlbsize = 384; /* has weird TLB: 3-way x 128 */ - break; - case PRID_IMP_R10000: - mips_cpu.cputype = CPU_R10000; - mips_cpu.isa_level = MIPS_CPU_ISA_IV; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_COUNTER | MIPS_CPU_WATCH; - mips_cpu.tlbsize = 64; - break; - default: - mips_cpu.cputype = CPU_UNKNOWN; - break; - } - break; -#ifdef CONFIG_CPU_MIPS32 - case PRID_COMP_MIPS: - switch (mips_cpu.processor_id & 0xff00) { - case PRID_IMP_4KC: - mips_cpu.cputype = CPU_4KC; - goto cpu_4kc; - case PRID_IMP_4KEC: - mips_cpu.cputype = CPU_4KEC; - goto cpu_4kc; - case PRID_IMP_4KSC: - mips_cpu.cputype = CPU_4KSC; -cpu_4kc: - /* Why do we set all these options by default, THEN query them?? 
*/ - mips_cpu.cputype = MIPS_CPU_ISA_M32; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | - MIPS_CPU_DIVEC | MIPS_CPU_WATCH; - config1 = read_mips32_cp0_config1(); - if (config1 & (1 << 3)) - mips_cpu.options |= MIPS_CPU_WATCH; - if (config1 & (1 << 2)) - mips_cpu.options |= MIPS_CPU_MIPS16; - if (config1 & 1) - mips_cpu.options |= MIPS_CPU_FPU; - mips_cpu.scache.flags = MIPS_CACHE_NOT_PRESENT; - break; - case PRID_IMP_5KC: - mips_cpu.cputype = CPU_5KC; - mips_cpu.cputype = MIPS_CPU_ISA_M64; - /* See comment above about querying options */ - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | - MIPS_CPU_DIVEC | MIPS_CPU_WATCH; - config1 = read_mips32_cp0_config1(); - if (config1 & (1 << 3)) - mips_cpu.options |= MIPS_CPU_WATCH; - if (config1 & (1 << 2)) - mips_cpu.options |= MIPS_CPU_MIPS16; - if (config1 & 1) - mips_cpu.options |= MIPS_CPU_FPU; - break; - mips_cpu.scache.flags = MIPS_CACHE_NOT_PRESENT; - default: - mips_cpu.cputype = CPU_UNKNOWN; - break; - } - break; -#endif - case PRID_COMP_ALCHEMY: - switch (mips_cpu.processor_id & 0xff00) { -#ifdef CONFIG_CPU_MIPS32 - case PRID_IMP_AU1000: - mips_cpu.cputype = CPU_AU1000; - mips_cpu.isa_level = MIPS_CPU_ISA_M32; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | - MIPS_CPU_DIVEC | MIPS_CPU_WATCH; - config1 = read_mips32_cp0_config1(); - if (config1 & (1 << 3)) - mips_cpu.options |= MIPS_CPU_WATCH; - if (config1 & (1 << 2)) - mips_cpu.options |= MIPS_CPU_MIPS16; - if (config1 & 1) - mips_cpu.options |= MIPS_CPU_FPU; - mips_cpu.scache.flags = MIPS_CACHE_NOT_PRESENT; - break; -#endif - default: - mips_cpu.cputype = CPU_UNKNOWN; - break; - } - break; - case PRID_COMP_SIBYTE: - switch (mips_cpu.processor_id & 0xff00) { - case PRID_IMP_SB1: - mips_cpu.cputype = CPU_SB1; - mips_cpu.options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | MIPS_CPU_FPU | - MIPS_CPU_VCE; - break; - default: - mips_cpu.cputype = CPU_UNKNOWN; - break; - } - break; - default: - mips_cpu.cputype = CPU_UNKNOWN; - } -} - asmlinkage void __init init_arch(int argc, char **argv, char **envp, int *prom_vec) { - unsigned int s; - /* Determine which MIPS variant we are running on. */ cpu_probe(); prom_init(argc, argv, envp, prom_vec); -#ifdef CONFIG_SGI_IP22 - sgi_sysinit(); -#endif + cpu_report(); /* * Determine the mmu/cache attached to this machine, * then flush the tlb and caches. On the r4xx0 * variants this also sets CP0_WIRED to zero. 
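Just below, the open-coded STATUS fiddling in init_arch() is replaced by the clear_c0_status()/set_c0_status() helpers. Their presumed semantics are a read-modify-write of the CP0 STATUS word that returns the old value; a plain-C stand-in (the real helpers wrap mfc0/mtc0 and are not shown in this patch):

/* Assumed semantics of the c0_status helper pair, modeled in plain C. */
#include <stdio.h>

static unsigned int c0_status;     /* stand-in for the CP0 STATUS register */

static unsigned int set_c0_status(unsigned int bits)
{
    unsigned int old = c0_status;
    c0_status = old | bits;        /* raise the requested bits */
    return old;
}

static unsigned int clear_c0_status(unsigned int bits)
{
    unsigned int old = c0_status;
    c0_status = old & ~bits;       /* drop the requested bits */
    return old;
}

int main(void)
{
    c0_status = 0x30000000;        /* say CU1|CU0 start out set */
    clear_c0_status(0x20000000);   /* ST0_CU1 */
    set_c0_status(0x10000000);     /* ST0_CU0 */
    printf("status = %#x\n", c0_status);
    return 0;
}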
*/ - loadmmu(); + load_mmu(); - /* Disable coprocessors and set FPU for 16 FPRs */ - s = read_32bit_cp0_register(CP0_STATUS); - s &= ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_FR); - s |= ST0_CU0; - write_32bit_cp0_register(CP0_STATUS, s); + /* Disable coprocessors and set FPU for 16/32 FPR register model */ + clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_FR); + set_c0_status(ST0_CU0); start_kernel(); } -void __init add_memory_region(unsigned long start, unsigned long size, +void __init add_memory_region(phys_t start, phys_t size, long type) { int x = boot_mem_map.nr_map; @@ -441,8 +155,10 @@ static void __init print_memory_map(void) int i; for (i = 0; i < boot_mem_map.nr_map; i++) { - printk(" memory: %08lx @ %08lx ", - boot_mem_map.map[i].size, boot_mem_map.map[i].addr); + printk(" memory: %08Lx @ %08Lx ", + (u64) boot_mem_map.map[i].size, + (u64) boot_mem_map.map[i].addr); + switch (boot_mem_map.map[i].type) { case BOOT_MEM_RAM: printk("(usable)\n"); @@ -460,7 +176,7 @@ static void __init print_memory_map(void) } } -static inline void parse_mem_cmdline(void) +static inline void parse_cmdline_early(void) { char c = ' ', *to = command_line, *from = saved_command_line; unsigned long start_at, mem_size; @@ -499,7 +215,7 @@ static inline void parse_mem_cmdline(void) c = *(from++); if (!c) break; - if (COMMAND_LINE_SIZE <= ++len) + if (CL_SIZE <= ++len) break; *(to++) = c; } @@ -511,143 +227,41 @@ static inline void parse_mem_cmdline(void) } } -void __init setup_arch(char **cmdline_p) -{ - void atlas_setup(void); - void baget_setup(void); - void ddb_setup(void); - void decstation_setup(void); - void deskstation_setup(void); - void jazz_setup(void); - void sni_rm200_pci_setup(void); - void sgi_setup(void); - void ev96100_setup(void); - void malta_setup(void); - void momenco_ocelot_setup(void); - void nino_setup(void); - - unsigned long bootmap_size; - unsigned long start_pfn, max_pfn, first_usable_pfn; - - int i; -#ifdef CONFIG_BLK_DEV_FD - fd_ops = &no_fd_ops; -#endif +#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_PHYS(x) ((x) << PAGE_SHIFT) -#ifdef CONFIG_BLK_DEV_IDE - ide_ops = &no_ide_ops; -#endif +#define MAXMEM HIGHMEM_START +#define MAXMEM_PFN PFN_DOWN(MAXMEM) -#ifdef CONFIG_PC_KEYB - kbd_ops = &no_kbd_ops; +static inline void bootmem_init(void) +{ +#ifdef CONFIG_BLK_DEV_INITRD + unsigned long tmp; + unsigned long *initrd_header; #endif - - rtc_ops = &no_rtc_ops; + unsigned long bootmap_size; + unsigned long start_pfn, max_low_pfn, first_usable_pfn; + int i; - switch(mips_machgroup) - { -#ifdef CONFIG_BAGET_MIPS - case MACH_GROUP_BAGET: - baget_setup(); - break; -#endif -#ifdef CONFIG_DECSTATION - case MACH_GROUP_DEC: - decstation_setup(); - break; -#endif -#ifdef CONFIG_MIPS_ATLAS - case MACH_GROUP_UNKNOWN: - atlas_setup(); - break; -#endif -#ifdef CONFIG_MIPS_JAZZ - case MACH_GROUP_JAZZ: - jazz_setup(); - break; -#endif -#ifdef CONFIG_MIPS_MALTA - case MACH_GROUP_UNKNOWN: - malta_setup(); - break; -#endif -#ifdef CONFIG_MOMENCO_OCELOT - case MACH_GROUP_MOMENCO: - momenco_ocelot_setup(); - break; -#endif -#ifdef CONFIG_SGI_IP22 - /* As of now this is only IP22. 
*/ - case MACH_GROUP_SGI: - sgi_setup(); - break; -#endif -#ifdef CONFIG_SNI_RM200_PCI - case MACH_GROUP_SNI_RM: - sni_rm200_pci_setup(); - break; -#endif -#ifdef CONFIG_DDB5074 - case MACH_GROUP_NEC_DDB: - ddb_setup(); - break; -#endif -#ifdef CONFIG_DDB5476 - case MACH_GROUP_NEC_DDB: - ddb_setup(); - break; -#endif -#ifdef CONFIG_DDB5477 - case MACH_GROUP_NEC_DDB: - ddb_setup(); - break; -#endif -#ifdef CONFIG_MIPS_EV96100 - case MACH_GROUP_GALILEO: - ev96100_setup(); - break; -#endif -#ifdef CONFIG_MIPS_EV64120 - case MACH_GROUP_GALILEO: - ev64120_setup(); - break; -#endif -#if defined(CONFIG_MIPS_IVR) || defined(CONFIG_MIPS_ITE8172) - case MACH_GROUP_ITE: - case MACH_GROUP_GLOBESPAN: - it8172_setup(); - break; -#endif -#ifdef CONFIG_NINO - case MACH_GROUP_PHILIPS: - nino_setup(); - break; -#endif -#ifdef CONFIG_MIPS_PB1000 - case MACH_GROUP_ALCHEMY: - au1000_setup(); - break; -#endif - default: - panic("Unsupported architecture"); +#ifdef CONFIG_BLK_DEV_INITRD + tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8; + if (tmp < (unsigned long)&_end) + tmp += PAGE_SIZE; + initrd_header = (unsigned long *)tmp; + if (initrd_header[0] == 0x494E5244) { + initrd_start = (unsigned long)&initrd_header[2]; + initrd_end = initrd_start + initrd_header[1]; } - - strlcpy(command_line, arcs_cmdline, sizeof command_line); - strlcpy(saved_command_line, command_line, sizeof saved_command_line); - *cmdline_p = command_line; - - parse_mem_cmdline(); - -#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - + start_pfn = PFN_UP(__pa((&_end)+(initrd_end - initrd_start) + PAGE_SIZE)); +#else /* * Partially used pages are not usable - thus * we are rounding upwards. */ start_pfn = PFN_UP(__pa(&_end)); +#endif /* CONFIG_BLK_DEV_INITRD */ /* Find the highest page frame number we have available. */ max_pfn = 0; @@ -674,9 +288,36 @@ void __init setup_arch(char **cmdline_p) } } } - - /* Initialize the boot-time allocator. */ - bootmap_size = init_bootmem(first_usable_pfn, max_pfn); + + /* + * Determine low and high memory ranges + */ + max_low_pfn = max_pfn; + if (max_low_pfn > MAXMEM_PFN) { + max_low_pfn = MAXMEM_PFN; +#ifndef CONFIG_HIGHMEM + /* Maximum memory usable is what is directly addressable */ + printk(KERN_WARNING "Warning only %dMB will be used.\n", + MAXMEM>>20); + printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); +#endif + } + +#ifdef CONFIG_HIGHMEM + /* + * Crude, we really should make a better attempt at detecting + * highstart_pfn + */ + highstart_pfn = highend_pfn = max_pfn; + if (max_pfn > MAXMEM_PFN) { + highstart_pfn = MAXMEM_PFN; + printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", + (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT)); + } +#endif + + /* Initialize the boot-time allocator with low memory only. */ + bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn); /* * Register fully available low RAM pages with the bootmem allocator. 
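The registration loop that follows rounds each usable region inward to whole pages and clamps it below HIGHMEM_START, so the bootmem allocator only ever sees lowmem. A compilable miniature of the same arithmetic; the page size and highmem boundary are assumptions chosen for the example:

/* PFN rounding and the lowmem clamp used when feeding RAM to bootmem. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define HIGHMEM_START 0x20000000UL     /* assumed 512 MB boundary */

int main(void)
{
    unsigned long addr = 0x00001234, size = 0x30000000;
    unsigned long curr_pfn = PFN_UP(addr);   /* partial first page unusable */
    unsigned long last_pfn = PFN_DOWN(addr + size);
    unsigned long max_low  = PFN_DOWN(HIGHMEM_START);

    if (last_pfn > max_low)                  /* only lowmem goes to bootmem */
        last_pfn = max_low;
    printf("register pfns %lu..%lu\n", curr_pfn, last_pfn);
    return 0;
}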
@@ -694,7 +335,7 @@ void __init setup_arch(char **cmdline_p) * We are rounding up the start address of usable memory: */ curr_pfn = PFN_UP(boot_mem_map.map[i].addr); - if (curr_pfn >= max_pfn) + if (curr_pfn >= max_low_pfn) continue; if (curr_pfn < start_pfn) curr_pfn = start_pfn; @@ -705,8 +346,19 @@ void __init setup_arch(char **cmdline_p) last_pfn = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); - if (last_pfn > max_pfn) - last_pfn = max_pfn; + if (last_pfn > max_low_pfn) + last_pfn = max_low_pfn; + + /* + * Only register lowmem part of lowmem segment with bootmem. + */ + size = last_pfn - curr_pfn; + if (curr_pfn > PFN_DOWN(HIGHMEM_START)) + continue; + if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START)) + size = PFN_DOWN(HIGHMEM_START) - curr_pfn; + if (!size) + continue; /* * ... finally, did all the rounding and playing @@ -715,7 +367,7 @@ void __init setup_arch(char **cmdline_p) if (last_pfn <= curr_pfn) continue; - size = last_pfn - curr_pfn; + /* Register lowmem ranges */ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); } @@ -725,38 +377,49 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_BLK_DEV_INITRD /* Board specific code should have set up initrd_start and initrd_end */ ROOT_DEV = Root_RAM0; - if( __rd_start != __rd_end ) { + if (&__rd_start != &__rd_end) { initrd_start = (unsigned long)&__rd_start; initrd_end = (unsigned long)&__rd_end; } initrd_below_start_ok = 1; if (initrd_start) { - unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start); + unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start); printk("Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *)initrd_start, + (void *)initrd_start, initrd_size); - if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { + if (PHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { printk("initrd extends beyond end of memory " - "(0x%lx > 0x%p)\ndisabling initrd\n", - initrd_end, - phys_to_virt(PFN_PHYS(max_low_pfn))); + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + PHYSADDR(initrd_end), + PFN_PHYS(max_low_pfn)); initrd_start = initrd_end = 0; - } + } } #endif /* CONFIG_BLK_DEV_INITRD */ +} - paging_init(); +static inline void resource_init(void) +{ + int i; - code_resource.start = virt_to_bus(&_ftext); - code_resource.end = virt_to_bus(&_etext) - 1; - data_resource.start = virt_to_bus(&_fdata); - data_resource.end = virt_to_bus(&_edata) - 1; + code_resource.start = virt_to_phys(&_text); + code_resource.end = virt_to_phys(&_etext) - 1; + data_resource.start = virt_to_phys(&_etext); + data_resource.end = virt_to_phys(&_edata) - 1; /* * Request address space for all standard RAM. 
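The per-segment clipping applied just before free_bootmem() above can be read as one small pure function. An illustrative sketch of the same decision, not the kernel code itself:

/* Clip a [curr_pfn, last_pfn) segment to lowmem; returns the number
 * of page frames bootmem may actually be given. */
static unsigned long clip_segment(unsigned long curr_pfn,
				  unsigned long last_pfn,
				  unsigned long highmem_pfn)
{
	if (curr_pfn >= last_pfn || curr_pfn > highmem_pfn)
		return 0;               /* empty, or entirely above lowmem */
	if (last_pfn > highmem_pfn)
		last_pfn = highmem_pfn; /* keep only the lowmem part */
	return last_pfn - curr_pfn;
}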
*/ for (i = 0; i < boot_mem_map.nr_map; i++) { struct resource *res; + unsigned long start, end; + + start = boot_mem_map.map[i].addr; + end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1; + if (start >= MAXMEM) + continue; + if (end >= MAXMEM) + end = MAXMEM - 1; res = alloc_bootmem(sizeof(struct resource)); switch (boot_mem_map.map[i].type) { @@ -768,8 +431,10 @@ void __init setup_arch(char **cmdline_p) default: res->name = "reserved"; } - res->start = boot_mem_map.map[i].addr; - res->end = res->start + boot_mem_map.map[i].size - 1; + + res->start = start; + res->end = end; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); @@ -783,15 +448,244 @@ void __init setup_arch(char **cmdline_p) } } -void r3081_wait(void) +#undef PFN_UP +#undef PFN_DOWN +#undef PFN_PHYS + +#undef MAXMEM +#undef MAXMEM_PFN + + +void __init setup_arch(char **cmdline_p) { - unsigned long cfg = read_32bit_cp0_register(CP0_CONF); - write_32bit_cp0_register(CP0_CONF, cfg|CONF_HALT); + void atlas_setup(void); + void baget_setup(void); + void cobalt_setup(void); + void lasat_setup(void); + void ddb_setup(void); + void decstation_setup(void); + void deskstation_setup(void); + void jazz_setup(void); + void sni_rm200_pci_setup(void); + void ip22_setup(void); + void ev96100_setup(void); + void malta_setup(void); + void sead_setup(void); + void ikos_setup(void); + void momenco_ocelot_setup(void); + void momenco_ocelot_g_setup(void); + void momenco_ocelot_c_setup(void); + void nec_osprey_setup(void); + void nec_eagle_setup(void); + void zao_capcella_setup(void); + void victor_mpc30x_setup(void); + void ibm_workpad_setup(void); + void casio_e55_setup(void); + void jmr3927_setup(void); + void it8172_setup(void); + void swarm_setup(void); + void hp_setup(void); + void au1x00_setup(void); + void frame_info_init(void); + + frame_info_init(); + +#ifdef CONFIG_BLK_DEV_FD + fd_ops = &no_fd_ops; +#endif + +#ifdef CONFIG_BLK_DEV_IDE + ide_ops = &no_ide_ops; +#endif + + rtc_ops = &no_rtc_ops; + + switch(mips_machgroup) + { +#ifdef CONFIG_BAGET_MIPS + case MACH_GROUP_BAGET: + baget_setup(); + break; +#endif +#ifdef CONFIG_MIPS_COBALT + case MACH_GROUP_COBALT: + cobalt_setup(); + break; +#endif +#ifdef CONFIG_DECSTATION + case MACH_GROUP_DEC: + decstation_setup(); + break; +#endif +#ifdef CONFIG_MIPS_ATLAS + case MACH_GROUP_UNKNOWN: + atlas_setup(); + break; +#endif +#ifdef CONFIG_MIPS_JAZZ + case MACH_GROUP_JAZZ: + jazz_setup(); + break; +#endif +#ifdef CONFIG_MIPS_MALTA + case MACH_GROUP_UNKNOWN: + malta_setup(); + break; +#endif +#ifdef CONFIG_MOMENCO_OCELOT + case MACH_GROUP_MOMENCO: + momenco_ocelot_setup(); + break; +#endif +#ifdef CONFIG_MOMENCO_OCELOT_G + case MACH_GROUP_MOMENCO: + momenco_ocelot_g_setup(); + break; +#endif +#ifdef CONFIG_MOMENCO_OCELOT_C + case MACH_GROUP_MOMENCO: + momenco_ocelot_c_setup(); + break; +#endif +#ifdef CONFIG_MIPS_SEAD + case MACH_GROUP_UNKNOWN: + sead_setup(); + break; +#endif +#ifdef CONFIG_SGI_IP22 + /* As of now this is only IP22. 
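The mips_machgroup switch above could equally be table-driven; a hypothetical sketch, where the group codes and hooks are stand-ins rather than the kernel's names:

#include <stddef.h>

typedef void (*setup_fn)(void);

struct board_setup {
	int group;      /* would be a MACH_GROUP_* value */
	setup_fn setup; /* would be ip22_setup, jazz_setup, ... */
};

static void demo_setup(void) { /* board bring-up would go here */ }

static const struct board_setup boards[] = {
	{ 1, demo_setup },
};

static int dispatch_setup(int group)
{
	size_t i;

	for (i = 0; i < sizeof(boards) / sizeof(boards[0]); i++)
		if (boards[i].group == group) {
			boards[i].setup();
			return 0;
		}
	return -1;      /* caller panics: "Unsupported architecture" */
}

The #ifdef-per-case switch has one advantage over such a table: unconfigured boards cost no text at all, which matters for embedded targets.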
*/ + case MACH_GROUP_SGI: + ip22_setup(); + break; +#endif +#ifdef CONFIG_SNI_RM200_PCI + case MACH_GROUP_SNI_RM: + sni_rm200_pci_setup(); + break; +#endif +#ifdef CONFIG_DDB5074 + case MACH_GROUP_NEC_DDB: + ddb_setup(); + break; +#endif +#ifdef CONFIG_DDB5476 + case MACH_GROUP_NEC_DDB: + ddb_setup(); + break; +#endif +#ifdef CONFIG_DDB5477 + case MACH_GROUP_NEC_DDB: + ddb_setup(); + break; +#endif +#ifdef CONFIG_CPU_VR41XX + case MACH_GROUP_NEC_VR41XX: + switch (mips_machtype) { +#ifdef CONFIG_NEC_OSPREY + case MACH_NEC_OSPREY: + nec_osprey_setup(); + break; +#endif +#ifdef CONFIG_NEC_EAGLE + case MACH_NEC_EAGLE: + nec_eagle_setup(); + break; +#endif +#ifdef CONFIG_ZAO_CAPCELLA + case MACH_ZAO_CAPCELLA: + zao_capcella_setup(); + break; +#endif +#ifdef CONFIG_VICTOR_MPC30X + case MACH_VICTOR_MPC30X: + victor_mpc30x_setup(); + break; +#endif +#ifdef CONFIG_IBM_WORKPAD + case MACH_IBM_WORKPAD: + ibm_workpad_setup(); + break; +#endif +#ifdef CONFIG_CASIO_E55 + case MACH_CASIO_E55: + casio_e55_setup(); + break; +#endif +#ifdef CONFIG_TANBAC_TB0229 + case MACH_TANBAC_TB0229: + tanbac_tb0229_setup(); + break; +#endif + } + break; +#endif +#ifdef CONFIG_MIPS_EV96100 + case MACH_GROUP_GALILEO: + ev96100_setup(); + break; +#endif +#ifdef CONFIG_MIPS_EV64120 + case MACH_GROUP_GALILEO: + ev64120_setup(); + break; +#endif +#if defined(CONFIG_MIPS_IVR) || defined(CONFIG_MIPS_ITE8172) + case MACH_GROUP_ITE: + case MACH_GROUP_GLOBESPAN: + it8172_setup(); + break; +#endif +#ifdef CONFIG_LASAT + case MACH_GROUP_LASAT: + lasat_setup(); + break; +#endif +#ifdef CONFIG_SOC_AU1X00 + case MACH_GROUP_ALCHEMY: + au1x00_setup(); + break; +#endif +#ifdef CONFIG_TOSHIBA_JMR3927 + case MACH_GROUP_TOSHIBA: + jmr3927_setup(); + break; +#endif +#ifdef CONFIG_TOSHIBA_RBTX4927 + case MACH_GROUP_TOSHIBA: + tx4927_setup(); + break; +#endif +#ifdef CONFIG_SIBYTE_BOARD + case MACH_GROUP_SIBYTE: + swarm_setup(); + break; +#endif +#ifdef CONFIG_HP_LASERJET + case MACH_GROUP_HP_LJ: + hp_setup(); + break; +#endif + default: + panic("Unsupported architecture"); + } + + strlcpy(command_line, arcs_cmdline, sizeof command_line); + strlcpy(saved_command_line, command_line, sizeof saved_command_line); + *cmdline_p = command_line; + + parse_cmdline_early(); + + bootmem_init(); + + paging_init(); + + resource_init(); } -void r4k_wait(void) +int __init fpu_disable(char *s) { - __asm__(".set\tmips3\n\t" - "wait\n\t" - ".set\tmips0"); + cpu_data[0].options &= ~MIPS_CPU_FPU; + + return 1; } +__setup("nofpu", fpu_disable); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 63342be2b43a..ef9ba8509425 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -10,19 +10,22 @@ #include <linux/config.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/personality.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> -#include <linux/ptrace.h> #include <linux/unistd.h> #include <asm/asm.h> #include <asm/bitops.h> -#include <asm/pgalloc.h> -#include <asm/stackframe.h> +#include <asm/cacheflush.h> +#include <asm/cpu.h> +#include <asm/fpu.h> +#include <asm/offset.h> +#include <asm/ptrace.h> #include <asm/uaccess.h> #include <asm/ucontext.h> @@ -32,17 +35,13 @@ extern asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs); -extern asmlinkage int (*save_fp_context)(struct sigcontext *sc); -extern asmlinkage int (*restore_fp_context)(struct sigcontext *sc); - -extern asmlinkage void 
syscall_trace(void);
+extern asmlinkage void do_syscall_trace(void);
 /*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
 save_static_function(sys_sigsuspend);
-static_unused int
-_sys_sigsuspend(struct pt_regs regs)
+static_unused int _sys_sigsuspend(struct pt_regs regs)
 {
 sigset_t *uset, saveset, newset;
@@ -51,11 +50,11 @@ _sys_sigsuspend(struct pt_regs regs)
 return -EFAULT;
 sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
+ spin_lock_irq(&current->sighand->siglock);
 saveset = current->blocked;
 current->blocked = newset;
 recalc_sigpending();
- spin_unlock_irq(&current->sigmask_lock);
+ spin_unlock_irq(&current->sighand->siglock);
 regs.regs[2] = EINTR;
 regs.regs[7] = 1;
@@ -67,10 +66,8 @@ _sys_sigsuspend(struct pt_regs regs)
 }
 }
-
 save_static_function(sys_rt_sigsuspend);
-static_unused int
-_sys_rt_sigsuspend(struct pt_regs regs)
+static_unused int _sys_rt_sigsuspend(struct pt_regs regs)
 {
 sigset_t *unewset, saveset, newset;
 size_t sigsetsize;
@@ -85,11 +82,11 @@ _sys_rt_sigsuspend(struct pt_regs regs)
 return -EFAULT;
 sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
+ spin_lock_irq(&current->sighand->siglock);
 saveset = current->blocked;
 current->blocked = newset;
 recalc_sigpending();
- spin_unlock_irq(&current->sigmask_lock);
+ spin_unlock_irq(&current->sighand->siglock);
 regs.regs[2] = EINTR;
 regs.regs[7] = 1;
@@ -101,8 +98,8 @@ _sys_rt_sigsuspend(struct pt_regs regs)
 }
 }
-asmlinkage int
-sys_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
+asmlinkage int sys_sigaction(int sig, const struct sigaction *act,
+ struct sigaction *oact)
 {
 struct k_sigaction new_ka, old_ka;
 int ret;
@@ -116,7 +113,6 @@ sys_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
 err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
 err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
 err |= __get_user(mask, &act->sa_mask.sig[0]);
- err |= __get_user(new_ka.sa.sa_restorer, &act->sa_restorer);
 if (err)
 return -EFAULT;
@@ -134,7 +130,6 @@ sys_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
 err |= __put_user(0, &oact->sa_mask.sig[1]);
 err |= __put_user(0, &oact->sa_mask.sig[2]);
 err |= __put_user(0, &oact->sa_mask.sig[3]);
- err |= __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer);
 if (err)
 return -EFAULT;
 }
@@ -142,8 +137,7 @@ sys_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
 return ret;
 }
-asmlinkage int
-sys_sigaltstack(struct pt_regs regs)
+asmlinkage int sys_sigaltstack(struct pt_regs regs)
 {
 const stack_t *uss = (const stack_t *) regs.regs[4];
 stack_t *uoss = (stack_t *) regs.regs[5];
@@ -152,10 +146,8 @@ sys_sigaltstack(struct pt_regs regs)
 return do_sigaltstack(uss, uoss, usp);
 }
-asmlinkage int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 {
- int owned_fp;
 int err = 0;
 u64 reg;
@@ -183,10 +175,15 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 restore_gp_reg(31);
#undef restore_gp_reg
- err |= __get_user(owned_fp, &sc->sc_ownedfp);
- if (owned_fp) {
+ err |= __get_user(current->used_math, &sc->sc_used_math);
+
+ if (current->used_math) {
+ /* restore fpu context if we have used it before */
+ own_fpu();
 err |= restore_fp_context(sc);
- last_task_used_math = current;
+ } else {
+ /* signal handler may have used FPU. Give it up. 
*/
+ loose_fpu();
 }
 return err;
@@ -206,8 +203,7 @@ struct rt_sigframe {
 struct ucontext rs_uc;
 };
-asmlinkage void
-sys_sigreturn(struct pt_regs regs)
+asmlinkage void sys_sigreturn(struct pt_regs regs)
 {
 struct sigframe *frame;
 sigset_t blocked;
@@ -219,10 +215,10 @@ sys_sigreturn(struct pt_regs regs)
 goto badframe;
 sigdelsetmask(&blocked, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
+ spin_lock_irq(&current->sighand->siglock);
 current->blocked = blocked;
 recalc_sigpending();
- spin_unlock_irq(&current->sigmask_lock);
+ spin_unlock_irq(&current->sighand->siglock);
 if (restore_sigcontext(&regs, &frame->sf_sc))
 goto badframe;
@@ -230,11 +226,11 @@ sys_sigreturn(struct pt_regs regs)
 /*
 * Don't let your children do this ...
 */
- if (current->ptrace & PT_TRACESYS)
- syscall_trace();
+ if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
+ do_syscall_trace();
 __asm__ __volatile__(
 "move\t$29, %0\n\t"
- "j\tret_from_sys_call"
+ "j\tsyscall_exit"
 :/* no outputs */
 :"r" (&regs));
 /* Unreached */
@@ -243,8 +239,7 @@ badframe:
 force_sig(SIGSEGV, current);
 }
-asmlinkage void
-sys_rt_sigreturn(struct pt_regs regs)
+asmlinkage void sys_rt_sigreturn(struct pt_regs regs)
 {
 struct rt_sigframe *frame;
 sigset_t set;
@@ -257,10 +252,10 @@ sys_rt_sigreturn(struct pt_regs regs)
 goto badframe;
 sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sigmask_lock);
+ spin_lock_irq(&current->sighand->siglock);
 current->blocked = set;
 recalc_sigpending();
- spin_unlock_irq(&current->sigmask_lock);
+ spin_unlock_irq(&current->sighand->siglock);
 if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
 goto badframe;
@@ -276,7 +271,7 @@ sys_rt_sigreturn(struct pt_regs regs)
 */
 __asm__ __volatile__(
 "move\t$29, %0\n\t"
- "j\tret_from_sys_call"
+ "j\tsyscall_exit"
 :/* no outputs */
 :"r" (&regs));
 /* Unreached */
@@ -285,21 +280,20 @@ badframe:
 force_sig(SIGSEGV, current);
 }
-static inline int
-setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+static inline int setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 {
- int owned_fp;
 int err = 0;
 u64 reg;
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+ reg = regs->cp0_epc; err |= __put_user(reg, &sc->sc_pc);
 err |= __put_user(regs->cp0_status, &sc->sc_status);
#define save_gp_reg(i) { \
 reg = regs->regs[i]; \
 err |= __put_user(reg, &sc->sc_regs[i]); \
} while(0)
- __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
+ reg = 0; err |= __put_user(reg, &sc->sc_regs[0]);
+ save_gp_reg(1); save_gp_reg(2);
 save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
 save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
 save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
@@ -310,36 +304,48 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 save_gp_reg(31);
#undef save_gp_reg
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
+ reg = regs->hi; err |= __put_user(reg, &sc->sc_mdhi);
+ reg = regs->lo; err |= __put_user(reg, &sc->sc_mdlo);
 err |= __put_user(regs->cp0_cause, &sc->sc_cause);
 err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
- owned_fp = (current == last_task_used_math);
- err |= __put_user(owned_fp, &sc->sc_ownedfp);
+ err |= __put_user(current->used_math, &sc->sc_used_math);
+
+ if (!current->used_math)
+ goto out;
- if (current->used_math) { /* fp is active. */
- set_cp0_status(ST0_CU1);
- err |= save_fp_context(sc);
- last_task_used_math = NULL;
- regs->cp0_status &= ~ST0_CU1;
- current->used_math = 0;
+ /*
+ * Save FPU state to signal context. 
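A note on the staging through the u64 local "reg" visible in setup_sigcontext() above: the sc_regs[] slots are 64-bit, and the width __put_user() stores follows the type of the value it is handed, so putting a narrower value directly would leave part of each slot stale. A userspace illustration, with memcpy standing in for __put_user() (put_val is a stand-in, not a kernel API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for __put_user(): store width follows the value's type. */
#define put_val(val, ptr) (memcpy((ptr), &(val), sizeof(val)), 0)

int main(void)
{
	uint64_t slot = ~0ULL;        /* a 64-bit sc_regs[] slot */
	uint32_t narrow = 0x12345678; /* a 32-bit register value */
	uint64_t reg;

	put_val(narrow, &slot);       /* 4 of 8 bytes keep stale data
	                                 (which 4 depends on endianness) */
	printf("unstaged: %016llx\n", (unsigned long long)slot);

	slot = ~0ULL;
	reg = narrow;                 /* the staging step */
	put_val(reg, &slot);          /* all 8 bytes written */
	printf("staged:   %016llx\n", (unsigned long long)slot);
	return 0;
}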
Signal handler will "inherit" + * current FPU state. + */ + if (!is_fpu_owner()) { + own_fpu(); + restore_fp(current); } + err |= save_fp_context(sc); +out: return err; } /* * Determine which stack to use.. */ -static inline void * -get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) +static inline void * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size) { unsigned long sp; /* Default to using normal stack */ sp = regs->regs[29]; + /* + * FPU emulator may have it's own trampoline active just + * above the user stack, 16-bytes before the next lowest + * 16 byte boundary. Try to avoid trashing it. + */ + sp -= 32; + /* This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && ! on_sig_stack(sp)) sp = current->sas_ss_sp + current->sas_ss_size; @@ -347,9 +353,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) return (void *)((sp - frame_size) & ALMASK); } -static void inline -setup_frame(struct k_sigaction * ka, struct pt_regs *regs, - int signr, sigset_t *set) +static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs, + int signr, sigset_t *set) { struct sigframe *frame; int err = 0; @@ -358,23 +363,15 @@ setup_frame(struct k_sigaction * ka, struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - /* Set up to return from userspace. If provided, use a stub already - in userspace. */ - if (ka->sa.sa_flags & SA_RESTORER) - regs->regs[31] = (unsigned long) ka->sa.sa_restorer; - else { - /* - * Set up the return code ... - * - * li v0, __NR_sigreturn - * syscall - */ - err |= __put_user(0x24020000 + __NR_sigreturn, - frame->sf_code + 0); - err |= __put_user(0x0000000c , - frame->sf_code + 1); - flush_cache_sigtramp((unsigned long) frame->sf_code); - } + /* + * Set up the return code ... + * + * li v0, __NR_sigreturn + * syscall + */ + err |= __put_user(0x24020000 + __NR_sigreturn, frame->sf_code + 0); + err |= __put_user(0x0000000c , frame->sf_code + 1); + flush_cache_sigtramp((unsigned long) frame->sf_code); err |= setup_sigcontext(regs, &frame->sf_sc); err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); @@ -399,8 +396,9 @@ setup_frame(struct k_sigaction * ka, struct pt_regs *regs, regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; #if DEBUG_SIG - printk("SIG deliver (%s:%d): sp=0x%p pc=0x%p ra=0x%p\n", - current->comm, current->pid, frame, regs->cp0_epc, frame->code); + printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n", + current->comm, current->pid, + frame, regs->cp0_epc, frame->sf_code); #endif return; @@ -410,9 +408,8 @@ give_sigsegv: force_sig(SIGSEGV, current); } -static void inline -setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, - int signr, sigset_t *set, siginfo_t *info) +static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, + int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe *frame; int err = 0; @@ -421,23 +418,15 @@ setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - /* Set up to return from userspace. If provided, use a stub already - in userspace. */ - if (ka->sa.sa_flags & SA_RESTORER) - regs->regs[31] = (unsigned long) ka->sa.sa_restorer; - else { - /* - * Set up the return code ... 
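The two words written into the sf_code trampoline above are literal MIPS machine code: 0x24020000 + nr encodes addiu $v0, $zero, nr (which the assembler spells li v0, nr), and 0x0000000c is syscall. A decoder sketch; the o32 value of __NR_sigreturn is assumed here to be 4000 + 119, so check asm-mips/unistd.h for the authoritative number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned nr = 4119;                  /* assumed o32 __NR_sigreturn */
	uint32_t li_v0    = 0x24020000 + nr; /* addiu $v0, $zero, nr */
	uint32_t syscall_ = 0x0000000c;      /* syscall */

	/* I-type fields: opcode(6) rs(5) rt(5) imm(16); opcode 9 == ADDIU */
	printf("opcode=%u rs=%u rt=%u imm=%u\n",
	       li_v0 >> 26, (li_v0 >> 21) & 31, (li_v0 >> 16) & 31,
	       li_v0 & 0xffff);
	printf("trampoline words: %08x %08x\n", li_v0, syscall_);
	return 0;
}

This is also why flush_cache_sigtramp() follows the stores: the words are written through the data cache but fetched through the instruction cache.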
- *
- * li v0, __NR_rt_sigreturn
- * syscall
- */
- err |= __put_user(0x24020000 + __NR_rt_sigreturn,
- frame->rs_code + 0);
- err |= __put_user(0x0000000c ,
- frame->rs_code + 1);
- flush_cache_sigtramp((unsigned long) frame->rs_code);
- }
+ /*
+ * Set up the return code ...
+ *
+ * li v0, __NR_rt_sigreturn
+ * syscall
+ */
+ err |= __put_user(0x24020000 + __NR_rt_sigreturn, frame->rs_code + 0);
+ err |= __put_user(0x0000000c , frame->rs_code + 1);
+ flush_cache_sigtramp((unsigned long) frame->rs_code);
 /* Create siginfo. */
 err |= copy_siginfo_to_user(&frame->rs_info, info);
@@ -475,8 +464,9 @@ setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=0x%p pc=0x%p ra=0x%p\n",
- current->comm, current->pid, frame, regs->cp0_epc, frame->code);
+ printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, frame->rs_code);
#endif
 return;
@@ -486,31 +476,11 @@ give_sigsegv:
 force_sig(SIGSEGV, current);
 }
-static inline void
-handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
- struct pt_regs * regs)
+static inline void handle_signal(unsigned long sig, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs * regs)
 {
- struct k_sigaction *ka = &current->sig->action[sig-1];
+ struct k_sigaction *ka = &current->sighand->action[sig-1];
- if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, sig, oldset, info);
- else
- setup_frame(ka, regs, sig, oldset);
-
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sigmask_lock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sigmask_lock);
- }
-}
-
-static inline void
-syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
-{
 switch(regs->regs[0]) {
 case ERESTARTNOHAND:
 regs->regs[2] = EINTR;
@@ -527,27 +497,33 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
 }
 regs->regs[0] = 0; /* Don't deal with this again. */
-}
-extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(ka, regs, sig, oldset, info);
+ else
+ setup_frame(ka, regs, sig, oldset);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
 asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
 {
 siginfo_t info;
- int sig_nr;
-
-#ifdef CONFIG_BINFMT_IRIX
- if (current->personality != PER_LINUX)
- return do_irix_signal(oldset, regs);
-#endif
+ int signr;
 if (!oldset)
 oldset = &current->blocked;
 signr = get_signal_to_deliver(&info, regs, NULL);
 if (signr > 0) {
- if (regs->regs[0])
- syscall_restart(regs, ka);
 /* Whee! Actually deliver the signal. 
*/ handle_signal(signr, &info, oldset, regs); return 1; @@ -568,3 +544,24 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs) } return 0; } + +extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs); + +/* + * notification of userspace execution resumption + * - triggered by current->work.notify_resume + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, + __u32 thread_info_flags) +{ + /* deal with pending signal delivery */ + if (thread_info_flags & _TIF_SIGPENDING) { +#ifdef CONFIG_BINFMT_IRIX + if (unlikely(current->personality != PER_LINUX)) { + do_irix_signal(oldset, regs); + return; + } +#endif + do_signal(oldset, regs); + } +} diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 36e55ea4d57e..5532635e2da0 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -1,11 +1,4 @@ /* - * - * arch/mips/kernel/smp.c - * - * Copyright (C) 2000 Sibyte - * - * Written by Justin Carlson (carlson@sibyte.com) - * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 @@ -15,229 +8,256 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - */ - - + * Copyright (C) 2000, 2001 Kanoj Sarcar + * Copyright (C) 2000, 2001 Ralf Baechle + * Copyright (C) 2000, 2001 Silicon Graphics, Inc. + * Copyright (C) 2000, 2001 Broadcom Corporation + */ #include <linux/config.h> +#include <linux/cache.h> +#include <linux/delay.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/threads.h> +#include <linux/module.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/cache.h> #include <asm/atomic.h> +#include <asm/cpu.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> -#include <asm/delay.h> #include <asm/smp.h> -/* - * This was written with the BRCM12500 MP SOC in mind, but tries to - * be generic. It's modelled on the mips64 smp.c code, which is - * derived from Sparc, I'm guessing, which is derived from... - * - * It's probably horribly designed for very large ccNUMA systems - * as it doesn't take any node clustering into account. -*/ - - -/* Ze Big Kernel Lock! */ -int smp_threads_ready; /* Not used */ -int smp_num_cpus; -int global_irq_holder = NO_PROC_ID; -spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED; -struct mips_cpuinfo cpu_data[NR_CPUS]; +int smp_threads_ready; /* Not used */ -struct smp_fn_call_struct smp_fn_call = -{ SPIN_LOCK_UNLOCKED, ATOMIC_INIT(0), NULL, NULL}; - -static atomic_t cpus_booted = ATOMIC_INIT(0); +// static atomic_t cpus_booted = ATOMIC_INIT(0); +atomic_t cpus_booted = ATOMIC_INIT(0); +cpumask_t phys_cpu_present_map; /* Bitmask of physically CPUs */ +cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ +int __cpu_number_map[NR_CPUS]; +int __cpu_logical_map[NR_CPUS]; /* These are defined by the board-specific code. */ -/* Cause the function described by smp_fn_call - to be executed on the passed cpu. 
When the function
- has finished, increment the finished field of
- smp_fn_call. */
-
-void core_call_function(int cpu);
+/*
+ * Cause the function described by call_data to be executed on the passed
+ * cpu. When the function has finished, increment the finished field of
+ * call_data.
+ */
+void core_send_ipi(int cpu, unsigned int action);
 /*
 * Clear all undefined state in the cpu, set up sp and gp to the passed
- * values, and kick the cpu into smp_bootstrap();
+ * values, and kick the cpu into smp_bootstrap();
 */
 void prom_boot_secondary(int cpu, unsigned long sp, unsigned long gp);
 /*
 * After we've done initial boot, this function is called to allow the
- * board code to clean up state, if needed
+ * board code to clean up state, if needed
 */
-
 void prom_init_secondary(void);
+void prom_smp_finish(void);
-void cpu_idle(void);
-
-/* Do whatever setup needs to be done for SMP at the board level. Return
- the number of cpus in the system, including this one */
-int prom_setup_smp(void);
-
-int start_secondary(void *unused)
-{
- prom_init_secondary();
- write_32bit_cp0_register(CP0_CONTEXT, smp_processor_id()<<23);
- current_pgd[smp_processor_id()] = init_mm.pgd;
- printk("Slave cpu booted successfully\n");
- atomic_inc(&cpus_booted);
- cpu_idle();
- return 0;
-}
+cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
-void __init smp_boot_cpus(void)
+void smp_tune_scheduling (void)
 {
- int i;
-
- smp_num_cpus = prom_setup_smp();
- init_new_context(current, &init_mm);
- current->processor = 0;
- atomic_set(&cpus_booted, 1); /* Master CPU is already booted... */
- init_idle();
- for (i = 1; i < smp_num_cpus; i++) {
- struct task_struct *p;
- struct pt_regs regs;
- printk("Starting CPU %d... ", i);
-
- /* Spawn a new process normally. Grab a pointer to
- its task struct so we can mess with it */
- p = do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0);
-
- /* Schedule the first task manually */
- p->processor = i;
- p->cpus_runnable = 1 << i; /* we schedule the first task manually */
-
- /* Attach to the address space of init_task. */
- atomic_inc(&init_mm.mm_count);
- p->active_mm = &init_mm;
- init_tasks[i] = p;
-
- del_from_runqueue(p);
- unhash_process(p);
-
- prom_boot_secondary(i,
- (unsigned long)p + KERNEL_STACK_SIZE - 32,
- (unsigned long)p);
+ struct cache_desc *cd = &current_cpu_data.scache;
+ unsigned long cachesize; /* kB */
+ unsigned long bandwidth = 350; /* MB/s */
+ unsigned long cpu_khz;
-#if 0
-
- /* This is copied from the ip-27 code in the mips64 tree */
-
- struct task_struct *p;
+ /*
+ * Crude estimate until we actually measure ...
+ */
+ cpu_khz = loops_per_jiffy * 2 * HZ / 1000;
+ /*
+ * Rough estimation for SMP scheduling, this is the number of
+ * cycles it takes for a fully memory-limited process to flush
+ * the SMP-local cache.
+ *
+ * (For a P5 this pretty much means we will choose another idle
+ * CPU almost always at wakeup time (this is due to the small
+ * L1 cache), on PIIs it's around 50-100 usecs, depending on
+ * the cache size)
+ */
+ if (!cpu_khz) {
 /*
- * The following code is purely to make sure
- * Linux can schedule processes on this slave.
+ * This basically disables processor-affinity scheduling on SMP
+ * without a cycle counter. Currently all SMP capable MIPS
+ * processors have a cycle counter. 
*/ - kernel_thread(0, NULL, CLONE_IDLETASK); - p = prev_task(&init_task); - sprintf(p->comm, "%s%d", "Idle", i); - init_tasks[i] = p; - p->processor = i; - p->cpus_runnable = 1 << i; /* we schedule the first task manually */ - del_from_runqueue(p); - unhash_process(p); - /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); - p->active_mm = &init_mm; - prom_boot_secondary(i, - (unsigned long)p + KERNEL_STACK_SIZE - 32, - (unsigned long)p); -#endif + cacheflush_time = 0; + return; } - /* Wait for everyone to come up */ - while (atomic_read(&cpus_booted) != smp_num_cpus); + cachesize = cd->linesz * cd->sets * cd->ways; + cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth; + cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000; + + printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n", + (long)cacheflush_time/(cpu_khz/1000), + ((long)cacheflush_time*100/(cpu_khz/1000)) % 100); + printk("task migration cache decay timeout: %ld msecs.\n", + (cache_decay_ticks + 1) * 1000 / HZ); } -void __init smp_commence(void) +void __init smp_callin(void) { - /* Not sure what to do here yet */ +#if 0 + calibrate_delay(); + smp_store_cpu_info(cpuid); +#endif } -static void reschedule_this_cpu(void *dummy) +#ifndef CONFIG_SGI_IP27 +/* + * Hook for doing final board-specific setup after the generic smp setup + * is done + */ +asmlinkage void start_secondary(void) { - current->work.need_resched = 1; + unsigned int cpu = smp_processor_id(); + + cpu_probe(); + prom_init_secondary(); + per_cpu_trap_init(); + + /* + * XXX parity protection should be folded in here when it's converted + * to an option instead of something based on .cputype + */ + pgd_current[cpu] = init_mm.pgd; + cpu_data[cpu].udelay_val = loops_per_jiffy; + prom_smp_finish(); + printk("Slave cpu booted successfully\n"); + CPUMASK_SETB(cpu_online_map, cpu); + atomic_inc(&cpus_booted); + cpu_idle(); } +#endif /* CONFIG_SGI_IP27 */ -void FASTCALL(smp_send_reschedule(int cpu)) +/* + * this function sends a 'reschedule' IPI to another CPU. + * it goes straight through and wastes no time serializing + * anything. Worst case is that we lose a reschedule ... + */ +void smp_send_reschedule(int cpu) { - smp_call_function(reschedule_this_cpu, NULL, 0, 0); + core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF); } +static spinlock_t call_lock = SPIN_LOCK_UNLOCKED; + +struct call_data_struct *call_data; /* - * The caller of this wants the passed function to run on every cpu. If wait - * is set, wait until all cpus have finished the function before returning. - * The lock is here to protect the call structure. + * Run a function on all other CPUs. + * <func> The function to run. This must be fast and non-blocking. + * <info> An arbitrary pointer to pass to the function. + * <retry> If true, keep retrying until ready. + * <wait> If true, wait until function has completed on other CPUs. + * [RETURNS] 0 on success, else a negative status code. + * + * Does not return until remote CPUs are nearly ready to execute <func> + * or are or have executed. + * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. 
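Plugging hypothetical numbers into the cacheflush_time estimate above shows the scale involved; a 500 MHz core and 512 KB secondary cache are assumed here, with the 350 MB/s bandwidth guess from the code:

#include <stdio.h>

int main(void)
{
	unsigned long cpu_khz = 500000;  /* assumed 500 MHz core */
	unsigned long cachesize = 512;   /* kB, i.e. linesz * sets * ways */
	unsigned long bandwidth = 350;   /* MB/s, the code's guess */
	unsigned long cacheflush_time;   /* in CPU cycles */

	cacheflush_time = (cpu_khz >> 10) * (cachesize << 10) / bandwidth;
	printf("%lu cycles ~= %lu usecs\n",
	       cacheflush_time, cacheflush_time / (cpu_khz / 1000));
	/* prints: 731007 cycles ~= 1462 usecs, i.e. roughly the
	   512 KB / 350 MB/s = 1.46 ms one would expect */
	return 0;
}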
*/
-int smp_call_function (void (*func) (void *info), void *info, int retry,
+int smp_call_function (void (*func) (void *info), void *info, int retry,
 int wait)
 {
- int cpus = smp_num_cpus - 1;
- int i;
+ struct call_data_struct data;
+ int i, cpus = num_online_cpus() - 1;
+ int cpu = smp_processor_id();
- if (smp_num_cpus < 2) {
+ if (!cpus)
 return 0;
- }
- spin_lock(&smp_fn_call.lock);
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
- atomic_set(&smp_fn_call.finished, 0);
- smp_fn_call.fn = func;
- smp_fn_call.data = info;
+ spin_lock(&call_lock);
+ call_data = &data;
- for (i = 0; i < smp_num_cpus; i++) {
- if (i != smp_processor_id()) {
- /* Call the board specific routine */
- core_call_function(i);
- }
- }
+ /* Send a message to all other CPUs and wait for them to respond */
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_online(i) && i != cpu)
+ core_send_ipi(i, SMP_CALL_FUNCTION);
- if (wait) {
- while(atomic_read(&smp_fn_call.finished) != cpus) {}
- }
+ /* Wait for response */
+ /* FIXME: lock-up detection, backtrace on lock-up */
+ while (atomic_read(&data.started) != cpus)
+ barrier();
+
+ if (wait)
+ while (atomic_read(&data.finished) != cpus)
+ barrier();
+ spin_unlock(&call_lock);
- spin_unlock(&smp_fn_call.lock);
 return 0;
 }
-void synchronize_irq(void)
+void smp_call_function_interrupt(void)
 {
- panic("synchronize_irq");
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ irq_enter();
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function.
+ */
+ mb();
+ atomic_inc(&call_data->started);
+
+ /*
+ * At this point the info structure may be out of scope unless wait==1.
+ */
+ (*func)(info);
+ irq_exit();
+
+ if (wait) {
+ mb();
+ atomic_inc(&call_data->finished);
+ }
 }
 static void stop_this_cpu(void *dummy)
 {
- printk("Cpu stopping\n");
- for (;;);
+ /*
+ * Remove this CPU:
+ */
+ clear_bit(smp_processor_id(), &cpu_online_map);
+ local_irq_enable(); /* May need to service _machine_restart IPI */
+ for (;;); /* Wait if available. */
 }
 void smp_send_stop(void)
 {
 smp_call_function(stop_this_cpu, NULL, 1, 0);
- smp_num_cpus = 1;
 }
 /* Not really SMP stuff ... */
@@ -246,157 +266,142 @@ int setup_profiling_timer(unsigned int multiplier)
 return 0;
 }
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
-/*
- * Most of this code is take from the mips64 tree (ip27-irq.c). It's virtually
- * identical to the i386 implentation in arh/i386/irq.c, with translations for
- * the interrupt enable bit
- */
-
-#define MAXCOUNT 100000000
-#define SYNC_OTHER_CORES(x) udelay(x+1)
+void flush_tlb_all(void)
+{
+ on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+}
-static inline void wait_on_irq(int cpu)
+static void flush_tlb_mm_ipi(void *mm)
 {
- int count = MAXCOUNT;
+ local_flush_tlb_mm((struct mm_struct *)mm);
+}
- for (;;) {
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and tlb
+ * context on other cpus are invalidated to force a new context allocation
+ * at switch_mm time, should the mm ever be used on other cpus. For
+ * multithreaded address spaces, intercpu interrupts have to be sent. 
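A hypothetical caller of the smp_call_function() interface above, in kernel style; per the comment block, the function must be fast and non-blocking, and the call may not be made with interrupts disabled or from interrupt context:

/* Bump a counter on every other CPU and wait for completion. */
static atomic_t hits = ATOMIC_INIT(0);

static void count_me(void *unused)
{
	atomic_inc(&hits);	/* fast, non-blocking */
}

static void poke_all_cpus(void)
{
	/* retry=1, wait=1: returns once all remote CPUs have run it */
	smp_call_function(count_me, NULL, 1, 1);
	count_me(NULL);		/* smp_call_function skips the caller */
}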
+ * Another case where intercpu interrupts are required is when the target + * mm might be active on another cpu (eg debuggers doing the flushes on + * behalf of debugees, kswapd stealing pages from another process etc). + * Kanoj 07/00. + */ - /* - * Wait until all interrupts are gone. Wait - * for bottom half handlers unless we're - * already executing in one.. - */ - if (!irqs_running()) - if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock)) - break; - - /* Duh, we have to loop. Release the lock to avoid deadlocks */ - spin_unlock(&global_irq_lock); - - for (;;) { - if (!--count) { - printk("Count spun out. Huh?\n"); - count = ~0; - } - local_irq_enable(); - SYNC_OTHER_CORES(cpu); - local_irq_disable(); - if (irqs_running()) - continue; - if (spin_is_locked(&global_irq_lock)) - continue; - if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock)) - continue; - if (spin_trylock(&global_irq_lock)) - break; - } +void flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1); + } else { + int i; + for (i = 0; i < num_online_cpus(); i++) + if (smp_processor_id() != i) + cpu_context(i, mm) = 0; } + local_flush_tlb_mm(mm); + + preempt_enable(); } +struct flush_tlb_data { + struct vm_area_struct *vma; + unsigned long addr1; + unsigned long addr2; +}; -static inline void get_irqlock(int cpu) +static void flush_tlb_range_ipi(void *info) { - if (!spin_trylock(&global_irq_lock)) { - /* do we already hold the lock? */ - if ((unsigned char) cpu == global_irq_holder) - return; - /* Uhhuh.. Somebody else got it. Wait.. */ - spin_lock(&global_irq_lock); - } - /* - * We also to make sure that nobody else is running - * in an interrupt context. - */ - wait_on_irq(cpu); + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; - /* - * Ok, finally.. - */ - global_irq_holder = cpu; + local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); } - -/* - * A global "cli()" while in an interrupt context - * turns into just a local cli(). Interrupts - * should use spinlocks for the (very unlikely) - * case that they ever want to protect against - * each other. - * - * If we already have local interrupts disabled, - * this will not turn a local disable into a - * global one (problems with spinlocks: this makes - * save_flags+cli+sti usable inside a spinlock). 
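The guard that flush_tlb_mm() and its siblings apply reduces to one predicate: interrupt other CPUs only if somebody else might hold live translations for the address space. An illustrative sketch, not the kernel function itself:

static int tlb_flush_needs_ipi(struct mm_struct *mm)
{
	/* More than one user, or not the current mm: another CPU may
	 * be using this address space right now. */
	return atomic_read(&mm->mm_users) != 1 || current->mm != mm;
}

In the single-user case the code instead zeroes cpu_context() for the other CPUs, deferring the cost to their next switch_mm().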
- */ -void __global_cli(void) +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned int flags; - - local_save_flags(flags); - if (flags & ST0_IE) { - int cpu = smp_processor_id(); - local_irq_disable(); - if (!local_irq_count(cpu)) - get_irqlock(cpu); + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + struct flush_tlb_data fd; + + fd.vma = vma; + fd.addr1 = start; + fd.addr2 = end; + smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); + } else { + int i; + for (i = 0; i < num_online_cpus(); i++) + if (smp_processor_id() != i) + cpu_context(i, mm) = 0; } + local_flush_tlb_range(vma, start, end); + preempt_enable(); } -void __global_sti(void) +static void flush_tlb_kernel_range_ipi(void *info) { - int cpu = smp_processor_id(); + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; - if (!local_irq_count(cpu)) - release_irqlock(cpu); - local_irq_enable(); + local_flush_tlb_kernel_range(fd->addr1, fd->addr2); } -/* - * SMP flags value to restore to: - * 0 - global cli - * 1 - global sti - * 2 - local cli - * 3 - local sti - */ -unsigned long __global_save_flags(void) +void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - int retval; - int local_enabled; - unsigned long flags; - int cpu = smp_processor_id(); + struct flush_tlb_data fd; - local_save_flags(flags); - local_enabled = (flags & ST0_IE); - /* default to local */ - retval = 2 + local_enabled; - - /* check for global flags if we're not in an interrupt */ - if (!local_irq_count(cpu)) { - if (local_enabled) - retval = 1; - if (global_irq_holder == cpu) - retval = 0; - } + fd.addr1 = start; + fd.addr2 = end; + on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1); +} - return retval; +static void flush_tlb_page_ipi(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_page(fd->vma, fd->addr1); } -void __global_restore_flags(unsigned long flags) +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { - switch (flags) { - case 0: - __global_cli(); - break; - case 1: - __global_sti(); - break; - case 2: - local_irq_disable(); - break; - case 3: - local_irq_enable(); - break; - default: - printk("global_restore_flags: %08lx\n", flags); + preempt_disable(); + if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { + struct flush_tlb_data fd; + + fd.vma = vma; + fd.addr1 = page; + smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1); + } else { + int i; + for (i = 0; i < num_online_cpus(); i++) + if (smp_processor_id() != i) + cpu_context(i, vma->vm_mm) = 0; } + local_flush_tlb_page(vma, page); + preempt_enable(); } + +static void flush_tlb_one_ipi(void *info) +{ + unsigned long vaddr = (unsigned long) info; + + local_flush_tlb_one(vaddr); +} + +void flush_tlb_one(unsigned long vaddr) +{ + smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1); + local_flush_tlb_one(vaddr); +} + +EXPORT_SYMBOL(flush_tlb_page); +EXPORT_SYMBOL(flush_tlb_one); +EXPORT_SYMBOL(cpu_data); +EXPORT_SYMBOL(synchronize_irq); diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index afbce4dc94a9..f6ca25bc6483 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -14,6 +14,7 @@ #undef CONF_DEBUG_IRIX #include <linux/config.h> +#include <linux/compiler.h> #include <linux/linkage.h> #include <linux/mm.h> #include <linux/smp.h> @@ -28,7 +29,7 @@ #include <asm/offset.h> #include <asm/ptrace.h> 
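The flush_tlb_data struct above is the standard idiom for pushing more than one argument through an IPI's single void * parameter. A generic kernel-style sketch (names invented); note that calling with wait=1 is what keeps the on-stack struct alive while remote CPUs read it:

struct two_args {
	unsigned long a, b;
};

static void remote_fn(void *info)
{
	struct two_args *p = info;
	/* ... use p->a and p->b on the remote CPU ... */
}

static void run_everywhere(unsigned long a, unsigned long b)
{
	struct two_args args = { a, b };

	/* wait=1: &args stays valid until every CPU has finished */
	on_each_cpu(remote_fn, &args, 1, 1);
}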
#include <asm/signal.h>
-#include <asm/stackframe.h>
+#include <asm/shmparam.h>
 #include <asm/uaccess.h>
 extern asmlinkage void syscall_trace(void);
@@ -54,6 +55,61 @@ out:
 return res;
 }
+unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+
+#define COLOUR_ALIGN(addr,pgoff) \
+ ((((addr) + shm_align_mask) & ~shm_align_mask) + \
+ (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct * vmm;
+ int do_color_align;
+
+ if (flags & MAP_FIXED) {
+ /*
+ * We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
+ addr = TASK_UNMAPPED_BASE;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vmm || addr + len <= vmm->vm_start)
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+}
+
 /* common code for old and new mmaps */
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
@@ -82,7 +138,16 @@ out:
 asmlinkage unsigned long
 old_mmap(unsigned long addr, size_t len, int prot, int flags, int fd,
 off_t offset)
 {
- return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ int result;
+
+ result = -EINVAL;
+ if (offset & ~PAGE_MASK)
+ goto out;
+
+ result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+
+out:
+ return result;
 }
 asmlinkage long
@@ -95,10 +160,7 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 save_static_function(sys_fork);
 static_unused int _sys_fork(struct pt_regs regs)
 {
- struct task_struct *p;
-
- p = do_fork(SIGCHLD, regs.regs[29], &regs, 0);
- return IS_ERR(p) ? PTR_ERR(p) : p->pid;
+ return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
 }
@@ -107,14 +169,16 @@ static_unused int _sys_clone(struct pt_regs regs)
 {
 unsigned long clone_flags;
 unsigned long newsp;
- struct task_struct *p;
+ int *parent_tidptr, *child_tidptr;
 clone_flags = regs.regs[4];
 newsp = regs.regs[5];
 if (!newsp)
 newsp = regs.regs[29];
- p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0);
- return IS_ERR(p) ? PTR_ERR(p) : p->pid;
+ parent_tidptr = (int *) regs.regs[6];
+ child_tidptr = (int *) regs.regs[7];
+ return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0,
+ parent_tidptr, child_tidptr);
 }
 /*
@@ -158,7 +222,7 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
 return -EFAULT;
 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
 return -EFAULT;
-
 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
@@ -175,76 +239,6 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
 }
 /*
- * Do the indirect syscall syscall. 
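A userspace demonstration of the COLOUR_ALIGN() arithmetic added above: round the address up to an alias boundary, then add the colour of the file offset so virtual and physical cache indices agree. The 16 KB alias size (shm_align_mask = 0x3fff) is chosen for illustration, not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT 12
static unsigned long shm_align_mask = 0x3fffUL; /* hypothetical 16 KB */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + shm_align_mask) & ~shm_align_mask) + \
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

int main(void)
{
	/* 0x10001 rounds up to 0x14000, then page offset 1 contributes
	 * its colour 0x1000, giving 0x15000. */
	printf("0x%lx\n", COLOUR_ALIGN(0x10001UL, 1UL));
	return 0;
}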
- Don't care about kernel locking; the actual syscall will do it.
- *
- * XXX This is borken.
- */
-asmlinkage int sys_syscall(struct pt_regs regs)
-{
- syscall_t syscall;
- unsigned long syscallnr = regs.regs[4];
- unsigned long a0, a1, a2, a3, a4, a5, a6;
- int nargs, errno;
-
- if (syscallnr > __NR_Linux + __NR_Linux_syscalls)
- return -ENOSYS;
-
- syscall = sys_call_table[syscallnr];
- nargs = sys_narg_table[syscallnr];
- /*
- * Prevent stack overflow by recursive
- * syscall(__NR_syscall, __NR_syscall,...);
- */
- if (syscall == (syscall_t) sys_syscall) {
- return -EINVAL;
- }
-
- if (syscall == NULL) {
- return -ENOSYS;
- }
-
- if(nargs > 3) {
- unsigned long usp = regs.regs[29];
- unsigned long *sp = (unsigned long *) usp;
- if(usp & 3) {
- printk("unaligned usp -EFAULT\n");
- force_sig(SIGSEGV, current);
- return -EFAULT;
- }
- errno = verify_area(VERIFY_READ, (void *) (usp + 16),
- (nargs - 3) * sizeof(unsigned long));
- if(errno) {
- return -EFAULT;
- }
- switch(nargs) {
- case 7:
- a3 = sp[4]; a4 = sp[5]; a5 = sp[6]; a6 = sp[7];
- break;
- case 6:
- a3 = sp[4]; a4 = sp[5]; a5 = sp[6]; a6 = 0;
- break;
- case 5:
- a3 = sp[4]; a4 = sp[5]; a5 = a6 = 0;
- break;
- case 4:
- a3 = sp[4]; a4 = a5 = a6 = 0;
- break;
-
- default:
- a3 = a4 = a5 = a6 = 0;
- break;
- }
- } else {
- a3 = a4 = a5 = a6 = 0;
- }
- a0 = regs.regs[5]; a1 = regs.regs[6]; a2 = regs.regs[7];
- if(nargs == 0)
- a0 = (unsigned long) &regs;
- return syscall((void *)a0, a1, a2, a3, a4, a5, a6);
-}
-/*
 * If we ever come here the user sp is bad. Zap the process right away.
 * Due to the bad stack signaling wouldn't work.
 * XXX kernel locking???
diff --git a/arch/mips/kernel/syscalls.h b/arch/mips/kernel/syscalls.h
index 953186802c1b..7b52a1b197a5 100644
--- a/arch/mips/kernel/syscalls.h
+++ b/arch/mips/kernel/syscalls.h
@@ -3,7 +3,7 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 2001, 2002 by Ralf Baechle
 */
 /*
@@ -20,7 +20,7 @@ SYS(sys_fork, 0)
 SYS(sys_read, 3)
 SYS(sys_write, 3)
 SYS(sys_open, 3) /* 4005 */
-SYS(sys_close, 3)
+SYS(sys_close, 1)
 SYS(sys_waitpid, 3)
 SYS(sys_creat, 2)
 SYS(sys_link, 2)
@@ -32,7 +32,7 @@ SYS(sys_mknod, 3)
 SYS(sys_chmod, 2) /* 4015 */
 SYS(sys_lchown, 3)
 SYS(sys_ni_syscall, 0)
-SYS(sys_stat, 2)
+SYS(sys_ni_syscall, 0) /* was sys_stat */
 SYS(sys_lseek, 3)
 SYS(sys_getpid, 0) /* 4020 */
 SYS(sys_mount, 5)
@@ -42,7 +42,7 @@ SYS(sys_getuid, 0)
 SYS(sys_stime, 1) /* 4025 */
 SYS(sys_ptrace, 4)
 SYS(sys_alarm, 1)
-SYS(sys_fstat, 2)
+SYS(sys_ni_syscall, 0) /* was sys_fstat */
 SYS(sys_pause, 0)
 SYS(sys_utime, 2) /* 4030 */
 SYS(sys_ni_syscall, 0)
@@ -96,9 +96,9 @@ SYS(sys_gettimeofday, 2)
 SYS(sys_settimeofday, 2)
 SYS(sys_getgroups, 2) /* 4080 */
 SYS(sys_setgroups, 2)
-SYS(sys_ni_syscall, 0) /* old_select */
+SYS(sys_ni_syscall, 0) /* old_select */
 SYS(sys_symlink, 2)
-SYS(sys_lstat, 2)
+SYS(sys_ni_syscall, 0) /* was sys_lstat */
 SYS(sys_readlink, 3) /* 4085 */
 SYS(sys_uselib, 1)
 SYS(sys_swapon, 2)
@@ -115,7 +115,7 @@ SYS(sys_setpriority, 3)
 SYS(sys_ni_syscall, 0)
 SYS(sys_statfs, 2)
 SYS(sys_fstatfs, 2) /* 4100 */
-SYS(sys_ioperm, 3)
+SYS(sys_ni_syscall, 0) /* was ioperm(2) */
 SYS(sys_socketcall, 2)
 SYS(sys_syslog, 3)
 SYS(sys_setitimer, 3)
@@ -124,10 +124,10 @@ SYS(sys_newstat, 2)
 SYS(sys_newlstat, 2)
 SYS(sys_newfstat, 2)
 SYS(sys_uname, 1)
-SYS(sys_iopl, 0) /* Well, actually 17 args ... 
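The syscalls.h file being edited here is pure data: one SYS(handler, nargs) entry per slot, and the consumer defines SYS() before including it (the o32 dispatch table is built this way from assembly in scall_o32.S). A hypothetical C consumer, shown only to illustrate the expansion pattern:

/* Build a lookup table for the nargs column; illustrative sketch. */
#define SYS(fn, nargs) nargs,
static const unsigned char sys_narg_table[] = {
#include "syscalls.h"
};
#undef SYS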
*/ /* 4110 */ +SYS(sys_ni_syscall, 0) /* 4110 was iopl(2) */ SYS(sys_vhangup, 0) -SYS(sys_ni_syscall, 0) /* was sys_idle() */ -SYS(sys_vm86, 1) +SYS(sys_ni_syscall, 0) /* was sys_idle() */ +SYS(sys_ni_syscall, 0) /* was sys_vm86 */ SYS(sys_wait4, 4) SYS(sys_swapoff, 1) /* 4115 */ SYS(sys_sysinfo, 1) @@ -141,10 +141,10 @@ SYS(sys_ni_syscall, 0) /* sys_modify_ldt */ SYS(sys_adjtimex, 1) SYS(sys_mprotect, 3) /* 4125 */ SYS(sys_sigprocmask, 3) -SYS(sys_create_module, 2) +SYS(sys_ni_syscall, 0) /* was create_module */ SYS(sys_init_module, 5) SYS(sys_delete_module, 1) -SYS(sys_get_kernel_syms, 1) /* 4130 */ +SYS(sys_ni_syscall, 0) /* 4130, was get_kernel_syms */ SYS(sys_quotactl, 0) SYS(sys_getpgid, 1) SYS(sys_fchdir, 1) @@ -201,7 +201,7 @@ SYS(sys_socket, 3) SYS(sys_socketpair, 4) SYS(sys_setresuid, 3) /* 4185 */ SYS(sys_getresuid, 3) -SYS(sys_query_module, 5) +SYS(sys_ni_syscall, 0) /* sys_query_module */ SYS(sys_poll, 3) SYS(sys_nfsservctl, 3) SYS(sys_setresgid, 3) /* 4190 */ @@ -214,19 +214,19 @@ SYS(sys_rt_sigpending, 2) SYS(sys_rt_sigtimedwait, 4) SYS(sys_rt_sigqueueinfo, 3) SYS(sys_rt_sigsuspend, 0) -SYS(sys_pread, 6) /* 4200 */ -SYS(sys_pwrite, 6) +SYS(sys_pread64, 6) /* 4200 */ +SYS(sys_pwrite64, 6) SYS(sys_chown, 3) SYS(sys_getcwd, 2) SYS(sys_capget, 2) SYS(sys_capset, 2) /* 4205 */ SYS(sys_sigaltstack, 0) -SYS(sys_sendfile, 3) +SYS(sys_sendfile, 4) SYS(sys_ni_syscall, 0) SYS(sys_ni_syscall, 0) SYS(sys_mmap2, 6) /* 4210 */ -SYS(sys_truncate64, 2) -SYS(sys_ftruncate64, 2) +SYS(sys_truncate64, 4) +SYS(sys_ftruncate64, 4) SYS(sys_stat64, 2) SYS(sys_lstat64, 2) SYS(sys_fstat64, 2) /* 4215 */ @@ -235,5 +235,39 @@ SYS(sys_mincore, 3) SYS(sys_madvise, 3) SYS(sys_getdents64, 3) SYS(sys_fcntl64, 3) /* 4220 */ +SYS(sys_ni_syscall, 0) SYS(sys_gettid, 0) +SYS(sys_readahead, 5) +SYS(sys_setxattr, 5) +SYS(sys_lsetxattr, 5) /* 4225 */ +SYS(sys_fsetxattr, 5) +SYS(sys_getxattr, 4) +SYS(sys_lgetxattr, 4) +SYS(sys_fgetxattr, 4) +SYS(sys_listxattr, 3) /* 4230 */ +SYS(sys_llistxattr, 3) +SYS(sys_flistxattr, 3) +SYS(sys_removexattr, 2) +SYS(sys_lremovexattr, 2) +SYS(sys_fremovexattr, 2) /* 4235 */ SYS(sys_tkill, 2) +SYS(sys_sendfile64, 5) +SYS(sys_futex, 2) +SYS(sys_sched_setaffinity, 3) +SYS(sys_sched_getaffinity, 3) /* 4240 */ +SYS(sys_io_setup, 2) +SYS(sys_io_destroy, 1) +SYS(sys_io_getevents, 5) +SYS(sys_io_submit, 3) +SYS(sys_io_cancel, 3) /* 4245 */ +SYS(sys_exit_group, 1) +SYS(sys_lookup_dcookie, 3) +SYS(sys_epoll_create, 1) +SYS(sys_epoll_ctl, 4) +SYS(sys_epoll_wait, 3) /* 4250 */ +SYS(sys_remap_file_pages, 5) +SYS(sys_set_tid_address, 1) +SYS(sys_restart_syscall, 0) /* XXX */ +SYS(sys_fadvise64, 6) +SYS(sys_statfs64, 3) /* 4255 */ +SYS(sys_fstatfs64, 2) diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c index 7bdf103d4284..5722c28c1e9d 100644 --- a/arch/mips/kernel/sysirix.c +++ b/arch/mips/kernel/sysirix.c @@ -7,6 +7,8 @@ */ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/binfmts.h> +#include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/mm.h> #include <linux/mman.h> @@ -24,6 +26,8 @@ #include <linux/utsname.h> #include <linux/file.h> #include <linux/vfs.h> +#include <linux/namei.h> +#include <linux/socket.h> #include <asm/ptrace.h> #include <asm/page.h> @@ -55,7 +59,7 @@ asmlinkage int irix_sysmp(struct pt_regs *regs) break; case MP_NPROCS: case MP_NAPROCS: - error = smp_num_cpus; + error = num_online_cpus(); break; default: printk("SYSMP[%s:%d]: Unsupported opcode %d\n", @@ -111,7 +115,7 @@ asmlinkage int irix_prctl(struct pt_regs *regs) if 
(error)
 error = (task->run_list.next != NULL);
 read_unlock(&tasklist_lock);
- /* Can _your_ OS find this out that fast? */
+ /* Can _your_ OS find this out that fast? */
 break;
 }
@@ -334,7 +338,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
 current->comm, current->pid, name, value, retval);
 /* if (retval == PROM_ENOENT)
 retval = -ENOENT;
 */
- break;
+ break;
 }
#endif
@@ -507,7 +511,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
 }
 break;
 }
-
+
 default:
 printk("irix_syssgi: Unsupported command %d\n", (int)cmd);
 retval = -EINVAL;
@@ -600,7 +604,7 @@ out:
 asmlinkage int irix_getpid(struct pt_regs *regs)
 {
- regs->regs[3] = current->p_opptr->pid;
+ regs->regs[3] = current->real_parent->pid;
 return current->pid;
 }
@@ -623,9 +627,11 @@ asmlinkage int irix_stime(int value)
 write_seqlock_irq(&xtime_lock);
 xtime.tv_sec = value;
- xtime.tv_usec = 0;
- time_maxerror = MAXPHASE;
- time_esterror = MAXPHASE;
+ xtime.tv_nsec = 0;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
 write_sequnlock_irq(&xtime_lock);
 return 0;
@@ -819,10 +825,10 @@ asmlinkage int irix_times(struct tms * tbuf)
 err = verify_area(VERIFY_WRITE,tbuf,sizeof *tbuf);
 if (err)
 return err;
- err |= __put_user(current->times.tms_utime,&tbuf->tms_utime);
- err |= __put_user(current->times.tms_stime,&tbuf->tms_stime);
- err |= __put_user(current->times.tms_cutime,&tbuf->tms_cutime);
- err |= __put_user(current->times.tms_cstime,&tbuf->tms_cstime);
+ err |= __put_user(current->utime, &tbuf->tms_utime);
+ err |= __put_user(current->stime, &tbuf->tms_stime);
+ err |= __put_user(current->cutime, &tbuf->tms_cutime);
+ err |= __put_user(current->cstime, &tbuf->tms_cstime);
 }
 return err;
@@ -1048,7 +1054,23 @@ asmlinkage int irix_sgikopt(char *istring, char *ostring, int len)
 asmlinkage int irix_gettimeofday(struct timeval *tv)
 {
- return copy_to_user(tv, &xtime, sizeof(*tv)) ? -EFAULT : 0;
+ time_t sec;
+ long nsec, seq;
+ int err;
+
+ if (verify_area(VERIFY_WRITE, tv, sizeof(struct timeval)))
+ return -EFAULT;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ sec = xtime.tv_sec;
+ nsec = xtime.tv_nsec;
+ } while (read_seqretry(&xtime_lock, seq));
+
+ err = __put_user(sec, &tv->tv_sec);
+ err |= __put_user((nsec / 1000), &tv->tv_usec);
+
+ return err;
 }
 #define IRIX_MAP_AUTOGROW 0x40
@@ -1068,7 +1090,7 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
 if (flags & IRIX_MAP_AUTOGROW) {
 unsigned long old_pos;
 long max_size = offset + len;
-
+
 if (max_size > file->f_dentry->d_inode->i_size) {
 old_pos = sys_lseek (fd, max_size - 1, 0);
 sys_write (fd, "", 1);
@@ -1198,14 +1220,14 @@ static inline int irix_xstat32_xlate(struct kstat *stat, void *ubuf)
#if BITS_PER_LONG == 32
 if (stat->size > MAX_NON_LFS)
 return -EOVERFLOW;
-#endif
+#endif
 ub.st_size = stat->size;
- ub.st_atime0 = stat->atime;
- ub.st_atime1 = 0;
- ub.st_mtime0 = stat->mtime;
- ub.st_mtime1 = 0;
- ub.st_ctime0 = stat->ctime;
- ub.st_ctime1 = 0;
+ ub.st_atime0 = stat->atime.tv_sec;
+ ub.st_atime1 = stat->atime.tv_nsec;
+ ub.st_mtime0 = stat->mtime.tv_sec;
+ ub.st_mtime1 = stat->mtime.tv_nsec;
+ ub.st_ctime0 = stat->ctime.tv_sec;
+ ub.st_ctime1 = stat->ctime.tv_nsec;
 ub.st_blksize = stat->blksize;
 ub.st_blocks = stat->blocks;
 strcpy (ub.st_fstype, "efs");
@@ -1243,9 +1265,12 @@ static inline void irix_xstat64_xlate(struct kstat *stat, void *ubuf)
 ks.st_pad3 = 0;
 /* XXX hackety hack... 
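irix_gettimeofday() above uses the read_seqbegin()/read_seqretry() pattern: writers make the sequence count odd while an update is in flight, so a reader retries whenever the count it sampled was odd or changed. A toy, single-threaded model of the invariant only; the real primitive also needs memory barriers:

#include <stdio.h>

static unsigned seq;
static long tv_sec, tv_nsec;

static unsigned read_begin(void) { return seq; }
static int read_retry(unsigned s) { return (s & 1) || s != seq; }

static void write_time(long s, long ns)
{
	++seq;       /* odd: update in progress */
	tv_sec = s;
	tv_nsec = ns;
	++seq;       /* even: consistent again */
}

int main(void)
{
	unsigned s;
	long a, b;

	write_time(100, 500);
	do {
		s = read_begin();
		a = tv_sec;  /* lock-free reads of the shared data */
		b = tv_nsec;
	} while (read_retry(s));
	printf("%ld.%09ld\n", a, b);
	return 0;
}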
*/
- ks.st_atime.tv_sec = (s32) stat->atime; ks.st_atime.tv_nsec = 0;
- ks.st_mtime.tv_sec = (s32) stat->atime; ks.st_mtime.tv_nsec = 0;
- ks.st_ctime.tv_sec = (s32) stat->atime; ks.st_ctime.tv_nsec = 0;
+ ks.st_atime.tv_sec = (s32) stat->atime.tv_sec;
+ ks.st_atime.tv_nsec = stat->atime.tv_nsec;
+ ks.st_mtime.tv_sec = (s32) stat->mtime.tv_sec;
+ ks.st_mtime.tv_nsec = stat->mtime.tv_nsec;
+ ks.st_ctime.tv_sec = (s32) stat->ctime.tv_sec;
+ ks.st_ctime.tv_nsec = stat->ctime.tv_nsec;
 ks.st_blksize = (s32) stat->blksize;
 ks.st_blocks = (long long) stat->blocks;
@@ -1812,7 +1837,8 @@ static int irix_filldir32(void *__buf, const char *name, int namlen,
 return 0;
 }
-asmlinkage int irix_ngetdents(unsigned int fd, void * dirent, unsigned int count, int *eob)
+asmlinkage int irix_ngetdents(unsigned int fd, void * dirent,
+ unsigned int count, int *eob)
 {
 struct file *file;
 struct irix_dirent32 *lastdirent;
@@ -1836,6 +1862,7 @@ asmlinkage int irix_ngetdents(unsigned int fd, void * dirent, unsigned int count
 error = vfs_readdir(file, irix_filldir32, &buf);
 if (error < 0)
 goto out_putf;
+ error = buf.error;
 lastdirent = buf.previous;
 if (lastdirent) {
@@ -1844,11 +1871,10 @@ asmlinkage int irix_ngetdents(unsigned int fd, void * dirent, unsigned int count
 }
 if (put_user(0, eob) < 0) {
- error = EFAULT;
+ error = -EFAULT;
 goto out_putf;
 }
-
#ifdef DEBUG_GETDENTS
 printk("eob=%d returning %d\n", *eob, count - buf.count);
#endif
diff --git a/arch/mips/kernel/sysmips.c b/arch/mips/kernel/sysmips.c
index 71a59703b223..3157086d24a8 100644
--- a/arch/mips/kernel/sysmips.c
+++ b/arch/mips/kernel/sysmips.c
@@ -1,13 +1,11 @@
 /*
- * MIPS specific syscalls
- *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 1995, 1996, 1997, 2000 by Ralf Baechle
+ * Copyright (C) 1995, 1996, 1997, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2001 MIPS Technologies, Inc. 
*/ -#include <linux/config.h> #include <linux/errno.h> #include <linux/linkage.h> #include <linux/mm.h> @@ -19,7 +17,7 @@ #include <linux/ptrace.h> #include <asm/cachectl.h> -#include <asm/pgalloc.h> +#include <asm/cacheflush.h> #include <asm/sysmips.h> #include <asm/uaccess.h> @@ -49,11 +47,10 @@ get_max_hostname(unsigned long address) } asmlinkage int -sys_sysmips(int cmd, int arg1, int arg2, int arg3) +_sys_sysmips(int cmd, int arg1, int arg2, int arg3) { - int *p; char *name; - int tmp, len, retval, errno; + int tmp, len, retval; switch(cmd) { case SETNAME: { @@ -65,66 +62,22 @@ sys_sysmips(int cmd, int arg1, int arg2, int arg3) name = (char *) arg1; len = strncpy_from_user(nodename, name, __NEW_UTS_LEN); - if (len < 0) + if (len < 0) return -EFAULT; - nodename[__NEW_UTS_LEN] = '\0'; down_write(&uts_sem); + strncpy(system_utsname.nodename, nodename, len); + nodename[__NEW_UTS_LEN] = '\0'; strlcpy(system_utsname.nodename, nodename, - sizeof(system_utsname.nodename)); + sizeof(system_utsname.nodename)); up_write(&uts_sem); return 0; } - case MIPS_ATOMIC_SET: { -#ifdef CONFIG_CPU_HAS_LLSC - unsigned int tmp; - - p = (int *) arg1; - errno = verify_area(VERIFY_WRITE, p, sizeof(*p)); - if (errno) - return errno; - errno = 0; - - __asm__(".set\tpush\t\t\t# sysmips(MIPS_ATOMIC, ...)\n\t" - ".set\tmips2\n\t" - ".set\tnoat\n\t" - "1:\tll\t%0, %4\n\t" - "move\t$1, %3\n\t" - "2:\tsc\t$1, %1\n\t" - "beqz\t$1, 1b\n\t" - ".set\tpop\n\t" - ".section\t.fixup,\"ax\"\n" - "3:\tli\t%2, 1\t\t\t# error\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - ".word\t1b, 3b\n\t" - ".word\t2b, 3b\n\t" - ".previous\n\t" - : "=&r" (tmp), "=o" (* (u32 *) p), "=r" (errno) - : "r" (arg2), "o" (* (u32 *) p), "2" (errno) - : "$1"); - - if (errno) - return -EFAULT; - - /* We're skipping error handling etc. */ - if (current->ptrace & PT_TRACESYS) - syscall_trace(); - - ((struct pt_regs *)&cmd)->regs[2] = tmp; - ((struct pt_regs *)&cmd)->regs[7] = 0; - - __asm__ __volatile__( - "move\t$29, %0\n\t" - "j\to32_ret_from_sys_call" - : /* No outputs */ - : "r" (&cmd)); - /* Unreached */ -#else - printk("sys_sysmips(MIPS_ATOMIC_SET, ...) not ready for !CONFIG_CPU_HAS_LLSC\n"); -#endif - } + case MIPS_ATOMIC_SET: + printk(KERN_CRIT "How did I get here?\n"); + retval = -EINVAL; + goto out; case MIPS_FIXADE: tmp = current->thread.mflags & ~3; @@ -133,7 +86,7 @@ sys_sysmips(int cmd, int arg1, int arg2, int arg3) goto out; case FLUSH_CACHE: - flush_cache_all(); + __flush_cache_all(); retval = 0; goto out; diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index f7551d197dfb..36c91fdcda3f 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -2,8 +2,8 @@ * Copyright 2001 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * - * Common time service routines for MIPS machines. See - * Documentation/mips/time.README. + * Common time service routines for MIPS machines. See + * Documents/mips/README.txt. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -21,6 +21,7 @@ #include <linux/kernel_stat.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/module.h> #include <asm/bootinfo.h> #include <asm/cpu.h> @@ -30,7 +31,9 @@ /* This is for machines which generate the exact clock. 
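 * A rough sketch of the arithmetic: the jiffy length in microseconds is kept as a 32.32 fixed-point value, USECS_PER_JIFFY holding the integer part of 1000000/HZ and USECS_PER_JIFFY_FRAC the fractional part scaled by 2^32. With HZ == 128, for instance, a jiffy is 7812.5 usec, giving USECS_PER_JIFFY == 7812 and USECS_PER_JIFFY_FRAC == 0x80000000 (exactly one half).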
*/ #define USECS_PER_JIFFY (1000000/HZ) -#define USECS_PER_JIFFY_FRAC ((1000000ULL << 32) / HZ & 0xffffffff) +#define USECS_PER_JIFFY_FRAC ((u32)((1000000ULL << 32) / HZ)) + +#define TICK_SIZE (tick_nsec / 1000) u64 jiffies_64 = INITIAL_JIFFIES; @@ -39,6 +42,13 @@ u64 jiffies_64 = INITIAL_JIFFIES; */ extern volatile unsigned long wall_jiffies; +spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; + +/* + * whether we emulate local_timer_interrupts for SMP machines. + */ +int emulate_local_timer_interrupt; + /* * By default we provide the null RTC ops */ @@ -57,58 +67,64 @@ int (*rtc_set_time)(unsigned long) = null_rtc_set_time; /* - * timeofday services, for syscalls. + * This version of gettimeofday has microsecond resolution and better than + * microsecond precision on fast machines with cycle counter. */ void do_gettimeofday(struct timeval *tv) { - unsigned long flags; unsigned long seq; + unsigned long usec, sec; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - - *tv = xtime; - tv->tv_usec += do_gettimeoffset(); - - /* - * xtime is atomically updated in timer_bh. - * jiffies - wall_jiffies - * is nonzero if the timer bottom half hasnt executed yet. - */ - if (jiffies - wall_jiffies) - tv->tv_usec += USECS_PER_JIFFY; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - + seq = read_seqbegin(&xtime_lock); + usec = do_gettimeoffset(); + { + unsigned long lost = jiffies - wall_jiffies; + if (lost) + usec += lost * (1000000 / HZ); + } + sec = xtime.tv_sec; + usec += (xtime.tv_nsec / 1000); + } while (read_seqretry(&xtime_lock, seq)); - if (tv->tv_usec >= 1000000) { - tv->tv_usec -= 1000000; - tv->tv_sec++; + while (usec >= 1000000) { + usec -= 1000000; + sec++; } + + tv->tv_sec = sec; + tv->tv_usec = usec; } -void do_settimeofday(struct timeval *tv) +int do_settimeofday(struct timespec *tv) { - write_seqlock_irq (&xtime_lock); + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; - /* This is revolting. We need to set the xtime.tv_usec - * correctly. However, the value in this location is - * is value at the last tick. - * Discover what correction gettimeofday - * would have done, and then undo it! + write_seqlock_irq(&xtime_lock); + /* + * This is revolting. We need to set "xtime" correctly. However, the + * value in this location is the value at the most recent update of + * wall time. Discover what correction gettimeofday() would have + * made, and then undo it! */ - tv->tv_usec -= do_gettimeoffset(); + tv->tv_nsec -= do_gettimeoffset() * NSEC_PER_USEC; + tv->tv_nsec -= (jiffies - wall_jiffies) * TICK_NSEC; - if (tv->tv_usec < 0) { - tv->tv_usec += 1000000; + while (tv->tv_nsec < 0) { + tv->tv_nsec += NSEC_PER_SEC; tv->tv_sec--; } - xtime = *tv; - time_adjust = 0; /* stop active adjtime() */ + + xtime.tv_sec = tv->tv_sec; + xtime.tv_nsec = tv->tv_nsec; + time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT; + write_sequnlock_irq(&xtime_lock); - write_sequnlock_irq (&xtime_lock); + return 0; } @@ -137,6 +153,9 @@ static unsigned long cycles_per_jiffy=0; /* Cycle counter value at the previous timer interrupt.. 
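 * timerhi and timerlo extend the 32-bit c0_count register into a 64-bit timebase in software: timer_interrupt() below records the current count in timerlo and bumps timerhi whenever the new count is smaller than the previous one, i.e. whenever the hardware counter has wrapped since the last tick.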
*/ static unsigned int timerhi, timerlo; +/* expirelo is the count value for next CPU timer interrupt */ +static unsigned int expirelo; + /* last time when xtime and rtc are sync'ed up */ static long last_rtc_update; @@ -154,7 +173,7 @@ unsigned long fixed_rate_gettimeoffset(void) unsigned long res; /* Get last timer tick in absolute kernel time */ - count = read_32bit_cp0_register(CP0_COUNT); + count = read_c0_count(); /* .. relative to previous jiffy (32 bits is enough) */ count -= timerlo; @@ -210,7 +229,7 @@ unsigned long calibrate_div32_gettimeoffset(void) } /* Get last timer tick in absolute kernel time */ - count = read_32bit_cp0_register(CP0_COUNT); + count = read_c0_count(); /* .. relative to previous jiffy (32 bits is enough) */ count -= timerlo; @@ -260,13 +279,12 @@ unsigned long calibrate_div64_gettimeoffset(void) :"r" (timerhi), "m" (timerlo), "r" (tmp), - "r" (USECS_PER_JIFFY) - :"$1"); + "r" (USECS_PER_JIFFY)); cached_quotient = quotient; } /* Get last timer tick in absolute kernel time */ - count = read_32bit_cp0_register(CP0_COUNT); + count = read_c0_count(); /* .. relative to previous jiffy (32 bits is enough) */ count -= timerlo; @@ -289,35 +307,18 @@ unsigned long calibrate_div64_gettimeoffset(void) /* - * high-level timer interrupt service routines. This function - * is set as irqaction->handler and is invoked through do_IRQ. + * local_timer_interrupt() does profiling and process accounting + * on a per-CPU basis. + * + * In UP mode, it is invoked from the (global) timer_interrupt. + * + * In SMP mode, it might be invoked by a per-CPU timer interrupt, or + * a broadcast inter-processor interrupt which itself is triggered + * by the global timer interrupt. */ -void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) { - unsigned long seq; - - if (mips_cpu.options & MIPS_CPU_COUNTER) { - unsigned int count; - - /* - * The cycle counter is only 32 bit which is good for about - * a minute at current count rates of upto 150MHz or so. - */ - count = read_32bit_cp0_register(CP0_COUNT); - timerhi += (count < timerlo); /* Wrap around */ - timerlo = count; - - /* - * set up for next timer interrupt - no harm if the machine - * is using another timer interrupt source. - * Note that writing to COMPARE register clears the interrupt - */ - write_32bit_cp0_register (CP0_COMPARE, - count + cycles_per_jiffy); - - } - - if(!user_mode(regs)) { + if (!user_mode(regs)) { if (prof_buffer && current->pid) { extern int _stext; unsigned long pc = regs->cp0_epc; @@ -335,6 +336,38 @@ void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) } } +#ifdef CONFIG_SMP + /* in UP mode, update_process_times() is invoked by do_timer() */ + update_process_times(user_mode(regs)); +#endif +} + +/* + * high-level timer interrupt service routines. This function + * is set as irqaction->handler and is invoked through do_IRQ. + */ +irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + if (cpu_has_counter) { + unsigned int count; + + /* ack timer interrupt, and try to set next interrupt */ + expirelo += cycles_per_jiffy; + write_c0_compare(expirelo); + count = read_c0_count(); + + /* check to see if we have missed any timer interrupts */ + if ((count - expirelo) < 0x7fffffff) { + /* missed_timer_count ++; */ + expirelo = count + cycles_per_jiffy; + write_c0_compare(expirelo); + } + + /* Update timerhi/timerlo for intra-jiffy calibration.
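+ * The unsigned comparison above, (count - expirelo) < 0x7fffffff, is true exactly when count has already passed expirelo by less than half the counter range, i.e. when the freshly written compare value is in the past; in that case the next interrupt is rescheduled a full jiffy from now. The count < timerlo test below relies on the same modulo-2^32 arithmetic: it evaluates to 1 precisely when the counter wrapped since the previous tick, carrying into timerhi.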
*/ + timerhi += count < timerlo; /* Wrap around */ + timerlo = count; + } + /* * call the generic timer interrupt handling */ @@ -345,21 +378,19 @@ void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be * called as close as possible to 500 ms before the new second starts. */ - do { - seq = read_seqbegin(&xtime_lock); - - if ((time_status & STA_UNSYNC) == 0 && - xtime.tv_sec > last_rtc_update + 660 && - xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 && - xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) { - if (rtc_set_time(xtime.tv_sec) == 0) { - last_rtc_update = xtime.tv_sec; - } else { - last_rtc_update = xtime.tv_sec - 600; - /* do it again in 60 s */ - } + write_seqlock(&xtime_lock); + if ((time_status & STA_UNSYNC) == 0 && + xtime.tv_sec > last_rtc_update + 660 && + (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && + (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { + if (rtc_set_time(xtime.tv_sec) == 0) { + last_rtc_update = xtime.tv_sec; + } else { + last_rtc_update = xtime.tv_sec - 600; + /* do it again in 60 s */ } - } while (read_seqretry(&xtime_lock, seq)); + } + write_sequnlock(&xtime_lock); /* * If jiffies has overflowed in this timer_interrupt we must @@ -369,41 +400,83 @@ void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) if (!jiffies) { timerhi = timerlo = 0; } + +#if !defined(CONFIG_SMP) + /* + * In UP mode, we call local_timer_interrupt() to do profiling + * and process accounting. + * + * In SMP mode, local_timer_interrupt() is invoked by the appropriate + * low-level local timer interrupt handler. + */ + local_timer_interrupt(0, NULL, regs); + +#else /* CONFIG_SMP */ + + if (emulate_local_timer_interrupt) { + /* + * this is the place where we send out inter-processor + * interrupts and let each CPU do its own profiling + * and process accounting. + * + * Obviously we need to call local_timer_interrupt() for + * the current CPU too. + */ + panic("Not implemented yet!!!"); + } +#endif /* CONFIG_SMP */ + + return IRQ_HANDLED; } asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs) { int cpu = smp_processor_id(); - irq_enter(cpu, irq); + irq_enter(); kstat_cpu(cpu).irqs[irq]++; /* we keep interrupt disabled all the time */ timer_interrupt(irq, NULL, regs); - - irq_exit(cpu, irq); + + irq_exit(); if (softirq_pending(cpu)) do_softirq(); } +asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + + irq_enter(); + kstat_cpu(cpu).irqs[irq]++; + + /* we keep interrupt disabled all the time */ + local_timer_interrupt(irq, NULL, regs); + + irq_exit(); + + if (softirq_pending(cpu)) + do_softirq(); +} /* * time_init() - it does the following things. * - * 1) board_time_init() - - * a) (optional) set up RTC routines, + * 1) board_time_init() - + * a) (optional) set up RTC routines, * b) (optional) calibrate and set the mips_counter_frequency * (only needed if you intend to use fixed_rate_gettimeoffset * or use cpu counter as timer interrupt source) * 2) setup xtime based on rtc_get_time(). * 3) choose an appropriate gettimeoffset routine. * 4) calculate a couple of cached variables for later usage * 5) board_timer_setup() - + * 5) board_timer_setup() - * a) (optional) over-write any choices made above by time_init(). * b) machine specific code should setup the timer irqaction.
* c) enable the timer interrupt - */ + */ void (*board_time_init)(void) = NULL; void (*board_timer_setup)(struct irqaction *irq) = NULL; @@ -416,7 +489,8 @@ static struct irqaction timer_irqaction = { 0, "timer", NULL, - NULL}; + NULL +}; void __init time_init(void) { @@ -424,18 +498,18 @@ void __init time_init(void) board_time_init(); xtime.tv_sec = rtc_get_time(); - xtime.tv_usec = 0; + xtime.tv_nsec = 0; /* choose appropriate gettimeoffset routine */ - if (!(mips_cpu.options & MIPS_CPU_COUNTER)) { + if (!cpu_has_counter) { /* no cpu counter - sorry */ do_gettimeoffset = null_gettimeoffset; } else if (mips_counter_frequency != 0) { /* we have cpu counter and know counter frequency! */ do_gettimeoffset = fixed_rate_gettimeoffset; - } else if ((mips_cpu.isa_level == MIPS_CPU_ISA_M32) || - (mips_cpu.isa_level == MIPS_CPU_ISA_I) || - (mips_cpu.isa_level == MIPS_CPU_ISA_II) ) { + } else if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32) || + (current_cpu_data.isa_level == MIPS_CPU_ISA_I) || + (current_cpu_data.isa_level == MIPS_CPU_ISA_II) ) { /* we need to calibrate the counter but we don't have * 64-bit division. */ do_gettimeoffset = calibrate_div32_gettimeoffset; @@ -443,7 +517,7 @@ void __init time_init(void) /* we need to calibrate the counter but we *do* have * 64-bit division. */ do_gettimeoffset = calibrate_div64_gettimeoffset; - } + } /* calculate cache parameters */ if (mips_counter_frequency) { @@ -454,16 +528,24 @@ void __init time_init(void) sll32_usecs_per_cycle = mips_counter_frequency / 100000; sll32_usecs_per_cycle = 0xffffffff / sll32_usecs_per_cycle; sll32_usecs_per_cycle *= 10; + + /* + * For those using cpu counter as timer, this sets up the + * first interrupt + */ + write_c0_compare(cycles_per_jiffy); + write_c0_count(0); + expirelo = cycles_per_jiffy; } - /* + /* * Call board specific timer interrupt setup. * - * this pointer must be setup in machine setup routine. + * this pointer must be setup in machine setup routine. * * Even if the machine chooses to use low-level timer interrupt, * it still needs to setup the timer_irqaction. - * In that case, it might be better to set timer_irqaction.handler + * In that case, it might be better to set timer_irqaction.handler * to be a NULL function so that we are sure the high-level code * is not invoked accidentally. */ @@ -484,10 +566,10 @@ static int month_days[12] = { void to_tm(unsigned long tim, struct rtc_time * tm) { - long hms, day; + long hms, day, gday; int i; - day = tim / SECDAY; + gday = day = tim / SECDAY; hms = tim % SECDAY; /* Hours, minutes, seconds are easy */ @@ -506,7 +588,7 @@ void to_tm(unsigned long tim, struct rtc_time * tm) for (i = 1; day >= days_in_month(i); i++) day -= days_in_month(i); days_in_month(FEBRUARY) = 28; - tm->tm_mon = i; + tm->tm_mon = i-1; /* tm_mon ranges from 0 to 11 */ /* Days are what is left over (+1) from all that. */ tm->tm_mday = day + 1; @@ -514,5 +596,7 @@ void to_tm(unsigned long tim, struct rtc_time * tm) /* * Determine the day of week */ - tm->tm_wday = (day + 3) % 7; + tm->tm_wday = (gday + 4) % 7; /* 1970/1/1 was Thursday */ } + +EXPORT_SYMBOL(rtc_lock); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index f3c333596d00..96ebda32053e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -4,12 +4,12 @@ * for more details. * * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle - * Modified for R3000 by Paul M. Antoine, 1995, 1996 - * Complete output from die() by Ulf Carlsson, 1998 + * Copyright (C) 1995, 1996 Paul M.
Antoine + * Copyright (C) 1998 Ulf Carlsson * Copyright (C) 1999 Silicon Graphics, Inc. - * * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000, 01 MIPS Technologies, Inc. + * Copyright (C) 2002, 2003 Maciej W. Rozycki */ #include <linux/config.h> #include <linux/init.h> @@ -19,30 +19,23 @@ #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/spinlock.h> +#include <linux/kallsyms.h> #include <asm/bootinfo.h> #include <asm/branch.h> #include <asm/cpu.h> -#include <asm/cachectl.h> -#include <asm/inst.h> -#include <asm/jazz.h> +#include <asm/fpu.h> #include <asm/module.h> #include <asm/pgtable.h> -#include <asm/io.h> -#include <asm/siginfo.h> -#include <asm/watch.h> +#include <asm/ptrace.h> +#include <asm/sections.h> #include <asm/system.h> +#include <asm/tlbdebug.h> +#include <asm/traps.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> - -/* - * Machine specific interrupt handlers - */ -extern asmlinkage void acer_pica_61_handle_int(void); -extern asmlinkage void decstation_handle_int(void); -extern asmlinkage void deskstation_rpc44_handle_int(void); -extern asmlinkage void deskstation_tyne_handle_int(void); -extern asmlinkage void mips_magnum_4000_handle_int(void); +#include <asm/watch.h> +#include <asm/types.h> extern asmlinkage void handle_mod(void); extern asmlinkage void handle_tlbl(void); @@ -58,16 +51,16 @@ extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); extern asmlinkage void handle_tr(void); extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_mdmx(void); extern asmlinkage void handle_watch(void); extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); -extern int fpu_emulator_cop1Handler(struct pt_regs *); - -char watch_available = 0; +extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp, + struct mips_fpu_soft_struct *ctx); -void (*ibe_board_handler)(struct pt_regs *regs); -void (*dbe_board_handler)(struct pt_regs *regs); +void (*board_be_init)(void); +int (*board_be_handler)(struct pt_regs *regs, int is_fixup); /* * This constant is for searching for possible module text segments. @@ -75,99 +68,74 @@ void (*dbe_board_handler)(struct pt_regs *regs); */ #define MODULE_RANGE (8*1024*1024) -#ifndef CONFIG_CPU_HAS_LLSC -/* - * This stuff is needed for the userland ll-sc emulation for R2300 - */ -void simulate_ll(struct pt_regs *regs, unsigned int opcode); -void simulate_sc(struct pt_regs *regs, unsigned int opcode); - -#define OPCODE 0xfc000000 -#define BASE 0x03e00000 -#define RT 0x001f0000 -#define OFFSET 0x0000ffff -#define LL 0xc0000000 -#define SC 0xe0000000 -#endif - /* * This routine abuses get_user()/put_user() to reference pointers * with at least a bit of error checking ... */ -void show_stack(unsigned int *sp) +void show_stack(struct task_struct *task, unsigned long *sp) { + const int field = 2 * sizeof(unsigned long); + long stackdata; int i; - unsigned int *stack; - - stack = sp; - i = 0; - printk("Stack:"); - while ((unsigned long) stack & (PAGE_SIZE - 1)) { - unsigned long stackdata; + sp = sp ?
sp : (unsigned long *) &sp; - if (__get_user(stackdata, stack++)) { - printk(" (Bad stack address)"); + printk("Stack: "); + i = 1; + while ((unsigned long) sp & (PAGE_SIZE - 1)) { + if (i && ((i % (64 / sizeof(unsigned long))) == 0)) + printk("\n "); + if (i > 40) { + printk(" ..."); break; } - printk(" %08lx", stackdata); - - if (++i > 40) { - printk(" ..."); + if (__get_user(stackdata, sp++)) { + printk(" (Bad stack address)"); break; } - if (i % 8 == 0) - printk("\n "); + printk(" %0*lx", field, stackdata); + i++; } + printk("\n"); } -void show_trace(unsigned int *sp) +void show_trace(struct task_struct *task, unsigned long *stack) { - int i; - unsigned int *stack; - unsigned long kernel_start, kernel_end; - unsigned long module_start, module_end; - extern char _stext, _etext; - - stack = sp; - i = 0; - - kernel_start = (unsigned long) &_stext; - kernel_end = (unsigned long) &_etext; - module_start = VMALLOC_START; - module_end = module_start + MODULE_RANGE; - - printk("\nCall Trace:"); + const int field = 2 * sizeof(unsigned long); + unsigned long addr; - while ((unsigned long) stack & (PAGE_SIZE -1)) { - unsigned long addr; + if (!stack) + stack = (unsigned long*)&stack; - if (__get_user(addr, stack++)) { - printk(" (Bad stack address)\n"); - break; + printk("Call Trace:"); +#ifdef CONFIG_KALLSYMS + printk("\n"); +#endif + while (((long) stack & (THREAD_SIZE-1)) != 0) { + addr = *stack++; + if (kernel_text_address(addr)) { + printk(" [<%0*lx>] ", field, addr); + print_symbol("%s\n", addr); } + } + printk("\n"); +} - /* - * If the address is either in the text segment of the - * kernel, or in the region which contains vmalloc'ed - * memory, it *may* be the address of a calling - * routine; if so, print it so that someone tracing - * down the cause of the crash will be able to figure - * out the call path that was taken. - */ +void show_trace_task(struct task_struct *tsk) +{ + show_trace(tsk, (long *)tsk->thread.reg29); +} - if ((addr >= kernel_start && addr < kernel_end) || - (addr >= module_start && addr < module_end)) { +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + unsigned long stack; - printk(" [<%08lx>]", addr); - if (++i > 40) { - printk(" ..."); - break; - } - } - } + show_trace(current, &stack); } void show_code(unsigned int *pc) @@ -177,43 +145,114 @@ void show_code(unsigned int *pc) printk("\nCode:"); for(i = -3 ; i < 6 ; i++) { - unsigned long insn; + unsigned int insn; if (__get_user(insn, pc + i)) { printk(" (Bad address in epc)\n"); break; } - printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>')); + printk("%c%08x%c", (i?' ':'<'), insn, (i?' 
':'>')); + } +} + +void show_regs(struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + int i; + + printk("Cpu %d\n", smp_processor_id()); + + /* + * Saved main processor registers + */ + for (i = 0; i < 32; i++) { + if ((i % 4) == 0) + printk("$%2d :", i); + if (i == 0) + printk(" %0*lx", field, 0UL); + else if (i == 26 || i == 27) + printk(" %*s", field, ""); + else + printk(" %0*lx", field, regs->regs[i]); + + i++; + if ((i % 4) == 0) + printk("\n"); } + + printk("Hi : %0*lx\n", field, regs->hi); + printk("Lo : %0*lx\n", field, regs->lo); + + /* + * Saved cp0 registers + */ + printk("epc : %0*lx %s\n", field, regs->cp0_epc, print_tainted()); + printk("Status: %0*lx\n", field, regs->cp0_status); + printk("Cause : %0*lx\n", field, regs->cp0_cause); + + if (regs->cp0_status & ST0_KX) + printk("KX "); + if (regs->cp0_status & ST0_SX) + printk("SX "); + if (regs->cp0_status & ST0_UX) + printk("UX "); + switch (regs->cp0_status & ST0_KSU) { + case KSU_USER: + printk("USER "); + break; + case KSU_SUPERVISOR: + printk("SUPERVISOR "); + break; + case KSU_KERNEL: + printk("KERNEL "); + break; + default: + printk("BAD_MODE "); + break; + } + if (regs->cp0_status & ST0_ERL) + printk("ERL "); + if (regs->cp0_status & ST0_EXL) + printk("EXL "); + if (regs->cp0_status & ST0_IE) + printk("IE "); +} + +void show_registers(struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + + show_regs(regs); + printk("Process %s (pid: %d, stackpage=%0*lx)\n", + current->comm, current->pid, field, (unsigned long) current); + show_stack(current, (long *) regs->regs[29]); + show_trace(current, (long *) regs->regs[29]); + show_code((unsigned int *) regs->cp0_epc); + printk("\n"); } -spinlock_t die_lock; +static spinlock_t die_lock = SPIN_LOCK_UNLOCKED; -extern void __die(const char * str, struct pt_regs * regs, const char *where, - unsigned long line) +void __die(const char * str, struct pt_regs * regs, const char * file, + const char * func, unsigned long line) { static int die_counter; + console_verbose(); spin_lock_irq(&die_lock); printk("%s", str); - if (where) - printk(" in %s, line %ld", where, line); + if (file && func) + printk(" in %s:%s, line %ld", file, func, line); printk("[#%d]:\n", ++die_counter); - show_regs(regs); - printk("Process %s (pid: %d, stackpage=%08lx)\n", - current->comm, current->pid, (unsigned long) current); - show_stack((unsigned int *) regs->regs[29]); - show_trace((unsigned int *) regs->regs[29]); - show_code((unsigned int *) regs->cp0_epc); - printk("\n"); + show_registers(regs); spin_unlock_irq(&die_lock); do_exit(SIGSEGV); } -void __die_if_kernel(const char * str, struct pt_regs * regs, const char *where, - unsigned long line) +void __die_if_kernel(const char * str, struct pt_regs * regs, + const char * file, const char * func, unsigned long line) { if (!user_mode(regs)) - __die(str, regs, where, line); + __die(str, regs, file, func, line); } extern const struct exception_table_entry __start___dbe_table[]; @@ -227,106 +266,199 @@ void __declare_dbe_table(void) ); } -static inline unsigned long -search_one_table(const struct exception_table_entry *first, - const struct exception_table_entry *last, - unsigned long value) +asmlinkage void do_be(struct pt_regs *regs) { - const struct exception_table_entry *mid; - long diff; - - while (first < last) { - mid = (last - first) / 2 + first; - diff = mid->insn - value; - if (diff < 0) - first = mid + 1; - else - last = mid; - } - return (first == last && first->insn == value) ? 
first->nextinsn : 0; -} - -extern spinlock_t modlist_lock; - -static inline unsigned long -search_dbe_table(unsigned long addr) -{ - unsigned long ret = 0; + const int field = 2 * sizeof(unsigned long); + const struct exception_table_entry *fixup = NULL; + int data = regs->cp0_cause & 4; + int action = MIPS_BE_FATAL; -#ifndef CONFIG_MODULES - /* There is only the kernel to search. */ - ret = search_one_table(__start___dbe_table, __stop___dbe_table-1, addr); - return ret; -#else - unsigned long flags; + /* XXX For now. Fixme, this searches the wrong table ... */ + if (data && !user_mode(regs)) + fixup = search_exception_tables(regs->cp0_epc); - /* The kernel is the last "module" -- no need to treat it special. */ - struct module *mp; - struct archdata *ap; + if (fixup) + action = MIPS_BE_FIXUP; - spin_lock_irqsave(&modlist_lock, flags); - for (mp = module_list; mp != NULL; mp = mp->next) { - if (!mod_member_present(mp, archdata_end) || - !mod_archdata_member_present(mp, struct archdata, - dbe_table_end)) - continue; - ap = (struct archdata *)(mp->archdata_start); + if (board_be_handler) + action = board_be_handler(regs, fixup != 0); - if (ap->dbe_table_start == NULL || - !(mp->flags & (MOD_RUNNING | MOD_INITIALIZING))) - continue; - ret = search_one_table(ap->dbe_table_start, - ap->dbe_table_end - 1, addr); - if (ret) - break; - } - spin_unlock_irqrestore(&modlist_lock, flags); - return ret; -#endif -} - -static void default_be_board_handler(struct pt_regs *regs) -{ - unsigned long new_epc; - unsigned long fixup; - int data = regs->cp0_cause & 4; - - if (data && !user_mode(regs)) { - fixup = search_dbe_table(regs->cp0_epc); + switch (action) { + case MIPS_BE_DISCARD: + return; + case MIPS_BE_FIXUP: if (fixup) { - new_epc = fixup_exception(dpf_reg, fixup, - regs->cp0_epc); - regs->cp0_epc = new_epc; + regs->cp0_epc = fixup->nextinsn; return; } + break; + default: + break; } /* * Assume it would be too dangerous to continue ... */ - printk(KERN_ALERT "%s bus error, epc == %08lx, ra == %08lx\n", + printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", data ? "Data" : "Instruction", - regs->cp0_epc, regs->regs[31]); + field, regs->cp0_epc, field, regs->regs[31]); die_if_kernel("Oops", regs); force_sig(SIGBUS, current); } -asmlinkage void do_ibe(struct pt_regs *regs) +static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) { - ibe_board_handler(regs); + unsigned int *epc; + + epc = (unsigned int *) regs->cp0_epc + + ((regs->cp0_cause & CAUSEF_BD) != 0); + if (!get_user(*opcode, epc)) + return 0; + + force_sig(SIGSEGV, current); + return 1; } -asmlinkage void do_dbe(struct pt_regs *regs) +/* + * ll/sc emulation + */ + +#define OPCODE 0xfc000000 +#define BASE 0x03e00000 +#define RT 0x001f0000 +#define OFFSET 0x0000ffff +#define LL 0xc0000000 +#define SC 0xe0000000 + +/* + * The ll_bit is cleared by r*_switch.S + */ + +unsigned long ll_bit; + +static struct task_struct *ll_task = NULL; + +static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) { - dbe_board_handler(regs); + unsigned long value, *vaddr; + long offset; + int signal = 0; + + /* + * analyse the ll instruction that just caused a ri exception + * and put the referenced address to addr. 
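+ * Layout of the I-type word, per the masks above: bits 31..26 are the opcode, 25..21 the base register, 20..16 rt and 15..0 a signed offset. The shift pair below (offset <<= 16; offset >>= 16) is the usual trick for sign-extending the 16-bit immediate through a long.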
+ */ + + /* sign extend offset */ + offset = opcode & OFFSET; + offset <<= 16; + offset >>= 16; + + vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset); + + if ((unsigned long)vaddr & 3) { + signal = SIGBUS; + goto sig; + } + if (get_user(value, vaddr)) { + signal = SIGSEGV; + goto sig; + } + + if (ll_task == NULL || ll_task == current) { + ll_bit = 1; + } else { + ll_bit = 0; + } + ll_task = current; + + regs->regs[(opcode & RT) >> 16] = value; + + compute_return_epc(regs); + return; + +sig: + force_sig(signal, current); } -asmlinkage void do_ov(struct pt_regs *regs) +static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) { - if (compute_return_epc(regs)) + unsigned long *vaddr, reg; + long offset; + int signal = 0; + + /* + * analyse the sc instruction that just caused a ri exception + * and put the referenced address to addr. + */ + + /* sign extend offset */ + offset = opcode & OFFSET; + offset <<= 16; + offset >>= 16; + + vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset); + reg = (opcode & RT) >> 16; + + if ((unsigned long)vaddr & 3) { + signal = SIGBUS; + goto sig; + } + if (ll_bit == 0 || ll_task != current) { + regs->regs[reg] = 0; + compute_return_epc(regs); return; + } - force_sig(SIGFPE, current); + if (put_user(regs->regs[reg], vaddr)) { + signal = SIGSEGV; + goto sig; + } + + regs->regs[reg] = 1; + + compute_return_epc(regs); + return; + +sig: + force_sig(signal, current); +} + +/* + * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both + * opcodes are supposed to result in coprocessor unusable exceptions if + * executed on ll/sc-less processors. That's the theory. In practice a + * few processors such as NEC's VR4100 throw reserved instruction exceptions + * instead, so we're doing the emulation thing in both exception handlers. + */ +static inline int simulate_llsc(struct pt_regs *regs) +{ + unsigned int opcode; + + if (unlikely(get_insn_opcode(regs, &opcode))) + return -EFAULT; + + if ((opcode & OPCODE) == LL) { + simulate_ll(regs, opcode); + return 0; + } + if ((opcode & OPCODE) == SC) { + simulate_sc(regs, opcode); + return 0; + } + + return -EFAULT; /* Strange things going on ... */ +} + +asmlinkage void do_ov(struct pt_regs *regs) +{ + siginfo_t info; + + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_addr = (void *)regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); } /* @@ -335,30 +467,29 @@ asmlinkage void do_ov(struct pt_regs *regs) asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { if (fcr31 & FPU_CSR_UNI_X) { - extern void save_fp(struct task_struct *); - extern void restore_fp(struct task_struct *); int sig; + /* - * Unimplemented operation exception. If we've got the - * full software emulator on-board, let's use it... + * Unimplemented operation exception. If we've got the full + * software emulator on-board, let's use it... * - * Force FPU to dump state into task/thread context. - * We're moving a lot of data here for what is probably - * a single instruction, but the alternative is to - * pre-decode the FP register operands before invoking - * the emulator, which seems a bit extreme for what - * should be an infrequent event. + * Force FPU to dump state into task/thread context. 
We're + * moving a lot of data here for what is probably a single + * instruction, but the alternative is to pre-decode the FP + * register operands before invoking the emulator, which seems + * a bit extreme for what should be an infrequent event. */ save_fp(current); - + /* Run the emulator */ - sig = fpu_emulator_cop1Handler(regs); + sig = fpu_emulator_cop1Handler (0, regs, + ¤t->thread.fpu.soft); - /* - * We can't allow the emulated instruction to leave the - * Unimplemented Operation bit set in $fcr31. + /* + * We can't allow the emulated instruction to leave any of + * the cause bit set in $fcr31. */ - current->thread.fpu.soft.sr &= ~FPU_CSR_UNI_X; + current->thread.fpu.soft.sr &= ~FPU_CSR_ALL_X; /* Restore the hardware register state */ restore_fp(current); @@ -370,36 +501,18 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) return; } - if (compute_return_epc(regs)) - return; - force_sig(SIGFPE, current); - printk(KERN_DEBUG "Sent send SIGFPE to %s\n", current->comm); -} - -static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) -{ - unsigned int *epc; - - epc = (unsigned int *) regs->cp0_epc + - ((regs->cp0_cause & CAUSEF_BD) != 0); - if (!get_user(*opcode, epc)) - return 0; - - force_sig(SIGSEGV, current); - return 1; } asmlinkage void do_bp(struct pt_regs *regs) { - siginfo_t info; unsigned int opcode, bcode; - unsigned int *epc; + siginfo_t info; - epc = (unsigned int *) regs->cp0_epc + - ((regs->cp0_cause & CAUSEF_BD) != 0); - if (get_user(opcode, epc)) - goto sigsegv; + die_if_kernel("Break instruction in kernel code", regs); + + if (get_insn_opcode(regs, &opcode)) + return; /* * There is the ancient bug in the MIPS assemblers that the break @@ -423,241 +536,107 @@ asmlinkage void do_bp(struct pt_regs *regs) info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; info.si_errno = 0; - info.si_addr = (void *)compute_return_epc(regs); + info.si_addr = (void *)regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; default: force_sig(SIGTRAP, current); } - return; - -sigsegv: - force_sig(SIGSEGV, current); } asmlinkage void do_tr(struct pt_regs *regs) { + unsigned int opcode, tcode = 0; siginfo_t info; - unsigned int opcode, bcode; - unsigned *epc; - epc = (unsigned int *) regs->cp0_epc + - ((regs->cp0_cause & CAUSEF_BD) != 0); - if (get_user(opcode, epc)) - goto sigsegv; + die_if_kernel("Trap instruction in kernel code", regs); + + if (get_insn_opcode(regs, &opcode)) + return; - bcode = ((opcode >> 6) & ((1 << 20) - 1)); + /* Immediate versions don't provide a code. */ + if (!(opcode & OPCODE)) + tcode = ((opcode >> 6) & ((1 << 20) - 1)); /* - * (A short test says that IRIX 5.3 sends SIGTRAP for all break - * insns, even for break codes that indicate arithmetic failures. + * (A short test says that IRIX 5.3 sends SIGTRAP for all trap + * insns, even for trap codes that indicate arithmetic failures. * Weird ...) * But should we continue the brokenness??? 
--macro */ - switch (bcode) { + switch (tcode) { case 6: case 7: - if (bcode == 7) + if (tcode == 7) info.si_code = FPE_INTDIV; else info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; info.si_errno = 0; - info.si_addr = (void *)compute_return_epc(regs); + info.si_addr = (void *)regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; default: force_sig(SIGTRAP, current); } - return; - -sigsegv: - force_sig(SIGSEGV, current); } -#ifndef CONFIG_CPU_HAS_LLSC - -#ifdef CONFIG_SMP -#error "ll/sc emulation is not SMP safe" -#endif - -/* - * userland emulation for R2300 CPUs - * needed for the multithreading part of glibc - * - * this implementation can handle only sychronization between 2 or more - * user contexts and is not SMP safe. - */ asmlinkage void do_ri(struct pt_regs *regs) { - unsigned int opcode; - - if (!user_mode(regs)) - BUG(); + die_if_kernel("Reserved instruction in kernel code", regs); - if (!get_insn_opcode(regs, &opcode)) { - if ((opcode & OPCODE) == LL) { - simulate_ll(regs, opcode); - return; - } - if ((opcode & OPCODE) == SC) { - simulate_sc(regs, opcode); + if (!cpu_has_llsc) + if (!simulate_llsc(regs)) return; - } - } - if (compute_return_epc(regs)) - return; force_sig(SIGILL, current); } -/* - * The ll_bit is cleared by r*_switch.S - */ - -unsigned long ll_bit; -#ifdef CONFIG_PROC_FS -extern unsigned long ll_ops; -extern unsigned long sc_ops; -#endif - -static struct task_struct *ll_task = NULL; - -void simulate_ll(struct pt_regs *regp, unsigned int opcode) +asmlinkage void do_cpu(struct pt_regs *regs) { - unsigned long value, *vaddr; - long offset; - int signal = 0; + unsigned int cpid; - /* - * analyse the ll instruction that just caused a ri exception - * and put the referenced address to addr. - */ + die_if_kernel("do_cpu invoked from kernel context!", regs); - /* sign extend offset */ - offset = opcode & OFFSET; - offset <<= 16; - offset >>= 16; + cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; - vaddr = (unsigned long *)((long)(regp->regs[(opcode & BASE) >> 21]) + offset); + switch (cpid) { + case 0: + if (cpu_has_llsc) + break; -#ifdef CONFIG_PROC_FS - ll_ops++; -#endif + if (!simulate_llsc(regs)) + return; + break; - if ((unsigned long)vaddr & 3) - signal = SIGBUS; - else if (get_user(value, vaddr)) - signal = SIGSEGV; - else { - if (ll_task == NULL || ll_task == current) { - ll_bit = 1; - } else { - ll_bit = 0; + case 1: + own_fpu(); + if (current->used_math) { /* Using the FPU again. */ + restore_fp(current); + } else { /* First time FPU user. */ + init_fpu(); + current->used_math = 1; } - ll_task = current; - regp->regs[(opcode & RT) >> 16] = value; - } - if (compute_return_epc(regp)) - return; - if (signal) - send_sig(signal, current, 1); -} - -void simulate_sc(struct pt_regs *regp, unsigned int opcode) -{ - unsigned long *vaddr, reg; - long offset; - int signal = 0; - - /* - * analyse the sc instruction that just caused a ri exception - * and put the referenced address to addr. 
- */ - - /* sign extend offset */ - offset = opcode & OFFSET; - offset <<= 16; - offset >>= 16; - - vaddr = (unsigned long *)((long)(regp->regs[(opcode & BASE) >> 21]) + offset); - reg = (opcode & RT) >> 16; -#ifdef CONFIG_PROC_FS - sc_ops++; -#endif + if (!cpu_has_fpu) { + int sig = fpu_emulator_cop1Handler(0, regs, + &current->thread.fpu.soft); + if (sig) + force_sig(sig, current); + } - if ((unsigned long)vaddr & 3) - signal = SIGBUS; - else if (ll_bit == 0 || ll_task != current) - regp->regs[reg] = 0; - else if (put_user(regp->regs[reg], vaddr)) - signal = SIGSEGV; - else - regp->regs[reg] = 1; - if (compute_return_epc(regp)) return; - if (signal) - send_sig(signal, current, 1); -} - -#else /* MIPS 2 or higher */ -asmlinkage void do_ri(struct pt_regs *regs) -{ - unsigned int opcode; - - get_insn_opcode(regs, &opcode); - if (compute_return_epc(regs)) - return; + case 2: + case 3: + break; + } force_sig(SIGILL, current); } -#endif - -asmlinkage void do_cpu(struct pt_regs *regs) +asmlinkage void do_mdmx(struct pt_regs *regs) { - unsigned int cpid; - extern void lazy_fpu_switch(void *); - extern void init_fpu(void); - void fpu_emulator_init_fpu(void); - int sig; - - cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; - if (cpid != 1) - goto bad_cid; - - if (!(mips_cpu.options & MIPS_CPU_FPU)) - goto fp_emul; - - regs->cp0_status |= ST0_CU1; - if (last_task_used_math == current) - return; - - if (current->used_math) { /* Using the FPU again. */ - lazy_fpu_switch(last_task_used_math); - } else { /* First time FPU user. */ - init_fpu(); - current->used_math = 1; - } - last_task_used_math = current; - return; - -fp_emul: - if (last_task_used_math != current) { - if (!current->used_math) { - fpu_emulator_init_fpu(); - current->used_math = 1; - } - } - sig = fpu_emulator_cop1Handler(regs); - last_task_used_math = current; - if (sig) - force_sig(sig, current); - return; - -bad_cid: force_sig(SIGILL, current); } @@ -667,6 +646,7 @@ asmlinkage void do_watch(struct pt_regs *regs) * We use the watch exception where available to detect stack * overflows. */ + dump_tlb_all(); show_regs(regs); panic("Caught WATCH exception - probably caused by stack overflow."); } @@ -674,8 +654,14 @@ asmlinkage void do_mcheck(struct pt_regs *regs) { show_regs(regs); - panic("Caught Machine Check exception - probably caused by multiple " - "matching entries in the TLB."); + dump_tlb_all(); + /* + * Some chips may have other causes of machine check (e.g. SB1 + * graduation timer) + */ + panic("Caught Machine Check exception - %scaused by multiple " + "matching entries in the TLB.", + (regs->cp0_status & ST0_TS) ? "" : "not "); } asmlinkage void do_reserved(struct pt_regs *regs) @@ -686,15 +672,8 @@ * hard/software error. */ show_regs(regs); - panic("Caught reserved exception - should not happen."); -} - -static inline void watch_init(void) -{ - if (mips_cpu.options & MIPS_CPU_WATCH ) { - set_except_vector(23, handle_watch); - watch_available = 1; - } + panic("Caught reserved exception %ld - should not happen.", + (regs->cp0_cause & 0x7f) >> 2); } /* @@ -703,14 +682,12 @@ */ static inline void parity_protection_init(void) { - switch (mips_cpu.cputype) { + switch (current_cpu_data.cputype) { case CPU_5KC: - /* Set the PE bit (bit 31) in the CP0_ECC register. */ + /* Set the PE bit (bit 31) in the c0_ecc register.
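+ * The read-modify-write below leaves the remaining bits of c0_ecc untouched; only bit 31, the parity enable bit, is turned on.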
*/ printk(KERN_INFO "Enable the cache parity protection for " "MIPS 5KC CPUs.\n"); - write_32bit_cp0_register(CP0_ECC, - read_32bit_cp0_register(CP0_ECC) - | 0x80000000); + write_c0_ecc(read_c0_ecc() | 0x80000000); break; default: break; @@ -719,16 +696,16 @@ static inline void parity_protection_init(void) asmlinkage void cache_parity_error(void) { + const int field = 2 * sizeof(unsigned long); unsigned int reg_val; /* For the moment, report the problem and hang. */ - reg_val = read_32bit_cp0_register(CP0_ERROREPC); printk("Cache error exception:\n"); - printk("cp0_errorepc == %08x\n", reg_val); - reg_val = read_32bit_cp0_register(CP0_CACHEERR); - printk("cp0_cacheerr == %08x\n", reg_val); + printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); + reg_val = read_c0_cacheerr(); + printk("c0_cacheerr == %08x\n", reg_val); - printk("Decoded CP0_CACHEERR: %s cache fault in %s reference.\n", + printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); printk("Error bits: %s%s%s%s%s%s%s\n", @@ -741,17 +718,63 @@ asmlinkage void cache_parity_error(void) reg_val & (1<<22) ? "E0 " : ""); printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); - if (reg_val&(1<<22)) - printk("DErrAddr0: 0x%08x\n", - read_32bit_cp0_set1_register(CP0_S1_DERRADDR0)); +#if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64) + if (reg_val & (1<<22)) + printk("DErrAddr0: 0x%08x\n", read_c0_derraddr0()); - if (reg_val&(1<<23)) - printk("DErrAddr1: 0x%08x\n", - read_32bit_cp0_set1_register(CP0_S1_DERRADDR1)); + if (reg_val & (1<<23)) + printk("DErrAddr1: 0x%08x\n", read_c0_derraddr1()); +#endif panic("Can't handle the cache error!"); } +/* + * SDBBP EJTAG debug exception handler. + * We skip the instruction and return to the next instruction. + */ +void ejtag_exception_handler(struct pt_regs *regs) +{ + const int field = 2 * sizeof(unsigned long); + unsigned long depc, old_epc; + unsigned int debug; + + printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); + depc = read_c0_depc(); + debug = read_c0_debug(); + printk("c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); + if (debug & 0x80000000) { + /* + * In branch delay slot. + * We cheat a little bit here and use EPC to calculate the + * debug return address (DEPC). EPC is restored after the + * calculation. + */ + old_epc = regs->cp0_epc; + regs->cp0_epc = depc; + __compute_return_epc(regs); + depc = regs->cp0_epc; + regs->cp0_epc = old_epc; + } else + depc += 4; + write_c0_depc(depc); + +#if 0 + printk("\n\n----- Enable EJTAG single stepping ----\n\n"); + write_c0_debug(debug | 0x100); +#endif +} + +/* + * NMI exception handler. 
+ */ +void nmi_exception_handler(struct pt_regs *regs) +{ + printk("NMI taken!!!!\n"); + die("NMI", regs); + while(1) ; +} + unsigned long exception_handlers[32]; /* @@ -761,11 +784,11 @@ unsigned long exception_handlers[32]; */ void *set_except_vector(int n, void *addr) { - unsigned handler = (unsigned long) addr; - unsigned old_handler = exception_handlers[n]; - exception_handlers[n] = handler; + unsigned long handler = (unsigned long) addr; + unsigned long old_handler = exception_handlers[n]; - if (n == 0 && mips_cpu.options & MIPS_CPU_DIVEC) { + exception_handlers[n] = handler; + if (n == 0 && cpu_has_divec) { *(volatile u32 *)(KSEG0+0x200) = 0x08000000 | (0x03ffffff & (handler >> 2)); flush_icache_range(KSEG0+0x200, KSEG0 + 0x204); @@ -775,56 +798,70 @@ void *set_except_vector(int n, void *addr) asmlinkage int (*save_fp_context)(struct sigcontext *sc); asmlinkage int (*restore_fp_context)(struct sigcontext *sc); + extern asmlinkage int _save_fp_context(struct sigcontext *sc); extern asmlinkage int _restore_fp_context(struct sigcontext *sc); extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc); extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc); +void __init per_cpu_trap_init(void) +{ + unsigned int cpu = smp_processor_id(); + + /* Some firmware leaves the BEV flag set, clear it. */ + clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_BEV); + + /* + * Some MIPS CPUs have a dedicated interrupt vector which reduces the + * interrupt processing overhead. Use it where available. + */ + if (cpu_has_divec) + set_c0_cause(CAUSEF_IV); + + cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; + write_c0_context(cpu << 23); +} + void __init trap_init(void) { - extern char except_vec0_nevada, except_vec0_r4000; - extern char except_vec0_r4600, except_vec0_r2300; - extern char except_vec1_generic, except_vec2_generic; + extern char except_vec1_generic; extern char except_vec3_generic, except_vec3_r4000; - extern char except_vec4; extern char except_vec_ejtag_debug; + extern char except_vec4; unsigned long i; - /* Some firmware leaves the BEV flag set, clear it. */ - clear_cp0_status(ST0_BEV); + per_cpu_trap_init(); /* Copy the generic exception handler code to its final destination. */ memcpy((void *)(KSEG0 + 0x80), &except_vec1_generic, 0x80); - memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80); - memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80); - flush_icache_range(KSEG0 + 0x80, KSEG0 + 0x200); + /* * Setup default vectors */ for (i = 0; i <= 31; i++) set_except_vector(i, handle_reserved); - /* - * Copy the EJTAG debug exception vector handler code to its final + /* + * Copy the EJTAG debug exception vector handler code to its final * destination. */ - memcpy((void *)(KSEG0 + 0x300), &except_vec_ejtag_debug, 0x80); + if (cpu_has_ejtag) + memcpy((void *)(KSEG0 + 0x300), &except_vec_ejtag_debug, 0x80); /* * Only some CPUs have the watch exceptions or a dedicated * interrupt vector. */ - watch_init(); + if (cpu_has_watch) + set_except_vector(23, handle_watch); /* * Some MIPS CPUs have a dedicated interrupt vector which reduces the * interrupt processing overhead. Use it where available.
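 * With the IV bit set in c0_cause (see per_cpu_trap_init above), interrupts are dispatched through a separate vector at offset 0x200 rather than through the general exception vector, which is why except_vec4 is copied to KSEG0 + 0x200 below.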
*/ - if (mips_cpu.options & MIPS_CPU_DIVEC) { - memcpy((void *)(KSEG0 + 0x200), &except_vec4, 8); - set_cp0_cause(CAUSEF_IV); - } + if (cpu_has_divec) + memcpy((void *)(KSEG0 + 0x200), &except_vec4, 0x8); /* * Some CPUs can enable/disable for cache parity detection, but does @@ -832,21 +869,22 @@ void __init trap_init(void) */ parity_protection_init(); + /* + * The Data Bus Errors / Instruction Bus Errors are signaled + * by external hardware. Therefore these two exceptions + * may have board specific handlers. + */ + if (board_be_init) + board_be_init(); + set_except_vector(1, handle_mod); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); set_except_vector(4, handle_adel); set_except_vector(5, handle_ades); - /* - * The Data Bus Error/ Instruction Bus Errors are signaled - * by external hardware. Therefore these two expection have - * board specific handlers. - */ set_except_vector(6, handle_ibe); set_except_vector(7, handle_dbe); - ibe_board_handler = default_be_board_handler; - dbe_board_handler = default_be_board_handler; set_except_vector(8, handle_sys); set_except_vector(9, handle_bp); @@ -854,59 +892,23 @@ void __init trap_init(void) set_except_vector(11, handle_cpu); set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); + set_except_vector(22, handle_mdmx); - if (mips_cpu.options & MIPS_CPU_FPU) + if (cpu_has_fpu && !cpu_has_nofpuex) set_except_vector(15, handle_fpe); - /* - * Handling the following exceptions depends mostly of the cpu type - */ - if ((mips_cpu.options & MIPS_CPU_4KEX) - && (mips_cpu.options & MIPS_CPU_4KTLB)) { - if (mips_cpu.cputype == CPU_NEVADA) { - memcpy((void *)KSEG0, &except_vec0_nevada, 0x80); - } else if (mips_cpu.cputype == CPU_R4600) - memcpy((void *)KSEG0, &except_vec0_r4600, 0x80); - else - memcpy((void *)KSEG0, &except_vec0_r4000, 0x80); - - /* Cache error vector already set above. 
*/ - - if (mips_cpu.options & MIPS_CPU_VCE) { - memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, - 0x80); - } else { - memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, - 0x80); - } + if (cpu_has_mcheck) + set_except_vector(24, handle_mcheck); - if (mips_cpu.options & MIPS_CPU_FPU) { - save_fp_context = _save_fp_context; - restore_fp_context = _restore_fp_context; - } else { - save_fp_context = fpu_emulator_save_context; - restore_fp_context = fpu_emulator_restore_context; - } - } else switch (mips_cpu.cputype) { - case CPU_SB1: - /* - * XXX - This should be folded in to the "cleaner" handling, - * above - */ - memcpy((void *)KSEG0, &except_vec0_r4000, 0x80); + if (cpu_has_vce) memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, 0x80); - save_fp_context = _save_fp_context; - restore_fp_context = _restore_fp_context; + else if (cpu_has_4kex) + memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80); + else + memcpy((void *)(KSEG0 + 0x080), &except_vec3_generic, 0x80); - /* Enable timer interrupt and scd mapped interrupt */ - clear_cp0_status(0xf000); - set_cp0_status(0xc00); - break; - case CPU_R6000: - case CPU_R6000A: - save_fp_context = _save_fp_context; - restore_fp_context = _restore_fp_context; - + if (current_cpu_data.cputype == CPU_R6000 || + current_cpu_data.cputype == CPU_R6000A) { /* * The R6000 is the only R-series CPU that features a machine * check exception (similar to the R4000 cache error) and @@ -917,34 +919,24 @@ void __init trap_init(void) */ //set_except_vector(14, handle_mc); //set_except_vector(15, handle_ndc); - case CPU_R2000: - case CPU_R3000: - case CPU_R3000A: - case CPU_R3041: - case CPU_R3051: - case CPU_R3052: - case CPU_R3081: - case CPU_R3081E: - case CPU_TX3912: - case CPU_TX3922: - case CPU_TX3927: + } + + if (cpu_has_fpu) { save_fp_context = _save_fp_context; restore_fp_context = _restore_fp_context; - memcpy((void *)KSEG0, &except_vec0_r2300, 0x80); - memcpy((void *)(KSEG0 + 0x80), &except_vec3_generic, 0x80); - break; - - case CPU_UNKNOWN: - default: - panic("Unknown CPU type"); + } else { + save_fp_context = fpu_emulator_save_context; + restore_fp_context = fpu_emulator_restore_context; } - flush_icache_range(KSEG0, KSEG0 + 0x200); - if (mips_cpu.isa_level == MIPS_CPU_ISA_IV) - set_cp0_status(ST0_XX); + flush_icache_range(KSEG0, KSEG0 + 0x400); + + if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + set_c0_status(ST0_XX); - atomic_inc(&init_mm.mm_count); /* XXX UP? */ + atomic_inc(&init_mm.mm_count); /* XXX UP? */ current->active_mm = &init_mm; - write_32bit_cp0_register(CP0_CONTEXT, smp_processor_id()<<23); - current_pgd[0] = init_mm.pgd; + + /* XXX Must be done for all CPUs */ + TLBMISS_HANDLER_SETUP(); } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 2c20262b6717..ba891c124afd 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1996, 1998 by Ralf Baechle + * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. 
* * This file contains exception handler for address error exception with the @@ -41,7 +41,7 @@ * * #include <stdio.h> * #include <asm/sysmips.h> - * + * * struct foo { * unsigned char bar[8]; * }; @@ -74,6 +74,7 @@ */ #include <linux/config.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/smp_lock.h> @@ -88,23 +89,22 @@ #define STR(x) __STR(x) #define __STR(x) #x -/* - * User code may only access USEG; kernel code may access the - * entire address space. - */ -#define check_axs(pc,a,s) \ - if ((long)(~(pc) & ((a) | ((a)+(s)))) < 0) \ - goto sigbus; +#ifdef CONFIG_PROC_FS +unsigned long unaligned_instructions; +#endif -static inline void -emulate_load_store_insn(struct pt_regs *regs, - unsigned long addr, - unsigned long pc) +static inline int emulate_load_store_insn(struct pt_regs *regs, + void *addr, unsigned long pc, + unsigned long **regptr, unsigned long *newvalue) { union mips_instruction insn; - unsigned long value, fixup; + unsigned long value; + const struct exception_table_entry *fixup; + unsigned int res; regs->regs[0] = 0; + *regptr=NULL; + /* * This load never faults. */ @@ -144,186 +144,295 @@ emulate_load_store_insn(struct pt_regs *regs, * The remaining opcodes are the ones that are really of interest. */ case lh_op: - check_axs(pc, addr, 2); - __asm__( - ".set\tnoat\n" + if (verify_area(VERIFY_READ, addr, 2)) + goto sigbus; + + __asm__ __volatile__ (".set\tnoat\n" #ifdef __BIG_ENDIAN - "1:\tlb\t%0,0(%1)\n" - "2:\tlbu\t$1,1(%1)\n\t" + "1:\tlb\t%0, 0(%2)\n" + "2:\tlbu\t$1, 1(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN - "1:\tlb\t%0,1(%1)\n" - "2:\tlbu\t$1,0(%1)\n\t" + "1:\tlb\t%0, 1(%2)\n" + "2:\tlbu\t$1, 0(%2)\n\t" #endif - "sll\t%0,0x8\n\t" - "or\t%0,$1\n\t" - ".set\tat\n\t" + "sll\t%0, 0x8\n\t" + "or\t%0, $1\n\t" + "li\t%1, 0\n" + "3:\t.set\tat\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - :"=&r" (value) - :"r" (addr), "i" (&&fault) - :"$1"); - regs->regs[insn.i_format.rt] = value; - return; + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + *newvalue = value; + *regptr = &regs->regs[insn.i_format.rt]; + break; case lw_op: - check_axs(pc, addr, 4); - __asm__( + if (verify_area(VERIFY_READ, addr, 4)) + goto sigbus; + + __asm__ __volatile__ ( #ifdef __BIG_ENDIAN - "1:\tlwl\t%0,(%1)\n" - "2:\tlwr\t%0,3(%1)\n\t" + "1:\tlwl\t%0, (%2)\n" + "2:\tlwr\t%0, 3(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN - "1:\tlwl\t%0,3(%1)\n" - "2:\tlwr\t%0,(%1)\n\t" + "1:\tlwl\t%0, 3(%2)\n" + "2:\tlwr\t%0, (%2)\n\t" #endif + "li\t%1, 0\n" + "3:\t.section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - :"=&r" (value) - :"r" (addr), "i" (&&fault)); - regs->regs[insn.i_format.rt] = value; - return; + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + *newvalue = value; + *regptr = &regs->regs[insn.i_format.rt]; + break; case lhu_op: - check_axs(pc, addr, 2); - __asm__( + if (verify_area(VERIFY_READ, addr, 2)) + goto sigbus; + + __asm__ __volatile__ ( ".set\tnoat\n" #ifdef __BIG_ENDIAN - "1:\tlbu\t%0,0(%1)\n" - "2:\tlbu\t$1,1(%1)\n\t" + "1:\tlbu\t%0, 0(%2)\n" + "2:\tlbu\t$1, 1(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN -
"1:\tlbu\t%0,1(%1)\n" - "2:\tlbu\t$1,0(%1)\n\t" + "1:\tlbu\t%0, 1(%2)\n" + "2:\tlbu\t$1, 0(%2)\n\t" #endif - "sll\t%0,0x8\n\t" - "or\t%0,$1\n\t" - ".set\tat\n\t" + "sll\t%0, 0x8\n\t" + "or\t%0, $1\n\t" + "li\t%1, 0\n" + "3:\t.set\tat\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - :"=&r" (value) - :"r" (addr), "i" (&&fault) - :"$1"); - regs->regs[insn.i_format.rt] = value; - return; + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + *newvalue = value; + *regptr = ®s->regs[insn.i_format.rt]; + break; case lwu_op: - check_axs(pc, addr, 4); - __asm__( +#ifdef CONFIG_MIPS64 + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (verify_area(VERIFY_READ, addr, 4)) + goto sigbus; + + __asm__ __volatile__ ( #ifdef __BIG_ENDIAN - "1:\tlwl\t%0,(%1)\n" - "2:\tlwr\t%0,3(%1)\n\t" + "1:\tlwl\t%0, (%2)\n" + "2:\tlwr\t%0, 3(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN - "1:\tlwl\t%0,3(%1)\n" - "2:\tlwr\t%0,(%1)\n\t" + "1:\tlwl\t%0, 3(%2)\n" + "2:\tlwr\t%0, (%2)\n\t" #endif + "dsll\t%0, %0, 32\n\t" + "dsrl\t%0, %0, 32\n\t" + "li\t%1, 0\n" + "3:\t.section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - :"=&r" (value) - :"r" (addr), "i" (&&fault)); - value &= 0xffffffff; - regs->regs[insn.i_format.rt] = value; - return; + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + *newvalue = value; + *regptr = ®s->regs[insn.i_format.rt]; + break; +#endif /* CONFIG_MIPS64 */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; case ld_op: - check_axs(pc, addr, 8); - __asm__( - ".set\tmips3\n" +#ifdef CONFIG_MIPS64 + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (verify_area(VERIFY_READ, addr, 8)) + goto sigbus; + + __asm__ __volatile__ ( #ifdef __BIG_ENDIAN - "1:\tldl\t%0,(%1)\n" - "2:\tldr\t%0,7(%1)\n\t" + "1:\tldl\t%0, (%2)\n" + "2:\tldr\t%0, 7(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN - "1:\tldl\t%0,7(%1)\n" - "2:\tldr\t%0,(%1)\n\t" + "1:\tldl\t%0, 7(%2)\n" + "2:\tldr\t%0, (%2)\n\t" #endif - ".set\tmips0\n\t" + "li\t%1, 0\n" + "3:\t.section\t.fixup,\"ax\"\n\t" + "4:\tli\t%1, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - :"=&r" (value) - :"r" (addr), "i" (&&fault)); - regs->regs[insn.i_format.rt] = value; - return; + : "=&r" (value), "=r" (res) + : "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + *newvalue = value; + *regptr = ®s->regs[insn.i_format.rt]; + break; +#endif /* CONFIG_MIPS64 */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; case sh_op: - check_axs(pc, addr, 2); + if (verify_area(VERIFY_WRITE, addr, 2)) + goto sigbus; + value = regs->regs[insn.i_format.rt]; - __asm__( + __asm__ __volatile__ ( #ifdef __BIG_ENDIAN ".set\tnoat\n" - "1:\tsb\t%0,1(%1)\n\t" - "srl\t$1,%0,0x8\n" - "2:\tsb\t$1,0(%1)\n\t" + "1:\tsb\t%1, 1(%2)\n\t" + "srl\t$1, %1, 0x8\n" + "2:\tsb\t$1, 0(%2)\n\t" ".set\tat\n\t" #endif #ifdef __LITTLE_ENDIAN ".set\tnoat\n" - "1:\tsb\t%0,0(%1)\n\t" - "srl\t$1,%0,0x8\n" - "2:\tsb\t$1,1(%1)\n\t" + "1:\tsb\t%1, 0(%2)\n\t" + "srl\t$1,%1, 0x8\n" + "2:\tsb\t$1, 1(%2)\n\t" ".set\tat\n\t" #endif + "li\t%0, 0\n" + "3:\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%0, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - : /* no outputs */ - :"r" (value), "r" (addr), "i" (&&fault) - :"$1"); - return; + : "=r" (res) + : "r" (value), "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + break; case sw_op: - check_axs(pc, addr, 4); + if (verify_area(VERIFY_WRITE, addr, 4)) + goto sigbus; + value = regs->regs[insn.i_format.rt]; - __asm__( + __asm__ __volatile__ ( #ifdef __BIG_ENDIAN - "1:\tswl\t%0,(%1)\n" - "2:\tswr\t%0,3(%1)\n\t" + "1:\tswl\t%1,(%2)\n" + "2:\tswr\t%1, 3(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN - "1:\tswl\t%0,3(%1)\n" - "2:\tswr\t%0,(%1)\n\t" + "1:\tswl\t%1, 3(%2)\n" + "2:\tswr\t%1, (%2)\n\t" #endif + "li\t%0, 0\n" + "3:\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%0, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - : /* no outputs */ - :"r" (value), "r" (addr), "i" (&&fault)); - return; + : "=r" (res) + : "r" (value), "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + break; case sd_op: - check_axs(pc, addr, 8); +#ifdef CONFIG_MIPS64 + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (verify_area(VERIFY_WRITE, addr, 8)) + goto sigbus; + value = regs->regs[insn.i_format.rt]; - __asm__( - ".set\tmips3\n" + __asm__ __volatile__ ( #ifdef __BIG_ENDIAN - "1:\tsdl\t%0,(%1)\n" - "2:\tsdr\t%0,7(%1)\n\t" + "1:\tsdl\t%1,(%2)\n" + "2:\tsdr\t%1, 7(%2)\n\t" #endif #ifdef __LITTLE_ENDIAN - "1:\tsdl\t%0,7(%1)\n" - "2:\tsdr\t%0,(%1)\n\t" + "1:\tsdl\t%1, 7(%2)\n" + "2:\tsdr\t%1, (%2)\n\t" #endif - ".set\tmips0\n\t" + "li\t%0, 0\n" + "3:\n\t" + ".section\t.fixup,\"ax\"\n\t" + "4:\tli\t%0, %3\n\t" + "j\t3b\n\t" + ".previous\n\t" ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b,%2\n\t" - STR(PTR)"\t2b,%2\n\t" + STR(PTR)"\t1b, 4b\n\t" + STR(PTR)"\t2b, 4b\n\t" ".previous" - : /* no outputs */ - :"r" (value), "r" (addr), "i" (&&fault)); - return; + : "=r" (res) + : "r" (value), "r" (addr), "i" (-EFAULT)); + if (res) + goto fault; + break; +#endif /* CONFIG_MIPS64 */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; case lwc1_op: case ldc1_op: @@ -352,78 +461,97 @@ emulate_load_store_insn(struct pt_regs *regs, */ goto sigill; } - return; + +#ifdef CONFIG_PROC_FS + unaligned_instructions++; +#endif + + return 0; fault: /* Did we have an exception handler installed? */ - fixup = search_exception_table(regs->cp0_epc); + fixup = search_exception_tables(exception_epc(regs)); if (fixup) { - long new_epc; - new_epc = fixup_exception(dpf_reg, fixup, regs->cp0_epc); + unsigned long new_epc = fixup->nextinsn; printk(KERN_DEBUG "%s: Forwarding exception at [<%lx>] (%lx)\n", current->comm, regs->cp0_epc, new_epc); regs->cp0_epc = new_epc; - return; + return 1; } die_if_kernel ("Unhandled kernel unaligned access", regs); send_sig(SIGSEGV, current, 1); - return; + + return 0; + sigbus: - die_if_kernel ("Unhandled kernel unaligned access", regs); + die_if_kernel("Unhandled kernel unaligned access", regs); send_sig(SIGBUS, current, 1); - return; + + return 0; + sigill: - die_if_kernel ("Unhandled kernel unaligned access or invalid instruction", regs); + die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); send_sig(SIGILL, current, 1); - return; -} -#ifdef CONFIG_PROC_FS -unsigned long unaligned_instructions; -#endif + return 0; +} asmlinkage void do_ade(struct pt_regs *regs) { - unsigned long pc; + unsigned long *regptr, newval; extern int do_dsemulret(struct pt_regs *); + mm_segment_t seg; + unsigned long pc; - /* - * Address errors may be deliberately induced - * by the FPU emulator to take retake control - * of the CPU after executing the instruction - * in the delay slot of an emulated branch. + /* + * Address errors may be deliberately induced by the FPU emulator to + * retake control of the CPU after executing the instruction in the + * delay slot of an emulated branch. */ - - if ((unsigned long)regs->cp0_epc == current->thread.dsemul_aerpc) { - do_dsemulret(regs); + /* Terminate if exception was recognized as a delay slot return */ + if (do_dsemulret(regs)) return; - } + + /* Otherwise handle as normal */ /* * Did we catch a fault trying to load an instruction? - * This also catches attempts to activate MIPS16 code on - * CPUs which don't support it. + * Or are we running in MIPS16 mode? */ - if (regs->cp0_badvaddr == regs->cp0_epc) + if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) goto sigbus; - pc = regs->cp0_epc + ((regs->cp0_cause & CAUSEF_BD) ? 
4 : 0); - if (compute_return_epc(regs)) - return; + pc = exception_epc(regs); if ((current->thread.mflags & MF_FIXADE) == 0) goto sigbus; - emulate_load_store_insn(regs, regs->cp0_badvaddr, pc); -#ifdef CONFIG_PROC_FS - unaligned_instructions++; -#endif + /* + * Do branch emulation only if we didn't forward the exception. + * This is all so but ugly ... + */ + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + if (!emulate_load_store_insn(regs, (void *)regs->cp0_badvaddr, pc, + ®ptr, &newval)) { + compute_return_epc(regs); + /* + * Now that branch is evaluated, update the dest + * register if necessary + */ + if (regptr) + *regptr = newval; + } + set_fs(seg); return; sigbus: - die_if_kernel ("Kernel unaligned instruction access", regs); + die_if_kernel("Kernel unaligned instruction access", regs); force_sig(SIGBUS, current); - return; + /* + * XXX On return from the signal handler we should advance the epc + */ } diff --git a/arch/mips/kernel/vm86.c b/arch/mips/kernel/vm86.c deleted file mode 100644 index c0c775fba676..000000000000 --- a/arch/mips/kernel/vm86.c +++ /dev/null @@ -1,13 +0,0 @@ -/* - * arch/mips/vm86.c - * - * Copyright (C) 1994 Waldorf GMBH, - * written by Ralf Baechle - */ -#include <linux/linkage.h> -#include <linux/errno.h> - -asmlinkage int sys_vm86(void *v86) -{ - return -ENOSYS; -} |
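The emulation above leans on the MIPS lwl/lwr (and ldl/ldr, swl/swr, sdl/sdr) pairs: each partner of a pair touches only the bytes on its side of the alignment boundary, so together they move a full word without ever issuing a misaligned access. For orientation, a rough byte-at-a-time C model of the load direction — the function name is invented and this is only a sketch of the effect, not how the handler does it (the handler uses the inline assembler shown in the hunk):

#include <stdint.h>
#include <stdio.h>

/* Model of what the lwl/lwr pair accomplishes for an unaligned load:
 * fetch the four bytes individually and merge them.  Big-endian merge
 * order shown; the handler selects the mirror-image sequence under
 * #ifdef __LITTLE_ENDIAN. */
static uint32_t load32_unaligned(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	/* buf + 1 is not 4-byte aligned; a plain lw here would raise the
	 * address error exception this file handles. */
	printf("%08x\n", load32_unaligned(buf + 1));
	return 0;
}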
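The patch also retires the old check_axs()/&&fault scheme in favour of verify_area() plus the generic .fixup/__ex_table machinery: every potentially faulting load or store gets an __ex_table entry pairing its address with a landing pad (label 4: in the asm) that loads -EFAULT into res, and search_exception_tables() maps a trapping EPC back to that pad. A simplified model of the lookup — the struct layout mirrors the fixup->nextinsn use above, but the flat array and linear scan are assumptions for illustration; the real table lives in a dedicated ELF section and is searched by the generic kernel code:

/* Minimal sketch of an exception-table lookup, assuming a flat array. */
struct ex_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long nextinsn;	/* fixup code to resume at after a fault */
};

static const struct ex_entry *find_fixup(const struct ex_entry *tbl,
					 unsigned int n, unsigned long epc)
{
	unsigned int i;

	for (i = 0; i < n; i++)	/* the real search is by sorted address */
		if (tbl[i].insn == epc)
			return &tbl[i];
	return 0;		/* no handler installed: the fault is fatal */
}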
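The reworked calling convention — emulate_load_store_insn() reporting its result through regptr/newvalue instead of writing regs->regs[] directly — is there because of branch delay slots: compute_return_epc() may have to evaluate a branch whose operand registers must still hold their values from before the faulting load, so do_ade() commits the loaded value only once the branch target is settled. Condensed from the hunk above:

	/* Two-phase commit as done in do_ade(): emulate first, then
	 * resolve the branch, then retire the register write. */
	if (!emulate_load_store_insn(regs, (void *)regs->cp0_badvaddr, pc,
	                             &regptr, &newval)) {
		compute_return_epc(regs);	/* must see pre-load registers */
		if (regptr)
			*regptr = newval;	/* retire the emulated load */
	}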
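Finally, the file-header comment touched by the first hunk refers to a small user-space demonstration built around struct foo and sysmips(MIPS_FIXADE, ...). Only fragments of it are visible in this diff, so the listing below is merely a plausible shape, not the original — the header path, the sysmips() arguments and the I/O are all guesses:

#include <stdio.h>
#include <sys/sysmips.h>	/* assumed glibc home of sysmips()/MIPS_FIXADE */

struct foo {
	unsigned char bar[8];
};

static struct foo x;

int main(void)
{
	/* x.bar + 1 cannot be 4-byte aligned, whatever the struct's base. */
	volatile unsigned int *p = (unsigned int *)(x.bar + 1);

	sysmips(MIPS_FIXADE, 1);	/* assumed: ask the kernel to fix us up */
	*p = 0xdeadbeef;		/* unaligned sw, trapped and emulated */
	printf("%08x\n", *p);		/* unaligned lw, likewise */
	return 0;
}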
