From 47d696a99c502d7f75d3cf77ec1feb21ffc0ac9f Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Mon, 18 Aug 2003 01:31:59 -0700 Subject: [power] Improve suspend functions. - Implement pm_suspend(), which is callable from anywhere in the kernel, and takes one of PM_SUSPEND_STANDBY PM_SUSPEND_MEM PM_SUSPEND_DISK and enters the appropriate state. - Change sysfs file to look for "standby" "mem" "disk" for what state to enter (rather than 'suspend' and 'hibernate' for the latter two). - Add pm_sem to block multiple suspend sequences happening at once. - Allocate a console and stop processes from common code before entering state. - Add pm_power_down() callback for platform drivers to implement. Will be called to actually enter the low-power state. --- include/linux/pm.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'include') diff --git a/include/linux/pm.h b/include/linux/pm.h index e4c795f71cea..a5e4d115d172 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -186,9 +186,31 @@ static inline void pm_dev_idle(struct pm_dev *dev) {} #endif /* CONFIG_PM */ + +/* + * Callbacks for platform drivers to implement. + */ extern void (*pm_idle)(void); extern void (*pm_power_off)(void); +enum { + PM_SUSPEND_ON, + PM_SUSPEND_STANDBY, + PM_SUSPEND_MEM, + PM_SUSPEND_DISK, + PM_SUSPEND_MAX, +}; + +extern int (*pm_power_down)(u32 state); + + +extern int pm_suspend(u32 state); + + +/* + * Device power management + */ + struct device; struct dev_pm_info { -- cgit v1.2.3 From e533deb559deeb04ca83c2612154682aa22ebbe9 Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Mon, 18 Aug 2003 02:40:36 -0700 Subject: [power] Improve suspend sequence. - Expand pm_power_down() into struct pm_ops, with ->prepare(), ->enter() and ->finish() methods, so the platform drivers get called to do start and stop work during suspend sequence. 
- Make sure devices are suspended/resumed in enter_state(), and that they are powered down in pm_suspend_mem() and pm_suspend_standby(). - Call ->prepare() in suspend_prepare() and ->finish() in suspend_finish(). --- include/linux/pm.h | 7 ++- kernel/power/main.c | 126 +++++++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 110 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/pm.h b/include/linux/pm.h index a5e4d115d172..15b3f5efa692 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -201,8 +201,13 @@ enum { PM_SUSPEND_MAX, }; -extern int (*pm_power_down)(u32 state); +struct pm_ops { + int (*prepare)(u32 state); + int (*enter)(u32 state); + int (*finish)(u32 state); +}; +extern void pm_set_ops(struct pm_ops *); extern int pm_suspend(u32 state); diff --git a/kernel/power/main.c b/kernel/power/main.c index 79301a45ea8a..12e2989ef9e1 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -16,20 +16,72 @@ #include -int (*pm_power_down)(u32 state) = NULL; +static DECLARE_MUTEX(pm_sem); +static struct pm_ops * pm_ops = NULL; + +/** + * pm_set_ops - Set the global power method table. + * @ops: Pointer to ops structure. + */ + +void pm_set_ops(struct pm_ops * ops) +{ + down(&pm_sem); + pm_ops = ops; + up(&pm_sem); +} -static DECLARE_MUTEX(pm_sem); +/** + * pm_suspend_standby - Enter 'standby' state. + * + * 'standby' is also known as 'Power-On Suspend'. Here, we power down + * devices, disable interrupts, and enter the state. + */ static int pm_suspend_standby(void) { - return 0; + int error = 0; + unsigned long flags; + + if (!pm_ops || !pm_ops->enter) + return -EPERM; + + if ((error = device_pm_power_down(PM_SUSPEND_STANDBY))) + goto Done; + local_irq_save(flags); + error = pm_ops->enter(PM_SUSPEND_STANDBY); + local_irq_restore(flags); + device_pm_power_up(); + Done: + return error; } + +/** + * pm_suspend_mem - Enter suspend-to-RAM state. 
+ * + * Identical to pm_suspend_standby() - we power down devices, disable + * interrupts, and enter the low-power state. + */ + static int pm_suspend_mem(void) { - return 0; + int error = 0; + unsigned long flags; + + if (!pm_ops || !pm_ops->enter) + return -EPERM; + + if ((error = device_pm_power_down(PM_SUSPEND_STANDBY))) + goto Done; + local_irq_save(flags); + error = pm_ops->enter(PM_SUSPEND_STANDBY); + local_irq_restore(flags); + device_pm_power_up(); + Done: + return error; } static int pm_suspend_disk(void) @@ -51,25 +103,51 @@ struct pm_state { }; -static int suspend_prepare(void) +/** + * suspend_prepare - Do prep work before entering low-power state. + * @state: State we're entering. + * + * This is common code that is called for each state that we're + * entering. Allocate a console, stop all processes, then make sure + * the platform can enter the requested state. + */ + +static int suspend_prepare(u32 state) { int error = 0; pm_prepare_console(); if (freeze_processes()) { - thaw_processes(); error = -EAGAIN; - goto Done; + goto Thaw; } + if (pm_ops && pm_ops->prepare) { + if ((error = pm_ops->prepare(state))) + goto Thaw; + } Done: pm_restore_console(); return error; + Thaw: + thaw_processes(); + goto Done; } -static void suspend_finish(void) + +/** + * suspend_finish - Do final work before exiting suspend sequence. + * @state: State we're coming out of. + * + * Call platform code to clean up, restart processes, and free the + * console that we've allocated. + */ + +static void suspend_finish(u32 state) { + if (pm_ops && pm_ops->finish) + pm_ops->finish(state); thaw_processes(); pm_restore_console(); } @@ -86,23 +164,25 @@ static void suspend_finish(void) * we've woken up). 
*/ -static int enter_state(struct pm_state * state) +static int enter_state(u32 state) { int error; + struct pm_state * s = &pm_states[state]; if (down_trylock(&pm_sem)) return -EBUSY; - if (!pm_power_down) { - error = -EPERM; + if ((error = suspend_prepare(state))) goto Unlock; - } - if ((error = suspend_prepare())) - return error; + if ((error = device_pm_suspend(state))) + goto Finish; - error = state->fn(); - suspend_finish(); + error = s->fn(); + + device_pm_resume(); + Finish: + suspend_finish(state); Unlock: up(&pm_sem); return error; @@ -120,7 +200,7 @@ static int enter_state(struct pm_state * state) int pm_suspend(u32 state) { if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) - return enter_state(&pm_states[state]); + return enter_state(state); return -EINVAL; } @@ -160,20 +240,22 @@ static ssize_t state_show(struct subsystem * subsys, char * buf) return (s - buf); } -static ssize_t state_store(struct subsystem * s, const char * buf, size_t n) +static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n) { - struct pm_state * state; + u32 state; + struct pm_state * s; int error; char * end = strchr(buf,'\n'); if (end) *end = '\0'; - for (state = &pm_states[0]; state; state++) { - if (state->name && !strcmp(buf,state->name)) + for (state = 0; state < PM_SUSPEND_MAX; state++) { + s = &pm_states[state]; + if (s->name && !strcmp(buf,s->name)) break; } - if (state) + if (s) error = enter_state(state); else error = -EINVAL; -- cgit v1.2.3 From 4db335837b40105b914d6eab47785638dfa65059 Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Tue, 19 Aug 2003 01:26:30 -0700 Subject: [power] Add flag to control suspend-to-disk behavior. Suspend-to-disk can be handled in numerous ways, some we have control over, and others we don't. The biggest difference is whether or not the firmware is responsible for entering a low-power state or if the platform driver is. 
The two modes are incompatible, so we enable the platform driver tell the PM core when they register their pm_ops (via the ->pm_disk_mode) field. If the firmware is responsible, then it will also write memory to disk, while the kernel is otherwise responsible. However, a user may choose to use the in-kernel suspend mechanism, even if the system supports only the firmware mechanism. Instead of entering a low-power state, the system will turn off (or reboot for testing). A sysfs file -- /sys/power/disk -- is available to set the mode to one of: 'firmware' 'platform' 'shutdown' 'reboot' The latter two are settable any time, and assume that one is using swsusp. The other two are only settable to what the platform supports. --- drivers/acpi/sleep/main.c | 10 ++++-- include/linux/pm.h | 10 ++++++ kernel/power/main.c | 78 ++++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 91 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index 92c6840f6c23..5011051e6a94 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c @@ -165,9 +165,13 @@ static int __init acpi_sleep_init(void) sleep_states[i] = 1; printk(" S%d", i); } - if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f) { - sleep_states[i] = 1; - printk(" S4bios"); + if (i == ACPI_STATE_S4) { + if (acpi_gbl_FACS->S4bios_f) { + sleep_states[i] = 1; + printk(" S4bios"); + acpi_pm_ops.pm_disk_mode = PM_DISK_FIRMWARE; + } else if (sleep_states[i]) + acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM; } } printk(")\n"); diff --git a/include/linux/pm.h b/include/linux/pm.h index 15b3f5efa692..70282a7943b1 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -201,7 +201,17 @@ enum { PM_SUSPEND_MAX, }; +enum { + PM_DISK_FIRMWARE = 1, + PM_DISK_PLATFORM, + PM_DISK_SHUTDOWN, + PM_DISK_REBOOT, + PM_DISK_MAX, +}; + + struct pm_ops { + u32 pm_disk_mode; int (*prepare)(u32 state); int (*enter)(u32 state); int (*finish)(u32 state); diff --git 
a/kernel/power/main.c b/kernel/power/main.c index 71648de7aa81..98c16b847144 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -20,6 +20,9 @@ static DECLARE_MUTEX(pm_sem); static struct pm_ops * pm_ops = NULL; +static u32 pm_disk_mode = PM_DISK_SHUTDOWN; + + /** * pm_set_ops - Set the global power method table. * @ops: Pointer to ops structure. @@ -29,6 +32,8 @@ void pm_set_ops(struct pm_ops * ops) { down(&pm_sem); pm_ops = ops; + if (ops->pm_disk_mode && ops->pm_disk_mode < PM_DISK_MAX) + pm_disk_mode = ops->pm_disk_mode; up(&pm_sem); } @@ -224,6 +229,74 @@ static struct subsys_attribute _name##_attr = { \ .store = _name##_store, \ } + +static char * pm_disk_modes[] = { + [PM_DISK_FIRMWARE] = "firmware", + [PM_DISK_PLATFORM] = "platform", + [PM_DISK_SHUTDOWN] = "shutdown", + [PM_DISK_REBOOT] = "reboot", +}; + +/** + * disk - Control suspend-to-disk mode + * + * Suspend-to-disk can be handled in several ways. The greatest + * distinction is who writes memory to disk - the firmware or the OS. + * If the firmware does it, we assume that it also handles suspending + * the system. + * If the OS does it, then we have three options for putting the system + * to sleep - using the platform driver (e.g. ACPI or other PM registers), + * powering off the system or rebooting the system (for testing). + * + * The system will support either 'firmware' or 'platform', and that is + * known a priori (and encoded in pm_ops). But, the user may choose + * 'shutdown' or 'reboot' as alternatives. + * + * show() will display what the mode is currently set to. + * store() will accept one of + * + * 'firmware' + * 'platform' + * 'shutdown' + * 'reboot' + * + * It will only change to 'firmware' or 'platform' if the system + * supports it (as determined from pm_ops->pm_disk_mode). 
+ */ + +static ssize_t disk_show(struct subsystem * subsys, char * buf) +{ + return sprintf(buf,"%s\n",pm_disk_modes[pm_disk_mode]); +} + + +static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n) +{ + int i; + u32 mode = 0; + + for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) { + if (!strcmp(buf,pm_disk_modes[i])) { + mode = i; + break; + } + } + if (mode) { + if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT) + pm_disk_mode = mode; + else { + if (pm_ops && (mode == pm_ops->pm_disk_mode)) + pm_disk_mode = mode; + else + return -EINVAL; + } + return n; + } + return -EINVAL; +} + +power_attr(disk); + /** * state - control system power state. * @@ -251,10 +324,6 @@ static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n u32 state; struct pm_state * s; int error; - char * end = strchr(buf,'\n'); - - if (end) - *end = '\0'; for (state = 0; state < PM_SUSPEND_MAX; state++) { s = &pm_states[state]; @@ -272,6 +341,7 @@ power_attr(state); static struct attribute * g[] = { &state_attr.attr, + &disk_attr.attr, NULL, }; -- cgit v1.2.3 From 708df1b01bf75a0556663c489b41f1bb58851091 Mon Sep 17 00:00:00 2001 From: "Suresh B. 
Siddha" Date: Tue, 19 Aug 2003 02:55:30 -0700 Subject: [PATCH] ia64: cleanup inline assembly --- arch/ia64/boot/Makefile | 2 +- arch/ia64/boot/bootloader.c | 20 +- arch/ia64/boot/fw-emu.S | 119 ++++++++ arch/ia64/hp/sim/Makefile | 2 +- arch/ia64/hp/sim/hpsim.S | 11 + arch/ia64/hp/sim/hpsim_setup.c | 13 - arch/ia64/ia32/ia32_signal.c | 140 ++++----- arch/ia64/ia32/ia32_support.c | 32 +-- arch/ia64/ia32/ia32_traps.c | 6 +- arch/ia64/ia32/ia32priv.h | 24 +- arch/ia64/ia32/sys_ia32.c | 5 +- arch/ia64/kernel/entry.S | 12 + arch/ia64/kernel/fw-emu.c | 118 +------- arch/ia64/kernel/init_task.c | 2 +- arch/ia64/kernel/iosapic.c | 8 +- arch/ia64/kernel/irq_ia64.c | 13 +- arch/ia64/kernel/mca.c | 24 +- arch/ia64/kernel/perfmon.c | 44 +-- arch/ia64/kernel/setup.c | 10 +- arch/ia64/kernel/signal.c | 5 +- arch/ia64/kernel/traps.c | 30 +- arch/ia64/kernel/unaligned.c | 49 ++-- arch/ia64/mm/tlb.c | 12 +- arch/ia64/sn/fakeprom/fw-emu.c | 12 +- arch/ia64/sn/kernel/irq.c | 12 +- arch/ia64/sn/kernel/setup.c | 2 +- arch/ia64/sn/kernel/sn2/io.c | 70 ++--- arch/ia64/vmlinux.lds.S | 5 +- include/asm-ia64/atomic.h | 8 +- include/asm-ia64/bitops.h | 8 +- include/asm-ia64/byteorder.h | 3 +- include/asm-ia64/current.h | 3 +- include/asm-ia64/delay.h | 30 +- include/asm-ia64/gcc_intrin.h | 633 +++++++++++++++++++++++++++++++++++++++++ include/asm-ia64/ia64regs.h | 98 +++++++ include/asm-ia64/intrinsics.h | 101 ++++--- include/asm-ia64/io.h | 3 +- include/asm-ia64/machvec.h | 2 +- include/asm-ia64/mmu_context.h | 2 - include/asm-ia64/page.h | 3 +- include/asm-ia64/pal.h | 4 +- include/asm-ia64/processor.h | 403 ++++++-------------------- include/asm-ia64/rwsem.h | 11 +- include/asm-ia64/sal.h | 4 + include/asm-ia64/smp.h | 2 +- include/asm-ia64/sn/sn2/io.h | 34 ++- include/asm-ia64/sn/sn_cpuid.h | 6 +- include/asm-ia64/spinlock.h | 8 +- include/asm-ia64/system.h | 40 +-- include/asm-ia64/timex.h | 3 +- include/asm-ia64/tlbflush.h | 3 +- include/asm-ia64/unistd.h | 63 +--- 52 files changed, 
1377 insertions(+), 900 deletions(-) create mode 100644 arch/ia64/boot/fw-emu.S create mode 100644 arch/ia64/hp/sim/hpsim.S create mode 100644 include/asm-ia64/gcc_intrin.h create mode 100644 include/asm-ia64/ia64regs.h (limited to 'include') diff --git a/arch/ia64/boot/Makefile b/arch/ia64/boot/Makefile index b18c6323ee9b..65faf7474797 100644 --- a/arch/ia64/boot/Makefile +++ b/arch/ia64/boot/Makefile @@ -32,6 +32,6 @@ $(obj)/vmlinux.bin: vmlinux FORCE LDFLAGS_bootloader = -static -T -$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o \ +$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/fw-emu.o \ lib/lib.a arch/ia64/lib/lib.a FORCE $(call if_changed,ld) diff --git a/arch/ia64/boot/bootloader.c b/arch/ia64/boot/bootloader.c index 593667cbb74d..3be97b8a9afb 100644 --- a/arch/ia64/boot/bootloader.c +++ b/arch/ia64/boot/bootloader.c @@ -21,6 +21,7 @@ struct task_struct; /* forward declaration for elf.h */ #include #include #include +#include /* Simulator system calls: */ @@ -54,9 +55,9 @@ struct disk_stat { }; #include "../kernel/fw-emu.c" +extern void jmp_to_kernel(ulong sp, ulong bp, ulong e_entry); +extern void __bsw1(void); -/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... 
*/ -asm (".global printk; printk = 0"); /* * Set a break point on this function so that symbols are available to set breakpoints in @@ -98,9 +99,12 @@ _start (void) char *kpath, *args; long arglen = 0; - asm volatile ("movl gp=__gp;;" ::: "memory"); - asm volatile ("mov sp=%0" :: "r"(stack) : "memory"); - asm volatile ("bsw.1;;"); + extern __u64 __gp; + register unsigned long tmp = (unsigned long) &stack[0]; + + ia64_setreg(_IA64_REG_GP, __gp); + ia64_setreg(_IA64_REG_SP, tmp); + __bsw1(); ssc(0, 0, 0, 0, SSC_CONSOLE_INIT); @@ -195,15 +199,15 @@ _start (void) cons_write("starting kernel...\n"); /* fake an I/O base address: */ - asm volatile ("mov ar.k0=%0" :: "r"(0xffffc000000UL)); + ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL); bp = sys_fw_init(args, arglen); ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS); debug_break(); - asm volatile ("mov sp=%2; mov r28=%1; br.sptk.few %0" - :: "b"(e_entry), "r"(bp), "r"(__pa(&stack))); + tmp = __pa(&stack); + jmp_to_kernel(tmp, (unsigned long) bp, e_entry); cons_write("kernel returned!\n"); ssc(-1, 0, 0, 0, SSC_EXIT); diff --git a/arch/ia64/boot/fw-emu.S b/arch/ia64/boot/fw-emu.S new file mode 100644 index 000000000000..a24e72850d0e --- /dev/null +++ b/arch/ia64/boot/fw-emu.S @@ -0,0 +1,119 @@ +#include + +GLOBAL_ENTRY(ssc) + .regstk 5,0,0,0 + mov r15=in4 + break 0x80001 + br.ret.sptk.many b0 +END(ssc) + +GLOBAL_ENTRY(pal_emulator_static) + mov r8=-1 + mov r9=256 + ;; + cmp.gtu p6,p7=r9,r28 /* r28 <= 255? 
*/ +(p6) br.cond.sptk.few static + ;; + mov r9=512 + ;; + cmp.gtu p6,p7=r9,r28 +(p6) br.cond.sptk.few stacked + ;; +static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */ +(p7) br.cond.sptk.few 1f + ;; + mov r8=0 /* status = 0 */ + movl r9=0x100000000 /* tc.base */ + movl r10=0x0000000200000003 /* count[0], count[1] */ + movl r11=0x1000000000002000 /* stride[0], stride[1] */ + br.cond.sptk.few rp +1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */ +(p7) br.cond.sptk.few 1f + mov r8=0 /* status = 0 */ + movl r9 =0x100000064 /* proc_ratio (1/100) */ + movl r10=0x100000100 /* bus_ratio<<32 (1/256) */ + movl r11=0x100000064 /* itc_ratio<<32 (1/100) */ + ;; +1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */ +(p7) br.cond.sptk.few 1f + mov r8=0 /* status = 0 */ + mov r9=96 /* num phys stacked */ + mov r10=0 /* hints */ + mov r11=0 + br.cond.sptk.few rp +1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */ +(p7) br.cond.sptk.few 1f + mov r9=ar.lc + movl r8=524288 /* flush 512k million cache lines (16MB) */ + ;; + mov ar.lc=r8 + movl r8=0xe000000000000000 + ;; +.loop: fc r8 + add r8=32,r8 + br.cloop.sptk.few .loop + sync.i + ;; + srlz.i + ;; + mov ar.lc=r9 + mov r8=r0 + ;; +1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */ +(p7) br.cond.sptk.few 1f + mov r8=0 /* status = 0 */ + movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */ + mov r10=0 /* reserved */ + mov r11=0 /* reserved */ + mov r16=0xffff /* implemented PMC */ + mov r17=0xffff /* implemented PMD */ + add r18=8,r29 /* second index */ + ;; + st8 [r29]=r16,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r17,16 /* store implemented PMD */ + st8 [r18]=r0,16 /* clear remaining bits */ + mov r16=0xf0 /* cycles count capable PMC */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + mov r17=0x10 /* retired bundles capable PMC */ + ;; + st8 [r29]=r16,16 /* 
store cycles capable */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r17,16 /* store retired bundle capable */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; +1: br.cond.sptk.few rp +stacked: + br.ret.sptk.few rp +END(pal_emulator_static) + +GLOBAL_ENTRY(jmp_to_kernel) + .regstk 3,0,0,0 + mov sp=in0 + mov r28=in1 + mov b7=in0 + br.sptk.few b7 +END(jmp_to_kernel) + +GLOBAL_ENTRY(__bsw1) + bsw.1 + ;; + br.ret.sptk.many b0 +END(__bsw1) + +/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */ +.global printk; printk = 0 + diff --git a/arch/ia64/hp/sim/Makefile b/arch/ia64/hp/sim/Makefile index e8fba4e6f774..d10da47931d7 100644 --- a/arch/ia64/hp/sim/Makefile +++ b/arch/ia64/hp/sim/Makefile @@ -7,7 +7,7 @@ # Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com) # -obj-y := hpsim_irq.o hpsim_setup.o +obj-y := hpsim_irq.o hpsim_setup.o hpsim.o obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o obj-$(CONFIG_HP_SIMETH) += simeth.o diff --git a/arch/ia64/hp/sim/hpsim.S b/arch/ia64/hp/sim/hpsim.S new file mode 100644 index 000000000000..9c223a3a8b14 --- /dev/null +++ b/arch/ia64/hp/sim/hpsim.S @@ -0,0 +1,11 @@ +#include + +/* + * Simulator system call. + */ +GLOBAL_ENTRY(ia64_ssc) + mov r15=r36 + break 0x80001 + br.ret.sptk.many rp +END(ia64_ssc) + diff --git a/arch/ia64/hp/sim/hpsim_setup.c b/arch/ia64/hp/sim/hpsim_setup.c index 64ff3a9c6e3c..694fc86bfbd5 100644 --- a/arch/ia64/hp/sim/hpsim_setup.c +++ b/arch/ia64/hp/sim/hpsim_setup.c @@ -25,19 +25,6 @@ #include "hpsim_ssc.h" -/* - * Simulator system call. 
- */ -asm (".text\n" - ".align 32\n" - ".global ia64_ssc\n" - ".proc ia64_ssc\n" - "ia64_ssc:\n" - "mov r15=r36\n" - "break 0x80001\n" - "br.ret.sptk.many rp\n" - ".endp\n"); - void ia64_ssc_connect_irq (long intr, long irq) { diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c index 19de74e1edae..88e7ca03eabe 100644 --- a/arch/ia64/ia32/ia32_signal.c +++ b/arch/ia64/ia32/ia32_signal.c @@ -41,6 +41,8 @@ #define __IA32_NR_sigreturn 119 #define __IA32_NR_rt_sigreturn 173 +#include +#ifdef ASM_SUPPORTED register double f16 asm ("f16"); register double f17 asm ("f17"); register double f18 asm ("f18"); register double f19 asm ("f19"); register double f20 asm ("f20"); register double f21 asm ("f21"); @@ -50,6 +52,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25"); register double f26 asm ("f26"); register double f27 asm ("f27"); register double f28 asm ("f28"); register double f29 asm ("f29"); register double f30 asm ("f30"); register double f31 asm ("f31"); +#endif struct sigframe_ia32 { @@ -198,30 +201,6 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from) * All other fields unused... 
*/ -#define __ldfe(regnum, x) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldfe %0=[%1] ;;" :"=f"(__f__): "r"(x)); \ -}) - -#define __ldf8(regnum, x) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldf8 %0=[%1] ;;" :"=f"(__f__): "r"(x)); \ -}) - -#define __stfe(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ -}) - -#define __stf8(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ -}) - static int save_ia32_fpstate_live (struct _fpstate_ia32 *save) { @@ -239,17 +218,18 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save) return -EFAULT; /* Readin fsr, fcr, fir, fdr and copy onto fpstate */ - asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr)); - asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr)); - asm volatile ( "mov %0=ar.fir;" : "=r"(fir)); - asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr)); + fsr = ia64_getreg(_IA64_REG_AR_FSR); + fcr = ia64_getreg(_IA64_REG_AR_FCR); + fir = ia64_getreg(_IA64_REG_AR_FIR); + fdr = ia64_getreg(_IA64_REG_AR_FDR); + /* * We need to clear the exception state before calling the signal handler. Clear * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex * instruction. 
*/ new_fsr = fsr & ~0x80ff; - asm volatile ( "mov ar.fsr=%0;" :: "r"(new_fsr)); + ia64_setreg(_IA64_REG_AR_FSR, new_fsr); __put_user(fcr & 0xffff, &save->cw); __put_user(fsr & 0xffff, &save->sw); @@ -286,45 +266,45 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save) ia64f2ia32f(fpregp, &ptp->f11); copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - __stfe(fpregp, 12); + ia64_stfe(fpregp, 12); copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - __stfe(fpregp, 13); + ia64_stfe(fpregp, 13); copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - __stfe(fpregp, 14); + ia64_stfe(fpregp, 14); copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - __stfe(fpregp, 15); + ia64_stfe(fpregp, 15); copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - __stf8(&num128[0], 16); - __stf8(&num128[1], 17); + ia64_stf8(&num128[0], 16); + ia64_stf8(&num128[1], 17); copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 18); - __stf8(&num128[1], 19); + ia64_stf8(&num128[0], 18); + ia64_stf8(&num128[1], 19); copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 20); - __stf8(&num128[1], 21); + ia64_stf8(&num128[0], 20); + ia64_stf8(&num128[1], 21); copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 22); - __stf8(&num128[1], 23); + ia64_stf8(&num128[0], 22); + ia64_stf8(&num128[1], 23); copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 24); - __stf8(&num128[1], 25); + ia64_stf8(&num128[0], 24); + ia64_stf8(&num128[1], 25); copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 26); - __stf8(&num128[1], 27); + ia64_stf8(&num128[0], 26); + ia64_stf8(&num128[1], 27); copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 28); - 
__stf8(&num128[1], 29); + ia64_stf8(&num128[0], 28); + ia64_stf8(&num128[1], 29); copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32)); - __stf8(&num128[0], 30); - __stf8(&num128[1], 31); + ia64_stf8(&num128[0], 30); + ia64_stf8(&num128[1], 31); copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32)); return 0; } @@ -354,10 +334,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save) * should remain same while writing. * So, we do a read, change specific fields and write. */ - asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr)); - asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr)); - asm volatile ( "mov %0=ar.fir;" : "=r"(fir)); - asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr)); + fsr = ia64_getreg(_IA64_REG_AR_FSR); + fcr = ia64_getreg(_IA64_REG_AR_FCR); + fir = ia64_getreg(_IA64_REG_AR_FIR); + fdr = ia64_getreg(_IA64_REG_AR_FDR); __get_user(mxcsr, (unsigned int *)&save->mxcsr); /* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */ @@ -391,10 +371,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save) num64 = (num64 << 32) | lo; fdr = (fdr & (~0xffffffffffff)) | num64; - asm volatile ( "mov ar.fsr=%0;" :: "r"(fsr)); - asm volatile ( "mov ar.fcr=%0;" :: "r"(fcr)); - asm volatile ( "mov ar.fir=%0;" :: "r"(fir)); - asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr)); + ia64_setreg(_IA64_REG_AR_FSR, fsr); + ia64_setreg(_IA64_REG_AR_FCR, fcr); + ia64_setreg(_IA64_REG_AR_FIR, fir); + ia64_setreg(_IA64_REG_AR_FDR, fdr); /* * restore f8..f11 onto pt_regs @@ -420,45 +400,45 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save) ia32f2ia64f(&ptp->f11, fpregp); copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - __ldfe(12, fpregp); + ia64_ldfe(12, fpregp); copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - __ldfe(13, fpregp); + ia64_ldfe(13, fpregp); copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - __ldfe(14, fpregp); + ia64_ldfe(14, fpregp); 
copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - __ldfe(15, fpregp); + ia64_ldfe(15, fpregp); copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32)); - __ldf8(16, &num128[0]); - __ldf8(17, &num128[1]); + ia64_ldf8(16, &num128[0]); + ia64_ldf8(17, &num128[1]); copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32)); - __ldf8(18, &num128[0]); - __ldf8(19, &num128[1]); + ia64_ldf8(18, &num128[0]); + ia64_ldf8(19, &num128[1]); copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32)); - __ldf8(20, &num128[0]); - __ldf8(21, &num128[1]); + ia64_ldf8(20, &num128[0]); + ia64_ldf8(21, &num128[1]); copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32)); - __ldf8(22, &num128[0]); - __ldf8(23, &num128[1]); + ia64_ldf8(22, &num128[0]); + ia64_ldf8(23, &num128[1]); copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32)); - __ldf8(24, &num128[0]); - __ldf8(25, &num128[1]); + ia64_ldf8(24, &num128[0]); + ia64_ldf8(25, &num128[1]); copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32)); - __ldf8(26, &num128[0]); - __ldf8(27, &num128[1]); + ia64_ldf8(26, &num128[0]); + ia64_ldf8(27, &num128[1]); copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32)); - __ldf8(28, &num128[0]); - __ldf8(29, &num128[1]); + ia64_ldf8(28, &num128[0]); + ia64_ldf8(29, &num128[1]); copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32)); - __ldf8(30, &num128[0]); - __ldf8(31, &num128[1]); + ia64_ldf8(30, &num128[0]); + ia64_ldf8(31, &num128[1]); return 0; } @@ -705,7 +685,7 @@ setup_sigcontext_ia32 (struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate /* * `eflags' is in an ar register for this context */ - asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag)); + flag = ia64_getreg(_IA64_REG_AR_EFLAG); err |= __put_user((unsigned int)flag, &sc->eflags); err |= __put_user(regs->r12, &sc->esp_at_signal); err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss); 
@@ -790,10 +770,10 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int * * IA32 process's context. */ err |= __get_user(tmpflags, &sc->eflags); - asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag)); + flag = ia64_getreg(_IA64_REG_AR_EFLAG); flag &= ~0x40DD5; flag |= (tmpflags & 0x40DD5); - asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag)); + ia64_setreg(_IA64_REG_AR_EFLAG, flag); regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */ } diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c index df53dcdb72bb..651e3df7a2c4 100644 --- a/arch/ia64/ia32/ia32_support.c +++ b/arch/ia64/ia32/ia32_support.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "ia32priv.h" @@ -68,19 +69,11 @@ ia32_load_segment_descriptors (struct task_struct *task) void ia32_save_state (struct task_struct *t) { - unsigned long eflag, fsr, fcr, fir, fdr; - - asm ("mov %0=ar.eflag;" - "mov %1=ar.fsr;" - "mov %2=ar.fcr;" - "mov %3=ar.fir;" - "mov %4=ar.fdr;" - : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr)); - t->thread.eflag = eflag; - t->thread.fsr = fsr; - t->thread.fcr = fcr; - t->thread.fir = fir; - t->thread.fdr = fdr; + t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG); + t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR); + t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR); + t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR); + t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR); ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob); ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1); } @@ -99,12 +92,11 @@ ia32_load_state (struct task_struct *t) fdr = t->thread.fdr; tssd = load_desc(_TSS(nr)); /* TSSD */ - asm volatile ("mov ar.eflag=%0;" - "mov ar.fsr=%1;" - "mov ar.fcr=%2;" - "mov ar.fir=%3;" - "mov ar.fdr=%4;" - :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr)); + ia64_setreg(_IA64_REG_AR_EFLAG, eflag); + ia64_setreg(_IA64_REG_AR_FSR, fsr); + ia64_setreg(_IA64_REG_AR_FCR, fcr); + ia64_setreg(_IA64_REG_AR_FIR, fir); + 
ia64_setreg(_IA64_REG_AR_FDR, fdr); current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE); current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD); ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); @@ -178,7 +170,7 @@ void ia32_cpu_init (void) { /* initialize global ia32 state - CR0 and CR4 */ - asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0)); + ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0)); } static int __init diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c index fbdb10a580dc..0de400a6b177 100644 --- a/arch/ia64/ia32/ia32_traps.c +++ b/arch/ia64/ia32/ia32_traps.c @@ -15,6 +15,7 @@ #include "ia32priv.h" #include +#include int ia32_intercept (struct pt_regs *regs, unsigned long isr) @@ -93,9 +94,8 @@ ia32_exception (struct pt_regs *regs, unsigned long isr) { unsigned long fsr, fcr; - asm ("mov %0=ar.fsr;" - "mov %1=ar.fcr;" - : "=r"(fsr), "=r"(fcr)); + fsr = ia64_getreg(_IA64_REG_AR_FSR); + fcr = ia64_getreg(_IA64_REG_AR_FCR); siginfo.si_signo = SIGFPE; /* diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h index e830969c84ea..5f24a94696aa 100644 --- a/arch/ia64/ia32/ia32priv.h +++ b/arch/ia64/ia32/ia32priv.h @@ -445,17 +445,19 @@ extern int ia32_setup_arg_pages (struct linux_binprm *bprm); extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t); extern void ia32_load_segment_descriptors (struct task_struct *task); -#define ia32f2ia64f(dst,src) \ - do { \ - register double f6 asm ("f6"); \ - asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \ - } while(0) - -#define ia64f2ia32f(dst,src) \ - do { \ - register double f6 asm ("f6"); \ - asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \ - } while(0) +#define ia32f2ia64f(dst,src) \ +do { \ + ia64_ldfe(6,src); \ + ia64_stop(); \ + ia64_stf_spill(dst, 6); \ +} while(0) + +#define ia64f2ia32f(dst,src) \ +do { \ + 
ia64_ldf_fill(6, src); \ + ia64_stop(); \ + ia64_stfe(dst, 6); \ +} while(0) struct user_regs_struct32 { __u32 ebx, ecx, edx, esi, edi, ebp, eax; diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 5cd378191619..d21c4b6e3d16 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -54,6 +54,7 @@ #include #include #include +#include #include "ia32priv.h" @@ -2192,7 +2193,7 @@ sys32_iopl (int level) if (level != 3) return(-EINVAL); /* Trying to gain more privileges? */ - asm volatile ("mov %0=ar.eflag ;;" : "=r"(old)); + old = ia64_getreg(_IA64_REG_AR_EFLAG); if ((unsigned int) level > ((old >> 12) & 3)) { if (!capable(CAP_SYS_RAWIO)) return -EPERM; @@ -2216,7 +2217,7 @@ sys32_iopl (int level) if (addr >= 0) { old = (old & ~0x3000) | (level << 12); - asm volatile ("mov ar.eflag=%0;;" :: "r"(old)); + ia64_setreg(_IA64_REG_AR_EFLAG, old); } fput(file); diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 44461263aadd..fe9556272d98 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -471,6 +471,18 @@ GLOBAL_ENTRY(__ia64_syscall) br.ret.sptk.many rp END(__ia64_syscall) +GLOBAL_ENTRY(execve) + mov r15=__NR_execve // put syscall number in place + break __BREAK_SYSCALL + br.ret.sptk.many rp +END(execve) + +GLOBAL_ENTRY(clone) + mov r15=__NR_clone // put syscall number in place + break __BREAK_SYSCALL + br.ret.sptk.many rp +END(clone) + /* * We invoke syscall_trace through this intermediate function to * ensure that the syscall input arguments are not clobbered. We diff --git a/arch/ia64/kernel/fw-emu.c b/arch/ia64/kernel/fw-emu.c index 138203f0c511..cf5702071f28 100644 --- a/arch/ia64/kernel/fw-emu.c +++ b/arch/ia64/kernel/fw-emu.c @@ -46,17 +46,7 @@ static char fw_mem[( sizeof(struct ia64_boot_param) /* * Simulator system call. 
*/ -static long -ssc (long arg0, long arg1, long arg2, long arg3, int nr) -{ - register long r8 asm ("r8"); - - asm volatile ("mov r15=%1\n\t" - "break 0x80001" - : "=r"(r8) - : "r"(nr), "r"(arg0), "r"(arg1), "r"(arg2), "r"(arg3)); - return r8; -} +extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr); #define SECS_PER_HOUR (60 * 60) #define SECS_PER_DAY (SECS_PER_HOUR * 24) @@ -127,101 +117,6 @@ offtime (unsigned long t, efi_time_t *tp) */ extern void pal_emulator_static (void); -asm ( -" .proc pal_emulator_static\n" -"pal_emulator_static:" -" mov r8=-1\n" -" mov r9=256\n" -" ;;\n" -" cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */\n" -"(p6) br.cond.sptk.few static\n" -" ;;\n" -" mov r9=512\n" -" ;;\n" -" cmp.gtu p6,p7=r9,r28\n" -"(p6) br.cond.sptk.few stacked\n" -" ;;\n" -"static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */\n" -"(p7) br.cond.sptk.few 1f\n" -" ;;\n" -" mov r8=0 /* status = 0 */\n" -" movl r9=0x100000000 /* tc.base */\n" -" movl r10=0x0000000200000003 /* count[0], count[1] */\n" -" movl r11=0x1000000000002000 /* stride[0], stride[1] */\n" -" br.cond.sptk.few rp\n" -"1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */\n" -"(p7) br.cond.sptk.few 1f\n" -" mov r8=0 /* status = 0 */\n" -" movl r9 =0x100000064 /* proc_ratio (1/100) */\n" -" movl r10=0x100000100 /* bus_ratio<<32 (1/256) */\n" -" movl r11=0x100000064 /* itc_ratio<<32 (1/100) */\n" -" ;;\n" -"1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */\n" -"(p7) br.cond.sptk.few 1f\n" -" mov r8=0 /* status = 0 */\n" -" mov r9=96 /* num phys stacked */\n" -" mov r10=0 /* hints */\n" -" mov r11=0\n" -" br.cond.sptk.few rp\n" -"1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */\n" -"(p7) br.cond.sptk.few 1f\n" -" mov r9=ar.lc\n" -" movl r8=524288 /* flush 512k million cache lines (16MB) */\n" -" ;;\n" -" mov ar.lc=r8\n" -" movl r8=0xe000000000000000\n" -" ;;\n" -".loop: fc r8\n" -" add r8=32,r8\n" -" br.cloop.sptk.few .loop\n" -" sync.i\n" -" ;;\n" -" srlz.i\n" -" ;;\n" -" mov ar.lc=r9\n" -" mov r8=r0\n" -" ;;\n" -"1: cmp.eq 
p6,p7=15,r28 /* PAL_PERF_MON_INFO */\n" -"(p7) br.cond.sptk.few 1f\n" -" mov r8=0 /* status = 0 */\n" -" movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */\n" -" mov r10=0 /* reserved */\n" -" mov r11=0 /* reserved */\n" -" mov r16=0xffff /* implemented PMC */\n" -" mov r17=0xffff /* implemented PMD */\n" -" add r18=8,r29 /* second index */\n" -" ;;\n" -" st8 [r29]=r16,16 /* store implemented PMC */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" ;;\n" -" st8 [r29]=r0,16 /* store implemented PMC */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" ;;\n" -" st8 [r29]=r17,16 /* store implemented PMD */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" mov r16=0xf0 /* cycles count capable PMC */\n" -" ;;\n" -" st8 [r29]=r0,16 /* store implemented PMC */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" mov r17=0x10 /* retired bundles capable PMC */\n" -" ;;\n" -" st8 [r29]=r16,16 /* store cycles capable */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" ;;\n" -" st8 [r29]=r0,16 /* store implemented PMC */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" ;;\n" -" st8 [r29]=r17,16 /* store retired bundle capable */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" ;;\n" -" st8 [r29]=r0,16 /* store implemented PMC */\n" -" st8 [r18]=r0,16 /* clear remaining bits */\n" -" ;;\n" -"1: br.cond.sptk.few rp\n" -"stacked:\n" -" br.ret.sptk.few rp\n" -" .endp pal_emulator_static\n"); - /* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. 
*/ #define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3) @@ -268,14 +163,14 @@ efi_unimplemented (void) return EFI_UNSUPPORTED; } -static long +static struct sal_ret_values sal_emulator (long index, unsigned long in1, unsigned long in2, unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in6, unsigned long in7) { - register long r9 asm ("r9") = 0; - register long r10 asm ("r10") = 0; - register long r11 asm ("r11") = 0; + long r9 = 0; + long r10 = 0; + long r11 = 0; long status; /* @@ -357,8 +252,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2, } else { status = -1; } - asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11)); - return status; + return ((struct sal_ret_values) {status, r9, r10, r11}); } diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index 05b2c6b580e2..ab79b199aadf 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -39,4 +39,4 @@ static union { .thread_info = INIT_THREAD_INFO(init_task_mem.s.task) }}; -asm (".global init_task; init_task = init_task_mem"); +extern struct task_struct init_task __attribute__ ((alias("init_task_mem"))); diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 0da6f1683ccb..5f2fdf7369c9 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -497,7 +497,7 @@ iosapic_register_intr (unsigned int gsi, unsigned long polarity, unsigned long trigger) { int vector; - unsigned int dest = (ia64_get_lid() >> 16) & 0xffff; + unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff; vector = gsi_to_vector(gsi); if (vector < 0) @@ -574,7 +574,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, unsigned long trigger) { int vector; - unsigned int dest = (ia64_get_lid() >> 16) & 0xffff; + unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff; vector = isa_irq_to_vector(isa_irq); @@ -668,11 +668,11 @@ iosapic_enable_intr (unsigned int vector) * Direct the interrupt vector to 
the current cpu, platform redirection * will distribute them. */ - dest = (ia64_get_lid() >> 16) & 0xffff; + dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff; } #else /* direct the interrupt vector to the running cpu id */ - dest = (ia64_get_lid() >> 16) & 0xffff; + dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff; #endif set_rte(vector, dest); diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index fd56c30ed308..fbf529b2147f 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -35,6 +35,7 @@ #include #include #include +#include #ifdef CONFIG_PERFMON # include @@ -93,8 +94,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) * because the register and the memory stack are not * switched atomically. */ - asm ("mov %0=ar.bsp" : "=r"(bsp)); - asm ("mov %0=sp" : "=r"(sp)); + bsp = ia64_getreg(_IA64_REG_AR_BSP); + sp = ia64_getreg(_IA64_REG_SP); if ((sp - bsp) < 1024) { static unsigned char count; @@ -117,11 +118,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) * 16 (without this, it would be ~240, which could easily lead * to kernel stack overflows).
 */ - saved_tpr = ia64_get_tpr(); + saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); while (vector != IA64_SPURIOUS_INT_VECTOR) { if (!IS_RESCHEDULE(vector)) { - ia64_set_tpr(vector); + ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); do_IRQ(local_vector_to_irq(vector), regs); @@ -130,7 +131,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) * Disable interrupts and send EOI: */ local_irq_disable(); - ia64_set_tpr(saved_tpr); + ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); } ia64_eoi(); vector = ia64_get_ivr(); @@ -193,7 +194,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) #ifdef CONFIG_SMP phys_cpu_id = cpu_physical_id(cpu); #else - phys_cpu_id = (ia64_get_lid() >> 16) & 0xffff; + phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff; #endif /* diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 49bf7bfb8a85..1ad4d10ffe89 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -505,14 +505,14 @@ ia64_mca_cmc_vector_setup (void) cmcv.cmcv_regval = 0; cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ cmcv.cmcv_vector = IA64_CMC_VECTOR; - ia64_set_cmcv(cmcv.cmcv_regval); + ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected " "machine check vector %#x setup and enabled.\n", smp_processor_id(), IA64_CMC_VECTOR); IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n", - smp_processor_id(), ia64_get_cmcv()); + smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); } /* @@ -532,10 +532,10 @@ ia64_mca_cmc_vector_disable (void *dummy) { cmcv_reg_t cmcv; - cmcv = (cmcv_reg_t)ia64_get_cmcv(); + cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV); cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ - ia64_set_cmcv(cmcv.cmcv_regval); + ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected " "machine check vector %#x disabled.\n", @@ -559,10 +559,10 @@ ia64_mca_cmc_vector_enable (void 
 *dummy) { cmcv_reg_t cmcv; - cmcv = (cmcv_reg_t)ia64_get_cmcv(); + cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV); cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ - ia64_set_cmcv(cmcv.cmcv_regval); + ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected " "machine check vector %#x enabled.\n", @@ -727,10 +727,10 @@ ia64_mca_init(void) /* Register the os init handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, ia64_mc_info.imi_monarch_init_handler, - ia64_tpa(ia64_get_gp()), + ia64_tpa(ia64_getreg(_IA64_REG_GP)), ia64_mc_info.imi_monarch_init_handler_size, ia64_mc_info.imi_slave_init_handler, - ia64_tpa(ia64_get_gp()), + ia64_tpa(ia64_getreg(_IA64_REG_GP)), ia64_mc_info.imi_slave_init_handler_size))) { printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. " @@ -816,16 +816,16 @@ ia64_mca_wakeup_ipi_wait(void) do { switch(irr_num) { case 0: - irr = ia64_get_irr0(); + irr = ia64_getreg(_IA64_REG_CR_IRR0); break; case 1: - irr = ia64_get_irr1(); + irr = ia64_getreg(_IA64_REG_CR_IRR1); break; case 2: - irr = ia64_get_irr2(); + irr = ia64_getreg(_IA64_REG_CR_IRR2); break; case 3: - irr = ia64_get_irr3(); + irr = ia64_getreg(_IA64_REG_CR_IRR3); break; } } while (!(irr & (1 << irr_bit))) ; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 88b591be8faa..a3a2a50916c3 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -46,6 +46,7 @@ #include #include #include +#include #ifdef CONFIG_PERFMON /* @@ -679,39 +680,45 @@ static int pfm_end_notify_user(pfm_context_t *ctx); static inline void pfm_clear_psr_pp(void) { - __asm__ __volatile__ ("rsm psr.pp;; srlz.i;;"::: "memory"); + ia64_rsm(IA64_PSR_PP); + ia64_srlz_i(); } static inline void pfm_set_psr_pp(void) { - __asm__ __volatile__ ("ssm psr.pp;; srlz.i;;"::: "memory"); + ia64_ssm(IA64_PSR_PP); + ia64_srlz_i(); } static inline void pfm_clear_psr_up(void) { - __asm__ __volatile__ 
("rsm psr.up;; srlz.i;;"::: "memory"); + ia64_rsm(IA64_PSR_UP); + ia64_srlz_i(); } static inline void pfm_set_psr_up(void) { - __asm__ __volatile__ ("ssm psr.up;; srlz.i;;"::: "memory"); + ia64_ssm(IA64_PSR_UP); + ia64_srlz_i(); } static inline unsigned long pfm_get_psr(void) { unsigned long tmp; - __asm__ __volatile__ ("mov %0=psr;;": "=r"(tmp) :: "memory"); + tmp = ia64_getreg(_IA64_REG_PSR); + ia64_srlz_i(); return tmp; } static inline void pfm_set_psr_l(unsigned long val) { - __asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;"::"r"(val): "memory"); + ia64_setreg(_IA64_REG_PSR_L, val); + ia64_srlz_i(); } static inline void @@ -978,7 +985,8 @@ pfm_restore_monitoring(struct task_struct *task) */ if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { /* disable dcr pp */ - ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, + ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); pfm_clear_psr_pp(); } else { pfm_clear_psr_up(); @@ -1025,7 +1033,8 @@ pfm_restore_monitoring(struct task_struct *task) */ if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { /* enable dcr pp */ - ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, + ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); ia64_srlz_i(); } pfm_set_psr_l(psr); @@ -1781,7 +1790,8 @@ pfm_syswide_force_stop(void *info) /* * Update local PMU */ - ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, + ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); ia64_srlz_i(); /* * update local cpuinfo @@ -3952,7 +3962,8 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * * disable dcr pp */ - ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, + ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); ia64_srlz_i(); /* @@ -4042,7 +4053,8 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_set_psr_pp(); /* enable dcr pp */ - ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP); + 
ia64_setreg(_IA64_REG_CR_DCR, + ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); ia64_srlz_i(); return 0; @@ -4207,7 +4219,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) current->pid, thread->pfm_context, ctx)); - old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); + old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); if (old != NULL) { DPRINT(("load_pid [%d] already has a context\n", req->load_pid)); goto error_unres; @@ -5467,13 +5479,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i * if monitoring has started */ if (dcr_pp) { - dcr = ia64_get_dcr(); + dcr = ia64_getreg(_IA64_REG_CR_DCR); /* * context switching in? */ if (is_ctxswin) { /* mask monitoring for the idle task */ - ia64_set_dcr(dcr & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP); pfm_clear_psr_pp(); ia64_srlz_i(); return; @@ -5485,7 +5497,7 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i * Due to inlining this odd if-then-else construction generates * better code. */ - ia64_set_dcr(dcr |IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP); pfm_set_psr_pp(); ia64_srlz_i(); } @@ -6265,7 +6277,7 @@ pfm_init_percpu (void) if (smp_processor_id() == 0) register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); - ia64_set_pmv(IA64_PERFMON_VECTOR); + ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); ia64_srlz_d(); /* diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 86b2ce3594ab..8edc299421d5 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -741,8 +741,8 @@ cpu_init (void) * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). 
*/ - ia64_set_dcr( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR - | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC); + ia64_setreg(_IA64_REG_CR_DCR, IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX + | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; if (current->mm) @@ -758,11 +758,11 @@ cpu_init (void) ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); - ia64_set_pmv(1 << 16); - ia64_set_cmcv(1 << 16); + ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); + ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* clear TPR & XTP to enable all interrupt classes: */ - ia64_set_tpr(0); + ia64_setreg(_IA64_REG_CR_TPR, 0); #ifdef CONFIG_SMP normal_xtp(); #endif diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 21f97a864a39..3ecebadd468d 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -41,6 +41,8 @@ # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif +#include +#ifdef ASM_SUPPORTED register double f16 asm ("f16"); register double f17 asm ("f17"); register double f18 asm ("f18"); register double f19 asm ("f19"); register double f20 asm ("f20"); register double f21 asm ("f21"); @@ -50,6 +52,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25"); register double f26 asm ("f26"); register double f27 asm ("f27"); register double f28 asm ("f28"); register double f29 asm ("f29"); register double f30 asm ("f30"); register double f31 asm ("f31"); +#endif long ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr) @@ -192,7 +195,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user(from->si_value, &to->si_value); + err |= __put_user(from->si_value.sival_ptr, &to->si_value.sival_ptr); break; case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); diff --git 
a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 70df2828e837..fdb43f8186a8 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -7,6 +7,19 @@ * 05/12/00 grao : added isr in siginfo for SIGFPE */ +#include +#include +#include +#include +#include +#include /* For unblank_screen() */ + +#include +#include +#include +#include + +#include /* * fp_emulate() needs to be able to access and update all floating point registers. Those * saved in pt_regs can be accessed through that structure, but those not saved, will be @@ -15,6 +28,8 @@ * by declaring preserved registers that are not marked as "fixed" as global register * variables. */ +#include +#ifdef ASM_SUPPORTED register double f2 asm ("f2"); register double f3 asm ("f3"); register double f4 asm ("f4"); register double f5 asm ("f5"); @@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25"); register double f26 asm ("f26"); register double f27 asm ("f27"); register double f28 asm ("f28"); register double f29 asm ("f29"); register double f30 asm ("f30"); register double f31 asm ("f31"); - -#include -#include -#include -#include -#include -#include /* For unblank_screen() */ - -#include -#include -#include -#include - -#include +#endif extern spinlock_t timerlist_lock; diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index c7f8012eb757..06595d8d37db 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -22,6 +22,7 @@ #include #include #include +#include extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); @@ -231,7 +232,7 @@ static u16 fr_info[32]={ static void invala_gr (int regno) { -# define F(reg) case reg: __asm__ __volatile__ ("invala.e r%0" :: "i"(reg)); break +# define F(reg) case reg: ia64_invala_gr(reg); break switch (regno) { F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7); @@ -258,7 +259,7 @@ invala_gr (int regno) static void invala_fr (int regno) { -# 
define F(reg) case reg: __asm__ __volatile__ ("invala.e f%0" :: "i"(reg)); break +# define F(reg) case reg: ia64_invala_fr(reg); break switch (regno) { F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7); @@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs) static inline void float_spill_f0 (struct ia64_fpreg *final) { - __asm__ __volatile__ ("stf.spill [%0]=f0" :: "r"(final) : "memory"); + ia64_stf_spill(final, 0); } static inline void float_spill_f1 (struct ia64_fpreg *final) { - __asm__ __volatile__ ("stf.spill [%0]=f1" :: "r"(final) : "memory"); + ia64_stf_spill(final, 1); } static void @@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={ static inline void mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldfe f6=[%0];; stf.spill [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldfe(6, init); + ia64_stop(); + ia64_stf_spill(final, 6); } static inline void mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldf8 f6=[%0];; stf.spill [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldf8(6, init); + ia64_stop(); + ia64_stf_spill(final, 6); } static inline void mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldfs f6=[%0];; stf.spill [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldfs(6, init); + ia64_stop(); + ia64_stf_spill(final, 6); } static inline void mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldfd f6=[%0];; stf.spill [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldfd(6, init); + ia64_stop(); + ia64_stf_spill(final, 6); } static inline void float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldf.fill f6=[%0];; stfe [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldf_fill(6, init); + ia64_stop(); + 
ia64_stfe(final, 6); } static inline void float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldf.fill f6=[%0];; stf8 [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldf_fill(6, init); + ia64_stop(); + ia64_stf8(final, 6); } static inline void float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldf.fill f6=[%0];; stfs [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldf_fill(6, init); + ia64_stop(); + ia64_stfs(final, 6); } static inline void float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final) { - __asm__ __volatile__ ("ldf.fill f6=[%0];; stfd [%1]=f6" - :: "r"(init), "r"(final) : "f6","memory"); + ia64_ldf_fill(6, init); + ia64_stop(); + ia64_stfd(final, 6); } static int diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index a9235e8567de..ec89adf23c6a 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi /* * Flush ALAT entries also. 
*/ - asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) - : "memory"); + ia64_ptcga(start, (nbits<<2)); + ia64_srlz_i(); start += (1UL << nbits); } while (start < end); } @@ -118,15 +118,13 @@ local_flush_tlb_all (void) local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { - asm volatile ("ptc.e %0" :: "r"(addr)); + ia64_ptce(addr); addr += stride1; } addr += stride0; } local_irq_restore(flags); - ia64_insn_group_barrier(); ia64_srlz_i(); /* srlz.i implies srlz.d */ - ia64_insn_group_barrier(); } void @@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long platform_global_tlb_purge(start, end, nbits); # else do { - asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory"); + ia64_ptcl(start, (nbits<<2)); start += (1UL << nbits); } while (start < end); # endif - ia64_insn_group_barrier(); ia64_srlz_i(); /* srlz.i implies srlz.d */ - ia64_insn_group_barrier(); } void __init diff --git a/arch/ia64/sn/fakeprom/fw-emu.c b/arch/ia64/sn/fakeprom/fw-emu.c index ce5c919b2f0a..c332d63d1167 100644 --- a/arch/ia64/sn/fakeprom/fw-emu.c +++ b/arch/ia64/sn/fakeprom/fw-emu.c @@ -200,7 +200,7 @@ efi_unimplemented (void) #ifdef SGI_SN2 #undef cpu_physical_id -#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff) +#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) void fprom_send_cpei(void) { @@ -224,14 +224,14 @@ fprom_send_cpei(void) { #endif -static long +static struct sal_ret_values sal_emulator (long index, unsigned long in1, unsigned long in2, unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in6, unsigned long in7) { - register long r9 asm ("r9") = 0; - register long r10 asm ("r10") = 0; - register long r11 asm ("r11") = 0; + long r9 = 0; + long r10 = 0; + long r11 = 0; long status; /* @@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2, } asm volatile ("" :: "r"(r9), "r"(r10), 
"r"(r11)); - return status; + return ((struct sal_ret_values) {status, r9, r10, r11}); } diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 09cbbb65ab13..3f5553ba36f8 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) { irr_bit = irq_to_vector(irq) % 64; switch (irr_reg_num) { case 0: - irr_reg = ia64_get_irr0(); + irr_reg = ia64_getreg(_IA64_REG_CR_IRR0); break; case 1: - irr_reg = ia64_get_irr1(); + irr_reg = ia64_getreg(_IA64_REG_CR_IRR1); break; case 2: - irr_reg = ia64_get_irr2(); + irr_reg = ia64_getreg(_IA64_REG_CR_IRR2); break; case 3: - irr_reg = ia64_get_irr3(); + irr_reg = ia64_getreg(_IA64_REG_CR_IRR3); break; } if (!test_bit(irr_bit, &irr_reg) ) { @@ -354,9 +354,9 @@ sn_get_next_bit(void) { void sn_set_tpr(int vector) { if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) { - ia64_set_tpr(vector); + ia64_setreg(_IA64_REG_CR_TPR, vector); } else { - ia64_set_tpr(IA64_LAST_DEVICE_VECTOR); + ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR); } } diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index db8925491663..f687b6d14c0f 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -395,7 +395,7 @@ sn_cpu_init(void) return; cpuid = smp_processor_id(); - cpuphyid = ((ia64_get_lid() >> 16) & 0xffff); + cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff); nasid = cpu_physical_id_to_nasid(cpuphyid); cnode = nasid_to_cnodeid(nasid); slice = cpu_physical_id_to_slice(cpuphyid); diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c index 59423708d30c..92764186fd06 100644 --- a/arch/ia64/sn/kernel/sn2/io.c +++ b/arch/ia64/sn/kernel/sn2/io.c @@ -11,81 +11,73 @@ #include +#undef __sn_inb +#undef __sn_inw +#undef __sn_inl +#undef __sn_outb +#undef __sn_outw +#undef __sn_outl +#undef __sn_readb +#undef __sn_readw +#undef __sn_readl +#undef __sn_readq + unsigned int 
-sn_inb (unsigned long port) +__sn_inb (unsigned long port) { - return __sn_inb(port); + return ___sn_inb(port); } unsigned int -sn_inw (unsigned long port) +__sn_inw (unsigned long port) { - return __sn_inw(port); + return ___sn_inw(port); } unsigned int -sn_inl (unsigned long port) +__sn_inl (unsigned long port) { - return __sn_inl(port); + return ___sn_inl(port); } void -sn_outb (unsigned char val, unsigned long port) +__sn_outb (unsigned char val, unsigned long port) { - __sn_outb(val, port); + ___sn_outb(val, port); } void -sn_outw (unsigned short val, unsigned long port) +__sn_outw (unsigned short val, unsigned long port) { - __sn_outw(val, port); + ___sn_outw(val, port); } void -sn_outl (unsigned int val, unsigned long port) +__sn_outl (unsigned int val, unsigned long port) { - __sn_outl(val, port); + ___sn_outl(val, port); } unsigned char -sn_readb (void *addr) +__sn_readb (void *addr) { - return __sn_readb (addr); + return ___sn_readb (addr); } unsigned short -sn_readw (void *addr) +__sn_readw (void *addr) { - return __sn_readw (addr); + return ___sn_readw (addr); } unsigned int -sn_readl (void *addr) +__sn_readl (void *addr) { - return __sn_readl (addr); + return ___sn_readl (addr); } unsigned long -sn_readq (void *addr) +__sn_readq (void *addr) { - return __sn_readq (addr); + return ___sn_readq (addr); } - - -/* define aliases: */ - -asm (".global __sn_inb, __sn_inw, __sn_inl"); -asm ("__sn_inb = sn_inb"); -asm ("__sn_inw = sn_inw"); -asm ("__sn_inl = sn_inl"); - -asm (".global __sn_outb, __sn_outw, __sn_outl"); -asm ("__sn_outb = sn_outb"); -asm ("__sn_outw = sn_outw"); -asm ("__sn_outl = sn_outl"); - -asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq"); -asm ("__sn_readb = sn_readb"); -asm ("__sn_readw = sn_readw"); -asm ("__sn_readl = sn_readl"); -asm ("__sn_readq = sn_readq"); diff --git a/arch/ia64/vmlinux.lds.S b/arch/ia64/vmlinux.lds.S index a2f212131083..0ac35a6b78e4 100644 --- a/arch/ia64/vmlinux.lds.S +++ b/arch/ia64/vmlinux.lds.S @@ 
-35,6 +35,7 @@ SECTIONS { *(.text.ivt) *(.text) + *(.gnu.linkonce.t*) } .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } @@ -183,7 +184,7 @@ SECTIONS . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */ .data : AT(ADDR(.data) - LOAD_OFFSET) - { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } + { *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS } . = ALIGN(16); __gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */ @@ -194,7 +195,7 @@ SECTIONS can access them all, and initialized data all before uninitialized, so we can shorten the on-disk segment size. */ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) - { *(.sdata) } + { *(.sdata) *(.sdata1) *(.srdata) } _edata = .; _bss = .; .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 5b88749e54b2..f2e179d4bb76 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old + i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } @@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old + i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } @@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old - i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); return new; } @@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) CMPXCHG_BUGCHECK(v); old = atomic_read(v); new = old - i; - } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old); + } while (ia64_cmpxchg(acq, v, old, new, 
sizeof(atomic_t)) != old); return new; } diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index b133b67609eb..2a4b667058d9 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -292,7 +292,7 @@ ffz (unsigned long x) { unsigned long result; - __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1))); + result = ia64_popcnt((x & (~x - 1))); return result; } @@ -307,7 +307,7 @@ __ffs (unsigned long x) { unsigned long result; - __asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x)); + result = ia64_popcnt((x-1) & ~x); return result; } @@ -323,7 +323,7 @@ ia64_fls (unsigned long x) long double d = x; long exp; - __asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d)); + exp = ia64_getf_exp(d); return exp - 0xffff; } @@ -349,7 +349,7 @@ static __inline__ unsigned long hweight64 (unsigned long x) { unsigned long result; - __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x)); + result = ia64_popcnt(x); return result; } diff --git a/include/asm-ia64/byteorder.h b/include/asm-ia64/byteorder.h index a4e3abfc3477..434686fccb95 100644 --- a/include/asm-ia64/byteorder.h +++ b/include/asm-ia64/byteorder.h @@ -7,13 +7,14 @@ */ #include +#include static __inline__ __const__ __u64 __ia64_swab64 (__u64 x) { __u64 result; - __asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x)); + result = ia64_mux1(x, ia64_mux1_rev); return result; } diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h index 73a5edf825b8..f8b27b5b8fa8 100644 --- a/include/asm-ia64/current.h +++ b/include/asm-ia64/current.h @@ -6,8 +6,9 @@ * David Mosberger-Tang */ +#include /* In kernel mode, thread pointer (r13) is used to point to the current task structure. 
*/ -register struct task_struct *current asm ("r13"); +#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP)) #endif /* _ASM_IA64_CURRENT_H */ diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h index da812415f634..1725d43e74ba 100644 --- a/include/asm-ia64/delay.h +++ b/include/asm-ia64/delay.h @@ -18,11 +18,13 @@ #include #include +#include static __inline__ void ia64_set_itm (unsigned long val) { - __asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_CR_ITM, val); + ia64_srlz_d(); } static __inline__ unsigned long @@ -30,20 +32,23 @@ ia64_get_itm (void) { unsigned long result; - __asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory"); + result = ia64_getreg(_IA64_REG_CR_ITM); + ia64_srlz_d(); return result; } static __inline__ void ia64_set_itv (unsigned long val) { - __asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_CR_ITV, val); + ia64_srlz_d(); } static __inline__ void ia64_set_itc (unsigned long val) { - __asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_AR_ITC, val); + ia64_srlz_d(); } static __inline__ unsigned long @@ -51,10 +56,13 @@ ia64_get_itc (void) { unsigned long result; - __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); + result = ia64_getreg(_IA64_REG_AR_ITC); + ia64_barrier(); #ifdef CONFIG_ITANIUM - while (unlikely((__s32) result == -1)) - __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory"); + while (unlikely((__s32) result == -1)) { + result = ia64_getreg(_IA64_REG_AR_ITC); + ia64_barrier(); + } #endif return result; } @@ -62,15 +70,11 @@ ia64_get_itc (void) static __inline__ void __delay (unsigned long loops) { - unsigned long saved_ar_lc; - if (loops < 1) return; - __asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc)); - __asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1)); - __asm__ __volatile__("1:\tbr.cloop.sptk.few 
1b;;"); - __asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc)); + for (;loops--;) + ia64_nop(0); } static __inline__ void diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h new file mode 100644 index 000000000000..41a24523d953 --- /dev/null +++ b/include/asm-ia64/gcc_intrin.h @@ -0,0 +1,633 @@ +#ifndef _ASM_IA64_GCC_INTRIN_H +#define _ASM_IA64_GCC_INTRIN_H +/* + * + * Copyright (C) 2002,2003 Jun Nakajima + * Copyright (C) 2002,2003 Suresh Siddha + * + */ + +/* define this macro to get some asm stmts included in 'c' files */ +#define ASM_SUPPORTED + +/* Optimization barrier */ +/* The "volatile" is due to gcc bugs */ +#define ia64_barrier() __asm__ __volatile__ ("":::"memory") + + +#define ia64_stop() __asm__ __volatile__ (";;"::) + + +#define ia64_invala_gr(regnum) \ + __asm__ __volatile__ ("invala.e r%0" :: "i"(regnum)) + +#define ia64_invala_fr(regnum) \ + __asm__ __volatile__ ("invala.e f%0" :: "i"(regnum)) + +extern void ia64_bad_param_for_setreg(void); +extern void ia64_bad_param_for_getreg(void); + +#define ia64_setreg(regnum, val) \ +({ \ + switch (regnum) { \ + case _IA64_REG_PSR_L: \ + __asm__ __volatile__ ("mov psr.l=%0" :: "r"(val) : "memory"); \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + __asm__ __volatile__ ("mov ar%0=%1" :: \ + "i" (regnum - _IA64_REG_AR_KR0), \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + __asm__ __volatile__ ("mov cr%0=%1" :: \ + "i" (regnum - _IA64_REG_CR_DCR), \ + "r"(val): "memory" ); \ + break; \ + case _IA64_REG_SP: \ + __asm__ __volatile__ ("mov r12=%0" :: \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_GP: \ + __asm__ __volatile__ ("mov gp=%0" :: "r"(val) : "memory"); \ + break; \ + default: \ + ia64_bad_param_for_setreg(); \ + break; \ + } \ +}) + +#define ia64_getreg(regnum) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (regnum) { \ + case _IA64_REG_GP: \ + __asm__ __volatile__ ("mov %0=gp" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_IP: \ + __asm__ __volatile__ ("mov %0=ip" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_PSR: \ + __asm__ __volatile__ ("mov %0=psr" : "=r"(ia64_intri_res));\ + break; \ + case _IA64_REG_TP: /* for current() */ \ + { \ + register __u64 ia64_r13 asm ("r13"); \ + ia64_intri_res = ia64_r13; \ + } \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + __asm__ __volatile__ ("mov %0=ar%1" : "=r" (ia64_intri_res) \ + : "i"(regnum - _IA64_REG_AR_KR0)); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + __asm__ __volatile__ ("mov %0=cr%1" : "=r" (ia64_intri_res) \ + : "i" (regnum - _IA64_REG_CR_DCR)); \ + break; \ + case _IA64_REG_SP: \ + __asm__ __volatile__ ("mov %0=sp" : "=r" (ia64_intri_res)); \ + break; \ + default: \ + ia64_bad_param_for_getreg(); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#define ia64_hint_pause 0 +#define ia64_hint(mode) \ +({ \ + switch (mode) { \ + case ia64_hint_pause: \ + asm volatile ("hint @pause" ::: "memory"); \ + break; \ + } \ +}) + + +/* Integer values for mux1 instruction */ +#define ia64_mux1_brcst 0 +#define ia64_mux1_mix 8 +#define ia64_mux1_shuf 9 +#define ia64_mux1_alt 10 +#define ia64_mux1_rev 11 + +#define ia64_mux1(x, mode) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (mode) { \ + case ia64_mux1_brcst: \ + __asm__ ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_mix: \ + __asm__ ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_shuf: \ + __asm__ ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_alt: \ + __asm__ ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_rev: \ + __asm__ ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + } \ + ia64_intri_res; \ +}) + + +#define ia64_popcnt(x) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \ + \ + ia64_intri_res; \ +}) + + +#define ia64_getf_exp(x) \ +({ \ + long ia64_intri_res; \ + \ + __asm__ ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_shrp(a, b, count) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \ + ia64_intri_res; \ +}) + + +#define ia64_ldfs(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldfd(regnum, x) \ +({ \ + 
register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldfe(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldf8(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldf_fill(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_stfs(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + + +#define ia64_stfd(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stfe(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + + +#define ia64_stf8(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stf_spill(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + __asm__ __volatile__ ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_fetchadd4_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("fetchadd4.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd4_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + + +#define ia64_fetchadd8_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("fetchadd8.acq %0=[%1],%2" \ + : 
"=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + + +#define ia64_xchg1(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg2(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg4(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg8(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + __asm__ __volatile__ ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + __asm__ __volatile__ ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + __asm__ __volatile__ ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + 
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + __asm__ __volatile__ ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + __asm__ __volatile__ ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + __asm__ __volatile__ ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + __asm__ __volatile__ ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_mf() __asm__ __volatile__ ("mf" ::: "memory") +#define ia64_mfa() __asm__ __volatile__ ("mf.a" ::: "memory") + + +#define ia64_invala() __asm__ __volatile__ ("invala" ::: "memory") + +#define ia64_thash(addr) \ +({ \ + __u64 ia64_intri_res; \ + __asm__ __volatile__ ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ + ia64_intri_res; \ +}) + + +#define ia64_srlz_i() __asm__ __volatile__ (";; srlz.i ;;" ::: "memory") + + +#define ia64_srlz_d() __asm__ __volatile__ (";; srlz.d" ::: "memory"); + + +#define ia64_nop(x) __asm__ __volatile__ ("nop %0"::"i"(x)); + + +#define ia64_itci(addr) __asm__ __volatile__ ("itc.i %0;;" :: "r"(addr) : "memory") + 
+#define ia64_itcd(addr) __asm__ __volatile__ ("itc.d %0;;" :: "r"(addr) : "memory") + + +#define ia64_itri(trnum, addr) __asm__ __volatile__ ("itr.i itr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") + + +#define ia64_itrd(trnum, addr) __asm__ __volatile__ ("itr.d dtr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") + + +#define ia64_tpa(addr) \ +({ \ + __u64 ia64_pa; \ + __asm__ __volatile__ ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : \ + "memory"); \ + ia64_pa; \ +}) + +#define __ia64_set_dbr(index, val) \ + __asm__ __volatile__ ("mov dbr[%0]=%1" :: "r"(index), "r"(val) \ + : "memory") + +#define ia64_set_ibr(index, val) \ + __asm__ __volatile__ ("mov ibr[%0]=%1" :: "r"(index), "r"(val) \ + : "memory") + +#define ia64_set_pkr(index, val) \ + __asm__ __volatile__ ("mov pkr[%0]=%1" :: "r"(index), "r"(val) \ + : "memory") + +#define ia64_set_pmc(index, val) \ + __asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(index), "r"(val) \ + : "memory"); + +#define ia64_set_pmd(index, val) \ + __asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(index), "r"(val) \ + : "memory"); + +#define ia64_set_rr(index, val) \ + __asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(index), "r"(val) \ + : "memory"); + + +#define ia64_get_cpuid(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ __volatile__ ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ + \ + ia64_intri_res; \ +}) + +#define __ia64_get_dbr(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ __volatile__ ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_get_ibr(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ __volatile__ ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_get_pkr(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ __volatile__ ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_get_pmc(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ 
__volatile__ ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + \ + ia64_intri_res; \ +}) + + +#define ia64_get_pmd(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_get_rr(index) \ +({ \ + __u64 ia64_intri_res; \ + \ + __asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" \ + (index)); \ + \ + ia64_intri_res; \ +}) + + +#define ia64_fc(addr) \ + __asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory"); + + +#define ia64_sync_i() \ + __asm__ __volatile__ (";; sync.i" ::: "memory") + +#define ia64_ssm(mask) __asm__ __volatile__ ("ssm %0":: "i"((mask)) : "memory"); +#define ia64_rsm(mask) __asm__ __volatile__ ("rsm %0":: "i"((mask)) : "memory"); +#define ia64_sum(mask) __asm__ __volatile__ ("sum %0":: "i"((mask)) : "memory"); +#define ia64_rum(mask) __asm__ __volatile__ ("rum %0":: "i"((mask)) : "memory"); + +#define ia64_ptce(addr) \ + __asm__ __volatile__ ("ptc.e %0" :: "r"(addr)) + + +#define ia64_ptcga(addr, size) \ + __asm__ __volatile__ ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory") + + +#define ia64_ptcl(addr, size) \ + __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory") + + +#define ia64_ptri(addr, size) \ + __asm__ __volatile__ ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory") + + +#define ia64_ptrd(addr, size) \ + __asm__ __volatile__ ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") + +/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */ + +#define ia64_lfhint_none 0 +#define ia64_lfhint_nt1 1 +#define ia64_lfhint_nt2 2 +#define ia64_lfhint_nta 3 + +#define ia64_lfetch(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + __asm__ __volatile__ ("lfetch [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + __asm__ __volatile__ ("lfetch.nt1 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + __asm__ __volatile__ ("lfetch.nt2 [%0]" : : "r"(y)); \ + break; \ + 
case ia64_lfhint_nta: \ + __asm__ __volatile__ ("lfetch.nta [%0]" : : "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_excl(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + __asm__ __volatile__ ("lfetch.excl [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + __asm__ __volatile__ ("lfetch.excl.nt1 [%0]" :: "r"(y));\ + break; \ + case ia64_lfhint_nt2: \ + __asm__ __volatile__ ("lfetch.excl.nt2 [%0]" :: "r"(y));\ + break; \ + case ia64_lfhint_nta: \ + __asm__ __volatile__ ("lfetch.excl.nta [%0]" :: "r"(y));\ + break; \ + } \ +}) + +#define ia64_lfetch_fault(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + __asm__ __volatile__ ("lfetch.fault [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + __asm__ __volatile__ ("lfetch.fault.nt1 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + __asm__ __volatile__ ("lfetch.fault.nt2 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + __asm__ __volatile__ ("lfetch.fault.nta [%0]" : : "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_fault_excl(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + __asm__ __volatile__ ("lfetch.fault.excl [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + __asm__ __volatile__ ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + __asm__ __volatile__ ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + __asm__ __volatile__ ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \ + break; \ + } \ +}) + +#define ia64_intrin_local_irq_restore(x) \ +do { \ + __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \ + "(p6) ssm psr.i;" \ + "(p7) rsm psr.i;;" \ + "(p6) srlz.d" \ + : : "r"((x)) \ + : "p6", "p7", "memory"); \ +} while (0) + +#endif /* _ASM_IA64_GCC_INTRIN_H */ diff --git a/include/asm-ia64/ia64regs.h b/include/asm-ia64/ia64regs.h new file mode 100644 index 000000000000..3ddfa3128667 --- /dev/null +++ b/include/asm-ia64/ia64regs.h @@ -0,0 +1,98 @@ +/* 
+ * Copyright (C) 2002,2003 Intel Corp. + * Jun Nakajima + * Suresh Siddha + */ + +#ifndef _ASM_IA64_IA64REGS_H +#define _ASM_IA64_IA64REGS_H + +/* +** Register Names for getreg() and setreg() +*/ + +/* Special Registers */ + +#define _IA64_REG_IP 1016 /* getreg only */ +#define _IA64_REG_PSR 1019 +#define _IA64_REG_PSR_L 1019 + + // General Integer Registers + +#define _IA64_REG_GP 1025 /* R1 */ +#define _IA64_REG_R8 1032 /* R8 */ +#define _IA64_REG_R9 1033 /* R9 */ +#define _IA64_REG_SP 1036 /* R12 */ +#define _IA64_REG_TP 1037 /* R13 */ + + /* Application Registers */ + +#define _IA64_REG_AR_KR0 3072 +#define _IA64_REG_AR_KR1 3073 +#define _IA64_REG_AR_KR2 3074 +#define _IA64_REG_AR_KR3 3075 +#define _IA64_REG_AR_KR4 3076 +#define _IA64_REG_AR_KR5 3077 +#define _IA64_REG_AR_KR6 3078 +#define _IA64_REG_AR_KR7 3079 +#define _IA64_REG_AR_RSC 3088 +#define _IA64_REG_AR_BSP 3089 +#define _IA64_REG_AR_BSPSTORE 3090 +#define _IA64_REG_AR_RNAT 3091 +#define _IA64_REG_AR_FCR 3093 +#define _IA64_REG_AR_EFLAG 3096 +#define _IA64_REG_AR_CSD 3097 +#define _IA64_REG_AR_SSD 3098 +#define _IA64_REG_AR_CFLAG 3099 +#define _IA64_REG_AR_FSR 3100 +#define _IA64_REG_AR_FIR 3101 +#define _IA64_REG_AR_FDR 3102 +#define _IA64_REG_AR_CCV 3104 +#define _IA64_REG_AR_UNAT 3108 +#define _IA64_REG_AR_FPSR 3112 +#define _IA64_REG_AR_ITC 3116 +#define _IA64_REG_AR_PFS 3136 +#define _IA64_REG_AR_LC 3137 +#define _IA64_REG_AR_EC 3138 + + /* Control Registers */ + +#define _IA64_REG_CR_DCR 4096 +#define _IA64_REG_CR_ITM 4097 +#define _IA64_REG_CR_IVA 4098 +#define _IA64_REG_CR_PTA 4104 +#define _IA64_REG_CR_IPSR 4112 +#define _IA64_REG_CR_ISR 4113 +#define _IA64_REG_CR_IIP 4115 +#define _IA64_REG_CR_IFA 4116 +#define _IA64_REG_CR_ITIR 4117 +#define _IA64_REG_CR_IIPA 4118 +#define _IA64_REG_CR_IFS 4119 +#define _IA64_REG_CR_IIM 4120 +#define _IA64_REG_CR_IHA 4121 +#define _IA64_REG_CR_LID 4160 +#define _IA64_REG_CR_IVR 4161 /* getreg only */ +#define _IA64_REG_CR_TPR 4162 +#define _IA64_REG_CR_EOI 
4163 +#define _IA64_REG_CR_IRR0 4164 /* getreg only */ +#define _IA64_REG_CR_IRR1 4165 /* getreg only */ +#define _IA64_REG_CR_IRR2 4166 /* getreg only */ +#define _IA64_REG_CR_IRR3 4167 /* getreg only */ +#define _IA64_REG_CR_ITV 4168 +#define _IA64_REG_CR_PMV 4169 +#define _IA64_REG_CR_CMCV 4170 +#define _IA64_REG_CR_LRR0 4176 +#define _IA64_REG_CR_LRR1 4177 + + /* Indirect Registers for getindreg() and setindreg() */ + +#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */ +#define _IA64_REG_INDR_DBR 9001 +#define _IA64_REG_INDR_IBR 9002 +#define _IA64_REG_INDR_PKR 9003 +#define _IA64_REG_INDR_PMC 9004 +#define _IA64_REG_INDR_PMD 9005 +#define _IA64_REG_INDR_RR 9006 + + +#endif /* _ASM_IA64_IA64REGS_H */ diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index 19408747bd17..16d21d49a1bc 100644 --- a/include/asm-ia64/intrinsics.h +++ b/include/asm-ia64/intrinsics.h @@ -8,8 +8,17 @@ * David Mosberger-Tang */ +#ifndef __ASSEMBLY__ #include +/* include compiler specific intrinsics */ +#include +#ifdef __INTEL_COMPILER +#include +#else +#include +#endif + /* * Force an unresolved reference if someone tries to use * ia64_fetch_and_add() with a bad value. 
@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ({ \ switch (sz) { \ case 4: \ - __asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \ - : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \ + tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \ break; \ \ case 8: \ - __asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \ - : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \ + tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \ break; \ \ default: \ @@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); (__typeof__(*(v))) (_tmp); /* return old value */ \ }) -#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */ +#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */ /* * This function doesn't exist, so you'll get a linker error if * something tries to do an invalid xchg(). */ -extern void __xchg_called_with_bad_pointer (void); - -static __inline__ unsigned long -__xchg (unsigned long x, volatile void *ptr, int size) -{ - unsigned long result; - - switch (size) { - case 1: - __asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - - case 2: - __asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - - case 4: - __asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - - case 8: - __asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result) - : "r" (ptr), "r" (x) : "memory"); - return result; - } - __xchg_called_with_bad_pointer(); - return x; -} +extern void ia64_xchg_called_with_bad_pointer (void); + +#define __xchg(x,ptr,size) \ +({ \ + unsigned long __xchg_result; \ + \ + switch (size) { \ + case 1: \ + __xchg_result = ia64_xchg1((__u8 *)ptr, x); \ + break; \ + \ + case 2: \ + __xchg_result = ia64_xchg2((__u16 *)ptr, x); \ + break; \ + \ + case 4: \ + __xchg_result = ia64_xchg4((__u32 *)ptr, 
x); \ + break; \ + \ + case 8: \ + __xchg_result = ia64_xchg8((__u64 *)ptr, x); \ + break; \ + default: \ + ia64_xchg_called_with_bad_pointer(); \ + } \ + __xchg_result; \ +}) #define xchg(ptr,x) \ ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr)))) @@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size) * This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). */ -extern long __cmpxchg_called_with_bad_pointer(void); +extern long ia64_cmpxchg_called_with_bad_pointer(void); #define ia64_cmpxchg(sem,ptr,old,new,size) \ ({ \ - __typeof__(ptr) _p_ = (ptr); \ - __typeof__(new) _n_ = (new); \ __u64 _o_, _r_; \ \ switch (size) { \ @@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void); case 8: _o_ = (__u64) (long) (old); break; \ default: break; \ } \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \ switch (size) { \ case 1: \ - __asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \ break; \ \ case 2: \ - __asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \ break; \ \ case 4: \ - __asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \ break; \ \ case 8: \ - __asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \ - : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \ + _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \ break; \ \ default: \ - _r_ = __cmpxchg_called_with_bad_pointer(); \ + _r_ = ia64_cmpxchg_called_with_bad_pointer(); \ break; \ } \ (__typeof__(old)) _r_; \ }) -#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr))) -#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg("rel", (ptr), (o), (n), 
sizeof(*(ptr))) +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr))) +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) /* for compatibility with other platforms: */ #define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n) @@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void); if (_cmpxchg_bugcheck_count-- <= 0) { \ void *ip; \ extern int printk(const char *fmt, ...); \ - asm ("mov %0=ip" : "=r"(ip)); \ + ip = ia64_getreg(_IA64_REG_IP); \ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \ break; \ } \ @@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void); # define CMPXCHG_BUGCHECK(v) #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ +#endif #endif /* _ASM_IA64_INTRINSICS_H */ diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index 1297c6bba42b..e02f403a2339 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h @@ -55,6 +55,7 @@ extern unsigned int num_io_spaces; #include #include #include +#include /* * Change virtual addresses to physical addresses and vv. @@ -85,7 +86,7 @@ phys_to_virt (unsigned long address) * Memory fence w/accept. This should never be used in code that is * not IA-64 specific. */ -#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") +#define __ia64_mf_a() ia64_mfa() static inline const unsigned long __ia64_get_io_port_base (void) diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index a277c8ff9595..471a2c91cd29 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -155,7 +155,7 @@ struct ia64_machine_vector { ia64_mv_readw_t *readw; ia64_mv_readl_t *readl; ia64_mv_readq_t *readq; -}; +} __attribute__((__aligned__(16))); /* align attrib? 
see above comment */ #define MACHVEC_INIT(name) \ { \ diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h index 95e786212982..0255260f61bc 100644 --- a/include/asm-ia64/mmu_context.h +++ b/include/asm-ia64/mmu_context.h @@ -158,9 +158,7 @@ reload_context (mm_context_t context) ia64_set_rr(0x4000000000000000, rr2); ia64_set_rr(0x6000000000000000, rr3); ia64_set_rr(0x8000000000000000, rr4); - ia64_insn_group_barrier(); ia64_srlz_i(); /* srlz.i implies srlz.d */ - ia64_insn_group_barrier(); } static inline void diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 44b3f419c854..8372da8cad5d 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -10,6 +10,7 @@ #include #include +#include /* * PAGE_SHIFT determines the actual kernel page size. @@ -143,7 +144,7 @@ get_order (unsigned long size) double d = size - 1; long order; - __asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d)); + order = ia64_getf_exp(d); order = order - PAGE_SHIFT - 0xffff + 1; if (order < 0) order = 0; diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 5640226e8a15..e3152bc4fb39 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector /* Initialize the processor controlled caches */ static inline s64 -ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict) +ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict); + PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest); return iprv.status; } diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 669e44bf8012..c225163a49b2 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -18,6 +18,7 @@ #include #include #include +#include #define IA64_NUM_DBG_REGS 8 /* @@ -356,38 +357,42 @@ extern unsigned long get_wchan (struct 
task_struct *p); /* Return stack pointer of blocked task TSK. */ #define KSTK_ESP(tsk) ((tsk)->thread.ksp) -static inline unsigned long -ia64_get_kr (unsigned long regnum) -{ - unsigned long r = 0; - - switch (regnum) { - case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break; - case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break; - case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break; - case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break; - case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break; - case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break; - case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break; - case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break; - } - return r; -} +extern void ia64_getreg_unknown_kr(void); +extern void ia64_setreg_unknown_kr(void); + + +#define ia64_get_kr(regnum) \ +({ \ + unsigned long r=0; \ + \ + switch (regnum) { \ + case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \ + case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \ + case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \ + case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \ + case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \ + case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \ + case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \ + case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \ + default: ia64_getreg_unknown_kr(); break; \ + } \ + r; \ +}) -static inline void -ia64_set_kr (unsigned long regnum, unsigned long r) -{ - switch (regnum) { - case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break; - case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break; - case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break; - case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break; - case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break; - case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break; - case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break; - case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break; - } -} +#define ia64_set_kr(regnum, r) \ +({ \ + switch (regnum) 
{ \ + case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \ + case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \ + case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \ + case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \ + case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \ + case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \ + case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \ + case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \ + default: ia64_setreg_unknown_kr(); break; \ + } \ +}) /* * The following three macros can't be inline functions because we don't have struct @@ -423,8 +428,8 @@ extern void ia32_save_state (struct task_struct *task); extern void ia32_load_state (struct task_struct *task); #endif -#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory"); -#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory"); +#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) +#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) /* load fp 0.0 into fph */ static inline void @@ -450,78 +455,14 @@ ia64_load_fpu (struct ia64_fpreg *fph) { ia64_fph_disable(); } -static inline void -ia64_fc (void *addr) -{ - asm volatile ("fc %0" :: "r"(addr) : "memory"); -} - -static inline void -ia64_sync_i (void) -{ - asm volatile (";; sync.i" ::: "memory"); -} - -static inline void -ia64_srlz_i (void) -{ - asm volatile (";; srlz.i ;;" ::: "memory"); -} - -static inline void -ia64_srlz_d (void) -{ - asm volatile (";; srlz.d" ::: "memory"); -} - -static inline __u64 -ia64_get_rr (__u64 reg_bits) -{ - __u64 r; - asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory"); - return r; -} - -static inline void -ia64_set_rr (__u64 reg_bits, __u64 rr_val) -{ - asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory"); -} - -static inline __u64 -ia64_get_dcr (void) -{ - __u64 r; - asm volatile ("mov %0=cr.dcr" : "=r"(r)); - return r; -} - -static inline void 
-ia64_set_dcr (__u64 val) -{ - asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory"); - ia64_srlz_d(); -} - -static inline __u64 -ia64_get_lid (void) -{ - __u64 r; - asm volatile ("mov %0=cr.lid" : "=r"(r)); - return r; -} - -static inline void -ia64_invala (void) -{ - asm volatile ("invala" ::: "memory"); -} - static inline __u64 ia64_clear_ic (void) { __u64 psr; - asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory"); + psr = ia64_getreg(_IA64_REG_PSR); + ia64_stop(); + ia64_rsm(IA64_PSR_I | IA64_PSR_IC); + ia64_srlz_i(); return psr; } @@ -531,7 +472,9 @@ ia64_clear_ic (void) static inline void ia64_set_psr (__u64 psr) { - asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory"); + ia64_stop(); + ia64_setreg(_IA64_REG_PSR_L, psr); + ia64_srlz_d(); } /* @@ -543,14 +486,13 @@ ia64_itr (__u64 target_mask, __u64 tr_num, __u64 vmaddr, __u64 pte, __u64 log_page_size) { - asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory"); - asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory"); + ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); + ia64_setreg(_IA64_REG_CR_IFA, vmaddr); + ia64_stop(); if (target_mask & 0x1) - asm volatile ("itr.i itr[%0]=%1" - :: "r"(tr_num), "r"(pte) : "memory"); + ia64_itri(tr_num, pte); if (target_mask & 0x2) - asm volatile (";;itr.d dtr[%0]=%1" - :: "r"(tr_num), "r"(pte) : "memory"); + ia64_itrd(tr_num, pte); } /* @@ -561,13 +503,14 @@ static inline void ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 log_page_size) { - asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory"); - asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory"); + ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); + ia64_setreg(_IA64_REG_CR_IFA, vmaddr); + ia64_stop(); /* as per EAS2.6, itc must be the last instruction in an instruction group */ if (target_mask & 0x1) - asm volatile ("itc.i %0;;" :: "r"(pte) : "memory"); + ia64_itci(pte); if (target_mask & 0x2) - asm volatile 
(";;itc.d %0;;" :: "r"(pte) : "memory"); + ia64_itcd(pte); } /* @@ -578,16 +521,17 @@ static inline void ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size) { if (target_mask & 0x1) - asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2)); + ia64_ptri(vmaddr, (log_size << 2)); if (target_mask & 0x2) - asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2)); + ia64_ptrd(vmaddr, (log_size << 2)); } /* Set the interrupt vector address. The address must be suitably aligned (32KB). */ static inline void ia64_set_iva (void *ivt_addr) { - asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory"); + ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr); + ia64_srlz_i(); } /* Set the page table address and control bits. */ @@ -595,79 +539,33 @@ static inline void ia64_set_pta (__u64 pta) { /* Note: srlz.i implies srlz.d */ - asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory"); -} - -static inline __u64 -ia64_get_cpuid (__u64 regnum) -{ - __u64 r; - - asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum)); - return r; + ia64_setreg(_IA64_REG_CR_PTA, pta); + ia64_srlz_i(); } static inline void ia64_eoi (void) { - asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory"); + ia64_setreg(_IA64_REG_CR_EOI, 0); + ia64_srlz_d(); } +#define cpu_relax() ia64_hint(ia64_hint_pause) + static inline void ia64_set_lrr0 (unsigned long val) { - asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory"); + ia64_setreg(_IA64_REG_CR_LRR0, val); + ia64_srlz_d(); } -static inline void -ia64_hint_pause (void) -{ - asm volatile ("hint @pause" ::: "memory"); -} - -#define cpu_relax() ia64_hint_pause() - static inline void ia64_set_lrr1 (unsigned long val) { - asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory"); -} - -static inline void -ia64_set_pmv (__u64 val) -{ - asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory"); -} - -static inline __u64 -ia64_get_pmc (__u64 regnum) -{ - __u64 retval; - - asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : 
"r"(regnum)); - return retval; -} - -static inline void -ia64_set_pmc (__u64 regnum, __u64 value) -{ - asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value)); -} - -static inline __u64 -ia64_get_pmd (__u64 regnum) -{ - __u64 retval; - - asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum)); - return retval; + ia64_setreg(_IA64_REG_CR_LRR1, val); + ia64_srlz_d(); } -static inline void -ia64_set_pmd (__u64 regnum, __u64 value) -{ - asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value)); -} /* * Given the address to which a spill occurred, return the unat bit @@ -713,160 +611,35 @@ thread_saved_pc (struct task_struct *t) * Get the current instruction/program counter value. */ #define current_text_addr() \ - ({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; }) - -/* - * Set the correctable machine check vector register - */ -static inline void -ia64_set_cmcv (__u64 val) -{ - asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory"); -} - -/* - * Read the correctable machine check vector register - */ -static inline __u64 -ia64_get_cmcv (void) -{ - __u64 val; - - asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory"); - return val; -} + ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; }) static inline __u64 ia64_get_ivr (void) { __u64 r; - asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r)); - return r; -} - -static inline void -ia64_set_tpr (__u64 val) -{ - asm volatile ("mov cr.tpr=%0" :: "r"(val)); -} - -static inline __u64 -ia64_get_tpr (void) -{ - __u64 r; - asm volatile ("mov %0=cr.tpr" : "=r"(r)); - return r; -} - -static inline void -ia64_set_irr0 (__u64 val) -{ - asm volatile("mov cr.irr0=%0;;" :: "r"(val) : "memory"); ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr0 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... 
*/ - asm volatile("mov %0=cr.irr0" : "=r"(val)); - return val; -} - -static inline void -ia64_set_irr1 (__u64 val) -{ - asm volatile("mov cr.irr1=%0;;" :: "r"(val) : "memory"); + r = ia64_getreg(_IA64_REG_CR_IVR); ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr1 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... */ - asm volatile("mov %0=cr.irr1" : "=r"(val)); - return val; -} - -static inline void -ia64_set_irr2 (__u64 val) -{ - asm volatile("mov cr.irr2=%0;;" :: "r"(val) : "memory"); - ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr2 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... */ - asm volatile("mov %0=cr.irr2" : "=r"(val)); - return val; -} - -static inline void -ia64_set_irr3 (__u64 val) -{ - asm volatile("mov cr.irr3=%0;;" :: "r"(val) : "memory"); - ia64_srlz_d(); -} - -static inline __u64 -ia64_get_irr3 (void) -{ - __u64 val; - - /* this is volatile because irr may change unbeknownst to gcc... 
*/ - asm volatile ("mov %0=cr.irr3" : "=r"(val)); - return val; -} - -static inline __u64 -ia64_get_gp(void) -{ - __u64 val; - - asm ("mov %0=gp" : "=r"(val)); - return val; -} - -static inline void -ia64_set_ibr (__u64 regnum, __u64 value) -{ - asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value)); + return r; } static inline void ia64_set_dbr (__u64 regnum, __u64 value) { - asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value)); + __ia64_set_dbr(regnum, value); #ifdef CONFIG_ITANIUM - asm volatile (";; srlz.d"); + ia64_srlz_d(); #endif } -static inline __u64 -ia64_get_ibr (__u64 regnum) -{ - __u64 retval; - - asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum)); - return retval; -} - static inline __u64 ia64_get_dbr (__u64 regnum) { __u64 retval; - asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum)); + retval = __ia64_get_dbr(regnum); #ifdef CONFIG_ITANIUM - asm volatile (";; srlz.d"); + ia64_srlz_d(); #endif return retval; } @@ -883,29 +656,13 @@ ia64_get_dbr (__u64 regnum) # define ia64_rotr(w,n) \ ({ \ __u64 result; \ - asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n)); \ + result = ia64_shrp((w), (w), (n)); \ result; \ }) #endif #define ia64_rotl(w,n) ia64_rotr((w),(64)-(n)) -static inline __u64 -ia64_thash (__u64 addr) -{ - __u64 result; - asm ("thash %0=%1" : "=r"(result) : "r" (addr)); - return result; -} - -static inline __u64 -ia64_tpa (__u64 addr) -{ - __u64 result; - asm ("tpa %0=%1" : "=r"(result) : "r"(addr)); - return result; -} - /* * Take a mapped kernel address and return the equivalent address * in the region 7 identity mapped virtual area. 
@@ -914,7 +671,7 @@ static inline void * ia64_imva (void *addr) { void *result; - asm ("tpa %0=%1" : "=r"(result) : "r"(addr)); + result = (void *) ia64_tpa(addr); return __va(result); } @@ -926,13 +683,13 @@ ia64_imva (void *addr) static inline void prefetch (const void *x) { - __asm__ __volatile__ ("lfetch [%0]" : : "r"(x)); + ia64_lfetch(ia64_lfhint_none, x); } static inline void prefetchw (const void *x) { - __asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x)); + ia64_lfetch_excl(ia64_lfhint_none, x); } #define spin_lock_prefetch(x) prefetchw(x) diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h index b0427fa5bccf..4921b76fdfb9 100644 --- a/include/asm-ia64/rwsem.h +++ b/include/asm-ia64/rwsem.h @@ -22,6 +22,7 @@ #include #include +#include /* * the semaphore definition @@ -82,8 +83,9 @@ static inline void __down_read (struct rw_semaphore *sem) { int result; - __asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" : - "=r"(result) : "r"(&sem->count) : "memory"); + + result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1); + if (result < 0) rwsem_down_read_failed(sem); } @@ -112,8 +114,9 @@ static inline void __up_read (struct rw_semaphore *sem) { int result; - __asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" : - "=r"(result) : "r"(&sem->count) : "memory"); + + result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1); + if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0) rwsem_wake(sem); } diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index a2c7f1c09050..855c24712736 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h @@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size, extern unsigned long sal_platform_features; +struct sal_ret_values { + long r8; long r9; long r10; long r11; +}; + #endif /* __ASSEMBLY__ */ #endif /* _ASM_IA64_PAL_H */ diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 0b9a1253845c..5da816099349 100644 --- a/include/asm-ia64/smp.h +++ 
b/include/asm-ia64/smp.h @@ -120,7 +120,7 @@ hard_smp_processor_id (void) unsigned long bits; } lid; - lid.bits = ia64_get_lid(); + lid.bits = ia64_getreg(_IA64_REG_CR_LID); return lid.f.id << 8 | lid.f.eid; } diff --git a/include/asm-ia64/sn/sn2/io.h b/include/asm-ia64/sn/sn2/io.h index fc30f1f4c5c8..3a3b1e214164 100644 --- a/include/asm-ia64/sn/sn2/io.h +++ b/include/asm-ia64/sn/sn2/io.h @@ -11,11 +11,23 @@ extern void * sn_io_addr(unsigned long port); /* Forward definition */ extern void sn_mmiob(void); /* Forward definition */ +#include -#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") +#define __sn_mf_a() ia64_mfa() extern void sn_dma_flush(unsigned long); +#define __sn_inb ___sn_inb +#define __sn_inw ___sn_inw +#define __sn_inl ___sn_inl +#define __sn_outb ___sn_outb +#define __sn_outw ___sn_outw +#define __sn_outl ___sn_outl +#define __sn_readb ___sn_readb +#define __sn_readw ___sn_readw +#define __sn_readl ___sn_readl +#define __sn_readq ___sn_readq + /* * The following routines are SN Platform specific, called when * a reference is made to inX/outX set macros. 
SN Platform @@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long); */ static inline unsigned int -__sn_inb (unsigned long port) +___sn_inb (unsigned long port) { volatile unsigned char *addr; unsigned char ret = -1; @@ -40,7 +52,7 @@ __sn_inb (unsigned long port) } static inline unsigned int -__sn_inw (unsigned long port) +___sn_inw (unsigned long port) { volatile unsigned short *addr; unsigned short ret = -1; @@ -54,7 +66,7 @@ __sn_inw (unsigned long port) } static inline unsigned int -__sn_inl (unsigned long port) +___sn_inl (unsigned long port) { volatile unsigned int *addr; unsigned int ret = -1; @@ -68,7 +80,7 @@ __sn_inl (unsigned long port) } static inline void -__sn_outb (unsigned char val, unsigned long port) +___sn_outb (unsigned char val, unsigned long port) { volatile unsigned char *addr; @@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port) } static inline void -__sn_outw (unsigned short val, unsigned long port) +___sn_outw (unsigned short val, unsigned long port) { volatile unsigned short *addr; @@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port) } static inline void -__sn_outl (unsigned int val, unsigned long port) +___sn_outl (unsigned int val, unsigned long port) { volatile unsigned int *addr; @@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port) */ static inline unsigned char -__sn_readb (void *addr) +___sn_readb (void *addr) { unsigned char val; @@ -121,7 +133,7 @@ __sn_readb (void *addr) } static inline unsigned short -__sn_readw (void *addr) +___sn_readw (void *addr) { unsigned short val; @@ -132,7 +144,7 @@ __sn_readw (void *addr) } static inline unsigned int -__sn_readl (void *addr) +___sn_readl (void *addr) { unsigned int val; @@ -143,7 +155,7 @@ __sn_readl (void *addr) } static inline unsigned long -__sn_readq (void *addr) +___sn_readq (void *addr) { unsigned long val; diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h index 74dd5a6d2460..a2831ceb16a8 100644 --- 
a/include/asm-ia64/sn/sn_cpuid.h +++ b/include/asm-ia64/sn/sn_cpuid.h @@ -89,7 +89,7 @@ #ifndef CONFIG_SMP #define cpu_logical_id(cpu) 0 -#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff) +#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) #endif /* @@ -98,8 +98,8 @@ */ #define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff) #define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3) -#define get_nasid() ((ia64_get_lid() >> 16) & 0xfff) -#define get_slice() ((ia64_get_lid() >> 28) & 0xf) +#define get_nasid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff) +#define get_slice() ((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf) #define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff) /* diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index 3c0d89837b02..ca3a25477949 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h @@ -10,10 +10,12 @@ */ #include +#include #include #include #include +#include typedef struct { volatile unsigned int lock; @@ -102,8 +104,8 @@ typedef struct { do { \ rwlock_t *__read_lock_ptr = (rw); \ \ - while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \ - ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ + while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ + ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ while (*(volatile int *)__read_lock_ptr < 0) \ cpu_relax(); \ } \ @@ -112,7 +114,7 @@ do { \ #define _raw_read_unlock(rw) \ do { \ rwlock_t *__read_lock_ptr = (rw); \ - ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ + ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #define _raw_write_lock(rw) \ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index f4951838e69d..5c59cb6b8d19 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -55,12 +55,6 @@ extern struct ia64_boot_param { __u64 initrd_size; } *ia64_boot_param; -static inline void 
-ia64_insn_group_barrier (void) -{ - __asm__ __volatile__ (";;" ::: "memory"); -} - /* * Macros to force memory ordering. In these descriptions, "previous" * and "subsequent" refer to program order; "visible" means that all @@ -83,7 +77,7 @@ ia64_insn_group_barrier (void) * it's (presumably) much slower than mf and (b) mf.a is supported for * sequential memory pages only. */ -#define mb() __asm__ __volatile__ ("mf" ::: "memory") +#define mb() ia64_mf() #define rmb() mb() #define wmb() mb() #define read_barrier_depends() do { } while(0) @@ -119,22 +113,28 @@ ia64_insn_group_barrier (void) /* clearing psr.i is implicitly serialized (visible by next insn) */ /* setting psr.i requires data serialization */ -#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \ - "rsm psr.i;;" \ - : "=r" (x) :: "memory") -#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory") -#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \ - "(p6) ssm psr.i;" \ - "(p7) rsm psr.i;;" \ - "(p6) srlz.d" \ - :: "r" ((x) & IA64_PSR_I) \ - : "p6", "p7", "memory") +#define __local_irq_save(x) \ +do { \ + unsigned long psr; \ + psr = ia64_getreg(_IA64_REG_PSR); \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ + (x) = psr; \ +} while (0) + +#define __local_irq_disable() \ +do { \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) #ifdef CONFIG_IA64_DEBUG_IRQ extern unsigned long last_cli_ip; -# define __save_ip() __asm__ ("mov %0=ip" : "=r" (last_cli_ip)) +# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) # define local_irq_save(x) \ do { \ @@ -164,8 +164,8 @@ do { \ # define local_irq_restore(x) __local_irq_restore(x) #endif /* !CONFIG_IA64_DEBUG_IRQ */ -#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory") -#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory") +#define 
local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) +#define local_save_flags(flags) ({ (flags) = ia64_getreg(_IA64_REG_PSR); }) #define irqs_disabled() \ ({ \ diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h index 5bf5bd8f148e..df9085722e6b 100644 --- a/include/asm-ia64/timex.h +++ b/include/asm-ia64/timex.h @@ -11,6 +11,7 @@ */ #include +#include typedef unsigned long cycles_t; @@ -32,7 +33,7 @@ get_cycles (void) { cycles_t ret; - __asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret)); + ret = ia64_getreg(_IA64_REG_AR_ITC); return ret; } diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index dd49222e8f08..62d0d466f547 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h @@ -12,6 +12,7 @@ #include #include +#include /* * Now for some TLB flushing routines. This is the kind of stuff that @@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); #else if (vma->vm_mm == current->active_mm) - asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory"); + ia64_ptcl(addr, (PAGE_SHIFT << 2)); else vma->vm_mm->context = 0; #endif diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 09325eb6503d..f28ad25550a4 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -334,73 +334,18 @@ waitpid (int pid, int * wait_stat, int flags) } -static inline int -execve (const char *filename, char *const av[], char *const ep[]) -{ - register long r8 asm("r8"); - register long r10 asm("r10"); - register long r15 asm("r15") = __NR_execve; - register long out0 asm("out0") = (long)filename; - register long out1 asm("out1") = (long)av; - register long out2 asm("out2") = (long)ep; - - asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t" - : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1), "=r" (out2) - : "2" (r15), "3" (out0), "4" (out1), "5" (out2) - : 
"memory", "out3", "out4", "out5", "out6", "out7", - /* Non-stacked integer registers, minus r8, r10, r15, r13 */ - "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18", - "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", - "r28", "r29", "r30", "r31", - /* Predicate registers. */ - "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", - /* Non-rotating fp registers. */ - "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - /* Branch registers. */ - "b6", "b7" ); - return r8; -} - -static inline pid_t -clone (unsigned long flags, void *sp) -{ - register long r8 asm("r8"); - register long r10 asm("r10"); - register long r15 asm("r15") = __NR_clone; - register long out0 asm("out0") = (long)flags; - register long out1 asm("out1") = (long)sp; - long retval; - - /* clone clobbers current, hence the "r13" in the clobbers list */ - asm volatile ( "break " __stringify(__BREAK_SYSCALL) ";;\n\t" - : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1) - : "2" (r15), "3" (out0), "4" (out1) - : "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13", - /* Non-stacked integer registers, minus r8, r10, r15, r13 */ - "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18", - "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", - "r28", "r29", "r30", "r31", - /* Predicate registers. */ - "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", - /* Non-rotating fp registers. */ - "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - /* Branch registers. */ - "b6", "b7" ); - retval = r8; - return retval;; - -} +extern int execve (const char *filename, char *const av[], char *const ep[]); +extern pid_t clone (unsigned long flags, void *sp); #endif /* __KERNEL_SYSCALLS__ */ /* * "Conditional" syscalls * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on - * all toolchains, so we just do it by hand. 
Note, this macro can only be used in the + * Note, this macro can only be used in the * file which defines sys_ni_syscall, i.e., in kernel/sys.c. */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); +#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall"))); #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From 98256b561371e19539b68c631ef0dcc395e77908 Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Tue, 19 Aug 2003 07:21:38 -0700 Subject: [acpi] Fix compilation when CONFIG_SMP=n A recent slew of ACPI "fixes" completely broke the build when one built without SMP, IO APICs, or Local APICs. Bad Intel, no cookie. --- arch/i386/kernel/acpi/boot.c | 22 +++++----------------- arch/i386/kernel/mpparse.c | 3 +-- arch/i386/kernel/setup.c | 10 ++++------ include/asm-i386/acpi.h | 38 +++++++++++++++++++++++++++----------- include/linux/acpi.h | 12 ++++++++++++ 5 files changed, 49 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 336b2088b808..3bf5e36282f4 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,9 @@ extern int acpi_disabled; extern int acpi_ht; +int acpi_lapic = 0; +int acpi_ioapic = 0; + /* -------------------------------------------------------------------------- Boot-time Configuration -------------------------------------------------------------------------- */ @@ -90,8 +94,6 @@ char *__acpi_map_table(unsigned long phys, unsigned long size) #ifdef CONFIG_X86_LOCAL_APIC -int acpi_lapic; - static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; @@ -158,8 +160,6 @@ acpi_parse_lapic_addr_ovr ( return 0; } -#ifdef CONFIG_ACPI - static int __init acpi_parse_lapic_nmi ( acpi_table_entry_header *header) @@ -178,15 +178,11 @@ acpi_parse_lapic_nmi ( return 0; } -#endif /*CONFIG_ACPI*/ 
#endif /*CONFIG_X86_LOCAL_APIC*/ #ifdef CONFIG_X86_IO_APIC -int acpi_ioapic; - -#ifdef CONFIG_ACPI static int __init acpi_parse_ioapic ( @@ -248,7 +244,6 @@ acpi_parse_nmi_src ( return 0; } -#endif /*CONFIG_ACPI*/ #endif /*CONFIG_X86_IO_APIC*/ @@ -331,14 +326,12 @@ acpi_boot_init (void) if (result) return result; -#ifdef CONFIG_ACPI result = acpi_blacklisted(); if (result) { printk(KERN_WARNING PREFIX "BIOS listed in blacklist, disabling ACPI support\n"); acpi_disabled = 1; return result; } -#endif #ifdef CONFIG_X86_LOCAL_APIC @@ -389,21 +382,18 @@ acpi_boot_init (void) return result; } -#ifdef CONFIG_ACPI result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi); if (result < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return result; } -#endif /*CONFIG_ACPI*/ acpi_lapic = 1; #endif /*CONFIG_X86_LOCAL_APIC*/ #ifdef CONFIG_X86_IO_APIC -#ifdef CONFIG_ACPI /* * I/O APIC @@ -423,7 +413,7 @@ acpi_boot_init (void) /* * if "noapic" boot option, don't look for IO-APICs */ - if (skip_ioapic_setup) { + if (ioapic_setup_disabled()) { printk(KERN_INFO PREFIX "Skipping IOAPIC probe " "due to 'noapic' option.\n"); return 1; @@ -459,8 +449,6 @@ acpi_boot_init (void) acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; acpi_ioapic = 1; - -#endif /*CONFIG_ACPI*/ #endif /*CONFIG_X86_IO_APIC*/ #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index b57846ce7dbe..bd105ba58219 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c @@ -1013,7 +1013,6 @@ void __init mp_config_acpi_legacy_irqs (void) panic("Max # of irq sources exceeded!\n"); } } -#endif /* CONFIG_X86_IO_APIC */ #ifdef CONFIG_ACPI @@ -1150,5 +1149,5 @@ void __init mp_parse_prt (void) } #endif /*CONFIG_ACPI_PCI*/ - +#endif /* CONFIG_X86_IO_APIC */ #endif /*CONFIG_ACPI_BOOT*/ diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index ab2881128c35..f13a8ee9f67b 100644 --- 
a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -544,9 +544,8 @@ static void __init parse_cmdline_early (char ** cmdline_p) } /* disable IO-APIC */ - else if (!memcmp(from, "noapic", 6)) { - skip_ioapic_setup = 1; - } + else if (!memcmp(from, "noapic", 6)) + disable_ioapic_setup(); #endif /* @@ -1003,12 +1002,11 @@ void __init setup_arch(char **cmdline_p) generic_apic_probe(*cmdline_p); #endif -#ifdef CONFIG_ACPI_BOOT /* * Parse the ACPI tables for possible boot-time SMP configuration. */ - (void) acpi_boot_init(); -#endif + acpi_boot_init(); + #ifdef CONFIG_X86_LOCAL_APIC if (smp_found_config) get_smp_config(); diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h index 350048b1f39e..6b56aa3eaa39 100644 --- a/include/asm-i386/acpi.h +++ b/include/asm-i386/acpi.h @@ -106,21 +106,37 @@ :"0"(n_hi), "1"(n_lo)) -#if defined(CONFIG_ACPI_BOOT) && defined(CONFIG_X86_LOCAL_APIC) - extern int acpi_lapic; -#else - #define acpi_lapic 0 -#endif +#ifdef CONFIG_ACPI_BOOT +extern int acpi_lapic; +extern int acpi_ioapic; -#if defined(CONFIG_ACPI_BOOT) && defined(CONFIG_X86_IO_APIC) - extern int acpi_ioapic; -#else - #define acpi_ioapic 0 -#endif -#ifdef CONFIG_ACPI_BOOT /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ #define FIX_ACPI_PAGES 4 + +#ifdef CONFIG_X86_IO_APIC +extern int skip_ioapic_setup; + +static inline void disable_ioapic_setup(void) +{ + skip_ioapic_setup = 1; +} + +static inline int ioapic_setup_disabled(void) +{ + return skip_ioapic_setup; +} + +#else +static inline void disable_ioapic_setup(void) +{ } + +#endif + +#else /* CONFIG_ACPI_BOOT */ +# define acpi_lapic 0 +# define acpi_ioapic 0 + #endif #ifdef CONFIG_ACPI_SLEEP diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3fd526160f1a..94a0f27e331c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -373,6 +373,11 @@ extern int acpi_mp_config; #define acpi_mp_config 0 +static inline int acpi_boot_init(void) +{ + return 0; +} + #endif 
/*!CONFIG_ACPI_BOOT*/ @@ -423,6 +428,13 @@ int ec_write(u8 addr, u8 val); int acpi_blacklisted(void); +#else + +static inline int acpi_blacklisted(void) +{ + return 0; +} + #endif /*CONFIG_ACPI*/ #endif /*_LINUX_ACPI_H*/ -- cgit v1.2.3 From f2a93dbd71db549d43e7bf9283918a83a5f5071d Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Tue, 19 Aug 2003 07:53:13 -0700 Subject: [power] Adapt swsusp to new PM core. Clean up heavily. - Split suspend/resume code into the four functions called from the PM core. - Remove now-duplicated code. - Make sure PM core frees memory and sync's disks before we shut down devices. - Remove software_suspend(), in favor of pm_suspend(). - Remove unused definitions from suspend.h --- include/linux/suspend.h | 24 +------ kernel/power/main.c | 28 ++++++++ kernel/power/power.h | 7 ++ kernel/power/swsusp.c | 171 ++++++------------------------------------------ kernel/sys.c | 7 +- 5 files changed, 59 insertions(+), 178 deletions(-) (limited to 'include') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 28788d8a65ff..da171766c8c8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -8,8 +8,7 @@ #include #include #include - -extern unsigned char software_suspend_enabled; +#include #ifdef CONFIG_SOFTWARE_SUSPEND /* page backup entry */ @@ -46,12 +45,6 @@ extern int shrink_mem(void); /* mm/page_alloc.c */ extern void drain_local_pages(void); -/* kernel/suspend.c */ -extern int software_suspend(void); - -extern int register_suspend_notifier(struct notifier_block *); -extern int unregister_suspend_notifier(struct notifier_block *); - extern unsigned int nr_copy_pages __nosavedata; extern suspend_pagedir_t *pagedir_nosave __nosavedata; @@ -72,31 +65,16 @@ static inline int software_suspend(void) { return -EPERM; } -#define register_suspend_notifier(a) do { } while(0) -#define unregister_suspend_notifier(a) do { } while(0) #endif /* CONFIG_SOFTWARE_SUSPEND */ #ifdef CONFIG_PM extern void refrigerator(unsigned long); 
-extern int freeze_processes(void); -extern void thaw_processes(void); - -extern int pm_prepare_console(void); -extern void pm_restore_console(void); #else static inline void refrigerator(unsigned long flag) { -} -static inline int freeze_processes(void) -{ - return 0; -} -static inline void thaw_processes(void) -{ - } #endif /* CONFIG_PM */ diff --git a/kernel/power/main.c b/kernel/power/main.c index 063e98c6bceb..42bf7233a3a7 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -15,6 +15,8 @@ #include #include #include +#include + #include "power.h" @@ -30,6 +32,8 @@ static int have_swsusp = 1; static int have_swsusp = 0; #endif +extern long sys_sync(void); + /** * pm_set_ops - Set the global power method table. @@ -128,6 +132,25 @@ static int power_down(u32 mode) static int in_suspend __nosavedata = 0; +/** + * free_some_memory - Try to free as much memory as possible + * + * ... but do not OOM-kill anyone + * + * Notice: all userland should be stopped at this point, or + * livelock is possible. + */ + +static void free_some_memory(void) +{ + printk("Freeing memory: "); + while (shrink_all_memory(10000)) + printk("."); + printk("|\n"); + blk_run_queues(); +} + + /** * pm_suspend_disk - The granpappy of power management. * @@ -197,6 +220,7 @@ static int suspend_prepare(u32 state) pm_prepare_console(); + sys_sync(); if (freeze_processes()) { error = -EAGAIN; goto Thaw; @@ -207,6 +231,10 @@ static int suspend_prepare(u32 state) goto Thaw; } + /* Free memory before shutting down devices. 
*/ + if (state == PM_SUSPEND_DISK) + free_some_memory(); + if ((error = device_pm_suspend(state))) goto Finish; diff --git a/kernel/power/power.h b/kernel/power/power.h index ae7bcbc37845..e98de640155d 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -37,3 +37,10 @@ static inline int swsusp_free(void) return 0; } #endif + + +extern int freeze_processes(void); +extern void thaw_processes(void); + +extern int pm_prepare_console(void); +extern void pm_restore_console(void); diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index d1b2a46de5fa..5c70937437d4 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -65,8 +65,6 @@ #include "power.h" -extern long sys_sync(void); - unsigned char software_suspend_enabled = 1; #define __ADDRESS(x) ((unsigned long) phys_to_virt(x)) @@ -439,29 +437,6 @@ static suspend_pagedir_t *create_suspend_pagedir(int nr_copy_pages) return pagedir; } -static int prepare_suspend_processes(void) -{ - sys_sync(); /* Syncing needs pdflushd, so do it before stopping processes */ - if (freeze_processes()) { - printk( KERN_ERR "Suspend failed: Not all processes stopped!\n" ); - thaw_processes(); - return 1; - } - return 0; -} - -/* - * Try to free as much memory as possible, but do not OOM-kill anyone - * - * Notice: all userland should be stopped at this point, or livelock is possible. 
- */ -static void free_some_memory(void) -{ - printk("Freeing memory: "); - while (shrink_all_memory(10000)) - printk("."); - printk("|\n"); -} /* Make disk drivers accept operations, again */ static void drivers_unsuspend(void) @@ -470,28 +445,6 @@ static void drivers_unsuspend(void) device_resume(RESUME_ENABLE); } -/* Called from process context */ -static int drivers_suspend(void) -{ - if (device_suspend(4, SUSPEND_NOTIFY)) - return -EIO; - if (device_suspend(4, SUSPEND_SAVE_STATE)) { - device_resume(RESUME_RESTORE_STATE); - return -EIO; - } - if (!pm_suspend_state) { - if(pm_send_all(PM_SUSPEND,(void *)3)) { - printk(KERN_WARNING "Problem while sending suspend event\n"); - return -EIO; - } - pm_suspend_state=1; - } else - printk(KERN_WARNING "PM suspend state already raised\n"); - device_suspend(4, SUSPEND_DISABLE); - - return 0; -} - #define RESUME_PHASE1 1 /* Called from interrupts disabled */ #define RESUME_PHASE2 2 /* Called with interrupts enabled */ #define RESUME_ALL_PHASES (RESUME_PHASE1 | RESUME_PHASE2) @@ -694,72 +647,6 @@ void do_magic_suspend_2(void) mark_swapfiles(((swp_entry_t) {0}), MARK_SWAP_RESUME); } -static int do_software_suspend(void) -{ - arch_prepare_suspend(); - if (pm_prepare_console()) - printk( "%sCan't allocate a console... proceeding\n", name_suspend); - if (!prepare_suspend_processes()) { - - /* At this point, all user processes and "dangerous" - kernel threads are stopped. Free some memory, as we - need half of memory free. */ - - free_some_memory(); - - /* No need to invalidate any vfsmnt list -- - * they will be valid after resume, anyway. - */ - blk_run_queues(); - - /* Save state of all device drivers, and stop them. */ - if (drivers_suspend()==0) - /* If stopping device drivers worked, we proceed basically into - * suspend_save_image. - * - * do_magic(0) returns after system is resumed. 
- * - * do_magic() copies all "used" memory to "free" memory, then - * unsuspends all device drivers, and writes memory to disk - * using normal kernel mechanism. - */ - do_magic(0); - thaw_processes(); - } - software_suspend_enabled = 1; - MDELAY(1000); - pm_restore_console(); - return 0; -} - - -/** - * software_suspend - initiate suspend-to-swap transition. - * - * This is main interface to the outside world. It needs to be - * called from process context. - */ - -int software_suspend(void) -{ - if(!software_suspend_enabled) - return -EINVAL; - - if (num_online_cpus() > 1) { - printk(KERN_WARNING "swsusp does not support SMP.\n"); - return -EPERM; - } - -#if defined (CONFIG_HIGHMEM) || defined (COFNIG_DISCONTIGMEM) - printk("swsusp is not supported with high- or discontig-mem.\n"); - return -EPERM; -#endif - - software_suspend_enabled = 0; - might_sleep(); - return do_software_suspend(); -} - /* More restore stuff */ /* FIXME: Why not memcpy(to, from, 1<version_code != LINUX_VERSION_CODE) return sanity_check_failed("Incorrect kernel version"); @@ -776,7 +701,8 @@ static int sanity_check(struct suspend_header *sh) return 0; } -static int bdev_read_page(struct block_device *bdev, long pos, void *buf) +static int __init bdev_read_page(struct block_device *bdev, + long pos, void *buf) { struct buffer_head *bh; BUG_ON (pos%PAGE_SIZE); @@ -792,7 +718,8 @@ static int bdev_read_page(struct block_device *bdev, long pos, void *buf) extern dev_t __init name_to_dev_t(const char *line); -static int __read_suspend_image(struct block_device *bdev, union diskpage *cur) +static int __init read_suspend_image(struct block_device *bdev, + union diskpage *cur) { swp_entry_t next; int i, nr_pgdir_pages; @@ -869,54 +796,6 @@ static int __read_suspend_image(struct block_device *bdev, union diskpage *cur) return 0; } -static int read_suspend_image(const char * specialfile) -{ - union diskpage *cur; - unsigned long scratch_page = 0; - int error; - char b[BDEVNAME_SIZE]; - - resume_device 
= name_to_dev_t(specialfile); - scratch_page = get_zeroed_page(GFP_ATOMIC); - cur = (void *) scratch_page; - if (cur) { - struct block_device *bdev; - printk("Resuming from device %s\n", - __bdevname(resume_device, b)); - bdev = open_by_devnum(resume_device, FMODE_READ, BDEV_RAW); - if (IS_ERR(bdev)) { - error = PTR_ERR(bdev); - } else { - set_blocksize(bdev, PAGE_SIZE); - error = __read_suspend_image(bdev, cur); - blkdev_put(bdev, BDEV_RAW); - } - } else error = -ENOMEM; - - if (scratch_page) - free_page(scratch_page); - switch (error) { - case 0: - PRINTK("Reading resume file was successful\n"); - break; - case -EINVAL: - break; - case -EIO: - printk( "%sI/O error\n", name_resume); - break; - case -ENOENT: - printk( "%s%s: No such file or directory\n", name_resume, specialfile); - break; - case -ENOMEM: - printk( "%sNot enough memory\n", name_resume); - break; - default: - printk( "%sError %d resuming\n", name_resume, error ); - } - MDELAY(1000); - return error; -} - /** * swsusp_save - Snapshot memory */ @@ -944,9 +823,7 @@ int swsusp_save(void) int swsusp_write(void) { arch_prepare_suspend(); - do_magic(0); - MDELAY(1000); - return 0; + return do_magic(0); } @@ -954,13 +831,39 @@ int swsusp_write(void) * swsusp_read - Read saved image from swap. 
*/ -int swsusp_read(void) +int __init swsusp_read(void) { + union diskpage *cur; + int error; + char b[BDEVNAME_SIZE]; + if (!strlen(resume_file)) return -ENOENT; - printk("swsusp: %s\n", name_resume ); + + resume_device = name_to_dev_t(resume_file); + printk("swsusp: Resume From Partition: %s, Device: %s\n", + resume_file, __bdevname(resume_device, b)); + + cur = (union diskpage *)get_zeroed_page(GFP_ATOMIC); + if (cur) { + struct block_device *bdev; + bdev = open_by_devnum(resume_device, FMODE_READ, BDEV_RAW); + if (!IS_ERR(bdev)) { + set_blocksize(bdev, PAGE_SIZE); + error = read_suspend_image(bdev, cur); + blkdev_put(bdev, BDEV_RAW); + } else + error = PTR_ERR(bdev); + free_page((unsigned long)cur); + } else + error = -ENOMEM; + + if (!error) + PRINTK("Reading resume file was successful\n"); + else + printk( "%sError %d resuming\n", name_resume, error ); MDELAY(1000); - return read_suspend_image(resume_file); + return error; } @@ -968,10 +871,9 @@ int swsusp_read(void) * swsusp_restore - Replace running kernel with saved image. */ -int swsusp_restore(void) +int __init swsusp_restore(void) { - do_magic(1); - return 0; + return do_magic(1); } @@ -981,6 +883,12 @@ int swsusp_restore(void) int swsusp_free(void) { + PRINTK( "Freeing prev allocated pagedir\n" ); + free_suspend_pagedir((unsigned long) pagedir_save); + + PRINTK( "Fixing swap signatures... " ); + mark_swapfiles(((swp_entry_t) {0}), MARK_SWAP_RESUME); + PRINTK( "ok\n" ); return 0; } -- cgit v1.2.3 From 3cf71b171525b99fab321b73ee7d54e436ef95a9 Mon Sep 17 00:00:00 2001 From: David Mosberger Date: Tue, 19 Aug 2003 09:14:00 -0700 Subject: ia64: Fixes for the inline-asm cleanup patch so the tree builds and works again on the simulator (besides the real hw, of course). Also, clean up simulator bootloader code so it's all in a single place (arch/ia64/hp/sim/bootloader/). 
--- arch/ia64/Makefile | 2 +- arch/ia64/boot/Makefile | 37 -- arch/ia64/boot/bootloader.c | 214 ---------- arch/ia64/boot/bootloader.lds | 65 --- arch/ia64/hp/sim/boot/Makefile | 37 ++ arch/ia64/hp/sim/boot/boot_head.S | 136 ++++++ arch/ia64/hp/sim/boot/bootloader.c | 214 ++++++++++ arch/ia64/hp/sim/boot/bootloader.lds | 65 +++ arch/ia64/hp/sim/boot/fw-emu.c | 398 ++++++++++++++++++ arch/ia64/hp/sim/boot/ssc.h | 35 ++ arch/ia64/hp/sim/hpsim.S | 1 - arch/ia64/ia32/ia32_signal.c | 8 +- arch/ia64/ia32/ia32_support.c | 2 +- arch/ia64/ia32/ia32_traps.c | 2 +- arch/ia64/ia32/ia32priv.h | 13 +- arch/ia64/ia32/sys_ia32.c | 4 +- arch/ia64/kernel/fw-emu.c | 416 ------------------- arch/ia64/kernel/irq_ia64.c | 2 +- arch/ia64/kernel/mca.c | 8 +- arch/ia64/kernel/perfmon.c | 25 +- arch/ia64/kernel/setup.c | 4 +- arch/ia64/kernel/signal.c | 11 +- arch/ia64/kernel/traps.c | 4 +- arch/ia64/kernel/unaligned.c | 6 +- include/asm-ia64/bitops.h | 2 +- include/asm-ia64/current.h | 9 +- include/asm-ia64/delay.h | 6 +- include/asm-ia64/gcc_intrin.h | 779 ++++++++++++++++------------------- include/asm-ia64/ia64regs.h | 26 +- include/asm-ia64/intrinsics.h | 6 +- include/asm-ia64/io.h | 2 +- include/asm-ia64/page.h | 2 +- include/asm-ia64/processor.h | 31 +- include/asm-ia64/rwsem.h | 9 +- include/asm-ia64/siginfo.h | 1 - include/asm-ia64/smp.h | 2 +- include/asm-ia64/spinlock.h | 6 +- include/asm-ia64/system.h | 28 +- include/asm-ia64/timex.h | 2 +- include/asm-ia64/tlbflush.h | 2 +- include/asm-ia64/unistd.h | 6 +- 41 files changed, 1366 insertions(+), 1262 deletions(-) delete mode 100644 arch/ia64/boot/Makefile delete mode 100644 arch/ia64/boot/bootloader.c delete mode 100644 arch/ia64/boot/bootloader.lds create mode 100644 arch/ia64/hp/sim/boot/Makefile create mode 100644 arch/ia64/hp/sim/boot/boot_head.S create mode 100644 arch/ia64/hp/sim/boot/bootloader.c create mode 100644 arch/ia64/hp/sim/boot/bootloader.lds create mode 100644 arch/ia64/hp/sim/boot/fw-emu.c create mode 100644 
arch/ia64/hp/sim/boot/ssc.h delete mode 100644 arch/ia64/kernel/fw-emu.c (limited to 'include') diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 2a1b67297597..286291e13860 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -66,7 +66,7 @@ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ -boot := arch/ia64/boot +boot := arch/ia64/hp/sim/boot .PHONY: boot compressed check diff --git a/arch/ia64/boot/Makefile b/arch/ia64/boot/Makefile deleted file mode 100644 index 65faf7474797..000000000000 --- a/arch/ia64/boot/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# ia64/boot/Makefile -# -# This file is subject to the terms and conditions of the GNU General Public -# License. See the file "COPYING" in the main directory of this archive -# for more details. -# -# Copyright (C) 1998 by David Mosberger-Tang -# - -targets-$(CONFIG_IA64_HP_SIM) += bootloader -targets := vmlinux.bin vmlinux.gz $(targets-y) - -quiet_cmd_cptotop = LN $@ - cmd_cptotop = ln -f $< $@ - -vmlinux.gz: $(obj)/vmlinux.gz $(addprefix $(obj)/,$(targets-y)) - $(call cmd,cptotop) - @echo ' Kernel: $@ is ready' - -boot: bootloader - -bootloader: $(obj)/bootloader - $(call cmd,cptotop) - -$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE - $(call if_changed,gzip) - -$(obj)/vmlinux.bin: vmlinux FORCE - $(call if_changed,objcopy) - - -LDFLAGS_bootloader = -static -T - -$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/fw-emu.o \ - lib/lib.a arch/ia64/lib/lib.a FORCE - $(call if_changed,ld) diff --git a/arch/ia64/boot/bootloader.c b/arch/ia64/boot/bootloader.c deleted file mode 100644 index 3be97b8a9afb..000000000000 --- a/arch/ia64/boot/bootloader.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * arch/ia64/boot/bootloader.c - * - * Loads an ELF kernel. 
- * - * Copyright (C) 1998-2002 Hewlett-Packard Co - * David Mosberger-Tang - * Stephane Eranian - * - * 01/07/99 S.Eranian modified to pass command line arguments to kernel - */ -struct task_struct; /* forward declaration for elf.h */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/* Simulator system calls: */ - -#define SSC_CONSOLE_INIT 20 -#define SSC_GETCHAR 21 -#define SSC_PUTCHAR 31 -#define SSC_OPEN 50 -#define SSC_CLOSE 51 -#define SSC_READ 52 -#define SSC_WRITE 53 -#define SSC_GET_COMPLETION 54 -#define SSC_WAIT_COMPLETION 55 -#define SSC_CONNECT_INTERRUPT 58 -#define SSC_GENERATE_INTERRUPT 59 -#define SSC_SET_PERIODIC_INTERRUPT 60 -#define SSC_GET_RTC 65 -#define SSC_EXIT 66 -#define SSC_LOAD_SYMBOLS 69 -#define SSC_GET_TOD 74 - -#define SSC_GET_ARGS 75 - -struct disk_req { - unsigned long addr; - unsigned len; -}; - -struct disk_stat { - int fd; - unsigned count; -}; - -#include "../kernel/fw-emu.c" -extern void jmp_to_kernel(ulong sp, ulong bp, ulong e_entry); -extern void __bsw1(void); - - -/* - * Set a break point on this function so that symbols are available to set breakpoints in - * the kernel being debugged. 
- */ -static void -debug_break (void) -{ -} - -static void -cons_write (const char *buf) -{ - unsigned long ch; - - while ((ch = *buf++) != '\0') { - ssc(ch, 0, 0, 0, SSC_PUTCHAR); - if (ch == '\n') - ssc('\r', 0, 0, 0, SSC_PUTCHAR); - } -} - -#define MAX_ARGS 32 - -void -_start (void) -{ - static char stack[16384] __attribute__ ((aligned (16))); - static char mem[4096]; - static char buffer[1024]; - unsigned long off; - int fd, i; - struct disk_req req; - struct disk_stat stat; - struct elfhdr *elf; - struct elf_phdr *elf_phdr; /* program header */ - unsigned long e_entry, e_phoff, e_phnum; - register struct ia64_boot_param *bp; - char *kpath, *args; - long arglen = 0; - - extern __u64 __gp; - register unsigned long tmp = (unsigned long) &stack[0]; - - ia64_setreg(_IA64_REG_GP, __gp); - ia64_setreg(_IA64_REG_SP, tmp); - __bsw1(); - - ssc(0, 0, 0, 0, SSC_CONSOLE_INIT); - - /* - * S.Eranian: extract the commandline argument from the simulator - * - * The expected format is as follows: - * - * kernelname args... - * - * Both are optional but you can't have the second one without the first. 
- */ - arglen = ssc((long) buffer, 0, 0, 0, SSC_GET_ARGS); - - kpath = "vmlinux"; - args = buffer; - if (arglen > 0) { - kpath = buffer; - while (*args != ' ' && *args != '\0') - ++args, --arglen; - if (*args == ' ') - *args++ = '\0', --arglen; - } - - if (arglen <= 0) { - args = ""; - arglen = 1; - } - - fd = ssc((long) kpath, 1, 0, 0, SSC_OPEN); - - if (fd < 0) { - cons_write(kpath); - cons_write(": file not found, reboot now\n"); - for(;;); - } - stat.fd = fd; - off = 0; - - req.len = sizeof(mem); - req.addr = (long) mem; - ssc(fd, 1, (long) &req, off, SSC_READ); - ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); - - elf = (struct elfhdr *) mem; - if (elf->e_ident[0] == 0x7f && strncmp(elf->e_ident + 1, "ELF", 3) != 0) { - cons_write("not an ELF file\n"); - return; - } - if (elf->e_type != ET_EXEC) { - cons_write("not an ELF executable\n"); - return; - } - if (!elf_check_arch(elf)) { - cons_write("kernel not for this processor\n"); - return; - } - - e_entry = elf->e_entry; - e_phnum = elf->e_phnum; - e_phoff = elf->e_phoff; - - cons_write("loading "); - cons_write(kpath); - cons_write("...\n"); - - for (i = 0; i < e_phnum; ++i) { - req.len = sizeof(*elf_phdr); - req.addr = (long) mem; - ssc(fd, 1, (long) &req, e_phoff, SSC_READ); - ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); - if (stat.count != sizeof(*elf_phdr)) { - cons_write("failed to read phdr\n"); - return; - } - e_phoff += sizeof(*elf_phdr); - - elf_phdr = (struct elf_phdr *) mem; - - if (elf_phdr->p_type != PT_LOAD) - continue; - - req.len = elf_phdr->p_filesz; - req.addr = __pa(elf_phdr->p_paddr); - ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ); - ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); - memset((char *)__pa(elf_phdr->p_paddr) + elf_phdr->p_filesz, 0, - elf_phdr->p_memsz - elf_phdr->p_filesz); - } - ssc(fd, 0, 0, 0, SSC_CLOSE); - - cons_write("starting kernel...\n"); - - /* fake an I/O base address: */ - ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL); - - bp = sys_fw_init(args, 
arglen); - - ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS); - - debug_break(); - tmp = __pa(&stack); - jmp_to_kernel(tmp, (unsigned long) bp, e_entry); - - cons_write("kernel returned!\n"); - ssc(-1, 0, 0, 0, SSC_EXIT); -} diff --git a/arch/ia64/boot/bootloader.lds b/arch/ia64/boot/bootloader.lds deleted file mode 100644 index 69ae58531033..000000000000 --- a/arch/ia64/boot/bootloader.lds +++ /dev/null @@ -1,65 +0,0 @@ -OUTPUT_FORMAT("elf64-ia64-little") -OUTPUT_ARCH(ia64) -ENTRY(_start) -SECTIONS -{ - /* Read-only sections, merged into text segment: */ - . = 0x100000; - - _text = .; - .text : { *(__ivt_section) *(.text) } - _etext = .; - - /* Global data */ - _data = .; - .rodata : { *(.rodata) *(.rodata.*) } - .data : { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } - __gp = ALIGN (8) + 0x200000; - .got : { *(.got.plt) *(.got) } - /* We want the small data sections together, so single-instruction offsets - can access them all, and initialized data all before uninitialized, so - we can shorten the on-disk segment size. */ - .sdata : { *(.sdata) } - _edata = .; - - _bss = .; - .sbss : { *(.sbss) *(.scommon) } - .bss : { *(.bss) *(COMMON) } - . = ALIGN(64 / 8); - _end = . ; - - /* Stabs debugging sections. */ - .stab 0 : { *(.stab) } - .stabstr 0 : { *(.stabstr) } - .stab.excl 0 : { *(.stab.excl) } - .stab.exclstr 0 : { *(.stab.exclstr) } - .stab.index 0 : { *(.stab.index) } - .stab.indexstr 0 : { *(.stab.indexstr) } - .comment 0 : { *(.comment) } - /* DWARF debug sections. - Symbols in the DWARF debugging sections are relative to the beginning - of the section so we begin them at 0. 
*/ - /* DWARF 1 */ - .debug 0 : { *(.debug) } - .line 0 : { *(.line) } - /* GNU DWARF 1 extensions */ - .debug_srcinfo 0 : { *(.debug_srcinfo) } - .debug_sfnames 0 : { *(.debug_sfnames) } - /* DWARF 1.1 and DWARF 2 */ - .debug_aranges 0 : { *(.debug_aranges) } - .debug_pubnames 0 : { *(.debug_pubnames) } - /* DWARF 2 */ - .debug_info 0 : { *(.debug_info) } - .debug_abbrev 0 : { *(.debug_abbrev) } - .debug_line 0 : { *(.debug_line) } - .debug_frame 0 : { *(.debug_frame) } - .debug_str 0 : { *(.debug_str) } - .debug_loc 0 : { *(.debug_loc) } - .debug_macinfo 0 : { *(.debug_macinfo) } - /* SGI/MIPS DWARF 2 extensions */ - .debug_weaknames 0 : { *(.debug_weaknames) } - .debug_funcnames 0 : { *(.debug_funcnames) } - .debug_typenames 0 : { *(.debug_typenames) } - .debug_varnames 0 : { *(.debug_varnames) } - /* These must appear regardless of . */ -} diff --git a/arch/ia64/hp/sim/boot/Makefile b/arch/ia64/hp/sim/boot/Makefile new file mode 100644 index 000000000000..65faf7474797 --- /dev/null +++ b/arch/ia64/hp/sim/boot/Makefile @@ -0,0 +1,37 @@ +# +# ia64/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# +# Copyright (C) 1998 by David Mosberger-Tang +# + +targets-$(CONFIG_IA64_HP_SIM) += bootloader +targets := vmlinux.bin vmlinux.gz $(targets-y) + +quiet_cmd_cptotop = LN $@ + cmd_cptotop = ln -f $< $@ + +vmlinux.gz: $(obj)/vmlinux.gz $(addprefix $(obj)/,$(targets-y)) + $(call cmd,cptotop) + @echo ' Kernel: $@ is ready' + +boot: bootloader + +bootloader: $(obj)/bootloader + $(call cmd,cptotop) + +$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + + +LDFLAGS_bootloader = -static -T + +$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/fw-emu.o \ + lib/lib.a arch/ia64/lib/lib.a FORCE + $(call if_changed,ld) diff --git a/arch/ia64/hp/sim/boot/boot_head.S b/arch/ia64/hp/sim/boot/boot_head.S new file mode 100644 index 000000000000..92c20ce3c404 --- /dev/null +++ b/arch/ia64/hp/sim/boot/boot_head.S @@ -0,0 +1,136 @@ +/* + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang + */ + +#include + + .bss + .align 16 +stack_mem: + .skip 16834 + + .text + +/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */ +GLOBAL_ENTRY(printk) + break 0 +END(printk) + +GLOBAL_ENTRY(_start) + .prologue + .save rp, r0 + .body + movl gp = __gp + movl sp = stack_mem + bsw.1 + br.call.sptk.many rp=start_bootloader +END(_start) + +GLOBAL_ENTRY(ssc) + .regstk 5,0,0,0 + mov r15=in4 + break 0x80001 + br.ret.sptk.many b0 +END(ssc) + +GLOBAL_ENTRY(jmp_to_kernel) + .regstk 2,0,0,0 + mov r28=in0 + mov b7=in1 + br.sptk.few b7 +END(jmp_to_kernel) + + +GLOBAL_ENTRY(pal_emulator_static) + mov r8=-1 + mov r9=256 + ;; + cmp.gtu p6,p7=r9,r28 /* r28 <= 255? 
*/ +(p6) br.cond.sptk.few static + ;; + mov r9=512 + ;; + cmp.gtu p6,p7=r9,r28 +(p6) br.cond.sptk.few stacked + ;; +static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */ +(p7) br.cond.sptk.few 1f + ;; + mov r8=0 /* status = 0 */ + movl r9=0x100000000 /* tc.base */ + movl r10=0x0000000200000003 /* count[0], count[1] */ + movl r11=0x1000000000002000 /* stride[0], stride[1] */ + br.cond.sptk.few rp +1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */ +(p7) br.cond.sptk.few 1f + mov r8=0 /* status = 0 */ + movl r9 =0x100000064 /* proc_ratio (1/100) */ + movl r10=0x100000100 /* bus_ratio<<32 (1/256) */ + movl r11=0x100000064 /* itc_ratio<<32 (1/100) */ + ;; +1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */ +(p7) br.cond.sptk.few 1f + mov r8=0 /* status = 0 */ + mov r9=96 /* num phys stacked */ + mov r10=0 /* hints */ + mov r11=0 + br.cond.sptk.few rp +1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */ +(p7) br.cond.sptk.few 1f + mov r9=ar.lc + movl r8=524288 /* flush 512k million cache lines (16MB) */ + ;; + mov ar.lc=r8 + movl r8=0xe000000000000000 + ;; +.loop: fc r8 + add r8=32,r8 + br.cloop.sptk.few .loop + sync.i + ;; + srlz.i + ;; + mov ar.lc=r9 + mov r8=r0 + ;; +1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */ +(p7) br.cond.sptk.few 1f + mov r8=0 /* status = 0 */ + movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */ + mov r10=0 /* reserved */ + mov r11=0 /* reserved */ + mov r16=0xffff /* implemented PMC */ + mov r17=0xffff /* implemented PMD */ + add r18=8,r29 /* second index */ + ;; + st8 [r29]=r16,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r17,16 /* store implemented PMD */ + st8 [r18]=r0,16 /* clear remaining bits */ + mov r16=0xf0 /* cycles count capable PMC */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + mov r17=0x10 /* retired bundles capable PMC */ + ;; + st8 [r29]=r16,16 /* 
store cycles capable */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r17,16 /* store retired bundle capable */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; + st8 [r29]=r0,16 /* store implemented PMC */ + st8 [r18]=r0,16 /* clear remaining bits */ + ;; +1: br.cond.sptk.few rp +stacked: + br.ret.sptk.few rp +END(pal_emulator_static) diff --git a/arch/ia64/hp/sim/boot/bootloader.c b/arch/ia64/hp/sim/boot/bootloader.c new file mode 100644 index 000000000000..3be97b8a9afb --- /dev/null +++ b/arch/ia64/hp/sim/boot/bootloader.c @@ -0,0 +1,214 @@ +/* + * arch/ia64/boot/bootloader.c + * + * Loads an ELF kernel. + * + * Copyright (C) 1998-2002 Hewlett-Packard Co + * David Mosberger-Tang + * Stephane Eranian + * + * 01/07/99 S.Eranian modified to pass command line arguments to kernel + */ +struct task_struct; /* forward declaration for elf.h */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Simulator system calls: */ + +#define SSC_CONSOLE_INIT 20 +#define SSC_GETCHAR 21 +#define SSC_PUTCHAR 31 +#define SSC_OPEN 50 +#define SSC_CLOSE 51 +#define SSC_READ 52 +#define SSC_WRITE 53 +#define SSC_GET_COMPLETION 54 +#define SSC_WAIT_COMPLETION 55 +#define SSC_CONNECT_INTERRUPT 58 +#define SSC_GENERATE_INTERRUPT 59 +#define SSC_SET_PERIODIC_INTERRUPT 60 +#define SSC_GET_RTC 65 +#define SSC_EXIT 66 +#define SSC_LOAD_SYMBOLS 69 +#define SSC_GET_TOD 74 + +#define SSC_GET_ARGS 75 + +struct disk_req { + unsigned long addr; + unsigned len; +}; + +struct disk_stat { + int fd; + unsigned count; +}; + +#include "../kernel/fw-emu.c" +extern void jmp_to_kernel(ulong sp, ulong bp, ulong e_entry); +extern void __bsw1(void); + + +/* + * Set a break point on this function so that symbols are available to set breakpoints in + * the kernel being debugged. 
+ */ +static void +debug_break (void) +{ +} + +static void +cons_write (const char *buf) +{ + unsigned long ch; + + while ((ch = *buf++) != '\0') { + ssc(ch, 0, 0, 0, SSC_PUTCHAR); + if (ch == '\n') + ssc('\r', 0, 0, 0, SSC_PUTCHAR); + } +} + +#define MAX_ARGS 32 + +void +_start (void) +{ + static char stack[16384] __attribute__ ((aligned (16))); + static char mem[4096]; + static char buffer[1024]; + unsigned long off; + int fd, i; + struct disk_req req; + struct disk_stat stat; + struct elfhdr *elf; + struct elf_phdr *elf_phdr; /* program header */ + unsigned long e_entry, e_phoff, e_phnum; + register struct ia64_boot_param *bp; + char *kpath, *args; + long arglen = 0; + + extern __u64 __gp; + register unsigned long tmp = (unsigned long) &stack[0]; + + ia64_setreg(_IA64_REG_GP, __gp); + ia64_setreg(_IA64_REG_SP, tmp); + __bsw1(); + + ssc(0, 0, 0, 0, SSC_CONSOLE_INIT); + + /* + * S.Eranian: extract the commandline argument from the simulator + * + * The expected format is as follows: + * + * kernelname args... + * + * Both are optional but you can't have the second one without the first. 
+ */ + arglen = ssc((long) buffer, 0, 0, 0, SSC_GET_ARGS); + + kpath = "vmlinux"; + args = buffer; + if (arglen > 0) { + kpath = buffer; + while (*args != ' ' && *args != '\0') + ++args, --arglen; + if (*args == ' ') + *args++ = '\0', --arglen; + } + + if (arglen <= 0) { + args = ""; + arglen = 1; + } + + fd = ssc((long) kpath, 1, 0, 0, SSC_OPEN); + + if (fd < 0) { + cons_write(kpath); + cons_write(": file not found, reboot now\n"); + for(;;); + } + stat.fd = fd; + off = 0; + + req.len = sizeof(mem); + req.addr = (long) mem; + ssc(fd, 1, (long) &req, off, SSC_READ); + ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); + + elf = (struct elfhdr *) mem; + if (elf->e_ident[0] == 0x7f && strncmp(elf->e_ident + 1, "ELF", 3) != 0) { + cons_write("not an ELF file\n"); + return; + } + if (elf->e_type != ET_EXEC) { + cons_write("not an ELF executable\n"); + return; + } + if (!elf_check_arch(elf)) { + cons_write("kernel not for this processor\n"); + return; + } + + e_entry = elf->e_entry; + e_phnum = elf->e_phnum; + e_phoff = elf->e_phoff; + + cons_write("loading "); + cons_write(kpath); + cons_write("...\n"); + + for (i = 0; i < e_phnum; ++i) { + req.len = sizeof(*elf_phdr); + req.addr = (long) mem; + ssc(fd, 1, (long) &req, e_phoff, SSC_READ); + ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); + if (stat.count != sizeof(*elf_phdr)) { + cons_write("failed to read phdr\n"); + return; + } + e_phoff += sizeof(*elf_phdr); + + elf_phdr = (struct elf_phdr *) mem; + + if (elf_phdr->p_type != PT_LOAD) + continue; + + req.len = elf_phdr->p_filesz; + req.addr = __pa(elf_phdr->p_paddr); + ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ); + ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION); + memset((char *)__pa(elf_phdr->p_paddr) + elf_phdr->p_filesz, 0, + elf_phdr->p_memsz - elf_phdr->p_filesz); + } + ssc(fd, 0, 0, 0, SSC_CLOSE); + + cons_write("starting kernel...\n"); + + /* fake an I/O base address: */ + ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL); + + bp = sys_fw_init(args, 
arglen); + + ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS); + + debug_break(); + tmp = __pa(&stack); + jmp_to_kernel(tmp, (unsigned long) bp, e_entry); + + cons_write("kernel returned!\n"); + ssc(-1, 0, 0, 0, SSC_EXIT); +} diff --git a/arch/ia64/hp/sim/boot/bootloader.lds b/arch/ia64/hp/sim/boot/bootloader.lds new file mode 100644 index 000000000000..69ae58531033 --- /dev/null +++ b/arch/ia64/hp/sim/boot/bootloader.lds @@ -0,0 +1,65 @@ +OUTPUT_FORMAT("elf64-ia64-little") +OUTPUT_ARCH(ia64) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . = 0x100000; + + _text = .; + .text : { *(__ivt_section) *(.text) } + _etext = .; + + /* Global data */ + _data = .; + .rodata : { *(.rodata) *(.rodata.*) } + .data : { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } + __gp = ALIGN (8) + 0x200000; + .got : { *(.got.plt) *(.got) } + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + .sdata : { *(.sdata) } + _edata = .; + + _bss = .; + .sbss : { *(.sbss) *(.scommon) } + .bss : { *(.bss) *(COMMON) } + . = ALIGN(64 / 8); + _end = . ; + + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* These must appear regardless of . */ +} diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c new file mode 100644 index 000000000000..95d71afbd279 --- /dev/null +++ b/arch/ia64/hp/sim/boot/fw-emu.c @@ -0,0 +1,398 @@ +/* + * PAL & SAL emulation. + * + * Copyright (C) 1998-2001 Hewlett-Packard Co + * David Mosberger-Tang + */ +#include + +#ifdef CONFIG_PCI +# include +#endif + +#include +#include +#include +#include + +#include "ssc.h" + +#define MB (1024*1024UL) + +#define SIMPLE_MEMMAP 1 + +#if SIMPLE_MEMMAP +# define NUM_MEM_DESCS 4 +#else +# define NUM_MEM_DESCS 16 +#endif + +static char fw_mem[( sizeof(struct ia64_boot_param) + + sizeof(efi_system_table_t) + + sizeof(efi_runtime_services_t) + + 1*sizeof(efi_config_table_t) + + sizeof(struct ia64_sal_systab) + + sizeof(struct ia64_sal_desc_entry_point) + + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t)) + + 1024)] __attribute__ ((aligned (8))); + +#define SECS_PER_HOUR (60 * 60) +#define SECS_PER_DAY (SECS_PER_HOUR * 24) + +/* Compute the `struct tm' representation of *T, + offset OFFSET seconds east of UTC, + and store year, yday, mon, mday, wday, hour, min, sec into *TP. 
+ Return nonzero if successful. */ +int +offtime (unsigned long t, efi_time_t *tp) +{ + const unsigned short int __mon_yday[2][13] = + { + /* Normal years. */ + { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + /* Leap years. */ + { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } + }; + long int days, rem, y; + const unsigned short int *ip; + + days = t / SECS_PER_DAY; + rem = t % SECS_PER_DAY; + while (rem < 0) { + rem += SECS_PER_DAY; + --days; + } + while (rem >= SECS_PER_DAY) { + rem -= SECS_PER_DAY; + ++days; + } + tp->hour = rem / SECS_PER_HOUR; + rem %= SECS_PER_HOUR; + tp->minute = rem / 60; + tp->second = rem % 60; + /* January 1, 1970 was a Thursday. */ + y = 1970; + +# define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) +# define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) +# define __isleap(year) \ + ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) + + while (days < 0 || days >= (__isleap (y) ? 366 : 365)) { + /* Guess a corrected year, assuming 365 days per year. */ + long int yg = y + days / 365 - (days % 365 < 0); + + /* Adjust DAYS and Y to match the guessed year. */ + days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1) + - LEAPS_THRU_END_OF (y - 1)); + y = yg; + } + tp->year = y; + ip = __mon_yday[__isleap(y)]; + for (y = 11; days < (long int) ip[y]; --y) + continue; + days -= ip[y]; + tp->month = y + 1; + tp->day = days + 1; + return 1; +} + +extern void pal_emulator_static (void); + +/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. 
*/ + +#define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3) + +#define REG_OFFSET(addr) (0x00000000000000FF & (addr)) +#define DEVICE_FUNCTION(addr) (0x000000000000FF00 & (addr)) +#define BUS_NUMBER(addr) (0x0000000000FF0000 & (addr)) + +static efi_status_t +efi_get_time (efi_time_t *tm, efi_time_cap_t *tc) +{ +#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) + struct { + int tv_sec; /* must be 32bits to work */ + int tv_usec; + } tv32bits; + + ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD); + + memset(tm, 0, sizeof(*tm)); + offtime(tv32bits.tv_sec, tm); + + if (tc) + memset(tc, 0, sizeof(*tc)); +#else +# error Not implemented yet... +#endif + return EFI_SUCCESS; +} + +static void +efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data) +{ +#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) + ssc(status, 0, 0, 0, SSC_EXIT); +#else +# error Not implemented yet... +#endif +} + +static efi_status_t +efi_unimplemented (void) +{ + return EFI_UNSUPPORTED; +} + +static struct sal_ret_values +sal_emulator (long index, unsigned long in1, unsigned long in2, + unsigned long in3, unsigned long in4, unsigned long in5, + unsigned long in6, unsigned long in7) +{ + long r9 = 0; + long r10 = 0; + long r11 = 0; + long status; + + /* + * Don't do a "switch" here since that gives us code that + * isn't self-relocatable. + */ + status = 0; + if (index == SAL_FREQ_BASE) { + switch (in1) { + case SAL_FREQ_BASE_PLATFORM: + r9 = 200000000; + break; + + case SAL_FREQ_BASE_INTERVAL_TIMER: + /* + * Is this supposed to be the cr.itc frequency + * or something platform specific? The SAL + * doc ain't exactly clear on this... 
+ */ + r9 = 700000000; + break; + + case SAL_FREQ_BASE_REALTIME_CLOCK: + r9 = 1; + break; + + default: + status = -1; + break; + } + } else if (index == SAL_SET_VECTORS) { + ; + } else if (index == SAL_GET_STATE_INFO) { + ; + } else if (index == SAL_GET_STATE_INFO_SIZE) { + ; + } else if (index == SAL_CLEAR_STATE_INFO) { + ; + } else if (index == SAL_MC_RENDEZ) { + ; + } else if (index == SAL_MC_SET_PARAMS) { + ; + } else if (index == SAL_CACHE_FLUSH) { + ; + } else if (index == SAL_CACHE_INIT) { + ; +#ifdef CONFIG_PCI + } else if (index == SAL_PCI_CONFIG_READ) { + /* + * in1 contains the PCI configuration address and in2 + * the size of the read. The value that is read is + * returned via the general register r9. + */ + outl(BUILD_CMD(in1), 0xCF8); + if (in2 == 1) /* Reading byte */ + r9 = inb(0xCFC + ((REG_OFFSET(in1) & 3))); + else if (in2 == 2) /* Reading word */ + r9 = inw(0xCFC + ((REG_OFFSET(in1) & 2))); + else /* Reading dword */ + r9 = inl(0xCFC); + status = PCIBIOS_SUCCESSFUL; + } else if (index == SAL_PCI_CONFIG_WRITE) { + /* + * in1 contains the PCI configuration address, in2 the + * size of the write, and in3 the actual value to be + * written out. + */ + outl(BUILD_CMD(in1), 0xCF8); + if (in2 == 1) /* Writing byte */ + outb(in3, 0xCFC + ((REG_OFFSET(in1) & 3))); + else if (in2 == 2) /* Writing word */ + outw(in3, 0xCFC + ((REG_OFFSET(in1) & 2))); + else /* Writing dword */ + outl(in3, 0xCFC); + status = PCIBIOS_SUCCESSFUL; +#endif /* CONFIG_PCI */ + } else if (index == SAL_UPDATE_PAL) { + ; + } else { + status = -1; + } + return ((struct sal_ret_values) {status, r9, r10, r11}); +} + + +/* + * This is here to work around a bug in egcs-1.1.1b that causes the + * compiler to crash (seems like a bug in the new alias analysis code. 
+ */ +void * +id (long addr) +{ + return (void *) addr; +} + +struct ia64_boot_param * +sys_fw_init (const char *args, int arglen) +{ + efi_system_table_t *efi_systab; + efi_runtime_services_t *efi_runtime; + efi_config_table_t *efi_tables; + struct ia64_sal_systab *sal_systab; + efi_memory_desc_t *efi_memmap, *md; + unsigned long *pal_desc, *sal_desc; + struct ia64_sal_desc_entry_point *sal_ed; + struct ia64_boot_param *bp; + unsigned char checksum = 0; + char *cp, *cmd_line; + int i = 0; +# define MAKE_MD(typ, attr, start, end) \ + do { \ + md = efi_memmap + i++; \ + md->type = typ; \ + md->pad = 0; \ + md->phys_addr = start; \ + md->virt_addr = 0; \ + md->num_pages = (end - start) >> 12; \ + md->attribute = attr; \ + } while (0) + + memset(fw_mem, 0, sizeof(fw_mem)); + + pal_desc = (unsigned long *) &pal_emulator_static; + sal_desc = (unsigned long *) &sal_emulator; + + cp = fw_mem; + efi_systab = (void *) cp; cp += sizeof(*efi_systab); + efi_runtime = (void *) cp; cp += sizeof(*efi_runtime); + efi_tables = (void *) cp; cp += sizeof(*efi_tables); + sal_systab = (void *) cp; cp += sizeof(*sal_systab); + sal_ed = (void *) cp; cp += sizeof(*sal_ed); + efi_memmap = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap); + bp = (void *) cp; cp += sizeof(*bp); + cmd_line = (void *) cp; + + if (args) { + if (arglen >= 1024) + arglen = 1023; + memcpy(cmd_line, args, arglen); + } else { + arglen = 0; + } + cmd_line[arglen] = '\0'; + + memset(efi_systab, 0, sizeof(efi_systab)); + efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE; + efi_systab->hdr.revision = EFI_SYSTEM_TABLE_REVISION; + efi_systab->hdr.headersize = sizeof(efi_systab->hdr); + efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0"); + efi_systab->fw_revision = 1; + efi_systab->runtime = (void *) __pa(efi_runtime); + efi_systab->nr_tables = 1; + efi_systab->tables = __pa(efi_tables); + + efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE; + efi_runtime->hdr.revision = 
EFI_RUNTIME_SERVICES_REVISION; + efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr); + efi_runtime->get_time = __pa(&efi_get_time); + efi_runtime->set_time = __pa(&efi_unimplemented); + efi_runtime->get_wakeup_time = __pa(&efi_unimplemented); + efi_runtime->set_wakeup_time = __pa(&efi_unimplemented); + efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented); + efi_runtime->get_variable = __pa(&efi_unimplemented); + efi_runtime->get_next_variable = __pa(&efi_unimplemented); + efi_runtime->set_variable = __pa(&efi_unimplemented); + efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented); + efi_runtime->reset_system = __pa(&efi_reset_system); + + efi_tables->guid = SAL_SYSTEM_TABLE_GUID; + efi_tables->table = __pa(sal_systab); + + /* fill in the SAL system table: */ + memcpy(sal_systab->signature, "SST_", 4); + sal_systab->size = sizeof(*sal_systab); + sal_systab->sal_rev_minor = 1; + sal_systab->sal_rev_major = 0; + sal_systab->entry_count = 1; + +#ifdef CONFIG_IA64_GENERIC + strcpy(sal_systab->oem_id, "Generic"); + strcpy(sal_systab->product_id, "IA-64 system"); +#endif + +#ifdef CONFIG_IA64_HP_SIM + strcpy(sal_systab->oem_id, "Hewlett-Packard"); + strcpy(sal_systab->product_id, "HP-simulator"); +#endif + +#ifdef CONFIG_IA64_SDV + strcpy(sal_systab->oem_id, "Intel"); + strcpy(sal_systab->product_id, "SDV"); +#endif + + /* fill in an entry point: */ + sal_ed->type = SAL_DESC_ENTRY_POINT; + sal_ed->pal_proc = __pa(pal_desc[0]); + sal_ed->sal_proc = __pa(sal_desc[0]); + sal_ed->gp = __pa(sal_desc[1]); + + for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp) + checksum += *cp; + + sal_systab->checksum = -checksum; + +#if SIMPLE_MEMMAP + /* simulate free memory at physical address zero */ + MAKE_MD(EFI_BOOT_SERVICES_DATA, EFI_MEMORY_WB, 0*MB, 1*MB); + MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB, 1*MB, 2*MB); + MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 2*MB, 130*MB); + MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 4096*MB, 4128*MB); +#else + 
MAKE_MD( 4, 0x9, 0x0000000000000000, 0x0000000000001000); + MAKE_MD( 7, 0x9, 0x0000000000001000, 0x000000000008a000); + MAKE_MD( 4, 0x9, 0x000000000008a000, 0x00000000000a0000); + MAKE_MD( 5, 0x8000000000000009, 0x00000000000c0000, 0x0000000000100000); + MAKE_MD( 7, 0x9, 0x0000000000100000, 0x0000000004400000); + MAKE_MD( 2, 0x9, 0x0000000004400000, 0x0000000004be5000); + MAKE_MD( 7, 0x9, 0x0000000004be5000, 0x000000007f77e000); + MAKE_MD( 6, 0x8000000000000009, 0x000000007f77e000, 0x000000007fb94000); + MAKE_MD( 6, 0x8000000000000009, 0x000000007fb94000, 0x000000007fb95000); + MAKE_MD( 6, 0x8000000000000009, 0x000000007fb95000, 0x000000007fc00000); + MAKE_MD(13, 0x8000000000000009, 0x000000007fc00000, 0x000000007fc3a000); + MAKE_MD( 7, 0x9, 0x000000007fc3a000, 0x000000007fea0000); + MAKE_MD( 5, 0x8000000000000009, 0x000000007fea0000, 0x000000007fea8000); + MAKE_MD( 7, 0x9, 0x000000007fea8000, 0x000000007feab000); + MAKE_MD( 5, 0x8000000000000009, 0x000000007feab000, 0x000000007ffff000); + MAKE_MD( 7, 0x9, 0x00000000ff400000, 0x0000000104000000); +#endif + + bp->efi_systab = __pa(&fw_mem); + bp->efi_memmap = __pa(efi_memmap); + bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t); + bp->efi_memdesc_size = sizeof(efi_memory_desc_t); + bp->efi_memdesc_version = 1; + bp->command_line = __pa(cmd_line); + bp->console_info.num_cols = 80; + bp->console_info.num_rows = 25; + bp->console_info.orig_x = 0; + bp->console_info.orig_y = 24; + bp->fpswa = 0; + + return bp; +} diff --git a/arch/ia64/hp/sim/boot/ssc.h b/arch/ia64/hp/sim/boot/ssc.h new file mode 100644 index 000000000000..3b94c03e43a9 --- /dev/null +++ b/arch/ia64/hp/sim/boot/ssc.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang + * Stephane Eranian + */ +#ifndef ssc_h +#define ssc_h + +/* Simulator system calls: */ + +#define SSC_CONSOLE_INIT 20 +#define SSC_GETCHAR 21 +#define SSC_PUTCHAR 31 +#define SSC_OPEN 50 +#define SSC_CLOSE 51 +#define SSC_READ 52 
+#define SSC_WRITE 53 +#define SSC_GET_COMPLETION 54 +#define SSC_WAIT_COMPLETION 55 +#define SSC_CONNECT_INTERRUPT 58 +#define SSC_GENERATE_INTERRUPT 59 +#define SSC_SET_PERIODIC_INTERRUPT 60 +#define SSC_GET_RTC 65 +#define SSC_EXIT 66 +#define SSC_LOAD_SYMBOLS 69 +#define SSC_GET_TOD 74 + +#define SSC_GET_ARGS 75 + +/* + * Simulator system call. + */ +extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr); + +#endif /* ssc_h */ diff --git a/arch/ia64/hp/sim/hpsim.S b/arch/ia64/hp/sim/hpsim.S index 9c223a3a8b14..ff16e8a857d1 100644 --- a/arch/ia64/hp/sim/hpsim.S +++ b/arch/ia64/hp/sim/hpsim.S @@ -8,4 +8,3 @@ GLOBAL_ENTRY(ia64_ssc) break 0x80001 br.ret.sptk.many rp END(ia64_ssc) - diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c index 88e7ca03eabe..aba19859d2f7 100644 --- a/arch/ia64/ia32/ia32_signal.c +++ b/arch/ia64/ia32/ia32_signal.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -41,8 +42,11 @@ #define __IA32_NR_sigreturn 119 #define __IA32_NR_rt_sigreturn 173 -#include #ifdef ASM_SUPPORTED +/* + * Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and + * restore_ia32_fpstate_live() can be sure the live register contain user-level state. 
+ */ register double f16 asm ("f16"); register double f17 asm ("f17"); register double f18 asm ("f18"); register double f19 asm ("f19"); register double f20 asm ("f20"); register double f21 asm ("f21"); @@ -217,7 +221,7 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save) if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) return -EFAULT; - /* Readin fsr, fcr, fir, fdr and copy onto fpstate */ + /* Read in fsr, fcr, fir, fdr and copy onto fpstate */ fsr = ia64_getreg(_IA64_REG_AR_FSR); fcr = ia64_getreg(_IA64_REG_AR_FCR); fir = ia64_getreg(_IA64_REG_AR_FIR); diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c index 651e3df7a2c4..4d373270fe3b 100644 --- a/arch/ia64/ia32/ia32_support.c +++ b/arch/ia64/ia32/ia32_support.c @@ -18,11 +18,11 @@ #include #include +#include #include #include #include #include -#include #include "ia32priv.h" diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c index 0de400a6b177..e486042672f1 100644 --- a/arch/ia64/ia32/ia32_traps.c +++ b/arch/ia64/ia32/ia32_traps.c @@ -14,8 +14,8 @@ #include "ia32priv.h" -#include #include +#include int ia32_intercept (struct pt_regs *regs, unsigned long isr) diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h index 5f24a94696aa..50620e5c2d4d 100644 --- a/arch/ia64/ia32/ia32priv.h +++ b/arch/ia64/ia32/ia32priv.h @@ -446,8 +446,8 @@ extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, extern void ia32_load_segment_descriptors (struct task_struct *task); #define ia32f2ia64f(dst,src) \ -do { \ - ia64_ldfe(6,src); \ +do { \ + ia64_ldfe(6,src); \ ia64_stop(); \ ia64_stf_spill(dst, 6); \ } while(0) @@ -456,7 +456,7 @@ do { \ do { \ ia64_ldf_fill(6, src); \ ia64_stop(); \ - ia64_stfe(dst, 6); \ + ia64_stfe(dst, 6); \ } while(0) struct user_regs_struct32 { @@ -470,11 +470,8 @@ struct user_regs_struct32 { }; /* Prototypes for use in elfcore32.h */ -int save_ia32_fpstate (struct task_struct *tsk, - struct ia32_user_i387_struct 
*save); - -int save_ia32_fpxstate (struct task_struct *tsk, - struct ia32_user_fxsr_struct *save); +extern int save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save); +extern int save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save); #endif /* !CONFIG_IA32_SUPPORT */ diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index d21c4b6e3d16..10727d0026ff 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -51,10 +51,10 @@ #include #include +#include +#include #include #include -#include -#include #include "ia32priv.h" diff --git a/arch/ia64/kernel/fw-emu.c b/arch/ia64/kernel/fw-emu.c deleted file mode 100644 index cf5702071f28..000000000000 --- a/arch/ia64/kernel/fw-emu.c +++ /dev/null @@ -1,416 +0,0 @@ -/* - * PAL & SAL emulation. - * - * Copyright (C) 1998-2001 Hewlett-Packard Co - * David Mosberger-Tang - * - * For the HP simulator, this file gets include in boot/bootloader.c. - * For SoftSDV, this file gets included in sys_softsdv.c. - */ -#include - -#ifdef CONFIG_PCI -# include -#endif - -#include -#include -#include -#include - -#define MB (1024*1024UL) - -#define SIMPLE_MEMMAP 1 - -#if SIMPLE_MEMMAP -# define NUM_MEM_DESCS 4 -#else -# define NUM_MEM_DESCS 16 -#endif - -static char fw_mem[( sizeof(struct ia64_boot_param) - + sizeof(efi_system_table_t) - + sizeof(efi_runtime_services_t) - + 1*sizeof(efi_config_table_t) - + sizeof(struct ia64_sal_systab) - + sizeof(struct ia64_sal_desc_entry_point) - + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t)) - + 1024)] __attribute__ ((aligned (8))); - -#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) - -/* Simulator system calls: */ - -#define SSC_EXIT 66 - -/* - * Simulator system call. 
- */ -extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr); - -#define SECS_PER_HOUR (60 * 60) -#define SECS_PER_DAY (SECS_PER_HOUR * 24) - -/* Compute the `struct tm' representation of *T, - offset OFFSET seconds east of UTC, - and store year, yday, mon, mday, wday, hour, min, sec into *TP. - Return nonzero if successful. */ -int -offtime (unsigned long t, efi_time_t *tp) -{ - const unsigned short int __mon_yday[2][13] = - { - /* Normal years. */ - { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, - /* Leap years. */ - { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } - }; - long int days, rem, y; - const unsigned short int *ip; - - days = t / SECS_PER_DAY; - rem = t % SECS_PER_DAY; - while (rem < 0) { - rem += SECS_PER_DAY; - --days; - } - while (rem >= SECS_PER_DAY) { - rem -= SECS_PER_DAY; - ++days; - } - tp->hour = rem / SECS_PER_HOUR; - rem %= SECS_PER_HOUR; - tp->minute = rem / 60; - tp->second = rem % 60; - /* January 1, 1970 was a Thursday. */ - y = 1970; - -# define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) -# define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) -# define __isleap(year) \ - ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) - - while (days < 0 || days >= (__isleap (y) ? 366 : 365)) { - /* Guess a corrected year, assuming 365 days per year. */ - long int yg = y + days / 365 - (days % 365 < 0); - - /* Adjust DAYS and Y to match the guessed year. */ - days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1) - - LEAPS_THRU_END_OF (y - 1)); - y = yg; - } - tp->year = y; - ip = __mon_yday[__isleap(y)]; - for (y = 11; days < (long int) ip[y]; --y) - continue; - days -= ip[y]; - tp->month = y + 1; - tp->day = days + 1; - return 1; -} - -#endif /* CONFIG_IA64_HP_SIM */ - -/* - * Very ugly, but we need this in the simulator only. Once we run on - * real hw, this can all go away. 
- */ -extern void pal_emulator_static (void); - -/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */ - -#define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3) - -#define REG_OFFSET(addr) (0x00000000000000FF & (addr)) -#define DEVICE_FUNCTION(addr) (0x000000000000FF00 & (addr)) -#define BUS_NUMBER(addr) (0x0000000000FF0000 & (addr)) - -static efi_status_t -efi_get_time (efi_time_t *tm, efi_time_cap_t *tc) -{ -#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) - struct { - int tv_sec; /* must be 32bits to work */ - int tv_usec; - } tv32bits; - - ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD); - - memset(tm, 0, sizeof(*tm)); - offtime(tv32bits.tv_sec, tm); - - if (tc) - memset(tc, 0, sizeof(*tc)); -#else -# error Not implemented yet... -#endif - return EFI_SUCCESS; -} - -static void -efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data) -{ -#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC) - ssc(status, 0, 0, 0, SSC_EXIT); -#else -# error Not implemented yet... -#endif -} - -static efi_status_t -efi_unimplemented (void) -{ - return EFI_UNSUPPORTED; -} - -static struct sal_ret_values -sal_emulator (long index, unsigned long in1, unsigned long in2, - unsigned long in3, unsigned long in4, unsigned long in5, - unsigned long in6, unsigned long in7) -{ - long r9 = 0; - long r10 = 0; - long r11 = 0; - long status; - - /* - * Don't do a "switch" here since that gives us code that - * isn't self-relocatable. - */ - status = 0; - if (index == SAL_FREQ_BASE) { - switch (in1) { - case SAL_FREQ_BASE_PLATFORM: - r9 = 200000000; - break; - - case SAL_FREQ_BASE_INTERVAL_TIMER: - /* - * Is this supposed to be the cr.itc frequency - * or something platform specific? The SAL - * doc ain't exactly clear on this... 
- */ - r9 = 700000000; - break; - - case SAL_FREQ_BASE_REALTIME_CLOCK: - r9 = 1; - break; - - default: - status = -1; - break; - } - } else if (index == SAL_SET_VECTORS) { - ; - } else if (index == SAL_GET_STATE_INFO) { - ; - } else if (index == SAL_GET_STATE_INFO_SIZE) { - ; - } else if (index == SAL_CLEAR_STATE_INFO) { - ; - } else if (index == SAL_MC_RENDEZ) { - ; - } else if (index == SAL_MC_SET_PARAMS) { - ; - } else if (index == SAL_CACHE_FLUSH) { - ; - } else if (index == SAL_CACHE_INIT) { - ; -#ifdef CONFIG_PCI - } else if (index == SAL_PCI_CONFIG_READ) { - /* - * in1 contains the PCI configuration address and in2 - * the size of the read. The value that is read is - * returned via the general register r9. - */ - outl(BUILD_CMD(in1), 0xCF8); - if (in2 == 1) /* Reading byte */ - r9 = inb(0xCFC + ((REG_OFFSET(in1) & 3))); - else if (in2 == 2) /* Reading word */ - r9 = inw(0xCFC + ((REG_OFFSET(in1) & 2))); - else /* Reading dword */ - r9 = inl(0xCFC); - status = PCIBIOS_SUCCESSFUL; - } else if (index == SAL_PCI_CONFIG_WRITE) { - /* - * in1 contains the PCI configuration address, in2 the - * size of the write, and in3 the actual value to be - * written out. - */ - outl(BUILD_CMD(in1), 0xCF8); - if (in2 == 1) /* Writing byte */ - outb(in3, 0xCFC + ((REG_OFFSET(in1) & 3))); - else if (in2 == 2) /* Writing word */ - outw(in3, 0xCFC + ((REG_OFFSET(in1) & 2))); - else /* Writing dword */ - outl(in3, 0xCFC); - status = PCIBIOS_SUCCESSFUL; -#endif /* CONFIG_PCI */ - } else if (index == SAL_UPDATE_PAL) { - ; - } else { - status = -1; - } - return ((struct sal_ret_values) {status, r9, r10, r11}); -} - - -/* - * This is here to work around a bug in egcs-1.1.1b that causes the - * compiler to crash (seems like a bug in the new alias analysis code. 
- */ -void * -id (long addr) -{ - return (void *) addr; -} - -struct ia64_boot_param * -sys_fw_init (const char *args, int arglen) -{ - efi_system_table_t *efi_systab; - efi_runtime_services_t *efi_runtime; - efi_config_table_t *efi_tables; - struct ia64_sal_systab *sal_systab; - efi_memory_desc_t *efi_memmap, *md; - unsigned long *pal_desc, *sal_desc; - struct ia64_sal_desc_entry_point *sal_ed; - struct ia64_boot_param *bp; - unsigned char checksum = 0; - char *cp, *cmd_line; - int i = 0; -# define MAKE_MD(typ, attr, start, end) \ - do { \ - md = efi_memmap + i++; \ - md->type = typ; \ - md->pad = 0; \ - md->phys_addr = start; \ - md->virt_addr = 0; \ - md->num_pages = (end - start) >> 12; \ - md->attribute = attr; \ - } while (0) - - memset(fw_mem, 0, sizeof(fw_mem)); - - pal_desc = (unsigned long *) &pal_emulator_static; - sal_desc = (unsigned long *) &sal_emulator; - - cp = fw_mem; - efi_systab = (void *) cp; cp += sizeof(*efi_systab); - efi_runtime = (void *) cp; cp += sizeof(*efi_runtime); - efi_tables = (void *) cp; cp += sizeof(*efi_tables); - sal_systab = (void *) cp; cp += sizeof(*sal_systab); - sal_ed = (void *) cp; cp += sizeof(*sal_ed); - efi_memmap = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap); - bp = (void *) cp; cp += sizeof(*bp); - cmd_line = (void *) cp; - - if (args) { - if (arglen >= 1024) - arglen = 1023; - memcpy(cmd_line, args, arglen); - } else { - arglen = 0; - } - cmd_line[arglen] = '\0'; - - memset(efi_systab, 0, sizeof(efi_systab)); - efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE; - efi_systab->hdr.revision = EFI_SYSTEM_TABLE_REVISION; - efi_systab->hdr.headersize = sizeof(efi_systab->hdr); - efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0"); - efi_systab->fw_revision = 1; - efi_systab->runtime = __pa(efi_runtime); - efi_systab->nr_tables = 1; - efi_systab->tables = __pa(efi_tables); - - efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE; - efi_runtime->hdr.revision = 
EFI_RUNTIME_SERVICES_REVISION; - efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr); - efi_runtime->get_time = __pa(&efi_get_time); - efi_runtime->set_time = __pa(&efi_unimplemented); - efi_runtime->get_wakeup_time = __pa(&efi_unimplemented); - efi_runtime->set_wakeup_time = __pa(&efi_unimplemented); - efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented); - efi_runtime->get_variable = __pa(&efi_unimplemented); - efi_runtime->get_next_variable = __pa(&efi_unimplemented); - efi_runtime->set_variable = __pa(&efi_unimplemented); - efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented); - efi_runtime->reset_system = __pa(&efi_reset_system); - - efi_tables->guid = SAL_SYSTEM_TABLE_GUID; - efi_tables->table = __pa(sal_systab); - - /* fill in the SAL system table: */ - memcpy(sal_systab->signature, "SST_", 4); - sal_systab->size = sizeof(*sal_systab); - sal_systab->sal_rev_minor = 1; - sal_systab->sal_rev_major = 0; - sal_systab->entry_count = 1; - -#ifdef CONFIG_IA64_GENERIC - strcpy(sal_systab->oem_id, "Generic"); - strcpy(sal_systab->product_id, "IA-64 system"); -#endif - -#ifdef CONFIG_IA64_HP_SIM - strcpy(sal_systab->oem_id, "Hewlett-Packard"); - strcpy(sal_systab->product_id, "HP-simulator"); -#endif - -#ifdef CONFIG_IA64_SDV - strcpy(sal_systab->oem_id, "Intel"); - strcpy(sal_systab->product_id, "SDV"); -#endif - - /* fill in an entry point: */ - sal_ed->type = SAL_DESC_ENTRY_POINT; - sal_ed->pal_proc = __pa(pal_desc[0]); - sal_ed->sal_proc = __pa(sal_desc[0]); - sal_ed->gp = __pa(sal_desc[1]); - - for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp) - checksum += *cp; - - sal_systab->checksum = -checksum; - -#if SIMPLE_MEMMAP - /* simulate free memory at physical address zero */ - MAKE_MD(EFI_BOOT_SERVICES_DATA, EFI_MEMORY_WB, 0*MB, 1*MB); - MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB, 1*MB, 2*MB); - MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 2*MB, 130*MB); - MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, 4096*MB, 4128*MB); -#else - 
MAKE_MD( 4, 0x9, 0x0000000000000000, 0x0000000000001000); - MAKE_MD( 7, 0x9, 0x0000000000001000, 0x000000000008a000); - MAKE_MD( 4, 0x9, 0x000000000008a000, 0x00000000000a0000); - MAKE_MD( 5, 0x8000000000000009, 0x00000000000c0000, 0x0000000000100000); - MAKE_MD( 7, 0x9, 0x0000000000100000, 0x0000000004400000); - MAKE_MD( 2, 0x9, 0x0000000004400000, 0x0000000004be5000); - MAKE_MD( 7, 0x9, 0x0000000004be5000, 0x000000007f77e000); - MAKE_MD( 6, 0x8000000000000009, 0x000000007f77e000, 0x000000007fb94000); - MAKE_MD( 6, 0x8000000000000009, 0x000000007fb94000, 0x000000007fb95000); - MAKE_MD( 6, 0x8000000000000009, 0x000000007fb95000, 0x000000007fc00000); - MAKE_MD(13, 0x8000000000000009, 0x000000007fc00000, 0x000000007fc3a000); - MAKE_MD( 7, 0x9, 0x000000007fc3a000, 0x000000007fea0000); - MAKE_MD( 5, 0x8000000000000009, 0x000000007fea0000, 0x000000007fea8000); - MAKE_MD( 7, 0x9, 0x000000007fea8000, 0x000000007feab000); - MAKE_MD( 5, 0x8000000000000009, 0x000000007feab000, 0x000000007ffff000); - MAKE_MD( 7, 0x9, 0x00000000ff400000, 0x0000000104000000); -#endif - - bp->efi_systab = __pa(&fw_mem); - bp->efi_memmap = __pa(efi_memmap); - bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t); - bp->efi_memdesc_size = sizeof(efi_memory_desc_t); - bp->efi_memdesc_version = 1; - bp->command_line = __pa(cmd_line); - bp->console_info.num_cols = 80; - bp->console_info.num_rows = 25; - bp->console_info.orig_x = 0; - bp->console_info.orig_y = 24; - bp->fpswa = 0; - - return bp; -} diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index fbf529b2147f..82d4c9891a07 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -30,12 +30,12 @@ #include #include +#include #include #include #include #include #include -#include #ifdef CONFIG_PERFMON # include diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 1ad4d10ffe89..3626ec5ef210 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -531,7 +531,7 @@ void 
ia64_mca_cmc_vector_disable (void *dummy) { cmcv_reg_t cmcv; - + cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV); cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ @@ -558,7 +558,7 @@ void ia64_mca_cmc_vector_enable (void *dummy) { cmcv_reg_t cmcv; - + cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV); cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ @@ -1146,7 +1146,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs) ia64_mca_cmc_int_handler(cpe_irq, arg, ptregs); for (++cpuid ; !cpu_online(cpuid) && cpuid < NR_CPUS ; cpuid++); - + if (cpuid < NR_CPUS) { platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); } else { @@ -1176,7 +1176,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs) start_count = -1; } - + return IRQ_HANDLED; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index a3a2a50916c3..5ed00c1bfe64 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -46,7 +47,6 @@ #include #include #include -#include #ifdef CONFIG_PERFMON /* @@ -680,28 +680,28 @@ static int pfm_end_notify_user(pfm_context_t *ctx); static inline void pfm_clear_psr_pp(void) { - ia64_rsm(IA64_PSR_PP) + ia64_rsm(IA64_PSR_PP); ia64_srlz_i(); } static inline void pfm_set_psr_pp(void) { - ia64_ssm(IA64_PSR_PP) + ia64_ssm(IA64_PSR_PP); ia64_srlz_i(); } static inline void pfm_clear_psr_up(void) { - ia64_rsm(IA64_PSR_UP) + ia64_rsm(IA64_PSR_UP); ia64_srlz_i(); } static inline void pfm_set_psr_up(void) { - ia64_ssm(IA64_PSR_UP) + ia64_ssm(IA64_PSR_UP); ia64_srlz_i(); } @@ -985,8 +985,7 @@ pfm_restore_monitoring(struct task_struct *task) */ if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { /* disable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, - ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); pfm_clear_psr_pp(); } else { 
pfm_clear_psr_up(); @@ -1033,8 +1032,7 @@ pfm_restore_monitoring(struct task_struct *task) */ if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { /* enable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, - ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); ia64_srlz_i(); } pfm_set_psr_l(psr); @@ -1790,8 +1788,7 @@ pfm_syswide_force_stop(void *info) /* * Update local PMU */ - ia64_setreg(_IA64_REG_CR_DCR, - ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); ia64_srlz_i(); /* * update local cpuinfo @@ -3962,8 +3959,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * * disable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, - ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); ia64_srlz_i(); /* @@ -4053,8 +4049,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_set_psr_pp(); /* enable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, - ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); + ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); ia64_srlz_i(); return 0; diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 8edc299421d5..c0dd4aa0dba8 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -741,8 +741,8 @@ cpu_init (void) * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). 
*/ - ia64_setreg(_IA64_REG_CR_DCR, IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX - | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC); + ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR + | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; if (current->mm) diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 3ecebadd468d..dd6ba6951f98 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -1,7 +1,7 @@ /* * Architecture-specific signal handling support. * - * Copyright (C) 1999-2002 Hewlett-Packard Co + * Copyright (C) 1999-2003 Hewlett-Packard Co * David Mosberger-Tang * * Derived from i386 and Alpha versions. @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -41,8 +42,12 @@ # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif -#include #ifdef ASM_SUPPORTED +/* + * Don't let GCC uses f16-f31 so that when we setup/restore the registers in the signal + * context in __kernel_sigtramp(), we can be sure that registers f16-f31 contain user-level + * values. 
+ */ register double f16 asm ("f16"); register double f17 asm ("f17"); register double f18 asm ("f18"); register double f19 asm ("f19"); register double f20 asm ("f20"); register double f21 asm ("f21"); @@ -195,7 +200,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user(from->si_value.sival_ptr, &to->si_value.sival_ptr); + err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index fdb43f8186a8..ddd4cb1a25a7 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -14,12 +14,13 @@ #include #include /* For unblank_screen() */ +#include #include #include +#include #include #include -#include /* * fp_emulate() needs to be able to access and update all floating point registers. Those * saved in pt_regs can be accessed through that structure, but those not saved, will be @@ -28,7 +29,6 @@ * by declaring preserved registers that are not marked as "fixed" as global register * variables. 
*/ -#include #ifdef ASM_SUPPORTED register double f2 asm ("f2"); register double f3 asm ("f3"); register double f4 asm ("f4"); register double f5 asm ("f5"); diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 06595d8d37db..95f77b1dfa11 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -18,11 +18,11 @@ #include #include -#include -#include +#include #include +#include +#include #include -#include extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index 2a4b667058d9..295ff0aa820b 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -292,7 +292,7 @@ ffz (unsigned long x) { unsigned long result; - result = ia64_popcnt((x & (~x - 1))); + result = ia64_popcnt(x & (~x - 1)); return result; } diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h index f8b27b5b8fa8..8e316f179815 100644 --- a/include/asm-ia64/current.h +++ b/include/asm-ia64/current.h @@ -7,8 +7,11 @@ */ #include -/* In kernel mode, thread pointer (r13) is used to point to the - current task structure. */ -#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP)) + +/* + * In kernel mode, thread pointer (r13) is used to point to the current task + * structure. + */ +#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP)) #endif /* _ASM_IA64_CURRENT_H */ diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h index 1725d43e74ba..74c542acc1e8 100644 --- a/include/asm-ia64/delay.h +++ b/include/asm-ia64/delay.h @@ -5,7 +5,7 @@ * Delay routines using a pre-computed "cycles/usec" value. 
* * Copyright (C) 1998, 1999 Hewlett-Packard Co - * Copyright (C) 1998, 1999 David Mosberger-Tang + * David Mosberger-Tang * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond * Copyright (C) 1999 Asit Mallick @@ -17,8 +17,8 @@ #include #include -#include #include +#include static __inline__ void ia64_set_itm (unsigned long val) @@ -73,7 +73,7 @@ __delay (unsigned long loops) if (loops < 1) return; - for (;loops--;) + while (loops--) ia64_nop(0); } diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h index 41a24523d953..5175f0345555 100644 --- a/include/asm-ia64/gcc_intrin.h +++ b/include/asm-ia64/gcc_intrin.h @@ -12,89 +12,86 @@ /* Optimization barrier */ /* The "volatile" is due to gcc bugs */ -#define ia64_barrier() __asm__ __volatile__ ("":::"memory") - - -#define ia64_stop() __asm__ __volatile__ (";;"::) - - -#define ia64_invala_gr(regnum) \ - __asm__ __volatile__ ("invala.e r%0" :: "i"(regnum)) - -#define ia64_invala_fr(regnum) \ - __asm__ __volatile__ ("invala.e f%0" :: "i"(regnum)) - -extern void ia64_bad_param_for_setreg(void); -extern void ia64_bad_param_for_getreg(void); - -#define ia64_setreg(regnum, val) \ -({ \ - switch (regnum) { \ - case _IA64_REG_PSR_L: \ - __asm__ __volatile__ ("mov psr.l=%0" :: "r"(val) : "memory"); \ - break; \ - case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ - __asm__ __volatile__ ("mov ar%0=%1" :: \ - "i" (regnum - _IA64_REG_AR_KR0), \ - "r"(val): "memory"); \ - break; \ - case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ - __asm__ __volatile__ ("mov cr%0=%1" :: \ - "i" (regnum - _IA64_REG_CR_DCR), \ - "r"(val): "memory" ); \ - break; \ - case _IA64_REG_SP: \ - __asm__ __volatile__ ("mov r12=%0" :: \ - "r"(val): "memory"); \ - break; \ - case _IA64_REG_GP: \ - __asm__ __volatile__ ("mov gp=%0" :: "r"(val) : "memory"); \ - break; \ - default: \ - ia64_bad_param_for_setreg(); \ - break; \ - } \ -}) - -#define ia64_getreg(regnum) \ -({ \ - __u64 ia64_intri_res; \ - \ - switch (regnum) { \ - case _IA64_REG_GP: \ - __asm__ __volatile__ ("mov %0=gp" : "=r"(ia64_intri_res)); \ - break; \ - case _IA64_REG_IP: \ - __asm__ __volatile__ ("mov %0=ip" : "=r"(ia64_intri_res)); \ - break; \ - case _IA64_REG_PSR: \ - __asm__ __volatile__ ("mov %0=psr" : "=r"(ia64_intri_res));\ - break; \ - case _IA64_REG_TP: /* for current() */ \ - { \ - register __u64 ia64_r13 asm ("r13"); \ - ia64_intri_res = ia64_r13; \ - } \ - break; \ - case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ - __asm__ __volatile__ ("mov %0=ar%1" : "=r" (ia64_intri_res) \ - : "i"(regnum - _IA64_REG_AR_KR0)); \ - break; \ - case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \ - __asm__ __volatile__ ("mov %0=cr%1" : "=r" (ia64_intri_res) \ - : "i" (regnum - _IA64_REG_CR_DCR)); \ - break; \ - case _IA64_REG_SP: \ - __asm__ __volatile__ ("mov %0=sp" : "=r" (ia64_intri_res)); \ - break; \ - default: \ - ia64_bad_param_for_getreg(); \ - break; \ - } \ - ia64_intri_res; \ +#define ia64_barrier() asm volatile ("":::"memory") + +#define ia64_stop() asm volatile (";;"::) + +#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum)) + +#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum)) + +extern void ia64_bad_param_for_setreg (void); +extern void ia64_bad_param_for_getreg (void); + +#define ia64_setreg(regnum, val) \ +({ \ + switch (regnum) { \ + case _IA64_REG_PSR_L: \ + asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \ + break; \ + case _IA64_REG_AR_KR0 ... 
_IA64_REG_AR_EC: \ + asm volatile ("mov ar%0=%1" :: \ + "i" (regnum - _IA64_REG_AR_KR0), \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \ + asm volatile ("mov cr%0=%1" :: \ + "i" (regnum - _IA64_REG_CR_DCR), \ + "r"(val): "memory" ); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov r12=%0" :: \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_GP: \ + asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \ + break; \ + default: \ + ia64_bad_param_for_setreg(); \ + break; \ + } \ +}) + +#define ia64_getreg(regnum) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (regnum) { \ + case _IA64_REG_GP: \ + asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_IP: \ + asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_PSR: \ + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_TP: /* for current() */ \ + { \ + register __u64 ia64_r13 asm ("r13"); \ + ia64_intri_res = ia64_r13; \ + } \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \ + : "i"(regnum - _IA64_REG_AR_KR0)); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \ + : "i" (regnum - _IA64_REG_CR_DCR)); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \ + break; \ + default: \ + ia64_bad_param_for_getreg(); \ + break; \ + } \ + ia64_intri_res; \ }) #define ia64_hint_pause 0 + #define ia64_hint(mode) \ ({ \ switch (mode) { \ @@ -112,434 +109,389 @@ extern void ia64_bad_param_for_getreg(void); #define ia64_mux1_alt 10 #define ia64_mux1_rev 11 -#define ia64_mux1(x, mode) \ -({ \ - __u64 ia64_intri_res; \ - \ - switch (mode) { \ - case ia64_mux1_brcst: \ - __asm__ ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \ - break; \ - case ia64_mux1_mix: \ - __asm__ ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \ - break; \ - case ia64_mux1_shuf: \ - __asm__ ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \ - break; \ - case ia64_mux1_alt: \ - __asm__ ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \ - break; \ - case ia64_mux1_rev: \ - __asm__ ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \ - break; \ - } \ - ia64_intri_res; \ +#define ia64_mux1(x, mode) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (mode) { \ + case ia64_mux1_brcst: \ + asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_mix: \ + asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_shuf: \ + asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_alt: \ + asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_rev: \ + asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + } \ + ia64_intri_res; \ }) - #define ia64_popcnt(x) \ ({ \ __u64 ia64_intri_res; \ - __asm__ ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \ + asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \ \ ia64_intri_res; \ }) - #define ia64_getf_exp(x) \ ({ \ long ia64_intri_res; \ 
\ - __asm__ ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \ + asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \ \ ia64_intri_res; \ }) -#define ia64_shrp(a, b, count) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \ - ia64_intri_res; \ +#define ia64_shrp(a, b, count) \ +({ \ + __u64 ia64_intri_res; \ + asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \ + ia64_intri_res; \ }) - #define ia64_ldfs(regnum, x) \ ({ \ register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \ + asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \ }) #define ia64_ldfd(regnum, x) \ ({ \ register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \ + asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \ }) #define ia64_ldfe(regnum, x) \ ({ \ register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \ + asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \ }) #define ia64_ldf8(regnum, x) \ ({ \ register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \ + asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \ }) #define ia64_ldf_fill(regnum, x) \ ({ \ register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ -}) - -#define ia64_stfs(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ -}) - - -#define ia64_stfd(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ + asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ }) -#define ia64_stfe(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ 
+#define ia64_stfs(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ }) - -#define ia64_stf8(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +#define ia64_stfd(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ }) -#define ia64_stf_spill(x, regnum) \ -({ \ - register double __f__ asm ("f"#regnum); \ - __asm__ __volatile__ ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +#define ia64_stfe(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ }) -#define ia64_fetchadd4_acq(p, inc) \ +#define ia64_stf8(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stf_spill(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_fetchadd4_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd4.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd4_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd4.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd8.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd8.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define 
ia64_xchg1(ptr,x) \ ({ \ - \ __u64 ia64_intri_res; \ - __asm__ __volatile__ ("fetchadd4.acq %0=[%1],%2" \ - : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ - : "memory"); \ - \ + asm __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ ia64_intri_res; \ }) -#define ia64_fetchadd4_rel(p, inc) \ +#define ia64_xchg2(ptr,x) \ ({ \ __u64 ia64_intri_res; \ - __asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \ - : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ - : "memory"); \ - \ + asm __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ ia64_intri_res; \ }) - -#define ia64_fetchadd8_acq(p, inc) \ +#define ia64_xchg4(ptr,x) \ ({ \ - \ __u64 ia64_intri_res; \ - __asm__ __volatile__ ("fetchadd8.acq %0=[%1],%2" \ - : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ - : "memory"); \ - \ + asm __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ ia64_intri_res; \ }) -#define ia64_fetchadd8_rel(p, inc) \ +#define ia64_xchg8(ptr,x) \ ({ \ __u64 ia64_intri_res; \ - __asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \ - : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ - : "memory"); \ - \ + asm __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ ia64_intri_res; \ }) - -#define ia64_xchg1(ptr,x) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \ - : "r" (ptr), "r" (x) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg1_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_xchg2(ptr,x) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \ - : "r" (ptr), "r" (x) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg1_rel(ptr, new, old) \ 
+({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_xchg4(ptr,x) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \ - : "r" (ptr), "r" (x) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg2_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_xchg8(ptr,x) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \ - : "r" (ptr), "r" (x) : "memory"); \ - ia64_intri_res; \ -}) - -#define ia64_cmpxchg1_acq(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - __asm__ __volatile__ ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ -}) - -#define ia64_cmpxchg1_rel(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - __asm__ __volatile__ ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg2_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_cmpxchg2_acq(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - __asm__ __volatile__ ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg4_acq(ptr, new, old) \ +({ \ 
+ __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_cmpxchg2_rel(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - \ - __asm__ __volatile__ ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg4_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_cmpxchg4_acq(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg8_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_cmpxchg4_rel(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - __asm__ __volatile__ ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ +#define ia64_cmpxchg8_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ }) -#define ia64_cmpxchg8_acq(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - __asm__ __volatile__ ("cmpxchg8.acq 
%0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ -}) - -#define ia64_cmpxchg8_rel(ptr, new, old) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \ - \ - __asm__ __volatile__ ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ - "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ - ia64_intri_res; \ -}) +#define ia64_mf() asm volatile ("mf" ::: "memory") +#define ia64_mfa() asm volatile ("mf.a" ::: "memory") -#define ia64_mf() __asm__ __volatile__ ("mf" ::: "memory") -#define ia64_mfa() __asm__ __volatile__ ("mf.a" ::: "memory") +#define ia64_invala() asm volatile ("invala" ::: "memory") - -#define ia64_invala() __asm__ __volatile__ ("invala" ::: "memory") - -#define ia64_thash(addr) \ -({ \ - __u64 ia64_intri_res; \ - __asm__ __volatile__ ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ - ia64_intri_res; \ +#define ia64_thash(addr) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ + ia64_intri_res; \ }) +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory") -#define ia64_srlz_i() __asm__ __volatile__ (";; srlz.i ;;" ::: "memory") - +#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory"); -#define ia64_srlz_d() __asm__ __volatile__ (";; srlz.d" ::: "memory"); +#define ia64_nop(x) asm volatile ("nop %0"::"i"(x)); +#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory") -#define ia64_nop(x) __asm__ __volatile__ ("nop %0"::"i"(x)); +#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory") -#define ia64_itci(addr) __asm__ __volatile__ ("itc.i %0;;" :: "r"(addr) : "memory") +#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") -#define ia64_itcd(addr) __asm__ __volatile__ ("itc.d %0;;" :: "r"(addr) : "memory") +#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") - 
-#define ia64_itri(trnum, addr) __asm__ __volatile__ ("itr.i itr[%0]=%1" \ - :: "r"(trnum), "r"(addr) : "memory") - - -#define ia64_itrd(trnum, addr) __asm__ __volatile__ ("itr.d dtr[%0]=%1" \ - :: "r"(trnum), "r"(addr) : "memory") - - -#define ia64_tpa(addr) \ -({ \ - __u64 ia64_pa; \ - __asm__ __volatile__ ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : \ - "memory"); \ +#define ia64_tpa(addr) \ +({ \ + __u64 ia64_pa; \ + asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \ ia64_pa; \ }) -#define __ia64_set_dbr(index, val) \ - __asm__ __volatile__ ("mov dbr[%0]=%1" :: "r"(index), "r"(val) \ - : "memory") +#define __ia64_set_dbr(index, val) \ + asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_ibr(index, val) \ - __asm__ __volatile__ ("mov ibr[%0]=%1" :: "r"(index), "r"(val) \ - : "memory") +#define ia64_set_ibr(index, val) \ + asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_pkr(index, val) \ - __asm__ __volatile__ ("mov pkr[%0]=%1" :: "r"(index), "r"(val) \ - : "memory") +#define ia64_set_pkr(index, val) \ + asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_pmc(index, val) \ - __asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(index), "r"(val) \ - : "memory"); +#define ia64_set_pmc(index, val) \ + asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_pmd(index, val) \ - __asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(index), "r"(val) \ - : "memory"); +#define ia64_set_pmd(index, val) \ + asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_rr(index, val) \ - __asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(index), "r"(val) \ - : "memory"); +#define ia64_set_rr(index, val) \ + asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); - -#define ia64_get_cpuid(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); 
\ - \ - ia64_intri_res; \ +#define ia64_get_cpuid(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ + ia64_intri_res; \ }) -#define __ia64_get_dbr(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ - \ - ia64_intri_res; \ +#define __ia64_get_dbr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ }) -#define ia64_get_ibr(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ - \ - ia64_intri_res; \ +#define ia64_get_ibr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ }) -#define ia64_get_pkr(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ - \ - ia64_intri_res; \ +#define ia64_get_pkr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ }) -#define ia64_get_pmc(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ - \ - ia64_intri_res; \ +#define ia64_get_pmc(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ }) -#define ia64_get_pmd(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ - \ - ia64_intri_res; \ +#define ia64_get_pmd(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ }) -#define ia64_get_rr(index) \ -({ \ - __u64 ia64_intri_res; \ - \ - __asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" \ - (index)); 
\ - \ - ia64_intri_res; \ +#define ia64_get_rr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ + ia64_intri_res; \ }) - -#define ia64_fc(addr) \ - __asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory"); - - -#define ia64_sync_i() \ - __asm__ __volatile__ (";; sync.i" ::: "memory") - -#define ia64_ssm(mask) __asm__ __volatile__ ("ssm %0":: "i"((mask)) : "memory"); -#define ia64_rsm(mask) __asm__ __volatile__ ("rsm %0":: "i"((mask)) : "memory"); -#define ia64_sum(mask) __asm__ __volatile__ ("sum %0":: "i"((mask)) : "memory"); -#define ia64_rum(mask) __asm__ __volatile__ ("rum %0":: "i"((mask)) : "memory"); - -#define ia64_ptce(addr) \ - __asm__ __volatile__ ("ptc.e %0" :: "r"(addr)) +#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") -#define ia64_ptcga(addr, size) \ - __asm__ __volatile__ ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory") +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") +#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") +#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") +#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") +#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") -#define ia64_ptcl(addr, size) \ - __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory") +#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) +#define ia64_ptcga(addr, size) \ + asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory") -#define ia64_ptri(addr, size) \ - __asm__ __volatile__ ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory") +#define ia64_ptcl(addr, size) \ + asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory") +#define ia64_ptri(addr, size) \ + asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory") -#define ia64_ptrd(addr, size) \ - __asm__ __volatile__ ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") +#define ia64_ptrd(addr, 
size) \ + asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */ @@ -548,38 +500,38 @@ extern void ia64_bad_param_for_getreg(void); #define ia64_lfhint_nt2 2 #define ia64_lfhint_nta 3 -#define ia64_lfetch(lfhint, y) \ -({ \ - switch (lfhint) { \ - case ia64_lfhint_none: \ - __asm__ __volatile__ ("lfetch [%0]" : : "r"(y)); \ - break; \ - case ia64_lfhint_nt1: \ - __asm__ __volatile__ ("lfetch.nt1 [%0]" : : "r"(y)); \ - break; \ - case ia64_lfhint_nt2: \ - __asm__ __volatile__ ("lfetch.nt2 [%0]" : : "r"(y)); \ - break; \ - case ia64_lfhint_nta: \ - __asm__ __volatile__ ("lfetch.nta [%0]" : : "r"(y)); \ - break; \ - } \ +#define ia64_lfetch(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.nta [%0]" : : "r"(y)); \ + break; \ + } \ }) #define ia64_lfetch_excl(lfhint, y) \ ({ \ switch (lfhint) { \ case ia64_lfhint_none: \ - __asm__ __volatile__ ("lfetch.excl [%0]" :: "r"(y)); \ + asm volatile ("lfetch.excl [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt1: \ - __asm__ __volatile__ ("lfetch.excl.nt1 [%0]" :: "r"(y));\ + asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt2: \ - __asm__ __volatile__ ("lfetch.excl.nt2 [%0]" :: "r"(y));\ + asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nta: \ - __asm__ __volatile__ ("lfetch.excl.nta [%0]" :: "r"(y));\ + asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \ break; \ } \ }) @@ -588,16 +540,16 @@ extern void ia64_bad_param_for_getreg(void); ({ \ switch (lfhint) { \ case ia64_lfhint_none: \ - __asm__ __volatile__ ("lfetch.fault [%0]" : : "r"(y)); \ + asm volatile ("lfetch.fault [%0]" : : "r"(y)); \ break; \ case 
ia64_lfhint_nt1: \ - __asm__ __volatile__ ("lfetch.fault.nt1 [%0]" : : "r"(y)); \ + asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nt2: \ - __asm__ __volatile__ ("lfetch.fault.nt2 [%0]" : : "r"(y)); \ + asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \ break; \ case ia64_lfhint_nta: \ - __asm__ __volatile__ ("lfetch.fault.nta [%0]" : : "r"(y)); \ + asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \ break; \ } \ }) @@ -606,28 +558,27 @@ extern void ia64_bad_param_for_getreg(void); ({ \ switch (lfhint) { \ case ia64_lfhint_none: \ - __asm__ __volatile__ ("lfetch.fault.excl [%0]" :: "r"(y)); \ + asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt1: \ - __asm__ __volatile__ ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \ + asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nt2: \ - __asm__ __volatile__ ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \ + asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \ break; \ case ia64_lfhint_nta: \ - __asm__ __volatile__ ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \ + asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \ break; \ } \ }) -#define ia64_intrin_local_irq_restore(x) \ -do { \ - __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \ - "(p6) ssm psr.i;" \ - "(p7) rsm psr.i;;" \ - "(p6) srlz.d" \ - : : "r"((x)) \ - : "p6", "p7", "memory"); \ +#define ia64_intrin_local_irq_restore(x) \ +do { \ + asm volatile (" cmp.ne p6,p7=%0,r0;;" \ + "(p6) ssm psr.i;" \ + "(p7) rsm psr.i;;" \ + "(p6) srlz.d" \ + :: "r"((x)) : "p6", "p7", "memory"); \ } while (0) #endif /* _ASM_IA64_GCC_INTRIN_H */ diff --git a/include/asm-ia64/ia64regs.h b/include/asm-ia64/ia64regs.h index 3ddfa3128667..1757f1c11ad4 100644 --- a/include/asm-ia64/ia64regs.h +++ b/include/asm-ia64/ia64regs.h @@ -8,8 +8,11 @@ #define _ASM_IA64_IA64REGS_H /* -** Register Names for getreg() and setreg() -*/ + * Register Names for getreg() and setreg(). 
+ * + * The "magic" numbers happen to match the values used by the Intel compiler's + * getreg()/setreg() intrinsics. + */ /* Special Registers */ @@ -17,15 +20,15 @@ #define _IA64_REG_PSR 1019 #define _IA64_REG_PSR_L 1019 - // General Integer Registers +/* General Integer Registers */ -#define _IA64_REG_GP 1025 /* R1 */ -#define _IA64_REG_R8 1032 /* R8 */ -#define _IA64_REG_R9 1033 /* R9 */ -#define _IA64_REG_SP 1036 /* R12 */ -#define _IA64_REG_TP 1037 /* R13 */ +#define _IA64_REG_GP 1025 /* R1 */ +#define _IA64_REG_R8 1032 /* R8 */ +#define _IA64_REG_R9 1033 /* R9 */ +#define _IA64_REG_SP 1036 /* R12 */ +#define _IA64_REG_TP 1037 /* R13 */ - /* Application Registers */ +/* Application Registers */ #define _IA64_REG_AR_KR0 3072 #define _IA64_REG_AR_KR1 3073 @@ -55,7 +58,7 @@ #define _IA64_REG_AR_LC 3137 #define _IA64_REG_AR_EC 3138 - /* Control Registers */ +/* Control Registers */ #define _IA64_REG_CR_DCR 4096 #define _IA64_REG_CR_ITM 4097 @@ -84,7 +87,7 @@ #define _IA64_REG_CR_LRR0 4176 #define _IA64_REG_CR_LRR1 4177 - /* Indirect Registers for getindreg() and setindreg() */ +/* Indirect Registers for getindreg() and setindreg() */ #define _IA64_REG_INDR_CPUID 9000 /* getindreg only */ #define _IA64_REG_INDR_DBR 9001 @@ -94,5 +97,4 @@ #define _IA64_REG_INDR_PMD 9005 #define _IA64_REG_INDR_RR 9006 - #endif /* _ASM_IA64_IA64REGS_H */ diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index 16d21d49a1bc..743049ca0851 100644 --- a/include/asm-ia64/intrinsics.h +++ b/include/asm-ia64/intrinsics.h @@ -14,9 +14,9 @@ /* include compiler specific intrinsics */ #include #ifdef __INTEL_COMPILER -#include +# include #else -#include +# include #endif /* @@ -117,7 +117,7 @@ extern void ia64_xchg_called_with_bad_pointer (void); * This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). 
*/ -extern long ia64_cmpxchg_called_with_bad_pointer(void); +extern long ia64_cmpxchg_called_with_bad_pointer (void); #define ia64_cmpxchg(sem,ptr,old,new,size) \ ({ \ diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index e02f403a2339..297efb06c347 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h @@ -52,10 +52,10 @@ extern unsigned int num_io_spaces; # ifdef __KERNEL__ +#include #include #include #include -#include /* * Change virtual addresses to physical addresses and vv. diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 8372da8cad5d..56f5c49a4e95 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -9,8 +9,8 @@ #include -#include #include +#include /* * PAGE_SHIFT determines the actual kernel page size. diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index c225163a49b2..c6b4af2b3643 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -15,10 +15,10 @@ #include -#include +#include #include +#include #include -#include #define IA64_NUM_DBG_REGS 8 /* @@ -357,13 +357,12 @@ extern unsigned long get_wchan (struct task_struct *p); /* Return stack pointer of blocked task TSK. */ #define KSTK_ESP(tsk) ((tsk)->thread.ksp) -extern void ia64_getreg_unknown_kr(void); -extern void ia64_setreg_unknown_kr(void); - +extern void ia64_getreg_unknown_kr (void); +extern void ia64_setreg_unknown_kr (void); #define ia64_get_kr(regnum) \ ({ \ - unsigned long r=0; \ + unsigned long r = 0; \ \ switch (regnum) { \ case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \ @@ -646,18 +645,18 @@ ia64_get_dbr (__u64 regnum) /* XXX remove the handcoded version once we have a sufficiently clever compiler... 
*/ #ifdef SMART_COMPILER -# define ia64_rotr(w,n) \ - ({ \ - __u64 _w = (w), _n = (n); \ - \ - (_w >> _n) | (_w << (64 - _n)); \ +# define ia64_rotr(w,n) \ + ({ \ + __u64 __ia64_rotr_w = (w), _n = (n); \ + \ + (__ia64_rotr_w >> _n) | (__ia64_rotr_w << (64 - _n)); \ }) #else -# define ia64_rotr(w,n) \ - ({ \ - __u64 result; \ - result = ia64_shrp((w), (w), (n)); \ - result; \ +# define ia64_rotr(w,n) \ + ({ \ + __u64 __ia64_rotr_w; \ + __ia64_rotr_w = ia64_shrp((w), (w), (n)); \ + __ia64_rotr_w; \ }) #endif diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h index 4921b76fdfb9..6ece5061dc19 100644 --- a/include/asm-ia64/rwsem.h +++ b/include/asm-ia64/rwsem.h @@ -22,6 +22,7 @@ #include #include + #include /* @@ -82,9 +83,7 @@ init_rwsem (struct rw_semaphore *sem) static inline void __down_read (struct rw_semaphore *sem) { - int result; - - result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1); + int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1); if (result < 0) rwsem_down_read_failed(sem); @@ -113,9 +112,7 @@ __down_write (struct rw_semaphore *sem) static inline void __up_read (struct rw_semaphore *sem) { - int result; - - result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1); + int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1); if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0) rwsem_wake(sem); diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h index 16de6f10c1e1..eca7d714a8fb 100644 --- a/include/asm-ia64/siginfo.h +++ b/include/asm-ia64/siginfo.h @@ -79,7 +79,6 @@ typedef struct siginfo { * si_code is non-zero and __ISR_VALID is set in si_flags. 
*/ #define si_isr _sifields._sigfault._isr -#define si_pfm_ovfl _sifields._sigprof._pfm_ovfl_counters /* * Flag values for si_flags: diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 5da816099349..09e923da6893 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -120,7 +120,7 @@ hard_smp_processor_id (void) unsigned long bits; } lid; - lid.bits = ia64_getreg(_IA64_REG_CR_LID); + lid.bits = ia64_getreg(_IA64_REG_CR_LID); return lid.f.id << 8 | lid.f.eid; } diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index ca3a25477949..3a5f08f4c6f2 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h @@ -9,13 +9,13 @@ * This file is used for SMP configurations only. */ -#include #include +#include -#include -#include #include +#include #include +#include typedef struct { volatile unsigned int lock; diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 5c59cb6b8d19..c0a638402858 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -113,19 +113,17 @@ extern struct ia64_boot_param { /* clearing psr.i is implicitly serialized (visible by next insn) */ /* setting psr.i requires data serialization */ -#define __local_irq_save(x) \ -do { \ - unsigned long psr; \ - psr = ia64_getreg(_IA64_REG_PSR); \ - ia64_stop(); \ - ia64_rsm(IA64_PSR_I); \ - (x) = psr; \ +#define __local_irq_save(x) \ +do { \ + (x) = ia64_getreg(_IA64_REG_PSR); \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ } while (0) -#define __local_irq_disable() \ -do { \ - ia64_stop(); \ - ia64_rsm(IA64_PSR_I); \ +#define __local_irq_disable() \ +do { \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ } while (0) #define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) @@ -165,13 +163,13 @@ do { \ #endif /* !CONFIG_IA64_DEBUG_IRQ */ #define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) -#define local_save_flags(flags) ({ (flags) = ia64_getreg(_IA64_REG_PSR); }) +#define 
local_save_flags(flags) ((flags) = ia64_getreg(_IA64_REG_PSR)) #define irqs_disabled() \ ({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (flags & IA64_PSR_I) == 0; \ + unsigned long __ia64_id_flags; \ + local_save_flags(__ia64_id_flags); \ + (__ia64_id_flags & IA64_PSR_I) == 0; \ }) #ifdef __KERNEL__ diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h index df9085722e6b..414aae060440 100644 --- a/include/asm-ia64/timex.h +++ b/include/asm-ia64/timex.h @@ -10,8 +10,8 @@ * Also removed cacheflush_time as it's entirely unused. */ -#include #include +#include typedef unsigned long cycles_t; diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index 62d0d466f547..049c69845b23 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h @@ -10,9 +10,9 @@ #include +#include #include #include -#include /* * Now for some TLB flushing routines. This is the kind of stuff that diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index f28ad25550a4..f65623c70fb1 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -342,8 +342,10 @@ extern pid_t clone (unsigned long flags, void *sp); /* * "Conditional" syscalls * - * Note, this macro can only be used in the - * file which defines sys_ni_syscall, i.e., in kernel/sys.c. + * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in + * kernel/sys.c. This version causes warnings because the declaration isn't a + * proper prototype, but we can't use __typeof__ either, because not all cond_syscall() + * declarations have prototypes at the moment. */ #define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall"))); -- cgit v1.2.3 From 54050a4e1b5d6e97513237052c796ccb97fd90d9 Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Tue, 19 Aug 2003 09:23:34 -0700 Subject: [power] Update device handling. - From conversations with Ben Herrenschmidt. 
Most devices should be able to handle powering down with interrupts enabled, which I already assume. But since suspending will stop I/O transactions before the call to power it off (making the device unusable anyway), there is no need to separate the calls - we may as well make it simpler for driver authors and require that driver authors do everything at the same time. There will always be devices that need to either power down or power up the device with interrupts disabled. They will get called with interrupts enabled, but may return -EAGAIN to be called again with interrupts disabled to do what they need to do. System devices are now always called only with interrupts disabled. Come on - they're system devices. Of course we need interrupts disabled. --- drivers/base/power/main.c | 1 - drivers/base/power/power.h | 4 -- drivers/base/power/resume.c | 78 ++++------------------- drivers/base/power/runtime.c | 12 +--- drivers/base/power/suspend.c | 148 ++++++++----------------------------------- drivers/base/sys.c | 111 ++------------------------------ include/linux/device.h | 2 - include/linux/sysdev.h | 4 -- kernel/power/main.c | 16 ++++- 9 files changed, 60 insertions(+), 316 deletions(-) (limited to 'include') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 5aabe5179fd1..611a69accdd0 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -25,7 +25,6 @@ #include "power.h" LIST_HEAD(dpm_active); -LIST_HEAD(dpm_suspended); LIST_HEAD(dpm_off); LIST_HEAD(dpm_off_irq); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 8130b04ffe5f..fde72b37f938 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -31,7 +31,6 @@ extern struct semaphore dpm_sem; * The PM lists. 
*/ extern struct list_head dpm_active; -extern struct list_head dpm_suspended; extern struct list_head dpm_off; extern struct list_head dpm_off_irq; @@ -61,15 +60,12 @@ extern void dpm_sysfs_remove(struct device *); */ extern int dpm_resume(void); extern void dpm_power_up(void); -extern void dpm_power_up_irq(void); -extern void power_up_device(struct device *); extern int resume_device(struct device *); /* * suspend.c */ extern int suspend_device(struct device *, u32); -extern int power_down_device(struct device *, u32); /* diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c index 544104c6bbd1..a34f66a1b42c 100644 --- a/drivers/base/power/resume.c +++ b/drivers/base/power/resume.c @@ -12,7 +12,6 @@ #include "power.h" extern int sysdev_resume(void); -extern int sysdev_restore(void); /** @@ -30,37 +29,22 @@ int resume_device(struct device * dev) return 0; } + /** - * dpm_resume - Restore all device state. + * device_pm_resume - Restore state of each device in system. * - * Walk the dpm_suspended list and restore each device. As they are - * resumed, move the devices to the dpm_active list. + * Restore normal device state and release the dpm_sem. */ -int dpm_resume(void) +void device_pm_resume(void) { - while(!list_empty(&dpm_suspended)) { - struct list_head * entry = dpm_suspended.next; + while(!list_empty(&dpm_off)) { + struct list_head * entry = dpm_off.next; struct device * dev = to_device(entry); list_del_init(entry); resume_device(dev); list_add_tail(entry,&dpm_active); } - return 0; -} - - -/** - * device_pm_resume - Restore state of each device in system. - * - * Restore system device state, then common device state. Finally, - * release dpm_sem, as we're done with device PM. - */ - -void device_pm_resume(void) -{ - sysdev_restore(); - dpm_resume(); up(&dpm_sem); } @@ -89,65 +73,27 @@ void power_up_device(struct device * dev) * Interrupts must be disabled when calling this. 
*/ -void dpm_power_up_irq(void) +void dpm_power_up(void) { while(!list_empty(&dpm_off_irq)) { struct list_head * entry = dpm_off_irq.next; list_del_init(entry); power_up_device(to_device(entry)); - list_add_tail(entry,&dpm_suspended); - } -} - - -/** - * dpm_power_up - Power on most devices. - * - * Walk the dpm_off list and power each device up. This is used - * to power on devices that were able to power down with interrupts - * enabled. - */ - -void dpm_power_up(void) -{ - while (!list_empty(&dpm_off)) { - struct list_head * entry = dpm_off.next; - list_del_init(entry); - power_up_device(to_device(entry)); - list_add_tail(entry,&dpm_suspended); + list_add_tail(entry,&dpm_active); } } /** - * device_pm_power_up - Turn on all devices. + * device_pm_power_up - Turn on all devices that need special attention. * - * First, power on system devices, which must happen with interrupts - * disbled. Then, power on devices that also require interrupts disabled. - * Turn interrupts back on, and finally power up the rest of the normal - * devices. + * Power on system devices then devices that required we shut them down + * with interrupts disabled. + * Called with interrupts disabled. */ void device_pm_power_up(void) { sysdev_resume(); - dpm_power_up_irq(); - local_irq_enable(); dpm_power_up(); } - -/** - * device_resume - resume all the devices in the system - * @level: stage of resume process we're at - * - * This function is deprecated, and should be replaced with appropriate - * calls to device_pm_power_up() and device_pm_resume() above. - */ - -void device_resume(u32 level) -{ - - printk("%s is deprecated. 
Called from:\n",__FUNCTION__); - dump_stack(); -} - diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 4a4ac9f7764d..05ef979a3791 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -14,8 +14,6 @@ static void runtime_resume(struct device * dev) { if (!dev->power.power_state) return; - - power_up_device(dev); resume_device(dev); } @@ -55,19 +53,11 @@ int dpm_runtime_suspend(struct device * dev, u32 state) if (dev->power.power_state) dpm_runtime_resume(dev); - error = suspend_device(dev,state); - if (!error) { - error = power_down_device(dev,state); - if (error) - goto ErrResume; + if (!(error = suspend_device(dev,state))) dev->power.power_state = state; - } Done: up(&dpm_sem); return error; - ErrResume: - resume_device(dev); - goto Done; } diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c index 0747e409d0ec..8e5842521d4d 100644 --- a/drivers/base/power/suspend.c +++ b/drivers/base/power/suspend.c @@ -11,7 +11,6 @@ #include #include "power.h" -extern int sysdev_save(u32 state); extern int sysdev_suspend(u32 state); /* @@ -46,7 +45,10 @@ int suspend_device(struct device * dev, u32 state) if (!error) { list_del(&dev->power.entry); - list_add(&dev->power.entry,&dpm_suspended); + list_add(&dev->power.entry,&dpm_off); + } else if (error == -EAGAIN) { + list_del(&dev->power.entry); + list_add(&dev->power.entry,&dpm_off_irq); } return error; } @@ -57,10 +59,13 @@ int suspend_device(struct device * dev, u32 state) * @state: Power state to put each device in. * * Walk the dpm_active list, call ->suspend() for each device, and move - * it to dpm_suspended. If we hit a failure with any of the devices, call - * dpm_resume() above to bring the suspended devices back to life. + * it to dpm_off. + * Check the return value for each. If it returns 0, then we move the + * the device to the dpm_off list. If it returns -EAGAIN, we move it to + * the dpm_off_irq list. 
If we get a different error, try and back out. * - * Have system devices save state last. + * If we hit a failure with any of the devices, call device_pm_resume() + * above to bring the suspended devices back to life. * * Note this function leaves dpm_sem held to * a) block other devices from registering. @@ -83,153 +88,56 @@ int device_pm_suspend(u32 state) if ((error = suspend_device(dev,state))) goto Error; } - - if ((error = sysdev_save(state))) - goto Error; Done: return error; Error: - dpm_resume(); - up(&dpm_sem); + device_pm_resume(); goto Done; } /** - * power_down_device - Put one device in low power state. - * @dev: Device. - * @state: Power state to enter. - */ - -int power_down_device(struct device * dev, u32 state) -{ - struct device_driver * drv = dev->driver; - int error = 0; - - if (drv && drv->suspend) - error = drv->suspend(dev,state,SUSPEND_POWER_DOWN); - if (!error) { - list_del(&dev->power.entry); - list_add(&dev->power.entry,&dpm_off); - } - return error; -} - - -/** - * dpm_power_down - Put all devices in low power state. - * @state: Power state to enter. - * - * Walk the dpm_suspended list (with interrupts enabled) and try - * to power down each each. If any fail with -EAGAIN, they require - * the call to be done with interrupts disabled. So, we move them to - * the dpm_off_irq list. - * - * If the call succeeds, we move each device to the dpm_off list. - */ - -static int dpm_power_down(u32 state) -{ - while(!list_empty(&dpm_suspended)) { - struct list_head * entry = dpm_suspended.prev; - int error; - error = power_down_device(to_device(entry),state); - if (error) { - if (error == -EAGAIN) { - list_del(entry); - list_add(entry,&dpm_off_irq); - continue; - } - return error; - } - } - return 0; -} - - -/** - * dpm_power_down_irq - Power down devices without interrupts. + * dpm_power_down - Power down devices without interrupts. * @state: State to enter. 
* - * Walk the dpm_off_irq list (built by dpm_power_down) and power + * Walk the dpm_off_irq list (built by device_pm_suspend) and power * down each device that requires the call to be made with interrupts * disabled. */ -static int dpm_power_down_irq(u32 state) +static int dpm_power_down(u32 state) { - struct device * dev; int error = 0; - list_for_each_entry_reverse(dev,&dpm_off_irq,power.entry) { - if ((error = power_down_device(dev,state))) - break; - } return error; } /** - * device_pm_power_down - Put all devices in low power state. + * device_pm_power_down - Shut down special devices. * @state: Power state to enter. * - * Walk the dpm_suspended list, calling ->power_down() for each device. - * Check the return value for each. If it returns 0, then we move the - * the device to the dpm_off list. If it returns -EAGAIN, we move it to - * the dpm_off_irq list. If we get a different error, try and back out. - * - * dpm_irq_off is for devices that require interrupts to be disabled to - * either to power down the device or power it back on. - * - * When we're done, we disable interrrupts (!!) and walk the dpm_off_irq - * list to shut down the devices that need interrupts disabled. - * - * This function leaves interrupts disabled on exit, since powering down - * devices should be the very last thing before the system is put into a - * low-power state. - * - * device_pm_power_on() should be called to re-enable interrupts and power - * the devices back on. + * Walk the dpm_off_irq list, calling ->power_down() for each device that + * couldn't power down the device with interrupts enabled. When we're + * done, power down system devices. 
*/ int device_pm_power_down(u32 state) { int error = 0; + struct device * dev; - if ((error = dpm_power_down(state))) - goto ErrorIRQOn; - local_irq_disable(); - if ((error = dpm_power_down_irq(state))) - goto ErrorIRQOff; - - sysdev_suspend(state); + list_for_each_entry_reverse(dev,&dpm_off_irq,power.entry) { + if ((error = suspend_device(dev,state))) + break; + } + if (error) + goto Error; + if ((error = sysdev_suspend(state))) + goto Error; Done: return error; - - ErrorIRQOff: - dpm_power_up_irq(); - local_irq_enable(); - ErrorIRQOn: + Error: dpm_power_up(); goto Done; } - - -/** - * device_suspend - suspend all devices on the device ree - * @state: state we're entering - * @level: Stage of suspend sequence we're in. - * - * - * This function is deprecated. Calls should be replaced with - * appropriate calls to device_pm_suspend() and device_pm_power_down(). - */ - -int device_suspend(u32 state, u32 level) -{ - - printk("%s Called from:\n",__FUNCTION__); - dump_stack(); - return -EFAULT; -} - diff --git a/drivers/base/sys.c b/drivers/base/sys.c index e306d2e26363..299b390e243b 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -282,62 +282,17 @@ void sysdev_shutdown(void) } -/** - * sysdev_save - Save system device state - * @state: Power state we're entering. - * - * This is called when the system is going to sleep, but before interrupts - * have been disabled. This allows system device drivers to allocate and - * save device state, including sleeping during the process.. 
- */ - -int sysdev_save(u32 state) -{ - struct sysdev_class * cls; - - pr_debug("Saving System Device State\n"); - - down_write(&system_subsys.rwsem); - - list_for_each_entry_reverse(cls,&system_subsys.kset.list, - kset.kobj.entry) { - struct sys_device * sysdev; - pr_debug("Saving state for type '%s':\n",cls->kset.kobj.name); - - list_for_each_entry(sysdev,&cls->kset.list,kobj.entry) { - struct sysdev_driver * drv; - - pr_debug(" %s\n",sysdev->kobj.name); - - list_for_each_entry(drv,&global_drivers,entry) { - if (drv->save) - drv->save(sysdev,state); - } - - list_for_each_entry(drv,&cls->drivers,entry) { - if (drv->save) - drv->save(sysdev,state); - } - - if (cls->save) - cls->save(sysdev,state); - } - } - up_write(&system_subsys.rwsem); - return 0; -} - - /** * sysdev_suspend - Suspend all system devices. * @state: Power state to enter. * * We perform an almost identical operation as sys_device_shutdown() - * above, though calling ->suspend() instead. + * above, though calling ->suspend() instead. Interrupts are disabled + * when this called. Devices are responsible for both saving state and + * quiescing or powering down the device. * - * Note: Interrupts are disabled when called, so we can't sleep when - * trying to get the subsystem's rwsem. If that happens, print a nasty - * warning and return an error. + * This is only called by the device PM core, so we let them handle + * all synchronization. 
*/ int sysdev_suspend(u32 state) @@ -346,11 +301,6 @@ int sysdev_suspend(u32 state) pr_debug("Suspending System Devices\n"); - if (!down_write_trylock(&system_subsys.rwsem)) { - printk("%s: Cannot acquire semaphore; Failing\n",__FUNCTION__); - return -EFAULT; - } - list_for_each_entry_reverse(cls,&system_subsys.kset.list, kset.kobj.entry) { struct sys_device * sysdev; @@ -378,8 +328,6 @@ int sysdev_suspend(u32 state) cls->suspend(sysdev,state); } } - up_write(&system_subsys.rwsem); - return 0; } @@ -390,7 +338,7 @@ int sysdev_suspend(u32 state) * Similar to sys_device_suspend(), but we iterate the list forwards * to guarantee that parent devices are resumed before their children. * - * Note: Interrupts are disabled when called. + * Note: Interrupts are disabled when called. */ int sysdev_resume(void) @@ -399,9 +347,6 @@ int sysdev_resume(void) pr_debug("Resuming System Devices\n"); - if(!down_write_trylock(&system_subsys.rwsem)) - return -EFAULT; - list_for_each_entry(cls,&system_subsys.kset.list,kset.kobj.entry) { struct sys_device * sysdev; @@ -429,50 +374,6 @@ int sysdev_resume(void) } } - up_write(&system_subsys.rwsem); - return 0; -} - - -/** - * sysdev_restore - Restore system device state - * - * This is called during a suspend/resume cycle last, after interrupts - * have been re-enabled. This is intended for auxillary drivers, etc, - * that may sleep when restoring state. 
- */ - -int sysdev_restore(void) -{ - struct sysdev_class * cls; - - down_write(&system_subsys.rwsem); - pr_debug("Restoring System Device State\n"); - - list_for_each_entry(cls,&system_subsys.kset.list,kset.kobj.entry) { - struct sys_device * sysdev; - - pr_debug("Restoring state for type '%s':\n",cls->kset.kobj.name); - list_for_each_entry(sysdev,&cls->kset.list,kobj.entry) { - struct sysdev_driver * drv; - pr_debug(" %s\n",sysdev->kobj.name); - - if (cls->restore) - cls->restore(sysdev); - - list_for_each_entry(drv,&cls->drivers,entry) { - if (drv->restore) - drv->restore(sysdev); - } - - list_for_each_entry(drv,&global_drivers,entry) { - if (drv->restore) - drv->restore(sysdev); - } - } - } - - up_write(&system_subsys.rwsem); return 0; } diff --git a/include/linux/device.h b/include/linux/device.h index 7b49400adf31..c99ad50c6784 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -372,8 +372,6 @@ extern struct bus_type platform_bus_type; extern struct device legacy_bus; /* drivers/base/power.c */ -extern int device_suspend(u32 state, u32 level); -extern void device_resume(u32 level); extern void device_shutdown(void); diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index 4bc3e22b5104..2a90db8d41de 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -31,10 +31,8 @@ struct sysdev_class { /* Default operations for these types of devices */ int (*shutdown)(struct sys_device *); - int (*save)(struct sys_device *, u32 state); int (*suspend)(struct sys_device *, u32 state); int (*resume)(struct sys_device *); - int (*restore)(struct sys_device *); struct kset kset; }; @@ -52,10 +50,8 @@ struct sysdev_driver { int (*add)(struct sys_device *); int (*remove)(struct sys_device *); int (*shutdown)(struct sys_device *); - int (*save)(struct sys_device *, u32 state); int (*suspend)(struct sys_device *, u32 state); int (*resume)(struct sys_device *); - int (*restore)(struct sys_device *); }; diff --git a/kernel/power/main.c 
b/kernel/power/main.c index 42bf7233a3a7..4d1476ba8156 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -65,9 +65,9 @@ static int pm_suspend_standby(void) if (!pm_ops || !pm_ops->enter) return -EPERM; + local_irq_save(flags); if ((error = device_pm_power_down(PM_SUSPEND_STANDBY))) goto Done; - local_irq_save(flags); error = pm_ops->enter(PM_SUSPEND_STANDBY); local_irq_restore(flags); device_pm_power_up(); @@ -91,9 +91,9 @@ static int pm_suspend_mem(void) if (!pm_ops || !pm_ops->enter) return -EPERM; + local_irq_save(flags); if ((error = device_pm_power_down(PM_SUSPEND_STANDBY))) goto Done; - local_irq_save(flags); error = pm_ops->enter(PM_SUSPEND_STANDBY); local_irq_restore(flags); device_pm_power_up(); @@ -114,9 +114,19 @@ static int pm_suspend_mem(void) static int power_down(u32 mode) { + unsigned long flags; + int error = 0; + + local_irq_save(flags); + device_pm_power_down(); switch(mode) { case PM_DISK_PLATFORM: - return pm_ops->enter(PM_SUSPEND_DISK); + error = pm_ops->enter(PM_SUSPEND_DISK); + if (error) { + device_pm_power_up(); + local_irq_restore(flags); + return error; + } case PM_DISK_SHUTDOWN: machine_power_off(); break; -- cgit v1.2.3 From 0d045e1fa51a9686c2a0aba434dd2b0039a9aa3d Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Tue, 19 Aug 2003 09:36:46 -0700 Subject: [power] Move suspend()/resume() methods. Instead of putting them in struct device_driver (which few, if any drivers use directly), put them in the controlling bus_type of the device (which are currently responsible for claiming the methods and forwarding the calls to the bus-specific driver anyway). This will save 8 bytes per driver instance, which isn't that much, but it's something. It also makes it more obvious to the reader what is going on. And, it makes for easier bus-level defaults in the case the device has no driver attached. The old calls remain until all instances have been fixed up. 
--- drivers/base/power/resume.c | 21 +++------------------ drivers/base/power/suspend.c | 22 ++-------------------- include/linux/device.h | 3 ++- kernel/power/main.c | 2 +- 4 files changed, 8 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c index a34f66a1b42c..a022a0cdedd5 100644 --- a/drivers/base/power/resume.c +++ b/drivers/base/power/resume.c @@ -22,10 +22,8 @@ extern int sysdev_resume(void); int resume_device(struct device * dev) { - struct device_driver * drv = dev->driver; - - if (drv && drv->resume) - return drv->resume(dev,RESUME_RESTORE_STATE); + if (dev->bus && dev->bus->resume) + return dev->bus->resume(dev); return 0; } @@ -49,19 +47,6 @@ void device_pm_resume(void) } -/** - * power_up_device - Power one device on. - * @dev: Device. - */ - -void power_up_device(struct device * dev) -{ - struct device_driver * drv = dev->driver; - if (drv && drv->resume) - drv->resume(dev,RESUME_POWER_ON); -} - - /** * device_power_up_irq - Power on some devices. * @@ -78,7 +63,7 @@ void dpm_power_up(void) while(!list_empty(&dpm_off_irq)) { struct list_head * entry = dpm_off_irq.next; list_del_init(entry); - power_up_device(to_device(entry)); + resume_device(to_device(entry)); list_add_tail(entry,&dpm_active); } } diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c index 8e5842521d4d..5e154218801e 100644 --- a/drivers/base/power/suspend.c +++ b/drivers/base/power/suspend.c @@ -37,11 +37,10 @@ extern int sysdev_suspend(u32 state); int suspend_device(struct device * dev, u32 state) { - struct device_driver * drv = dev->driver; int error = 0; - if (drv && drv->suspend) - error = drv->suspend(dev,state,SUSPEND_SAVE_STATE); + if (dev->bus && dev->bus->suspend) + error = dev->bus->suspend(dev,state); if (!error) { list_del(&dev->power.entry); @@ -96,23 +95,6 @@ int device_pm_suspend(u32 state) } -/** - * dpm_power_down - Power down devices without interrupts. 
- * @state: State to enter. - * - * Walk the dpm_off_irq list (built by device_pm_suspend) and power - * down each device that requires the call to be made with interrupts - * disabled. - */ - -static int dpm_power_down(u32 state) -{ - int error = 0; - - return error; -} - - /** * device_pm_power_down - Shut down special devices. * @state: Power state to enter. diff --git a/include/linux/device.h b/include/linux/device.h index c99ad50c6784..8d6266f2e3c3 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -58,7 +58,8 @@ struct bus_type { struct device * (*add) (struct device * parent, char * bus_id); int (*hotplug) (struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); - + int (*suspend)(struct device * dev, u32 state); + int (*resume)(struct device * dev); }; extern int bus_register(struct bus_type * bus); diff --git a/kernel/power/main.c b/kernel/power/main.c index 4d1476ba8156..422d5131fbf6 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -118,7 +118,7 @@ static int power_down(u32 mode) int error = 0; local_irq_save(flags); - device_pm_power_down(); + device_pm_power_down(PM_SUSPEND_DISK); switch(mode) { case PM_DISK_PLATFORM: error = pm_ops->enter(PM_SUSPEND_DISK); -- cgit v1.2.3 From 505f6a8832b7ac71135db6adcbefab270e5a047a Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Tue, 19 Aug 2003 19:01:05 -0700 Subject: [power] Update IDE to set suspend/resume methods in bus_type. Instead of having each driver set them in their own drivers. 
--- drivers/ide/ide-cd.c | 4 ---- drivers/ide/ide-disk.c | 4 ---- drivers/ide/ide.c | 16 ++++------------ include/linux/ide.h | 2 -- 4 files changed, 4 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index cf0b5307a8de..4e7a197f6611 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -3330,10 +3330,6 @@ static ide_driver_t ide_cdrom_driver = { .drives = LIST_HEAD_INIT(ide_cdrom_driver.drives), .start_power_step = ide_cdrom_start_power_step, .complete_power_step = ide_cdrom_complete_power_step, - .gen_driver = { - .suspend = generic_ide_suspend, - .resume = generic_ide_resume, - } }; static int idecd_open(struct inode * inode, struct file * file) diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 30865145af3e..1217e840ac02 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -1732,10 +1732,6 @@ static ide_driver_t idedisk_driver = { .drives = LIST_HEAD_INIT(idedisk_driver.drives), .start_power_step = idedisk_start_power_step, .complete_power_step = idedisk_complete_power_step, - .gen_driver = { - .suspend = generic_ide_suspend, - .resume = generic_ide_resume, - } }; static int idedisk_open(struct inode *inode, struct file *filp) diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 5d19aa20abcd..dd0ad3ff074c 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -1534,16 +1534,13 @@ int ata_attach(ide_drive_t *drive) EXPORT_SYMBOL(ata_attach); -int generic_ide_suspend(struct device *dev, u32 state, u32 level) +static int generic_ide_suspend(struct device *dev, u32 state) { ide_drive_t *drive = dev->driver_data; struct request rq; struct request_pm_state rqpm; ide_task_t args; - if (level == dev->power_state || level != SUSPEND_SAVE_STATE) - return 0; - memset(&rq, 0, sizeof(rq)); memset(&rqpm, 0, sizeof(rqpm)); memset(&args, 0, sizeof(args)); @@ -1556,18 +1553,13 @@ int generic_ide_suspend(struct device *dev, u32 state, u32 level) return 
ide_do_drive_cmd(drive, &rq, ide_wait); } -EXPORT_SYMBOL(generic_ide_suspend); - -int generic_ide_resume(struct device *dev, u32 level) +static int generic_ide_resume(struct device *dev) { ide_drive_t *drive = dev->driver_data; struct request rq; struct request_pm_state rqpm; ide_task_t args; - if (level == dev->power_state || level != RESUME_RESTORE_STATE) - return 0; - memset(&rq, 0, sizeof(rq)); memset(&rqpm, 0, sizeof(rqpm)); memset(&args, 0, sizeof(args)); @@ -1580,8 +1572,6 @@ int generic_ide_resume(struct device *dev, u32 level) return ide_do_drive_cmd(drive, &rq, ide_head_wait); } -EXPORT_SYMBOL(generic_ide_resume); - int generic_ide_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg) { @@ -2594,6 +2584,8 @@ EXPORT_SYMBOL(ide_probe); struct bus_type ide_bus_type = { .name = "ide", + .suspend = generic_ide_suspend, + .resume = generic_ide_resume, }; /* diff --git a/include/linux/ide.h b/include/linux/ide.h index 82ca6da75b3f..a3ee36b438ca 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1241,8 +1241,6 @@ typedef struct ide_driver_s { #define DRIVER(drive) ((drive)->driver) extern int generic_ide_ioctl(struct block_device *, unsigned, unsigned long); -extern int generic_ide_suspend(struct device *dev, u32 state, u32 level); -extern int generic_ide_resume(struct device *dev, u32 level); /* * IDE modules. -- cgit v1.2.3 From 9234878aafa9a6e8f595fa259ea4fe267526714c Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Wed, 20 Aug 2003 21:47:37 -0700 Subject: [power] Fixup device suspend/resume function names. - Revert names of functions back to device_{suspend,resume} since at least APM are still using them. 
--- drivers/base/power/resume.c | 23 ++++++----------------- drivers/base/power/suspend.c | 38 +++++++++++--------------------------- include/linux/pm.h | 8 ++++---- kernel/power/main.c | 16 ++++++++-------- kernel/power/swsusp.c | 4 ++-- 5 files changed, 31 insertions(+), 58 deletions(-) (limited to 'include') diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c index 0f68bf95beac..19c6387fec40 100644 --- a/drivers/base/power/resume.c +++ b/drivers/base/power/resume.c @@ -29,12 +29,12 @@ int resume_device(struct device * dev) /** - * device_pm_resume - Restore state of each device in system. + * device_resume - Restore state of each device in system. * * Restore normal device state and release the dpm_sem. */ -void device_pm_resume(void) +void device_resume(void) { while(!list_empty(&dpm_off)) { struct list_head * entry = dpm_off.next; @@ -46,6 +46,8 @@ void device_pm_resume(void) up(&dpm_sem); } +EXPORT_SYMBOL(device_resume); + /** * device_power_up_irq - Power on some devices. @@ -77,25 +79,12 @@ void dpm_power_up(void) * Called with interrupts disabled. */ -void device_pm_power_up(void) +void device_power_up(void) { sysdev_resume(); dpm_power_up(); } -/** - * device_resume - resume all the devices in the system - * @level: stage of resume process we're at - * - * This function is deprecated, and should be replaced with appropriate - * calls to device_pm_power_up() and device_pm_resume() above. - */ - -void device_resume(u32 level) -{ +EXPORT_SYMBOL(device_power_up); - printk("%s is deprecated. Called from:\n",__FUNCTION__); - dump_stack(); -} -EXPORT_SYMBOL(device_resume); diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c index 78b8e91f1e1a..e9ca633fc129 100644 --- a/drivers/base/power/suspend.c +++ b/drivers/base/power/suspend.c @@ -54,7 +54,7 @@ int suspend_device(struct device * dev, u32 state) /** - * device_pm_suspend - Save state and stop all devices in system. 
+ * device_suspend - Save state and stop all devices in system. * @state: Power state to put each device in. * * Walk the dpm_active list, call ->suspend() for each device, and move @@ -63,7 +63,7 @@ int suspend_device(struct device * dev, u32 state) * the device to the dpm_off list. If it returns -EAGAIN, we move it to * the dpm_off_irq list. If we get a different error, try and back out. * - * If we hit a failure with any of the devices, call device_pm_resume() + * If we hit a failure with any of the devices, call device_resume() * above to bring the suspended devices back to life. * * Note this function leaves dpm_sem held to @@ -71,12 +71,12 @@ int suspend_device(struct device * dev, u32 state) * b) prevent other PM operations from happening after we've begun. * c) make sure we're exclusive when we disable interrupts. * - * device_pm_resume() will release dpm_sem after restoring state to + * device_resume() will release dpm_sem after restoring state to * all devices (as will this on error). You must call it once you've - * called device_pm_suspend(). + * called device_suspend(). */ -int device_pm_suspend(u32 state) +int device_suspend(u32 state) { int error = 0; @@ -90,13 +90,15 @@ int device_pm_suspend(u32 state) Done: return error; Error: - device_pm_resume(); + device_resume(); goto Done; } +EXPORT_SYMBOL(device_suspend); + /** - * device_pm_power_down - Shut down special devices. + * device_power_down - Shut down special devices. * @state: Power state to enter. * * Walk the dpm_off_irq list, calling ->power_down() for each device that @@ -104,7 +106,7 @@ int device_pm_suspend(u32 state) * done, power down system devices. 
*/ -int device_pm_power_down(u32 state) +int device_power_down(u32 state) { int error = 0; struct device * dev; @@ -124,23 +126,5 @@ int device_pm_power_down(u32 state) goto Done; } +EXPORT_SYMBOL(device_power_down); -/** - * device_suspend - suspend all devices on the device ree - * @state: state we're entering - * @level: Stage of suspend sequence we're in. - * - * - * This function is deprecated. Calls should be replaced with - * appropriate calls to device_pm_suspend() and device_pm_power_down(). - */ - -int device_suspend(u32 state, u32 level) -{ - - printk("%s Called from:\n",__FUNCTION__); - dump_stack(); - return -EFAULT; -} - -EXPORT_SYMBOL(device_suspend); diff --git a/include/linux/pm.h b/include/linux/pm.h index 70282a7943b1..3017bdef5f03 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -240,10 +240,10 @@ struct dev_pm_info { extern void device_pm_set_parent(struct device * dev, struct device * parent); -extern int device_pm_suspend(u32 state); -extern int device_pm_power_down(u32 state); -extern void device_pm_power_up(void); -extern void device_pm_resume(void); +extern int device_suspend(u32 state); +extern int device_power_down(u32 state); +extern void device_power_up(void); +extern void device_resume(void); #endif /* __KERNEL__ */ diff --git a/kernel/power/main.c b/kernel/power/main.c index 422d5131fbf6..1b92f13d9a77 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -66,11 +66,11 @@ static int pm_suspend_standby(void) return -EPERM; local_irq_save(flags); - if ((error = device_pm_power_down(PM_SUSPEND_STANDBY))) + if ((error = device_power_down(PM_SUSPEND_STANDBY))) goto Done; error = pm_ops->enter(PM_SUSPEND_STANDBY); local_irq_restore(flags); - device_pm_power_up(); + device_power_up(); Done: return error; } @@ -92,11 +92,11 @@ static int pm_suspend_mem(void) return -EPERM; local_irq_save(flags); - if ((error = device_pm_power_down(PM_SUSPEND_STANDBY))) + if ((error = device_power_down(PM_SUSPEND_STANDBY))) goto Done; error 
= pm_ops->enter(PM_SUSPEND_STANDBY); local_irq_restore(flags); - device_pm_power_up(); + device_power_up(); Done: return error; } @@ -118,12 +118,12 @@ static int power_down(u32 mode) int error = 0; local_irq_save(flags); - device_pm_power_down(PM_SUSPEND_DISK); + device_power_down(PM_SUSPEND_DISK); switch(mode) { case PM_DISK_PLATFORM: error = pm_ops->enter(PM_SUSPEND_DISK); if (error) { - device_pm_power_up(); + device_power_up(); local_irq_restore(flags); return error; } @@ -245,7 +245,7 @@ static int suspend_prepare(u32 state) if (state == PM_SUSPEND_DISK) free_some_memory(); - if ((error = device_pm_suspend(state))) + if ((error = device_suspend(state))) goto Finish; return 0; @@ -271,7 +271,7 @@ static int suspend_prepare(u32 state) static void suspend_finish(u32 state) { - device_pm_resume(); + device_resume(); if (pm_ops && pm_ops->finish) pm_ops->finish(state); thaw_processes(); diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 4ce75e5c23bb..2d6bac84469e 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -1,4 +1,4 @@ -/* +/* * linux/kernel/suspend.c * * This file is to realize architecture-independent @@ -492,7 +492,7 @@ static int suspend_save_image(void) { int error; - device_pm_resume(); + device_resume(); lock_swapdevices(); error = write_suspend_image(); -- cgit v1.2.3 From 07758b2e3110f6e37cbbd877805af9dc7475447b Mon Sep 17 00:00:00 2001 From: Patrick Mochel Date: Wed, 20 Aug 2003 22:02:33 -0700 Subject: [acpi] Make a dummy mp_config_ioapic_for_sci() function.
--- include/asm-i386/mpspec.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h index b596438496a1..a4ee37cade68 100644 --- a/include/asm-i386/mpspec.h +++ b/include/asm-i386/mpspec.h @@ -37,8 +37,14 @@ extern void mp_register_lapic_address (u64 address); extern void mp_register_ioapic (u8 id, u32 address, u32 irq_base); extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 global_irq); extern void mp_config_acpi_legacy_irqs (void); -extern void mp_config_ioapic_for_sci(int irq); extern void mp_parse_prt (void); + +#ifdef CONFIG_X86_IO_APIC +extern void mp_config_ioapic_for_sci(int irq); +#else +static inline void mp_config_ioapic_for_sci(int irq) +{ } +#endif #endif /*CONFIG_ACPI_BOOT*/ #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) -- cgit v1.2.3 From 2b50fcaae095b81ee226a96ae708253532db8f27 Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Thu, 21 Aug 2003 01:09:09 -0700 Subject: [NET]: Make sure interval member of struct tc_estimator is signed. --- include/linux/pkt_sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index fec8ad62b567..d97edad0effc 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -45,7 +45,7 @@ struct tc_stats struct tc_estimator { - char interval; + signed char interval; unsigned char ewma_log; }; -- cgit v1.2.3 From 050716fa3c07571acfec1b0720002fbe79e9339d Mon Sep 17 00:00:00 2001 From: David Mosberger Date: Thu, 21 Aug 2003 01:46:04 -0700 Subject: ia64: perfmon update. Here is a ChangeLog for the patch: - The perfmon core will invoke the sampling module handler routine once for each overflowed PMD. When multiple PMDs overflow at the same time (with the same PMU interrupt), then up to 64 distinct calls can happen. A common timestamp parameter allows the module to identify this kind of entries. 
- Changed the module ovfl_ctrl arguments to simplify the reset field. Now it is a simple boolean. - Updated perfmon.h to convert the "set" field to ushort from uint. Other structure updates to get better layout. - Update perfmon_default_smpl.h to reflect the change in overflow processing mentioned above. - Cleanup some state checking code to use switch-case instead of if-then with macros. Make the code more readable and easier to optimize for gcc. Thanks to David for the suggestion. - Added extra safety checks on pfm_context_load() to verify that the task actually exists. - The default sampling format module now supports the fmt_restart_active callbacks. Patch from David. --- arch/ia64/kernel/perfmon.c | 445 ++++++++++++++++++-------------- arch/ia64/kernel/perfmon_default_smpl.c | 84 +++--- include/asm-ia64/perfmon.h | 103 ++++---- include/asm-ia64/perfmon_default_smpl.h | 39 +-- 4 files changed, 370 insertions(+), 301 deletions(-) (limited to 'include') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 5a519c69a7dc..76c39b7d511a 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -256,6 +256,8 @@ typedef struct { /* * 64-bit software counter structure + * + * the next_reset_type is applied to the next call to pfm_reset_regs() */ typedef struct { unsigned long val; /* virtual 64bit counter value */ @@ -267,7 +269,7 @@ typedef struct { unsigned long seed; /* seed for random-number generator */ unsigned long mask; /* mask for random-number generator */ unsigned int flags; /* notify/do not notify */ - unsigned int reserved; /* for future use */ + int next_reset_type;/* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */ unsigned long eventid; /* overflow event identifier */ } pfm_counter_t; @@ -557,7 +559,6 @@ static struct vm_operations_struct pfm_vm_ops={ close: pfm_vm_close }; - #define pfm_wait_task_inactive(t) wait_task_inactive(t) #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v) #define
pfm_get_cpu_data(a,b) per_cpu(a, b) @@ -648,7 +649,6 @@ DEFINE_PER_CPU(struct task_struct *, pmu_owner); DEFINE_PER_CPU(pfm_context_t *, pmu_ctx); DEFINE_PER_CPU(unsigned long, pmu_activation_number); - /* forward declaration */ static struct file_operations pfm_file_ops; @@ -1532,7 +1532,7 @@ pfm_lseek(struct file *file, loff_t offset, int whence) } static ssize_t -pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos) +pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos) { pfm_context_t *ctx; pfm_msg_t *msg; @@ -1628,18 +1628,6 @@ abort: return ret; } -static ssize_t -pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos) -{ - int oldvar, ret; - - oldvar = pfm_debug_var; - pfm_debug_var = pfm_sysctl.debug_pfm_read; - ret = pfm_do_read(filp, buf, size, ppos); - pfm_debug_var = oldvar; - return ret; -} - static ssize_t pfm_write(struct file *file, const char *ubuf, size_t size, loff_t *ppos) @@ -2759,20 +2747,18 @@ pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag) DPRINT_ovfl(("ovfl_regs=0x%lx flag=%d\n", ovfl_regs[0], flag)); - if (flag == PFM_PMD_NO_RESET) return; - /* * now restore reset value on sampling overflowed counters */ mask >>= PMU_FIRST_COUNTER; for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { - if (mask & 0x1) { - ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); - reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; - DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", - is_long_reset ? "long" : "short", i, val)); - } + if ((mask & 0x1UL) == 0UL) continue; + + ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); + reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; + + DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? 
"long" : "short", i, val)); } /* @@ -2811,15 +2797,15 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag) */ mask >>= PMU_FIRST_COUNTER; for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { - if (mask & 0x1) { - val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); - reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; - DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", - is_long_reset ? "long" : "short", i, val)); + if ((mask & 0x1UL) == 0UL) continue; - pfm_write_soft_counter(ctx, i, val); - } + val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); + reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; + + DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); + + pfm_write_soft_counter(ctx, i, val); } /* @@ -2861,7 +2847,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) if (is_loaded) { thread = &ctx->ctx_task->thread; - can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0; + can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0; /* * In system wide and when the context is loaded, access can only happen * when the caller is running on the CPU being monitored by the session. 
@@ -3569,51 +3555,49 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) struct task_struct *task; pfm_buffer_fmt_t *fmt; pfm_ovfl_ctrl_t rst_ctrl; - int is_loaded; + int state, is_system; int ret = 0; + state = ctx->ctx_state; fmt = ctx->ctx_buf_fmt; - is_loaded = CTX_IS_LOADED(ctx); - - if (is_loaded && CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) goto proceed; + is_system = ctx->ctx_fl_system; + task = PFM_CTX_TASK(ctx); - /* - * restarting a terminated context is a nop - */ - if (unlikely(CTX_IS_TERMINATED(ctx))) { - DPRINT(("context is terminated, nothing to do\n")); - return 0; + switch(state) { + case PFM_CTX_MASKED: + break; + case PFM_CTX_LOADED: + if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; + /* fall through */ + case PFM_CTX_UNLOADED: + case PFM_CTX_ZOMBIE: + DPRINT(("invalid state=%d\n", state)); + return -EBUSY; + case PFM_CTX_TERMINATED: + DPRINT(("context is terminated, nothing to do\n")); + return 0; + default: + DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state)); + return -EINVAL; } - - /* - * LOADED, UNLOADED, ZOMBIE - */ - if (CTX_IS_MASKED(ctx) == 0) return -EBUSY; - -proceed: /* * In system wide and when the context is loaded, access can only happen * when the caller is running on the CPU being monitored by the session. * It does not have to be the owner (ctx_task) of the context per se. 
*/ - if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) { + if (is_system && ctx->ctx_cpu != smp_processor_id()) { DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu)); return -EBUSY; } - task = PFM_CTX_TASK(ctx); - /* sanity check */ if (unlikely(task == NULL)) { printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid); return -EINVAL; } - /* - * this test is always true in system wide mode - */ - if (task == current) { + if (task == current || is_system) { fmt = ctx->ctx_buf_fmt; @@ -3625,25 +3609,23 @@ proceed: prefetch(ctx->ctx_smpl_hdr); - rst_ctrl.stop_monitoring = 0; - rst_ctrl.reset_pmds = PFM_PMD_NO_RESET; + rst_ctrl.bits.mask_monitoring = 0; + rst_ctrl.bits.reset_ovfl_pmds = 1; - if (is_loaded) + if (state == PFM_CTX_LOADED) ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); else ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); - - } else { - rst_ctrl.stop_monitoring = 0; - rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET; + rst_ctrl.bits.mask_monitoring = 0; + rst_ctrl.bits.reset_ovfl_pmds = 1; } if (ret == 0) { - if (rst_ctrl.reset_pmds) - pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, rst_ctrl.reset_pmds); + if (rst_ctrl.bits.reset_ovfl_pmds) + pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); - if (rst_ctrl.stop_monitoring == 0) { + if (rst_ctrl.bits.mask_monitoring == 0) { DPRINT(("resuming monitoring for [%d]\n", task->pid)); if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task); @@ -3686,7 +3668,6 @@ proceed: ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; - PFM_SET_WORK_PENDING(task, 1); pfm_set_task_notify(task); @@ -3707,10 +3688,9 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_debug_var = pfm_sysctl.debug; - printk(KERN_ERR "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); + printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? 
"on" : "off"); - - if (m==0) { + if (m == 0) { memset(pfm_stats, 0, sizeof(pfm_stats)); for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; } @@ -3718,7 +3698,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) return 0; } - static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { @@ -3926,6 +3905,7 @@ static int pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { struct pt_regs *tregs; + struct task_struct *task = PFM_CTX_TASK(ctx); if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL; @@ -3975,7 +3955,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * per-task mode */ - if (ctx->ctx_task == current) { + if (task == current) { /* stop monitoring at kernel level */ pfm_clear_psr_up(); @@ -3984,7 +3964,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) */ ia64_psr(regs)->up = 0; } else { - tregs = ia64_task_regs(ctx->ctx_task); + tregs = ia64_task_regs(task); /* * stop monitoring at the user level @@ -3995,7 +3975,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * monitoring disabled in kernel at next reschedule */ ctx->ctx_saved_psr_up = 0; - printk("pfm_stop: current [%d] task=[%d]\n", current->pid, ctx->ctx_task->pid); + DPRINT(("pfm_stop: current [%d] task=[%d]\n", current->pid, task->pid)); } return 0; } @@ -4106,6 +4086,28 @@ abort_mission: return ret; } +static int +pfm_check_task_exist(pfm_context_t *ctx) +{ + struct task_struct *g, *t; + int ret = -ESRCH; + + read_lock(&tasklist_lock); + + do_each_thread (g, t) { + if (t->thread.pfm_context == ctx) { + ret = 0; + break; + } + } while_each_thread (g, t); + + read_unlock(&tasklist_lock); + + DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); + + return ret; +} + static int pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { @@ -4316,8 +4318,17 @@ 
error: /* * release task, there is now a link with the context */ - if (ctx->ctx_fl_system == 0 && task != current) pfm_put_task(task); + if (ctx->ctx_fl_system == 0 && task != current) { + pfm_put_task(task); + if (ret == 0) { + ret = pfm_check_task_exist(ctx); + if (ret) { + CTX_UNLOADED(ctx); + ctx->ctx_task = NULL; + } + } + } return ret; } @@ -4334,7 +4345,7 @@ static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx); static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { - struct task_struct *task = ctx->ctx_task; + struct task_struct *task = PFM_CTX_TASK(ctx); struct pt_regs *tregs; DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1)); @@ -4416,8 +4427,8 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg * cancel user level control */ ia64_psr(regs)->sp = 1; - DPRINT(("setting psr.sp for [%d]\n", task->pid)); + DPRINT(("setting psr.sp for [%d]\n", task->pid)); } /* * save PMDs to context @@ -4490,7 +4501,7 @@ pfm_exit_thread(struct task_struct *task) pfm_context_t *ctx; unsigned long flags; struct pt_regs *regs = ia64_task_regs(task); - int ret; + int ret, state; int free_ok = 0; ctx = PFM_GET_CTX(task); @@ -4499,38 +4510,42 @@ pfm_exit_thread(struct task_struct *task) DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid)); - /* - * come here only if attached - */ - if (unlikely(CTX_IS_UNLOADED(ctx))) { - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); - goto skip_all; - } - - if (CTX_IS_LOADED(ctx) || CTX_IS_MASKED(ctx)) { - - ret = pfm_context_unload(ctx, NULL, 0, regs); - if (ret) { - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret); - } - CTX_TERMINATED(ctx); - DPRINT(("ctx terminated by [%d]\n", task->pid)); - - pfm_end_notify_user(ctx); + state = ctx->ctx_state; + switch(state) { + case PFM_CTX_UNLOADED: + /* + * come here only if attached + */ + 
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); + break; + case PFM_CTX_LOADED: + case PFM_CTX_MASKED: + ret = pfm_context_unload(ctx, NULL, 0, regs); + if (ret) { + printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret); + } + CTX_TERMINATED(ctx); + DPRINT(("ctx terminated by [%d]\n", task->pid)); - } else if (CTX_IS_ZOMBIE(ctx)) { - pfm_clear_psr_up(); + pfm_end_notify_user(ctx); + break; + case PFM_CTX_ZOMBIE: + pfm_clear_psr_up(); - BUG_ON(ctx->ctx_smpl_hdr); + BUG_ON(ctx->ctx_smpl_hdr); - pfm_force_cleanup(ctx, regs); + pfm_force_cleanup(ctx, regs); - free_ok = 1; + free_ok = 1; + break; + default: + printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state); + break; } { u64 psr = pfm_get_psr(); BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); + BUG_ON(GET_PMU_OWNER()); } -skip_all: UNPROTECT_CTX(ctx, flags); /* @@ -4660,7 +4675,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon /* * reject any call if perfmon was disabled at initialization time - */ + mask*/ if (PFM_IS_DISABLED()) return -ENOSYS; if (unlikely(PFM_CMD_IS_VALID(cmd) == 0)) { @@ -4780,6 +4795,8 @@ abort_locked: error_args: if (args_k) kfree(args_k); + DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); + return ret; } @@ -4796,22 +4813,22 @@ pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_reg */ if (CTX_HAS_SMPL(ctx)) { - rst_ctrl.stop_monitoring = 1; - rst_ctrl.reset_pmds = PFM_PMD_NO_RESET; + rst_ctrl.bits.mask_monitoring = 0; + rst_ctrl.bits.reset_ovfl_pmds = 1; /* XXX: check return value */ if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); } else { - rst_ctrl.stop_monitoring = 0; - rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET; + rst_ctrl.bits.mask_monitoring = 0; + rst_ctrl.bits.reset_ovfl_pmds = 1; } if (ret == 0) { - if (rst_ctrl.reset_pmds != PFM_PMD_NO_RESET) - pfm_reset_regs(ctx, 
&ovfl_regs, rst_ctrl.reset_pmds); - - if (rst_ctrl.stop_monitoring == 0) { + if (rst_ctrl.bits.reset_ovfl_pmds) { + pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET); + } + if (rst_ctrl.bits.mask_monitoring == 0) { DPRINT(("resuming monitoring\n")); if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current); } else { @@ -4981,11 +4998,12 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL; msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; - msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */ msg->pfm_ovfl_msg.msg_active_set = 0; msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds; - msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; - + msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL; + msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL; + msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; + msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */ } DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n", @@ -5031,9 +5049,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str pfm_ovfl_arg_t ovfl_arg; unsigned long mask; unsigned long old_val; - unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL; + unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL; + unsigned long tstamp; pfm_ovfl_ctrl_t ovfl_ctrl; - unsigned int i, j, has_smpl, first_pmd = ~0U; + unsigned int i, has_smpl; int must_notify = 0; if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring; @@ -5043,9 +5062,11 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str */ if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check; + tstamp = ia64_get_itc(); + mask = pmc0 >> PMU_FIRST_COUNTER; - DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s" + DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " "used_pmds=0x%lx reload_pmcs=0x%lx\n", pmc0, task ? 
task->pid: -1, @@ -5066,7 +5087,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str if ((mask & 0x1) == 0) continue; DPRINT_ovfl(("pmd[%d] overflowed hw_pmd=0x%lx ctx_pmd=0x%lx\n", - i, ia64_get_pmd(i), ctx->ctx_pmds[i].val)); + i, ia64_get_pmd(i), ctx->ctx_pmds[i].val)); /* * Note that the pmd is not necessarily 0 at this point as qualified events @@ -5081,91 +5102,132 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str * check for overflow condition */ if (likely(old_val > ctx->ctx_pmds[i].val)) { - ovfl_pmds |= 1UL << i; - - /* - * keep track of pmds of interest for samples - */ - if (has_smpl) { - if (first_pmd == ~0U) first_pmd = i; - smpl_pmds |= ctx->ctx_pmds[i].smpl_pmds[0]; - } - if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; } - DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx first_pmd=%u smpl_pmds=0x%lx\n", - i, ctx->ctx_pmds[i].val, old_val, - ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify, first_pmd, smpl_pmds)); + DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx " + "ovfl_notify=0x%lx\n", + i, ctx->ctx_pmds[i].val, old_val, + ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify)); } - ovfl_ctrl.notify_user = ovfl_notify ? 1 : 0; - ovfl_ctrl.reset_pmds = ovfl_pmds && ovfl_notify == 0UL ? 1 : 0; - ovfl_ctrl.block = ovfl_notify ? 1 : 0; - ovfl_ctrl.stop_monitoring = ovfl_notify ? 1 : 0; + /* + * there was no 64-bit overflow, nothing else to do + */ + if (ovfl_pmds == 0UL) return; + + /* + * reset all control bits + */ + ovfl_ctrl.val = 0; /* - * when a overflow is detected, check for sampling buffer, if present, invoke - * record() callback. + * if a sampling format module exists, then we "cache" the overflow by + * calling the module's handler() routine. 
*/ - if (ovfl_pmds && has_smpl) { - unsigned long start_cycles; + if (has_smpl) { + unsigned long start_cycles, end_cycles; + unsigned long pmd_mask, smpl_pmds; + int j, k, ret = 0; int this_cpu = smp_processor_id(); - ovfl_arg.ovfl_pmds[0] = ovfl_pmds; - ovfl_arg.ovfl_notify[0] = ovfl_notify; - ovfl_arg.ovfl_ctrl = ovfl_ctrl; - ovfl_arg.smpl_pmds[0] = smpl_pmds; + pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER; prefetch(ctx->ctx_smpl_hdr); - ovfl_arg.pmd_value = ctx->ctx_pmds[first_pmd].val; - ovfl_arg.pmd_last_reset = ctx->ctx_pmds[first_pmd].lval; - ovfl_arg.pmd_eventid = ctx->ctx_pmds[first_pmd].eventid; + for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) { - /* - * copy values of pmds of interest. Sampling format may copy them - * into sampling buffer. - */ - if (smpl_pmds) { - for(i=0, j=0; smpl_pmds; i++, smpl_pmds >>=1) { - if ((smpl_pmds & 0x1) == 0) continue; - ovfl_arg.smpl_pmds_values[j++] = PMD_IS_COUNTING(i) ? pfm_read_soft_counter(ctx, i) : ia64_get_pmd(i); + mask = 1UL << i; + + if ((pmd_mask & 0x1) == 0) continue; + + ovfl_arg.ovfl_pmd = (unsigned char )i; + ovfl_arg.ovfl_notify = ovfl_notify & mask ? 1 : 0; + ovfl_arg.active_set = 0; + ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */ + ovfl_arg.smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0]; + + ovfl_arg.pmd_value = ctx->ctx_pmds[i].val; + ovfl_arg.pmd_last_reset = ctx->ctx_pmds[i].lval; + ovfl_arg.pmd_eventid = ctx->ctx_pmds[i].eventid; + + /* + * copy values of pmds of interest. Sampling format may copy them + * into sampling buffer. + */ + if (smpl_pmds) { + for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) { + if ((smpl_pmds & 0x1) == 0) continue; + ovfl_arg.smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? 
pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j); + } } - } - pfm_stats[this_cpu].pfm_smpl_handler_calls++; - start_cycles = ia64_get_itc(); + pfm_stats[this_cpu].pfm_smpl_handler_calls++; - /* - * call custom buffer format record (handler) routine - */ - (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs); + start_cycles = ia64_get_itc(); - pfm_stats[this_cpu].pfm_smpl_handler_cycles += ia64_get_itc() - start_cycles; + /* + * call custom buffer format record (handler) routine + */ + ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs, tstamp); + + end_cycles = ia64_get_itc(); + + /* + * For those controls, we take the union because they have + * an all or nothing behavior. + */ + ovfl_ctrl.bits.notify_user |= ovfl_arg.ovfl_ctrl.bits.notify_user; + ovfl_ctrl.bits.block_task |= ovfl_arg.ovfl_ctrl.bits.block_task; + ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring; + ovfl_ctrl.bits.reset_ovfl_pmds |= ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds; /* yes or no */ - ovfl_pmds = ovfl_arg.ovfl_pmds[0]; - ovfl_notify = ovfl_arg.ovfl_notify[0]; - ovfl_ctrl = ovfl_arg.ovfl_ctrl; + pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles; + } + /* + * when the module cannot handle the rest of the overflows, we abort right here + */ + if (ret && pmd_mask) { + DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n", + current->pid, + pmd_mask<ctx_ovfl_regs[0] = ovfl_pmds; - if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.block) { + /* + * check for blocking context + */ + if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) { ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; /* - * set the perfmon specific checking pending work + * set the perfmon specific checking pending work for the task */ PFM_SET_WORK_PENDING(task, 1); @@ -5182,21 +5244,22 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str must_notify = 1; } - DPRINT_ovfl(("current [%d] owner 
[%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx stopped=%d\n", + DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", current->pid, GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, PFM_GET_WORK_PENDING(task), ctx->ctx_fl_trap_reason, ovfl_pmds, ovfl_notify, - ovfl_ctrl.stop_monitoring ? 1 : 0)); + ovfl_ctrl.bits.mask_monitoring ? 1 : 0)); /* * in case monitoring must be stopped, we toggle the psr bits */ - if (ovfl_ctrl.stop_monitoring) { + if (ovfl_ctrl.bits.mask_monitoring) { pfm_mask_monitoring(task); CTX_MASKED(ctx); } + /* * send notification now */ @@ -5204,7 +5267,6 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str return; - sanity_check: printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", smp_processor_id(), @@ -5312,7 +5374,7 @@ report_spurious: static pfm_irq_handler_t pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) { - unsigned long m; + unsigned long start_cycles, total_cycles; unsigned long min, max; int this_cpu; int ret; @@ -5321,19 +5383,22 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min; max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max; - m = ia64_get_itc(); + start_cycles = ia64_get_itc(); ret = pfm_do_interrupt_handler(irq, arg, regs); - m = ia64_get_itc() - m; + total_cycles = ia64_get_itc(); /* * don't measure spurious interrupts */ - if (ret == 0) { - if (m < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = m; - if (m > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = m; - pfm_stats[this_cpu].pfm_ovfl_intr_cycles += m; + if (likely(ret == 0)) { + total_cycles -= start_cycles; + + if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles; + if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles; + + pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles; } PFM_IRQ_HANDLER_RET(); } @@ 
-5488,6 +5553,10 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i pfm_set_psr_pp(); ia64_srlz_i(); } + { unsigned long val; + val = ia64_get_pmc(4); + if ((val & (1UL<<23)) == 0UL) printk("perfmon: PMU off: pmc4=0x%lx\n", val); + } } void @@ -5750,13 +5819,6 @@ pfm_load_regs (struct task_struct *task) BUG_ON(GET_PMU_OWNER()); t = &task->thread; - psr = pfm_get_psr(); - -#if 1 - BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); - BUG_ON(psr & IA64_PSR_I); -#endif - /* * possible on unload */ @@ -5771,6 +5833,12 @@ pfm_load_regs (struct task_struct *task) * access, not CPU concurrency. */ flags = pfm_protect_ctx_ctxsw(ctx); + psr = pfm_get_psr(); + +#if 1 + BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); + BUG_ON(psr & IA64_PSR_I); +#endif if (unlikely(CTX_IS_ZOMBIE(ctx))) { struct pt_regs *regs = ia64_task_regs(task); @@ -6133,6 +6201,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val)); if (is_self) task->thread.pmds[i] = pmd_val; + ctx->ctx_pmds[i].val = val; } } diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c index 4ee7b1379efe..aedf99212c82 100644 --- a/arch/ia64/kernel/perfmon_default_smpl.c +++ b/arch/ia64/kernel/perfmon_default_smpl.c @@ -109,21 +109,15 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v } static int -default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs) +default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp) { pfm_default_smpl_hdr_t *hdr; pfm_default_smpl_entry_t *ent; void *cur, *last; unsigned long *e; - unsigned long ovfl_mask; - unsigned long ovfl_notify; - unsigned long stamp; unsigned int npmds, i; - - /* - * some time stamp - */ - stamp = ia64_get_itc(); + unsigned char ovfl_pmd; + unsigned char ovfl_notify; if (unlikely(buf 
== NULL || arg == NULL|| regs == NULL || task == NULL)) { DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg)); @@ -133,8 +127,8 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct hdr = (pfm_default_smpl_hdr_t *)buf; cur = hdr->hdr_cur_pos; last = hdr->hdr_last_pos; - ovfl_mask = arg->ovfl_pmds[0]; - ovfl_notify = arg->ovfl_notify[0]; + ovfl_pmd = arg->ovfl_pmd; + ovfl_notify = arg->ovfl_notify; /* * check for space against largest possibly entry. @@ -153,12 +147,12 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct hdr->hdr_count++; - DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmds=0x%lx ovfl_notify=0x%lx npmds=%u\n", + DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n", task->pid, hdr->hdr_count, cur, last, last-cur, - ovfl_mask, + ovfl_pmd, ovfl_notify, npmds)); /* @@ -172,7 +166,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct * - this is not necessarily the task controlling the session */ ent->pid = current->pid; - ent->cpu = smp_processor_id(); + ent->ovfl_pmd = ovfl_pmd; ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val; /* @@ -180,13 +174,9 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct */ ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3); - /* - * which registers overflowed - */ - ent->ovfl_pmds = ovfl_mask; ent->tstamp = stamp; + ent->cpu = smp_processor_id(); ent->set = arg->active_set; - ent->reserved1 = 0; /* * selectively store PMDs in increasing index number @@ -206,14 +196,14 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct /* * keep same ovfl_pmds, ovfl_notify */ - arg->ovfl_ctrl.notify_user = 0; - arg->ovfl_ctrl.block = 0; - arg->ovfl_ctrl.stop_monitoring = 0; - arg->ovfl_ctrl.reset_pmds = 1; + arg->ovfl_ctrl.bits.notify_user = 0; + arg->ovfl_ctrl.bits.block_task = 
0; + arg->ovfl_ctrl.bits.mask_monitoring = 0; + arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */ return 0; full: - DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=0x%lx\n", last-cur, hdr->hdr_count, ovfl_notify)); + DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify)); /* * increment number of buffer overflow. @@ -222,22 +212,21 @@ full: hdr->hdr_overflows++; /* - * if no notification is needed, then we just reset the buffer index. + * if no notification is needed, then we saturate the buffer */ - if (ovfl_notify == 0UL) { + if (ovfl_notify == 0) { hdr->hdr_count = 0UL; - arg->ovfl_ctrl.notify_user = 0; - arg->ovfl_ctrl.block = 0; - arg->ovfl_ctrl.stop_monitoring = 0; - arg->ovfl_ctrl.reset_pmds = 1; + arg->ovfl_ctrl.bits.notify_user = 0; + arg->ovfl_ctrl.bits.block_task = 0; + arg->ovfl_ctrl.bits.mask_monitoring = 1; + arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; } else { - /* keep same ovfl_pmds, ovfl_notify */ - arg->ovfl_ctrl.notify_user = 1; - arg->ovfl_ctrl.block = 1; - arg->ovfl_ctrl.stop_monitoring = 1; - arg->ovfl_ctrl.reset_pmds = 0; + arg->ovfl_ctrl.bits.notify_user = 1; + arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */ + arg->ovfl_ctrl.bits.mask_monitoring = 1; + arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */ } - return 0; + return -1; /* we are full, sorry */ } static int @@ -250,8 +239,8 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru hdr->hdr_count = 0UL; hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr); - ctrl->stop_monitoring = 0; - ctrl->reset_pmds = PFM_PMD_LONG_RESET; + ctrl->bits.mask_monitoring = 0; + ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */ return 0; } @@ -264,15 +253,16 @@ default_exit(struct task_struct *task, void *buf, struct pt_regs *regs) } static pfm_buffer_fmt_t default_fmt={ - .fmt_name = "default_format", - 
.fmt_uuid = PFM_DEFAULT_SMPL_UUID, - .fmt_arg_size = sizeof(pfm_default_smpl_arg_t), - .fmt_validate = default_validate, - .fmt_getsize = default_get_size, - .fmt_init = default_init, - .fmt_handler = default_handler, - .fmt_restart = default_restart, - .fmt_exit = default_exit, + .fmt_name = "default_format", + .fmt_uuid = PFM_DEFAULT_SMPL_UUID, + .fmt_arg_size = sizeof(pfm_default_smpl_arg_t), + .fmt_validate = default_validate, + .fmt_getsize = default_get_size, + .fmt_init = default_init, + .fmt_handler = default_handler, + .fmt_restart = default_restart, + .fmt_restart_active = default_restart, + .fmt_exit = default_exit, }; static int __init diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h index 26afeeb46ea6..b8e81aa3bffa 100644 --- a/include/asm-ia64/perfmon.h +++ b/include/asm-ia64/perfmon.h @@ -70,64 +70,70 @@ typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type * Request structure used to define a context */ typedef struct { - pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */ - unsigned long ctx_flags; /* noblock/block */ - unsigned int ctx_nextra_sets; /* number of extra event sets (you always get 1) */ - int ctx_fd; /* return arg: unique identification for context */ - void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */ - unsigned long ctx_reserved[11]; /* for future use */ + pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */ + unsigned long ctx_flags; /* noblock/block */ + unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */ + unsigned short ctx_reserved1; /* for future use */ + int ctx_fd; /* return arg: unique identification for context */ + void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */ + unsigned long ctx_reserved2[11];/* for future use */ } pfarg_context_t; /* * Request structure used to write/read a PMC or PMD */ typedef struct { - unsigned int reg_num; /* 
which register */ - unsigned int reg_set; /* event set for this register */ + unsigned int reg_num; /* which register */ + unsigned short reg_set; /* event set for this register */ + unsigned short reg_reserved1; /* for future use */ - unsigned long reg_value; /* initial pmc/pmd value */ - unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */ + unsigned long reg_value; /* initial pmc/pmd value */ + unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */ - unsigned long reg_long_reset; /* reset after buffer overflow notification */ - unsigned long reg_short_reset; /* reset after counter overflow */ + unsigned long reg_long_reset; /* reset after buffer overflow notification */ + unsigned long reg_short_reset; /* reset after counter overflow */ - unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */ - unsigned long reg_random_seed; /* seed value when randomization is used */ - unsigned long reg_random_mask; /* bitmask used to limit random value */ - unsigned long reg_last_reset_val;/* return: PMD last reset value */ + unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */ + unsigned long reg_random_seed; /* seed value when randomization is used */ + unsigned long reg_random_mask; /* bitmask used to limit random value */ + unsigned long reg_last_reset_val;/* return: PMD last reset value */ unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */ - unsigned long reg_smpl_eventid; /* opaque sampling event identifier */ + unsigned long reg_smpl_eventid; /* opaque sampling event identifier */ - unsigned long reserved[3]; /* for future use */ + unsigned long reg_reserved2[3]; /* for future use */ } pfarg_reg_t; typedef struct { - unsigned int dbreg_num; /* which debug register */ - unsigned int dbreg_set; /* event set for this register */ - unsigned long dbreg_value; /* value for debug register */ - unsigned long dbreg_flags; /* return: dbreg error */ - unsigned long 
dbreg_reserved[1]; /* for future use */ + unsigned int dbreg_num; /* which debug register */ + unsigned short dbreg_set; /* event set for this register */ + unsigned short dbreg_reserved1; /* for future use */ + unsigned long dbreg_value; /* value for debug register */ + unsigned long dbreg_flags; /* return: dbreg error */ + unsigned long dbreg_reserved2[1]; /* for future use */ } pfarg_dbreg_t; typedef struct { unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */ - unsigned int ft_reserved; /* reserved for future use */ - unsigned long reserved[4]; /* for future use */ + unsigned int ft_reserved; /* reserved for future use */ + unsigned long reserved[4]; /* for future use */ } pfarg_features_t; typedef struct { - pid_t load_pid; /* process to load the context into */ - unsigned int load_set; /* first event set to load */ - unsigned long load_reserved[2]; /* for future use */ + pid_t load_pid; /* process to load the context into */ + unsigned short load_set; /* first event set to load */ + unsigned short load_reserved1; /* for future use */ + unsigned long load_reserved2[3]; /* for future use */ } pfarg_load_t; typedef struct { int msg_type; /* generic message header */ int msg_ctx_fd; /* generic message header */ - unsigned long msg_tstamp; /* for perf tuning */ - unsigned int msg_active_set; /* active set at the time of overflow */ unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */ + unsigned short msg_active_set; /* active set at the time of overflow */ + unsigned short msg_reserved1; /* for future use */ + unsigned int msg_reserved2; /* for future use */ + unsigned long msg_tstamp; /* for perf tuning/debug */ } pfm_ovfl_msg_t; typedef struct { @@ -192,25 +198,28 @@ extern void pfm_handle_work(void); #define PFM_PMD_LONG_RESET 1 #define PFM_PMD_SHORT_RESET 2 -typedef struct { - unsigned int notify_user:1; /* notify user program of overflow */ - unsigned int reset_pmds :2; /* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */ - 
unsigned int block:1; /* block monitored task on kernel exit */ - unsigned int stop_monitoring:1; /* will mask monitoring via PMCx.plm */ - unsigned int reserved:26; /* for future use */ +typedef union { + unsigned int val; + struct { + unsigned int notify_user:1; /* notify user program of overflow */ + unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */ + unsigned int block_task:1; /* block monitored task on kernel exit */ + unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */ + unsigned int reserved:28; /* for future use */ + } bits; } pfm_ovfl_ctrl_t; typedef struct { - unsigned long ovfl_pmds[4]; /* bitmask of overflowed pmds */ - unsigned long ovfl_notify[4]; /* bitmask of overflow pmds which asked for notification */ - unsigned long pmd_value; /* current 64-bit value of 1st pmd which overflowed */ - unsigned long pmd_last_reset; /* last reset value of 1st pmd which overflowed */ - unsigned long pmd_eventid; /* eventid associated with 1st pmd which overflowed */ - unsigned int active_set; /* event set active at the time of the overflow */ - unsigned int reserved1; - unsigned long smpl_pmds[4]; - unsigned long smpl_pmds_values[PMU_MAX_PMDS]; - pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */ + unsigned char ovfl_pmd; /* index of overflowed PMD */ + unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */ + unsigned short active_set; /* event set active at the time of the overflow */ + pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */ + + unsigned long pmd_last_reset; /* last reset value of of the PMD */ + unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */ + unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */ + unsigned long pmd_value; /* current 64-bit value of the PMD */ + unsigned long pmd_eventid; /* eventid associated with PMD */ } pfm_ovfl_arg_t; @@ -223,7 +232,7 @@ typedef struct _pfm_buffer_fmt_t 
{ int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg); int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size); int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg); - int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs); + int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp); int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs); diff --git a/include/asm-ia64/perfmon_default_smpl.h b/include/asm-ia64/perfmon_default_smpl.h index 77709625f96f..1c63c7cf7f49 100644 --- a/include/asm-ia64/perfmon_default_smpl.h +++ b/include/asm-ia64/perfmon_default_smpl.h @@ -16,7 +16,9 @@ */ typedef struct { unsigned long buf_size; /* size of the buffer in bytes */ - unsigned long reserved[3]; /* for future use */ + unsigned int flags; /* buffer specific flags */ + unsigned int res1; /* for future use */ + unsigned long reserved[2]; /* for future use */ } pfm_default_smpl_arg_t; /* @@ -46,28 +48,27 @@ typedef struct { /* * Entry header in the sampling buffer. The header is directly followed - * with the PMDs saved in increasing index order: PMD4, PMD5, .... How - * many PMDs are present depends on how the session was programmed. + * with the values of the PMD registers of interest saved in increasing + * index order: PMD4, PMD5, and so on. How many PMDs are present depends + * on how the session was programmed. * - * XXX: in this version of the entry, only up to 64 registers can be - * recorded. This should be enough for quite some time. Always check - * sampling format before parsing entries! 
+ * In the case where multiple counters overflow at the same time, multiple + * entries are written consecutively. * - * In the case where multiple counters overflow at the same time, the - * last_reset_value member indicates the initial value of the - * overflowed PMD with the smallest index. For instance, if PMD2 and - * PMD5 have overflowed, the last_reset_value member contains the - * initial value of PMD2. + * last_reset_value member indicates the initial value of the overflowed PMD. */ typedef struct { - int pid; /* current process at PMU interrupt point */ - int cpu; /* cpu on which the overfow occured */ - unsigned long last_reset_val; /* initial value of 1st overflowed PMD */ - unsigned long ip; /* where did the overflow interrupt happened */ - unsigned long ovfl_pmds; /* which PMDS registers overflowed (64 max) */ - unsigned long tstamp; /* ar.itc on the CPU that took the overflow */ - unsigned int set; /* event set active when overflow ocurred */ - unsigned int reserved1; /* for future use */ + int pid; /* active process at PMU interrupt point */ + unsigned char reserved1[3]; /* reserved for future use */ + unsigned char ovfl_pmd; /* index of overflowed PMD */ + + unsigned long last_reset_val; /* initial value of overflowed PMD */ + unsigned long ip; /* where did the overflow interrupt happened */ + unsigned long tstamp; /* ar.itc when entering perfmon intr. 
handler */ + + unsigned short cpu; /* cpu on which the overfow occured */ + unsigned short set; /* event set active when overflow ocurred */ + unsigned int reserved2; /* for future use */ } pfm_default_smpl_entry_t; #define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long) */ -- cgit v1.2.3 From 46480a97d81e57150b8218ceeaae2c8ee5f4e9e7 Mon Sep 17 00:00:00 2001 From: John Levon Date: Thu, 21 Aug 2003 02:57:11 -0700 Subject: [PATCH] OProfile: export kernel pointer size in oprofilefs Tell user-space how big kernel pointers are, as preferable to sniffing /proc/kcore. Improve the oprofilefs_ulong_to_user() prototype. --- drivers/oprofile/oprofile_files.c | 14 +++++++++++++- drivers/oprofile/oprofilefs.c | 12 ++++++------ include/linux/oprofile.h | 2 +- 3 files changed, 20 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index 21ae41a54e90..b7466377afd4 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c @@ -19,6 +19,17 @@ unsigned long fs_cpu_buffer_size = 8192; unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */ +static ssize_t pointer_size_read(struct file * file, char * buf, size_t count, loff_t * offset) +{ + return oprofilefs_ulong_to_user((unsigned long)sizeof(void *), buf, count, offset); +} + + +static struct file_operations pointer_size_fops = { + .read = pointer_size_read, +}; + + static ssize_t cpu_type_read(struct file * file, char * buf, size_t count, loff_t * offset) { return oprofilefs_str_to_user(oprofile_ops->cpu_type, buf, count, offset); @@ -32,7 +43,7 @@ static struct file_operations cpu_type_fops = { static ssize_t enable_read(struct file * file, char * buf, size_t count, loff_t * offset) { - return oprofilefs_ulong_to_user(&oprofile_started, buf, count, offset); + return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset); } @@ -85,6 +96,7 @@ void 
oprofile_create_files(struct super_block * sb, struct dentry * root) oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed); oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size); oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); + oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); oprofile_create_stats_files(sb, root); if (oprofile_ops->create_files) oprofile_ops->create_files(sb, root); diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index c82630ec1819..ed1efe61f6e3 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -69,7 +69,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char * buf, size_t count, loff_ #define TMPBUFSIZE 50 -ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset) +ssize_t oprofilefs_ulong_to_user(unsigned long val, char * buf, size_t count, loff_t * offset) { char tmpbuf[TMPBUFSIZE]; size_t maxlen; @@ -78,7 +78,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, return 0; spin_lock(&oprofilefs_lock); - maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", *val); + maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val); spin_unlock(&oprofilefs_lock); if (maxlen > TMPBUFSIZE) maxlen = TMPBUFSIZE; @@ -122,7 +122,8 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const * buf, size_t cou static ssize_t ulong_read_file(struct file * file, char * buf, size_t count, loff_t * offset) { - return oprofilefs_ulong_to_user(file->private_data, buf, count, offset); + unsigned long * val = file->private_data; + return oprofilefs_ulong_to_user(*val, buf, count, offset); } @@ -212,9 +213,8 @@ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, static ssize_t atomic_read_file(struct file * file, char * buf, size_t count, loff_t * offset) { - atomic_t * aval = file->private_data; - unsigned long val = atomic_read(aval); - return 
oprofilefs_ulong_to_user(&val, buf, count, offset); + atomic_t * val = file->private_data; + return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); } diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index c2b4fd735f40..9555dd4d69fc 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -92,7 +92,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char * buf, size_t count, loff_ * Convert an unsigned long value into ASCII and copy it to the user buffer @buf, * updating *offset appropriately. Returns bytes written or -EFAULT. */ -ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset); +ssize_t oprofilefs_ulong_to_user(unsigned long val, char * buf, size_t count, loff_t * offset); /** * Read an ASCII string for a number from a userspace buffer and fill *val on success. -- cgit v1.2.3 From 7c2b7264beedd86b2caa8ebd0bfd958206b96c01 Mon Sep 17 00:00:00 2001 From: William Lee Irwin III Date: Thu, 21 Aug 2003 03:06:22 -0700 Subject: [PATCH] Fix APIC ID handling Fix APIC ID lookup. In the bios_cpu_apicid[] case, it would walk off the end of bios_cpu_apicid[] and attempt to send APIC INIT messages to garbage without this patch, and in the NUMA-Q case, it would attempt to send NMI wakeups to destinations in the broadcast cluster (which is harmless, but very poor form) without this patch. 
--- include/asm-i386/mach-bigsmp/mach_apic.h | 5 ++++- include/asm-i386/mach-es7000/mach_apic.h | 4 +++- include/asm-i386/mach-numaq/mach_apic.h | 10 +++++++++- include/asm-i386/mach-summit/mach_apic.h | 5 ++++- 4 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h index c21ed08175d5..dab6aa34c6fa 100644 --- a/include/asm-i386/mach-bigsmp/mach_apic.h +++ b/include/asm-i386/mach-bigsmp/mach_apic.h @@ -86,7 +86,10 @@ extern u8 bios_cpu_apicid[]; static inline int cpu_present_to_apicid(int mps_cpu) { - return (int) bios_cpu_apicid[mps_cpu]; + if (mps_cpu < NR_CPUS) + return (int)bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h index aa7fd107c1c9..b744ac27f6fc 100644 --- a/include/asm-i386/mach-es7000/mach_apic.h +++ b/include/asm-i386/mach-es7000/mach_apic.h @@ -106,8 +106,10 @@ static inline int cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) return boot_cpu_physical_apicid; - else + else if (mps_cpu < NR_CPUS) return (int) bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h index 2f9f19237460..98b4e5921aa8 100644 --- a/include/asm-i386/mach-numaq/mach_apic.h +++ b/include/asm-i386/mach-numaq/mach_apic.h @@ -65,9 +65,17 @@ static inline int cpu_to_logical_apicid(int cpu) return (int)cpu_2_logical_apicid[cpu]; } +/* + * Supporting over 60 cpus on NUMA-Q requires a locality-dependent + * cpu to APIC ID relation to properly interact with the intelligent + * mode of the cluster controller. 
+ */ static inline int cpu_present_to_apicid(int mps_cpu) { - return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + if (mps_cpu < 60) + return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); + else + return BAD_APICID; } static inline int generate_logical_apicid(int quad, int phys_apicid) diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h index f79d5df55e1a..73a4a1077e85 100644 --- a/include/asm-i386/mach-summit/mach_apic.h +++ b/include/asm-i386/mach-summit/mach_apic.h @@ -87,7 +87,10 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { - return (int) bios_cpu_apicid[mps_cpu]; + if (mps_cpu < NR_CPUS) + return (int)bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) -- cgit v1.2.3 From 612344a8d26dfb4a2385f1b94396a428f5964fb2 Mon Sep 17 00:00:00 2001 From: Yoshinori Sato Date: Thu, 21 Aug 2003 08:56:22 -0700 Subject: [PATCH] h8300 support fix (2/2) o inline assembler funcs cleanup o machine depend header cleanup o interrupt management fix --- include/asm-h8300/aki3068net/machine-depend.h | 35 +++ include/asm-h8300/atomic.h | 45 +--- include/asm-h8300/bitops.h | 301 ++++++++--------------- include/asm-h8300/edosk2674/machine-depend.h | 70 ++++++ include/asm-h8300/generic/machine-depend.h | 17 ++ include/asm-h8300/h8300_ne.h | 6 +- include/asm-h8300/h8max/ide.h | 60 ----- include/asm-h8300/h8max/machine-depend.h | 167 +++++++++++++ include/asm-h8300/hardirq.h | 6 - include/asm-h8300/ide.h | 7 +- include/asm-h8300/io.h | 9 - include/asm-h8300/machine-depend.h | 70 ++++++ include/asm-h8300/processor.h | 6 +- include/asm-h8300/regs267x.h | 336 ++++++++++++++++++++++++++ include/asm-h8300/semaphore.h | 178 +++----------- include/asm-h8300/system.h | 21 +- include/asm-h8300/timex.h | 6 +- 17 files changed, 845 insertions(+), 495 deletions(-) create mode 100644 
include/asm-h8300/aki3068net/machine-depend.h create mode 100644 include/asm-h8300/edosk2674/machine-depend.h create mode 100644 include/asm-h8300/generic/machine-depend.h delete mode 100644 include/asm-h8300/h8max/ide.h create mode 100644 include/asm-h8300/h8max/machine-depend.h create mode 100644 include/asm-h8300/machine-depend.h create mode 100644 include/asm-h8300/regs267x.h (limited to 'include') diff --git a/include/asm-h8300/aki3068net/machine-depend.h b/include/asm-h8300/aki3068net/machine-depend.h new file mode 100644 index 000000000000..e2e5f6a523ac --- /dev/null +++ b/include/asm-h8300/aki3068net/machine-depend.h @@ -0,0 +1,35 @@ +/* AE-3068 board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#include +#define H8300_TIMER_COUNT_DATA 20000*10/8192 +#define H8300_TIMER_FREQ 20000*1000/8192 +#endif + +/* AE-3068 RTL8019AS Config */ +#ifdef H8300_NE_DEFINE + +#define NE2000_ADDR 0x200000 +#define NE2000_IRQ 5 +#define NE2000_IRQ_VECTOR (12 + NE2000_IRQ) +#define NE2000_BYTE volatile unsigned short + +#define IER 0xfee015 +#define ISR 0xfee016 +#define IRQ_MASK (1 << NE2000_IRQ) + +#define WCRL 0xfee023 +#define MAR0A 0xffff20 +#define ETCR0A 0xffff24 +#define DTCR0A 0xffff27 +#define MAR0B 0xffff28 +#define DTCR0B 0xffff2f + +#define H8300_INIT_NE() \ +do { \ + wordlength = 1; \ + outb_p(0x48, ioaddr + EN0_DCFG); \ +} while(0) + +#endif diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h index 3af502772b6f..e9595c099c58 100644 --- a/include/asm-h8300/atomic.h +++ b/include/asm-h8300/atomic.h @@ -71,56 +71,27 @@ static __inline__ int atomic_dec_and_test(atomic_t *v) return ret == 0; } -#if defined(__H8300H__) static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) { - __asm__ __volatile__("stc ccr,r2l\n\t" + __asm__ __volatile__("stc ccr,r1l\n\t" "orc #0x80,ccr\n\t" "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "and.l er1,er0\n\t" + "and.l %1,er0\n\t" "mov.l er0,%0\n\t" - "ldc r2l,ccr" - : "=m" 
(*v) : "ir" (~(mask)) :"er0","er1","er2"); + "ldc r1l,ccr" + : "=m" (*v) : "g" (~(mask)) :"er0","er1"); } static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) { - __asm__ __volatile__("stc ccr,r2l\n\t" + __asm__ __volatile__("stc ccr,r1l\n\t" "orc #0x80,ccr\n\t" "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "or.l er1,er0\n\t" + "or.l %1,er0\n\t" "mov.l er0,%0\n\t" - "ldc r2l,ccr" - : "=m" (*v) : "ir" (mask) :"er0","er1","er2"); + "ldc r1l,ccr" + : "=m" (*v) : "g" (mask) :"er0","er1"); } -#endif -#if defined(__H8300S__) -static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc exr,r2l\n\t" - "orc #0x07,exr\n\t" - "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "and.l er1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r2l,exr" - : "=m" (*v) : "ir" (~(mask)) :"er0","er1","er2"); -} - -static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc exr,r2l\n\t" - "orc #0x07,exr\n\t" - "mov.l %0,er0\n\t" - "mov.l %1,er1\n\t" - "or.l er1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r2l,exr" - : "=m" (*v) : "ir" (mask) :"er0","er1","er2"); -} -#endif /* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h index 703024599da6..87068f245d5c 100644 --- a/include/asm-h8300/bitops.h +++ b/include/asm-h8300/bitops.h @@ -39,16 +39,18 @@ static __inline__ unsigned long ffz(unsigned long word) static __inline__ void set_bit(int nr, volatile unsigned long* addr) { - unsigned char *a = (unsigned char *) addr; - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); + volatile unsigned char *b_addr; + b_addr = &(((volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]); __asm__("mov.l %1,er0\n\t" - "mov.l %0,er1\n\t" - "bset r0l,@er1" - :"=m"(a):"g"(nr & 7):"er0","er1","memory"); + "bset r0l,%0" + :"+m"(*b_addr) + :"g"(nr & 7),"m"(*b_addr) + :"er0"); } -/* Bigendian is 
complexed... */ -#define __set_bit(nr, addr) set_bit(nr, addr) +/* Bigendian is complexed... */ +#define __set_bit(nr, addr) set_bit((nr), (addr)) /* * clear_bit() doesn't provide any barrier for the compiler. @@ -58,261 +60,158 @@ static __inline__ void set_bit(int nr, volatile unsigned long* addr) static __inline__ void clear_bit(int nr, volatile unsigned long* addr) { - unsigned char *a = (unsigned char *) addr; - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); + volatile unsigned char *b_addr; + b_addr = &(((volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]); __asm__("mov.l %1,er0\n\t" - "mov.l %0,er1\n\t" - "bclr r0l,@er1" - :"=m"(a):"g"(nr & 7):"er0","er1","memory"); + "bclr r0l,%0" + :"+m"(*b_addr) + :"g"(nr & 7),"m"(*b_addr) + :"er0"); } -#define __clear_bit(nr, addr) clear_bit(nr, addr) +#define __clear_bit(nr, addr) clear_bit((nr), (addr)) static __inline__ void change_bit(int nr, volatile unsigned long* addr) { - unsigned char *a = (unsigned char *) addr; - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); + volatile unsigned char *b_addr; + b_addr = &(((volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]); __asm__("mov.l %1,er0\n\t" - "mov.l %0,er1\n\t" - "bnot r0l,@er1" - :"=m"(a):"g"(nr & 7):"er0","er1","memory"); + "bnot r0l,%0" + :"+m"(*b_addr) + :"g"(nr & 7),"m"(*b_addr) + :"er0"); } -#define __change_bit(nr, addr) change_bit(nr, addr) +#define __change_bit(nr, addr) change_bit((nr), (addr)) -#if defined(__H8300H__) -static __inline__ int test_and_set_bit(int nr, volatile unsigned long* addr) +static __inline__ int test_bit(int nr, const unsigned long* addr) { - int retval; - unsigned char *a; - a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc ccr,r0h\n\t" - "orc #0x80,ccr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bset r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,ccr\n\t" - "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - 
"rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); - return retval; + return ((1UL << (nr & 7)) & + (((const volatile unsigned char *) addr) + [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)])) != 0; } -#endif -#if defined(__H8300S__) + +#define __test_bit(nr, addr) test_bit(nr, addr) + static __inline__ int test_and_set_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a; - a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc exr,r0h\n\t" - "orc #0x07,exr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bset r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,exr\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); + "stc ccr,r3h\n\t" + "orc #0x80,ccr\n\t" + "btst r3l,%1\n\t" + "bset r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + "ldc r3h,ccr" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#endif static __inline__ int __test_and_set_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "mov.b %1,r0h\n\t" - "btst r0l,r0h\n\t" - "bset r0l,r0h\n\t" - "stc ccr,r0l\n\t" - "mov.b r0h,%1\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory"); + "btst r3l,%1\n\t" + "bset r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#if defined(__H8300H__) static __inline__ int test_and_clear_bit(int nr, volatile unsigned long* addr) { - int retval; - 
unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc ccr,r0h\n\t" - "orc #0x80,ccr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bclr r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,ccr\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); - return retval; -} -#endif -#if defined(__H8300S__) -static __inline__ int test_and_clear_bit(int nr, volatile unsigned long* addr) -{ - int retval; - unsigned char *a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc exr,r0h\n\t" - "orc #0x07,exr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bclr r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,exr\n\t" - "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); + "stc ccr,r3h\n\t" + "orc #0x80,ccr\n\t" + "btst r3l,%1\n\t" + "bclr r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + "ldc r3h,ccr" + : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#endif static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "mov.b %1,r0h\n\t" - "btst r0l,r0h\n\t" - "bclr r0l,r0h\n\t" - "stc ccr,r0l\n\t" - "mov.b r0h,%1\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory"); + "btst r3l,%1\n\t" + "bclr r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + : "=r"(retval),"+m"(*a) :"g"(nr & 
7):"er3","memory"); return retval; } -#if defined(__H8300H__) static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc ccr,r0h\n\t" - "orc #0x80,ccr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bnot r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,ccr\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); - return retval; -} -#endif -#if defined(__H8300S__) -static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr) -{ - int retval; - unsigned char *a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "stc exr,r0h\n\t" - "orc #0x07,exr\n\t" - "mov.b %1,r1l\n\t" - "btst r0l,r1l\n\t" - "bnot r0l,r1l\n\t" - "stc ccr,r0l\n\t" - "mov.b r1l,%1\n\t" - "ldc r0h,exr\n\t" - "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory"); + "stc ccr,r3h\n\t" + "orc #0x80,ccr\n\t" + "btst r3l,%1\n\t" + "bnot r3l,%1\n\t" + "beq 1f\n\t" + "inc.l #1,%0\n\t" + "1:" + "ldc r3h,ccr" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#endif static __inline__ int __test_and_change_bit(int nr, volatile unsigned long* addr) { - int retval; - unsigned char *a = (unsigned char *) addr; + register int retval __asm__("er0"); + volatile unsigned char *a; + a = (volatile unsigned char *)addr; a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %2,er0\n\t" - "mov.b %1,r0h\n\t" - "btst r0l,r0h\n\t" - "bnot r0l,r0h\n\t" - "stc ccr,r0l\n\t" - "mov.b r0h,%1\n\t" + __asm__("mov.l %2,er3\n\t" "sub.l %0,%0\n\t" - "bild #2,r0l\n\t" - "rotxl.l %0" - : 
"=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory"); - return retval; -} - -/* - * This routine doesn't need to be atomic. - */ -static __inline__ int __constant_test_bit(int nr, const volatile unsigned long* addr) -{ - return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; -} - -static __inline__ int __test_bit(int nr, const unsigned long* addr) -{ - int retval; - unsigned char *a = (unsigned char *) addr; - - a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3)); - __asm__("mov.l %1,er0\n\t" - "btst r0l,@%2\n\t" + "btst r3l,%1\n\t" + "bnot r3l,%1\n\t" "beq 1f\n\t" - "sub.l %0,%0\n\t" - "inc.l #1,%0\n" - "bra 2f\n" - "1:\n\t" - "sub.l %0,%0\n" - "2:" - : "=r"(retval) :"g"(nr & 7),"r"(a):"er0"); + "inc.l #1,%0\n\t" + "1:" + : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory"); return retval; } -#define test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? \ - __constant_test_bit((nr),(addr)) : \ - __test_bit((nr),(addr))) - - #define find_first_zero_bit(addr, size) \ find_next_zero_bit((addr), (size), 0) diff --git a/include/asm-h8300/edosk2674/machine-depend.h b/include/asm-h8300/edosk2674/machine-depend.h new file mode 100644 index 000000000000..1e98b40e5f4e --- /dev/null +++ b/include/asm-h8300/edosk2674/machine-depend.h @@ -0,0 +1,70 @@ +/* EDOSK2674 board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#define H8300_TIMER_COUNT_DATA 33000*10/8192 +#define H8300_TIMER_FREQ 33000*1000/8192 +#endif + +/* EDOSK-2674R SMSC Network Controler Target Depend impliments */ +#ifdef H8300_SMSC_DEFINE + +#define SMSC_BASE 0xf80000 +#define SMSC_IRQ 16 + +/* sorry quick hack */ +#if defined(outw) +# undef outw +#endif +#define outw(d,a) edosk2674_smsc_outw(d,(volatile unsigned short *)(a)) +#if defined(inw) +# undef inw +#endif +#define inw(a) edosk2674_smsc_inw((volatile unsigned short *)(a)) +#if defined(outsw) +# undef outsw +#endif +#define outsw(a,p,l) edosk2674_smsc_outsw((volatile unsigned short *)(a),p,l) +#if defined(insw) 
+# undef insw +#endif +#define insw(a,p,l) edosk2674_smsc_insw((volatile unsigned short *)(a),p,l) + +static inline void edosk2674_smsc_outw( + unsigned short d, + volatile unsigned short *a + ) +{ + *a = (d >> 8) | (d << 8); +} + +static inline unsigned short edosk2674_smsc_inw( + volatile unsigned short *a + ) +{ + unsigned short d; + d = *a; + return (d >> 8) | (d << 8); +} + +static inline void edosk2674_smsc_outsw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *a = *p; +} + +static inline void edosk2674_smsc_insw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *p = *a; +} + +#endif diff --git a/include/asm-h8300/generic/machine-depend.h b/include/asm-h8300/generic/machine-depend.h new file mode 100644 index 000000000000..2d78096e54c8 --- /dev/null +++ b/include/asm-h8300/generic/machine-depend.h @@ -0,0 +1,17 @@ +/* machine depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#include +#if defined(CONFIG_H83007) || defined(CONFIG_H83068) || defined(CONFIG_H8S2678) +#define H8300_TIMER_COUNT_DATA CONFIG_CPU_CLOCK*10/8192 +#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*1000/8192 +#endif + +#if defined(CONFIG_H8_3002) || defined(CONFIG_H83048) +#define H8300_TIMER_COUNT_DATA CONFIG_CPU_CLOCK*10/8 +#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*1000/8 +#endif + +#endif + diff --git a/include/asm-h8300/h8300_ne.h b/include/asm-h8300/h8300_ne.h index debef6ae7140..c0350b6ea6e3 100644 --- a/include/asm-h8300/h8300_ne.h +++ b/include/asm-h8300/h8300_ne.h @@ -11,9 +11,9 @@ #define h8300ne_h /****************************************************************************/ -/* Such a description is OK ? 
*/ -#define DEPEND_HEADER(target) -#include DEPEND_HEADER(TARGET) +#define H8300_NE_DEFINE +#include +#undef H8300_NE_DEFINE /****************************************************************************/ #endif /* h8300ne_h */ diff --git a/include/asm-h8300/h8max/ide.h b/include/asm-h8300/h8max/ide.h deleted file mode 100644 index e7d75ac57974..000000000000 --- a/include/asm-h8300/h8max/ide.h +++ /dev/null @@ -1,60 +0,0 @@ -/* H8MAX IDE I/F Config */ - -#define H8300_IDE_BASE 0x200000 -#define H8300_IDE_CTRL 0x60000c -#define H8300_IDE_IRQ 5 -#define H8300_IDE_REG_OFFSET 2 - -#undef outb -#undef inb -#undef outb_p -#undef inb_p -#undef outsw -#undef insw - -#define outb(d,a) h8max_outb(d,(unsigned short *)a) -#define inb(a) h8max_inb((unsigned char *)a) -#define outb_p(d,a) h8max_outb(d,(unsigned short *)a) -#define inb_p(a) h8max_inb((unsigned char *)a) -#define outsw(addr,buf,len) h8max_outsw(addr,buf,len); -#define insw(addr,buf,len) h8max_insw(addr,buf,len); - -static inline void h8max_outb(unsigned short d,unsigned short *a) -{ - *a = d; -} - -static inline unsigned char h8max_inb(unsigned char *a) -{ - return *(a+1); -} - -static inline void h8max_outsw(void *addr, void *buf, int len) -{ - unsigned volatile short *ap = (unsigned volatile short *)addr; - unsigned short *bp = (unsigned short *)buf; - unsigned short d; - while(len--) { - d = *bp++; - *ap = (d >> 8) | (d << 8); - } -} - -static inline void h8max_insw(void *addr, void *buf, int len) -{ - unsigned volatile short *ap = (unsigned volatile short *)addr; - unsigned short *bp = (unsigned short *)buf; - unsigned short d; - while(len--) { - d = *ap; - *bp++ = (d >> 8) | (d << 8); - } -} - -static inline void target_ide_fix_driveid(struct hd_driveid *id) -{ - int c; - unsigned short *p = (unsigned short *)id; - for (c = 0; c < SECTOR_WORDS; c++, p++) - *p = (*p >> 8) | (*p << 8); -} diff --git a/include/asm-h8300/h8max/machine-depend.h b/include/asm-h8300/h8max/machine-depend.h new file mode 100644 index 
000000000000..1a2218f9d3bf --- /dev/null +++ b/include/asm-h8300/h8max/machine-depend.h @@ -0,0 +1,167 @@ +/* H8MAX board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#define H8300_TIMER_COUNT_DATA 25000*10/8192 +#define H8300_TIMER_FREQ 25000*1000/8192 +#endif + +/* H8MAX RTL8019AS Config */ +#ifdef H8300_NE_DEFINE + +#define NE2000_ADDR 0x800600 +#define NE2000_IRQ 4 +#define NE2000_IRQ_VECTOR (12 + NE2000_IRQ) +#define NE2000_BYTE volatile unsigned short + +#define IER 0xfee015 +#define ISR 0xfee016 +#define IRQ_MASK (1 << NE2000_IRQ) +/* sorry quick hack */ +#if defined(outb) +# undef outb +#endif +#define outb(d,a) h8max_outb((d),(a) - NE2000_ADDR) +#if defined(inb) +# undef inb +#endif +#define inb(a) h8max_inb((a) - NE2000_ADDR) +#if defined(outb_p) +# undef outb_p +#endif +#define outb_p(d,a) h8max_outb((d),(a) - NE2000_ADDR) +#if defined(inb_p) +# undef inb_p +#endif +#define inb_p(a) h8max_inb((a) - NE2000_ADDR) +#if defined(outsw) +# undef outsw +#endif +#define outsw(a,p,l) h8max_outsw((a) - NE2000_ADDR,(unsigned short *)p,l) +#if defined(insw) +# undef insw +#endif +#define insw(a,p,l) h8max_insw((a) - NE2000_ADDR,(unsigned short *)p,l) +#if defined(outsb) +# undef outsb +#endif +#define outsb(a,p,l) h8max_outsb((a) - NE2000_ADDR,(unsigned char *)p,l) +#if defined(insb) +# undef insb +#endif +#define insb(a,p,l) h8max_insb((a) - NE2000_ADDR,(unsigned char *)p,l) + +#define H8300_INIT_NE() \ +do { \ + wordlength = 2; \ + h8max_outb(0x49, ioaddr + EN0_DCFG); \ + SA_prom[14] = SA_prom[15] = 0x57;\ +} while(0) + +static inline void h8max_outb(unsigned char d,unsigned char a) +{ + *(unsigned short *)(NE2000_ADDR + (a << 1)) = d; +} + +static inline unsigned char h8max_inb(unsigned char a) +{ + return *(unsigned char *)(NE2000_ADDR + (a << 1) +1); +} + +static inline void h8max_outsw(unsigned char a,unsigned short *p,unsigned long l) +{ + unsigned short d; + for (; l != 0; --l, p++) { + d = (((*p) >> 8) & 0xff) | ((*p) << 8); + 
*(unsigned short *)(NE2000_ADDR + (a << 1)) = d; + } +} + +static inline void h8max_insw(unsigned char a,unsigned short *p,unsigned long l) +{ + unsigned short d; + for (; l != 0; --l, p++) { + d = *(unsigned short *)(NE2000_ADDR + (a << 1)); + *p = (d << 8)|((d >> 8) & 0xff); + } +} + +static inline void h8max_outsb(unsigned char a,unsigned char *p,unsigned long l) +{ + for (; l != 0; --l, p++) { + *(unsigned short *)(NE2000_ADDR + (a << 1)) = *p; + } +} + +static inline void h8max_insb(unsigned char a,unsigned char *p,unsigned long l) +{ + for (; l != 0; --l, p++) { + *p = *((unsigned char *)(NE2000_ADDR + (a << 1))+1); + } +} + +#endif + +/* H8MAX IDE I/F Config */ +#ifdef H8300_IDE_DEFINE + +#define H8300_IDE_BASE 0x200000 +#define H8300_IDE_CTRL 0x60000c +#define H8300_IDE_IRQ 5 +#define H8300_IDE_REG_OFFSET 2 + +#undef outb +#undef inb +#undef outb_p +#undef inb_p +#undef outsw +#undef insw + +#define outb(d,a) h8max_outb(d,(unsigned short *)a) +#define inb(a) h8max_inb((unsigned char *)a) +#define outb_p(d,a) h8max_outb(d,(unsigned short *)a) +#define inb_p(a) h8max_inb((unsigned char *)a) +#define outsw(addr,buf,len) h8max_outsw(addr,buf,len); +#define insw(addr,buf,len) h8max_insw(addr,buf,len); + +static inline void h8max_outb(unsigned short d,unsigned short *a) +{ + *a = d; +} + +static inline unsigned char h8max_inb(unsigned char *a) +{ + return *(a+1); +} + +static inline void h8max_outsw(void *addr, void *buf, int len) +{ + unsigned volatile short *ap = (unsigned volatile short *)addr; + unsigned short *bp = (unsigned short *)buf; + unsigned short d; + while(len--) { + d = *bp++; + *ap = (d >> 8) | (d << 8); + } +} + +static inline void h8max_insw(void *addr, void *buf, int len) +{ + unsigned volatile short *ap = (unsigned volatile short *)addr; + unsigned short *bp = (unsigned short *)buf; + unsigned short d; + while(len--) { + d = *ap; + *bp++ = (d >> 8) | (d << 8); + } +} + +static inline void target_ide_fix_driveid(struct hd_driveid *id) +{ + int 
c; + unsigned short *p = (unsigned short *)id; + for (c = 0; c < SECTOR_WORDS; c++, p++) + *p = (*p >> 8) | (*p << 8); +} + +#endif diff --git a/include/asm-h8300/hardirq.h b/include/asm-h8300/hardirq.h index 20f3571cc299..ccab235b9f83 100644 --- a/include/asm-h8300/hardirq.h +++ b/include/asm-h8300/hardirq.h @@ -74,12 +74,6 @@ typedef struct { #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) -#ifdef CONFIG_PREEMPT -# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) -#else -# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET -#endif - #ifdef CONFIG_PREEMPT # define in_atomic() (preempt_count() != kernel_locked()) # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) diff --git a/include/asm-h8300/ide.h b/include/asm-h8300/ide.h index 3ebf8e262324..3669f106312b 100644 --- a/include/asm-h8300/ide.h +++ b/include/asm-h8300/ide.h @@ -70,9 +70,10 @@ typedef union { * Our list of ports/irq's for different boards. */ -/* Such a description is OK ? */ -#define DEPEND_HEADER(target) -#include DEPEND_HEADER(TARGET) +/* machine depend header include */ +#define H8300_IDE_DEFINE +#include +#undef H8300_IDE_DEFINE /****************************************************************************/ diff --git a/include/asm-h8300/io.h b/include/asm-h8300/io.h index 69efa4f2c0de..42f91752b920 100644 --- a/include/asm-h8300/io.h +++ b/include/asm-h8300/io.h @@ -51,21 +51,12 @@ static inline unsigned int _swapl(volatile unsigned long v) #define writew(b,addr) (void)((*(volatile unsigned short *) (addr & 0x00ffffff)) = (b)) #define writel(b,addr) (void)((*(volatile unsigned int *) (addr & 0x00ffffff)) = (b)) -/* - * The following are some defines we need for MTD with our - * COBRA5272 board. - * Because I don't know if they break something I have - * #ifdef'd them. 
- * (020325 - hede) - */ -#ifdef CONFIG_senTec #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel -#endif /* CONFIG_senTec */ static inline void io_outsb(unsigned int addr, void *buf, int len) { diff --git a/include/asm-h8300/machine-depend.h b/include/asm-h8300/machine-depend.h new file mode 100644 index 000000000000..1e98b40e5f4e --- /dev/null +++ b/include/asm-h8300/machine-depend.h @@ -0,0 +1,70 @@ +/* EDOSK2674 board depend header */ + +/* TIMER rate define */ +#ifdef H8300_TIMER_DEFINE +#define H8300_TIMER_COUNT_DATA 33000*10/8192 +#define H8300_TIMER_FREQ 33000*1000/8192 +#endif + +/* EDOSK-2674R SMSC Network Controler Target Depend impliments */ +#ifdef H8300_SMSC_DEFINE + +#define SMSC_BASE 0xf80000 +#define SMSC_IRQ 16 + +/* sorry quick hack */ +#if defined(outw) +# undef outw +#endif +#define outw(d,a) edosk2674_smsc_outw(d,(volatile unsigned short *)(a)) +#if defined(inw) +# undef inw +#endif +#define inw(a) edosk2674_smsc_inw((volatile unsigned short *)(a)) +#if defined(outsw) +# undef outsw +#endif +#define outsw(a,p,l) edosk2674_smsc_outsw((volatile unsigned short *)(a),p,l) +#if defined(insw) +# undef insw +#endif +#define insw(a,p,l) edosk2674_smsc_insw((volatile unsigned short *)(a),p,l) + +static inline void edosk2674_smsc_outw( + unsigned short d, + volatile unsigned short *a + ) +{ + *a = (d >> 8) | (d << 8); +} + +static inline unsigned short edosk2674_smsc_inw( + volatile unsigned short *a + ) +{ + unsigned short d; + d = *a; + return (d >> 8) | (d << 8); +} + +static inline void edosk2674_smsc_outsw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *a = *p; +} + +static inline void edosk2674_smsc_insw( + volatile unsigned short *a, + unsigned short *p, + unsigned long l + ) +{ + for (; l != 0; --l, p++) + *p = *a; +} + +#endif diff --git a/include/asm-h8300/processor.h 
b/include/asm-h8300/processor.h index a945b8bede2f..819c9b34e152 100644 --- a/include/asm-h8300/processor.h +++ b/include/asm-h8300/processor.h @@ -70,12 +70,12 @@ struct thread_struct { * pass the data segment into user programs if it exists, * it can't hurt anything as far as I can tell */ -#if defined(__H8300S__) +#if defined(__H8300H__) #define start_thread(_regs, _pc, _usp) \ do { \ set_fs(USER_DS); /* reads from user space */ \ (_regs)->pc = (_pc); \ - (_regs)->ccr &= ~0x10; /* clear kernel flag */ \ + (_regs)->ccr &= 0x00; /* clear kernel flag */ \ } while(0) #endif #if defined(__H8300S__) @@ -83,7 +83,7 @@ do { \ do { \ set_fs(USER_DS); /* reads from user space */ \ (_regs)->pc = (_pc); \ - (_regs)->ccr &= ~0x10; /* clear kernel flag */ \ + (_regs)->ccr = 0x00; /* clear kernel flag */ \ (_regs)->exr = 0x78; /* enable all interrupts */ \ /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \ wrusp(((unsigned long)(_usp)) - 14); \ diff --git a/include/asm-h8300/regs267x.h b/include/asm-h8300/regs267x.h new file mode 100644 index 000000000000..1bff731a9f77 --- /dev/null +++ b/include/asm-h8300/regs267x.h @@ -0,0 +1,336 @@ +/* internal Peripherals Register address define */ +/* CPU: H8/306x */ + +#if !defined(__REGS_H8S267x__) +#define __REGS_H8S267x__ + +#if defined(__KERNEL__) + +#define DASTCR 0xFEE01A +#define DADR0 0xFFFFA4 +#define DADR1 0xFFFFA5 +#define DACR01 0xFFFFA6 +#define DADR2 0xFFFFA8 +#define DADR3 0xFFFFA9 +#define DACR23 0xFFFFAA + +#define ADDRA 0xFFFF90 +#define ADDRAH 0xFFFF90 +#define ADDRAL 0xFFFF91 +#define ADDRB 0xFFFF92 +#define ADDRBH 0xFFFF92 +#define ADDRBL 0xFFFF93 +#define ADDRC 0xFFFF94 +#define ADDRCH 0xFFFF94 +#define ADDRCL 0xFFFF95 +#define ADDRD 0xFFFF96 +#define ADDRDH 0xFFFF96 +#define ADDRDL 0xFFFF97 +#define ADDRE 0xFFFF98 +#define ADDREH 0xFFFF98 +#define ADDREL 0xFFFF99 +#define ADDRF 0xFFFF9A +#define ADDRFH 0xFFFF9A +#define ADDRFL 0xFFFF9B +#define ADDRG 0xFFFF9C +#define ADDRGH 0xFFFF9C +#define 
ADDRGL 0xFFFF9D +#define ADDRH 0xFFFF9E +#define ADDRHH 0xFFFF9E +#define ADDRHL 0xFFFF9F + +#define ADCSR 0xFFFFA0 +#define ADCR 0xFFFFA1 + +#define ABWCR 0xFFFEC0 +#define ASTCR 0xFFFEC1 +#define WTCRAH 0xFFFEC2 +#define WTCRAL 0xFFFEC3 +#define WTCRBH 0xFFFEC4 +#define WTCRBL 0xFFFEC5 +#define RDNCR 0xFFFEC6 +#define CSACRH 0xFFFEC8 +#define CSACRL 0xFFFEC9 +#define BROMCRH 0xFFFECA +#define BROMCRL 0xFFFECB +#define BCR 0xFFFECC +#define DRAMCR 0xFFFED0 +#define DRACCR 0xFFFED2 +#define REFCR 0xFFFED4 +#define RTCNT 0xFFFED6 +#define RTCOR 0xFFFED7 + +#define MAR0AH 0xFFFEE0 +#define MAR0AL 0xFFFEE2 +#define IOAR0A 0xFFFEE4 +#define ETCR0A 0xFFFEE6 +#define MAR0BH 0xFFFEE8 +#define MAR0BL 0xFFFEEA +#define IOAR0B 0xFFFEEC +#define ETCR0B 0xFFFEEE +#define MAR1AH 0xFFFEF0 +#define MAR1AL 0xFFFEF2 +#define IOAR1A 0xFFFEF4 +#define ETCR1A 0xFFFEF6 +#define MAR1BH 0xFFFEF8 +#define MAR1BL 0xFFFEFA +#define IOAR1B 0xFFFEFC +#define ETCR1B 0xFFFEFE +#define DMAWER 0xFFFF20 +#define DMATCR 0xFFFF21 +#define DMACR0A 0xFFFF22 +#define DMACR0B 0xFFFF23 +#define DMACR1A 0xFFFF24 +#define DMACR1B 0xFFFF25 +#define DMABCRH 0xFFFF26 +#define DMABCRL 0xFFFF27 + +#define EDSAR0 0xFFFDC0 +#define EDDAR0 0xFFFDC4 +#define EDTCR0 0xFFFDC8 +#define EDMDR0 0xFFFDCC +#define EDMDR0H 0xFFFDCC +#define EDMDR0L 0xFFFDCD +#define EDACR0 0xFFFDCE +#define EDSAR1 0xFFFDD0 +#define EDDAR1 0xFFFDD4 +#define EDTCR1 0xFFFDD8 +#define EDMDR1 0xFFFDDC +#define EDMDR1H 0xFFFDDC +#define EDMDR1L 0xFFFDDD +#define EDACR1 0xFFFDDE +#define EDSAR2 0xFFFDE0 +#define EDDAR2 0xFFFDE4 +#define EDTCR2 0xFFFDE8 +#define EDMDR2 0xFFFDEC +#define EDMDR2H 0xFFFDEC +#define EDMDR2L 0xFFFDED +#define EDACR2 0xFFFDEE +#define EDSAR3 0xFFFDF0 +#define EDDAR3 0xFFFDF4 +#define EDTCR3 0xFFFDF8 +#define EDMDR3 0xFFFDFC +#define EDMDR3H 0xFFFDFC +#define EDMDR3L 0xFFFDFD +#define EDACR3 0xFFFDFE + +#define IPRA 0xFFFE00 +#define IPRB 0xFFFE02 +#define IPRC 0xFFFE04 +#define IPRD 0xFFFE06 +#define IPRE 0xFFFE08 
+#define IPRF 0xFFFE0A +#define IPRG 0xFFFE0C +#define IPRH 0xFFFE0E +#define IPRI 0xFFFE10 +#define IPRJ 0xFFFE12 +#define IPRK 0xFFFE14 +#define ITSR 0xFFFE16 +#define SSIER 0xFFFE18 +#define ISCRH 0xFFFE1A +#define ISCRL 0xFFFE1C + +#define INTCR 0xFFFF31 +#define IER 0xFFFF32 +#define IERH 0xFFFF32 +#define IERL 0xFFFF33 +#define ISR 0xFFFF34 +#define ISRH 0xFFFF34 +#define ISRL 0xFFFF35 + +#define P1DDR 0xFFFE20 +#define P2DDR 0xFFFE21 +#define P3DDR 0xFFFE22 +#define P4DDR 0xFFFE23 +#define P5DDR 0xFFFE24 +#define P6DDR 0xFFFE25 +#define P7DDR 0xFFFE26 +#define P8DDR 0xFFFE27 +#define P9DDR 0xFFFE28 +#define PADDR 0xFFFE29 +#define PBDDR 0xFFFE2A +#define PCDDR 0xFFFE2B +#define PDDDR 0xFFFE2C +#define PEDDR 0xFFFE2D +#define PFDDR 0xFFFE2E +#define PGDDR 0xFFFE2F +#define PHDDR 0xFFFF74 + +#define PFCR0 0xFFFE32 +#define PFCR1 0xFFFE33 +#define PFCR2 0xFFFE34 + +#define PAPCR 0xFFFE36 +#define PBPCR 0xFFFE37 +#define PCPCR 0xFFFE38 +#define PDPCR 0xFFFE39 +#define PEPCR 0xFFFE3A + +#define P3ODR 0xFFFE3C +#define PAODR 0xFFFE3D + +#define P1DR 0xFFFF60 +#define P2DR 0xFFFF61 +#define P3DR 0xFFFF62 +#define P4DR 0xFFFF63 +#define P5DR 0xFFFF64 +#define P6DR 0xFFFF65 +#define P7DR 0xFFFF66 +#define P8DR 0xFFFF67 +#define P9DR 0xFFFF68 +#define PADR 0xFFFF69 +#define PBDR 0xFFFF6A +#define PCDR 0xFFFF6B +#define PDDR 0xFFFF6C +#define PEDR 0xFFFF6D +#define PFDR 0xFFFF6E +#define PGDR 0xFFFF6F +#define PHDR 0xFFFF72 + +#define PORT1 0xFFFF50 +#define PORT2 0xFFFF51 +#define PORT3 0xFFFF52 +#define PORT4 0xFFFF53 +#define PORT5 0xFFFF54 +#define PORT6 0xFFFF55 +#define PORT7 0xFFFF56 +#define PORT8 0xFFFF57 +#define PORT9 0xFFFF58 +#define PORTA 0xFFFF59 +#define PORTB 0xFFFF5A +#define PORTC 0xFFFF5B +#define PORTD 0xFFFF5C +#define PORTE 0xFFFF5D +#define PORTF 0xFFFF5E +#define PORTG 0xFFFF5F +#define PORTH 0xFFFF70 + +#define PCR 0xFFFF46 +#define PMR 0xFFFF47 +#define NDERH 0xFFFF48 +#define NDERL 0xFFFF49 +#define PODRH 0xFFFF4A +#define PODRL 0xFFFF4B 
+#define NDRH1 0xFFFF4C +#define NDRL1 0xFFFF4D +#define NDRH2 0xFFFF4E +#define NDRL2 0xFFFF4F + +#define SMR0 0xFFFF78 +#define BRR0 0xFFFF79 +#define SCR0 0xFFFF7A +#define TDR0 0xFFFF7B +#define SSR0 0xFFFF7C +#define RDR0 0xFFFF7D +#define SCMR0 0xFFFF7E +#define SMR1 0xFFFF80 +#define BRR1 0xFFFF81 +#define SCR1 0xFFFF82 +#define TDR1 0xFFFF83 +#define SSR1 0xFFFF84 +#define RDR1 0xFFFF85 +#define SCMR1 0xFFFF86 +#define SMR2 0xFFFF88 +#define BRR2 0xFFFF89 +#define SCR2 0xFFFF8A +#define TDR2 0xFFFF8B +#define SSR2 0xFFFF8C +#define RDR2 0xFFFF8D +#define SCMR2 0xFFFF8E + +#define IRCR0 0xFFFE1E +#define SEMR 0xFFFDA8 + +#define MDCR 0xFFFF3E +#define SYSCR 0xFFFF3D +#define MSTPCRH 0xFFFF40 +#define MSTPCRL 0xFFFF41 +#define FLMCR1 0xFFFFC8 +#define FLMCR2 0xFFFFC9 +#define EBR1 0xFFFFCA +#define EBR2 0xFFFFCB +#define CTGARC_RAMCR 0xFFFECE +#define SBYCR 0xFFFF3A +#define SCKCR 0xFFFF3B +#define PLLCR 0xFFFF45 + +#define TSTR 0xFFFFC0 +#define TSNC 0XFFFFC1 + +#define TCR0 0xFFFFD0 +#define TMDR0 0xFFFFD1 +#define TIORH0 0xFFFFD2 +#define TIORL0 0xFFFFD3 +#define TIER0 0xFFFFD4 +#define TSR0 0xFFFFD5 +#define TCNT0 0xFFFFD6 +#define GRA0 0xFFFFD8 +#define GRB0 0xFFFFDA +#define GRC0 0xFFFFDC +#define GRD0 0xFFFFDE +#define TCR1 0xFFFFE0 +#define TMDR1 0xFFFFE1 +#define TIORH1 0xFFFFE2 +#define TIORL1 0xFFFFE3 +#define TIER1 0xFFFFE4 +#define TSR1 0xFFFFE5 +#define TCNT1 0xFFFFE6 +#define GRA1 0xFFFFE8 +#define GRB1 0xFFFFEA +#define TCR2 0xFFFFF0 +#define TMDR2 0xFFFFF1 +#define TIORH2 0xFFFFF2 +#define TIORL2 0xFFFFF3 +#define TIER2 0xFFFFF4 +#define TSR2 0xFFFFF5 +#define TCNT2 0xFFFFF6 +#define GRA2 0xFFFFF8 +#define GRB2 0xFFFFFA +#define TCR3 0xFFFE80 +#define TMDR3 0xFFFE81 +#define TIORH3 0xFFFE82 +#define TIORL3 0xFFFE83 +#define TIER3 0xFFFE84 +#define TSR3 0xFFFE85 +#define TCNT3 0xFFFE86 +#define GRA3 0xFFFE88 +#define GRB3 0xFFFE8A +#define GRC3 0xFFFE8C +#define GRD3 0xFFFE8E +#define TCR4 0xFFFE90 +#define TMDR4 0xFFFE91 +#define TIORH4 
0xFFFE92 +#define TIORL4 0xFFFE93 +#define TIER4 0xFFFE94 +#define TSR4 0xFFFE95 +#define TCNT4 0xFFFE96 +#define GRA4 0xFFFE98 +#define GRB4 0xFFFE9A +#define TCR5 0xFFFEA0 +#define TMDR5 0xFFFEA1 +#define TIORH5 0xFFFEA2 +#define TIORL5 0xFFFEA3 +#define TIER5 0xFFFEA4 +#define TSR5 0xFFFEA5 +#define TCNT5 0xFFFEA6 +#define GRA5 0xFFFEA8 +#define GRB5 0xFFFEAA + +#define _8TCR0 0xFFFFB0 +#define _8TCR1 0xFFFFB1 +#define _8TCSR0 0xFFFFB2 +#define _8TCSR1 0xFFFFB3 +#define _8TCORA0 0xFFFFB4 +#define _8TCORA1 0xFFFFB5 +#define _8TCORB0 0xFFFFB6 +#define _8TCORB1 0xFFFFB7 +#define _8TCNT0 0xFFFFB8 +#define _8TCNT1 0xFFFFB9 + +#define TCSR 0xFFFFBC +#define TCNT 0xFFFFBD +#define RSTCSRW 0xFFFFBE +#define RSTCSRR 0xFFFFBF + +#endif /* __KERNEL__ */ +#endif /* __REGS_H8S267x__ */ diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h index 8fdd9e2e8833..962f5eb32d16 100644 --- a/include/asm-h8300/semaphore.h +++ b/include/asm-h8300/semaphore.h @@ -83,7 +83,6 @@ extern spinlock_t semaphore_wake_lock; * "down_failed" is a special asm handler that calls the C * routine that actually waits. 
See arch/m68k/lib/semaphore.S */ -#if defined(__H8300H__) static inline void down(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -96,9 +95,9 @@ static inline void down(struct semaphore * sem) __asm__ __volatile__( "stc ccr,r3l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%1, er1\n\t" + "mov.l %0, er1\n\t" "dec.l #1,er1\n\t" - "mov.l er1,@%1\n\t" + "mov.l er1,%0\n\t" "bpl 1f\n\t" "ldc r3l,ccr\n\t" "jsr @___down\n\t" @@ -106,38 +105,11 @@ static inline void down(struct semaphore * sem) "1:\n\t" "ldc r3l,ccr\n" "2:" - : "=m"(sem->count) - : "g" (count) - : "cc", "er1", "er2", "er3", "er4", "memory"); + : "+m"(*count) + : + : "cc", "er1", "er2", "er3"); } -#endif -#if defined(__H8300S__) -static inline void down(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%1, er1\n\t" - "dec.l #1,er1\n\t" - "mov.l er1,@%1\n\t" - "ldc r3l,exr\n\t" - "bpl 1f\n\t" - "jsr @___down\n" - "1:" - : "=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); -} -#endif -#if defined(__H8300H__) static inline int down_interruptible(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -148,56 +120,25 @@ static inline int down_interruptible(struct semaphore * sem) count = &(sem->count); __asm__ __volatile__( - "stc ccr,r3l\n\t" + "stc ccr,r1l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%2, er2\n\t" + "mov.l %1, er2\n\t" "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" + "mov.l er2,%1\n\t" "bpl 1f\n\t" - "ldc r3l,ccr\n\t" + "ldc r1l,ccr\n\t" "jsr @___down_interruptible\n\t" "bra 2f\n" "1:\n\t" - "ldc r3l,ccr\n\t" - "sub.l %0,%0\n" - "2:" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); - return (int)count; -} -#endif -#if defined(__H8300S__) -static inline int down_interruptible(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if 
WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%2, er2\n\t" - "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" - "ldc r3l,exr\n\t" - "bmi 1f\n\t" + "ldc r1l,ccr\n\t" "sub.l %0,%0\n\t" - "bra 2f\n" - "1:\n\t" - "jsr @___down_interruptible\n" - "2:" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); + "2:\n\t" + : "=r" (count),"+m" (*count) + : + : "cc", "er1", "er2", "er3"); return (int)count; } -#endif -#if defined(__H8300H__) static inline int down_trylock(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -210,60 +151,26 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "stc ccr,r3l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%2,er2\n\t" + "mov.l %0,er2\n\t" "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" + "mov.l er2,%0\n\t" "bpl 1f\n\t" "ldc r3l,ccr\n\t" - "jmp @3f\n" - "1:\n\t" - "ldc r3l,ccr\n\t" - "sub.l %0,%0\n" + "jmp @3f\n\t" LOCK_SECTION_START(".align 2\n\t") "3:\n\t" "jsr @___down_trylock\n\t" "jmp @2f\n\t" LOCK_SECTION_END - "2:" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er2", "er3", "memory"); - return (int)count; -} -#endif -#if defined(__H8300S__) -static inline int down_trylock(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%2,er2\n\t" - "dec.l #1,er2\n\t" - "mov.l er2,@%2\n\t" - "ldc r3l,exr\n\t" - "bpl 1f\n\t" - "jmp @3f\n" "1:\n\t" - "sub.l %0,%0\n\t" - LOCK_SECTION_START(".align 2\n\t") - "3:\n\t" - "jsr @___down_trylock\n\t" - "jmp @2f\n\t" - LOCK_SECTION_END - "2:\n\t" - : "=r" (count),"=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); + "ldc r3l,ccr\n\t" + "sub.l %1,%1\n" + "2:" + : "+m" (*count),"=r"(count) + : + : "cc", "er1","er2", "er3"); return 
(int)count; } -#endif /* * Note! This is subtle. We jump to wake people up only if @@ -271,7 +178,6 @@ static inline int down_trylock(struct semaphore * sem) * The default case (no contention) will result in NO * jumps for both down() and up(). */ -#if defined(__H8300H__) static inline void up(struct semaphore * sem) { register atomic_t *count asm("er0"); @@ -284,47 +190,19 @@ static inline void up(struct semaphore * sem) __asm__ __volatile__( "stc ccr,r3l\n\t" "orc #0x80,ccr\n\t" - "mov.l @%1,er1\n\t" + "mov.l %0,er1\n\t" "inc.l #1,er1\n\t" - "mov.l er1,@%1\n\t" + "mov.l er1,%0\n\t" "ldc r3l,ccr\n\t" "sub.l er2,er2\n\t" "cmp.l er2,er1\n\t" "bgt 1f\n\t" "jsr @___up\n" "1:" - : "=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); + : "+m"(*count) + : + : "cc", "er1", "er2", "er3"); } -#endif -#if defined(__H8300S__) -static inline void up(struct semaphore * sem) -{ - register atomic_t *count asm("er0"); - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = &(sem->count); - __asm__ __volatile__( - "stc exr,r3l\n\t" - "orc #0x07,exr\n\t" - "mov.l @%1,er1\n\t" - "inc.l #1,er1\n\t" - "mov.l er1,@%1\n\t" - "ldc r3l,exr\n\t" - "sub.l er2,er2\n\t" - "cmp.l er2,er1\n\t" - "bgt 1f\n\t" - "jsr @___up\n" - "1:" - : "=m"(sem->count) - : "r" (count) - : "cc", "er1", "er2", "er3", "memory"); -} -#endif #endif /* __ASSEMBLY__ */ diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h index 2c187ff8e348..c2d2457b138c 100644 --- a/include/asm-h8300/system.h +++ b/include/asm-h8300/system.h @@ -35,6 +35,7 @@ * * H8/300 Porting 2002/09/04 Yoshinori Sato */ + asmlinkage void resume(void); #define switch_to(prev,next,last) { \ void *_last; \ @@ -52,7 +53,6 @@ asmlinkage void resume(void); (last) = _last; \ } -#if defined(__H8300H__) #define __sti() asm volatile ("andc #0x7f,ccr") #define __cli() asm volatile ("orc #0x80,ccr") @@ -69,25 +69,6 @@ asmlinkage void resume(void); ((flags & 0x80) == 0x80); \ }) -#endif -#if 
defined(__H8300S__) -#define __sti() asm volatile ("andc #0xf8,exr") -#define __cli() asm volatile ("orc #0x07,exr") - -#define __save_flags(x) \ - asm volatile ("stc exr,r0l\n\tmov.l er0,%0":"=r" (x) : : "er0") - -#define __restore_flags(x) \ - asm volatile ("mov.l %0,er0\n\tldc r0l,exr": :"r" (x) : "er0") -#endif - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - __save_flags(flags); \ - ((flags & 0x07) == 0x07); \ -}) - #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") /* For spinlocks etc */ diff --git a/include/asm-h8300/timex.h b/include/asm-h8300/timex.h index 99a472819dc1..4ea243a11566 100644 --- a/include/asm-h8300/timex.h +++ b/include/asm-h8300/timex.h @@ -6,9 +6,9 @@ #ifndef _ASM_H8300_TIMEX_H #define _ASM_H8300_TIMEX_H -/* Such a description is OK ? */ -#define TIMEX_DEPEND_HEADER(target) -#include TIMEX_DEPEND_HEADER(TARGET) +#define H8300_TIMER_DEFINE +#include +#undef H8300_TIMER_DEFINE #define CLOCK_TICK_RATE H8300_TIMER_FREQ #define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */ -- cgit v1.2.3