summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/early-userspace/README75
-rw-r--r--Documentation/early-userspace/buffer-format.txt112
-rw-r--r--Documentation/power/devices.txt143
-rw-r--r--Documentation/power/interface.txt43
-rw-r--r--Documentation/power/states.txt79
-rw-r--r--Documentation/power/swsusp.txt (renamed from Documentation/swsusp.txt)0
-rw-r--r--MAINTAINERS13
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/h8300/Makefile28
-rw-r--r--arch/h8300/boot/Makefile12
-rw-r--r--arch/h8300/kernel/signal.c5
-rw-r--r--arch/h8300/kernel/syscalls.S8
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S32
-rw-r--r--arch/h8300/lib/romfs.S52
-rw-r--r--arch/h8300/platform/h8300h/aki3068net/timer.c6
-rw-r--r--arch/h8300/platform/h8300h/entry.S3
-rw-r--r--arch/h8300/platform/h8300h/generic/crt0_rom.S13
-rw-r--r--arch/h8300/platform/h8300h/h8max/timer.c6
-rw-r--r--arch/h8300/platform/h8300h/ints.c38
-rw-r--r--arch/h8300/platform/h8s/edosk2674/crt0_ram.S7
-rw-r--r--arch/h8300/platform/h8s/edosk2674/timer.c6
-rw-r--r--arch/h8300/platform/h8s/entry.S3
-rw-r--r--arch/h8300/platform/h8s/generic/crt0_ram.S7
-rw-r--r--arch/h8300/platform/h8s/generic/crt0_rom.S40
-rw-r--r--arch/h8300/platform/h8s/generic/rom.ld4
-rw-r--r--arch/h8300/platform/h8s/generic/timer.c6
-rw-r--r--arch/h8300/platform/h8s/ints.c40
-rw-r--r--arch/i386/kernel/acpi/boot.c22
-rw-r--r--arch/i386/kernel/apm.c6
-rw-r--r--arch/i386/kernel/cpu/intel.c9
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c6
-rw-r--r--arch/i386/kernel/dmi_scan.c18
-rw-r--r--arch/i386/kernel/mpparse.c3
-rw-r--r--arch/i386/kernel/setup.c10
-rw-r--r--arch/i386/oprofile/nmi_int.c84
-rw-r--r--arch/i386/oprofile/op_model_athlon.c26
-rw-r--r--arch/i386/oprofile/op_model_p4.c24
-rw-r--r--arch/i386/oprofile/op_model_ppro.c16
-rw-r--r--arch/i386/oprofile/op_x86_model.h13
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/hp/sim/Makefile2
-rw-r--r--arch/ia64/hp/sim/boot/Makefile (renamed from arch/ia64/boot/Makefile)4
-rw-r--r--arch/ia64/hp/sim/boot/boot_head.S136
-rw-r--r--arch/ia64/hp/sim/boot/bootloader.c (renamed from arch/ia64/boot/bootloader.c)44
-rw-r--r--arch/ia64/hp/sim/boot/bootloader.lds (renamed from arch/ia64/boot/bootloader.lds)0
-rw-r--r--arch/ia64/hp/sim/boot/fw-emu.c (renamed from arch/ia64/kernel/fw-emu.c)140
-rw-r--r--arch/ia64/hp/sim/boot/ssc.h35
-rw-r--r--arch/ia64/hp/sim/hpsim.S10
-rw-r--r--arch/ia64/hp/sim/hpsim_setup.c13
-rw-r--r--arch/ia64/ia32/ia32_signal.c146
-rw-r--r--arch/ia64/ia32/ia32_support.c32
-rw-r--r--arch/ia64/ia32/ia32_traps.c6
-rw-r--r--arch/ia64/ia32/ia32priv.h31
-rw-r--r--arch/ia64/ia32/sys_ia32.c7
-rw-r--r--arch/ia64/kernel/entry.S12
-rw-r--r--arch/ia64/kernel/init_task.c2
-rw-r--r--arch/ia64/kernel/iosapic.c8
-rw-r--r--arch/ia64/kernel/irq_ia64.c13
-rw-r--r--arch/ia64/kernel/mca.c32
-rw-r--r--arch/ia64/kernel/perfmon.c484
-rw-r--r--arch/ia64/kernel/perfmon_default_smpl.c84
-rw-r--r--arch/ia64/kernel/setup.c10
-rw-r--r--arch/ia64/kernel/signal.c16
-rw-r--r--arch/ia64/kernel/traps.c34
-rw-r--r--arch/ia64/kernel/unaligned.c53
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S5
-rw-r--r--arch/ia64/mm/tlb.c12
-rw-r--r--arch/ia64/sn/fakeprom/fw-emu.c12
-rw-r--r--arch/ia64/sn/kernel/irq.c12
-rw-r--r--arch/ia64/sn/kernel/setup.c2
-rw-r--r--arch/ia64/sn/kernel/sn2/io.c70
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/sleep/main.c318
-rw-r--r--drivers/acpi/sleep/proc.c82
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/power/power.h4
-rw-r--r--drivers/base/power/resume.c101
-rw-r--r--drivers/base/power/runtime.c12
-rw-r--r--drivers/base/power/suspend.c171
-rw-r--r--drivers/base/sys.c111
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/ide/ide-cd.c4
-rw-r--r--drivers/ide/ide-disk.c4
-rw-r--r--drivers/ide/ide.c16
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c1
-rw-r--r--drivers/oprofile/buffer_sync.c4
-rw-r--r--drivers/oprofile/oprofile_files.c14
-rw-r--r--drivers/oprofile/oprofile_stats.c3
-rw-r--r--drivers/oprofile/oprofile_stats.h1
-rw-r--r--drivers/oprofile/oprofilefs.c12
-rw-r--r--drivers/pci/pci-driver.c31
-rw-r--r--fs/cifs/CHANGES11
-rw-r--r--fs/cifs/cifs_debug.c48
-rwxr-xr-xfs/cifs/cifsencrypt.c7
-rw-r--r--fs/cifs/cifsfs.c113
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/cifspdu.h116
-rw-r--r--fs/cifs/cifsproto.h2
-rw-r--r--fs/cifs/cifssmb.c91
-rw-r--r--fs/cifs/connect.c8
-rw-r--r--fs/cifs/dir.c1
-rw-r--r--fs/cifs/file.c8
-rw-r--r--fs/cifs/inode.c33
-rw-r--r--fs/cifs/transport.c5
-rw-r--r--include/asm-h8300/aki3068net/machine-depend.h35
-rw-r--r--include/asm-h8300/atomic.h45
-rw-r--r--include/asm-h8300/bitops.h301
-rw-r--r--include/asm-h8300/edosk2674/machine-depend.h70
-rw-r--r--include/asm-h8300/generic/machine-depend.h17
-rw-r--r--include/asm-h8300/h8300_ne.h6
-rw-r--r--include/asm-h8300/h8max/ide.h60
-rw-r--r--include/asm-h8300/h8max/machine-depend.h167
-rw-r--r--include/asm-h8300/hardirq.h6
-rw-r--r--include/asm-h8300/ide.h7
-rw-r--r--include/asm-h8300/io.h9
-rw-r--r--include/asm-h8300/machine-depend.h70
-rw-r--r--include/asm-h8300/processor.h6
-rw-r--r--include/asm-h8300/regs267x.h336
-rw-r--r--include/asm-h8300/semaphore.h178
-rw-r--r--include/asm-h8300/system.h21
-rw-r--r--include/asm-h8300/timex.h6
-rw-r--r--include/asm-i386/acpi.h38
-rw-r--r--include/asm-i386/mach-bigsmp/mach_apic.h5
-rw-r--r--include/asm-i386/mach-es7000/mach_apic.h4
-rw-r--r--include/asm-i386/mach-numaq/mach_apic.h10
-rw-r--r--include/asm-i386/mach-summit/mach_apic.h5
-rw-r--r--include/asm-i386/mpspec.h8
-rw-r--r--include/asm-i386/suspend.h2
-rw-r--r--include/asm-ia64/atomic.h8
-rw-r--r--include/asm-ia64/bitops.h8
-rw-r--r--include/asm-ia64/byteorder.h3
-rw-r--r--include/asm-ia64/current.h10
-rw-r--r--include/asm-ia64/delay.h32
-rw-r--r--include/asm-ia64/gcc_intrin.h584
-rw-r--r--include/asm-ia64/ia64regs.h100
-rw-r--r--include/asm-ia64/intrinsics.h101
-rw-r--r--include/asm-ia64/io.h3
-rw-r--r--include/asm-ia64/machvec.h2
-rw-r--r--include/asm-ia64/mmu_context.h2
-rw-r--r--include/asm-ia64/page.h3
-rw-r--r--include/asm-ia64/pal.h4
-rw-r--r--include/asm-ia64/perfmon.h103
-rw-r--r--include/asm-ia64/perfmon_default_smpl.h39
-rw-r--r--include/asm-ia64/processor.h422
-rw-r--r--include/asm-ia64/rwsem.h12
-rw-r--r--include/asm-ia64/sal.h4
-rw-r--r--include/asm-ia64/siginfo.h1
-rw-r--r--include/asm-ia64/smp.h2
-rw-r--r--include/asm-ia64/sn/sn2/io.h34
-rw-r--r--include/asm-ia64/sn/sn_cpuid.h6
-rw-r--r--include/asm-ia64/spinlock.h12
-rw-r--r--include/asm-ia64/system.h44
-rw-r--r--include/asm-ia64/timex.h3
-rw-r--r--include/asm-ia64/tlbflush.h3
-rw-r--r--include/asm-ia64/unistd.h67
-rw-r--r--include/linux/acpi.h12
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/oprofile.h2
-rw-r--r--include/linux/pkt_sched.h2
-rw-r--r--include/linux/pm.h45
-rw-r--r--include/linux/suspend.h31
-rw-r--r--include/linux/sysdev.h4
-rw-r--r--kernel/cpufreq.c30
-rw-r--r--kernel/kallsyms.c1
-rw-r--r--kernel/power/console.c5
-rw-r--r--kernel/power/main.c450
-rw-r--r--kernel/power/power.h37
-rw-r--r--kernel/power/swsusp.c373
-rw-r--r--kernel/sys.c7
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/ipv6/mcast.c32
174 files changed, 4767 insertions, 3131 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index a61db966094f..5935ff8708c5 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -70,6 +70,8 @@ dnotify.txt
- info about directory notification in Linux.
driver-model.txt
- info about Linux driver model.
+early-userspace/
+ - info about initramfs, klibc, and userspace early during boot.
exception.txt
- how Linux v2.2 handles exceptions without verify_area etc.
fb/
diff --git a/Documentation/early-userspace/README b/Documentation/early-userspace/README
new file mode 100644
index 000000000000..174cc7b4dd02
--- /dev/null
+++ b/Documentation/early-userspace/README
@@ -0,0 +1,75 @@
+Early userspace support
+=======================
+
+Last update: 2003-08-21
+
+
+"Early userspace" is a set of libraries and programs that provide
+various pieces of functionality that are important enough to be
+available while a Linux kernel is coming up, but that don't need to be
+run inside the kernel itself.
+
+It consists of several major infrastructure components:
+
+- gen_init_cpio, a program that builds a cpio-format archive
+ containing a root filesystem image. This archive is compressed, and
+ the compressed image is linked into the kernel image.
+- initramfs, a chunk of code that unpacks the compressed cpio image
+ midway through the kernel boot process.
+- klibc, a userspace C library, currently packaged separately, that is
+ optimised for correctness and small size.
+
+The cpio file format used by initramfs is the "newc" (aka "cpio -c")
+format, and is documented in the file "buffer-format.txt". If you
+want to generate your own cpio files directly instead of hacking on
+gen_init_cpio, you will need to short-circuit the build process in
+usr/ so that gen_init_cpio does not get run, then simply pop your own
+initramfs_data.cpio.gz file into place.
+
+
+Where's this all leading?
+=========================
+
+The klibc distribution contains some of the necessary software to make
+early userspace useful. The klibc distribution is currently
+maintained separately from the kernel, but this may change early in
+the 2.7 era (it missed the boat for 2.5).
+
+You can obtain somewhat infrequent snapshots of klibc from
+ftp://ftp.kernel.org/pub/linux/libs/klibc/
+
+For active users, you are better off using the klibc BitKeeper
+repositories, at http://klibc.bkbits.net/
+
+The standalone klibc distribution currently provides three components,
+in addition to the klibc library:
+
+- ipconfig, a program that configures network interfaces. It can
+ configure them statically, or use DHCP to obtain information
+ dynamically (aka "IP autoconfiguration").
+- nfsmount, a program that can mount an NFS filesystem.
+- kinit, the "glue" that uses ipconfig and nfsmount to replace the old
+ support for IP autoconfig, mount a filesystem over NFS, and continue
+ system boot using that filesystem as root.
+
+kinit is built as a single statically linked binary to save space.
+
+Eventually, several more chunks of kernel functionality will hopefully
+move to early userspace:
+
+- Almost all of init/do_mounts* (the beginning of this is already in
+ place)
+- ACPI table parsing
+- Insert unwieldy subsystem that doesn't really need to be in kernel
+ space here
+
+If kinit doesn't meet your current needs and you've got bytes to burn,
+the klibc distribution includes a small Bourne-compatible shell (ash)
+and a number of other utilities, so you can replace kinit and build
+custom initramfs images that meet your needs exactly.
+
+For questions and help, you can sign up for the early userspace
+mailing list at http://www.zytor.com/mailman/listinfo/klibc
+
+
+Bryan O'Sullivan <bos@serpentine.com>
diff --git a/Documentation/early-userspace/buffer-format.txt b/Documentation/early-userspace/buffer-format.txt
new file mode 100644
index 000000000000..e1fd7f9dad16
--- /dev/null
+++ b/Documentation/early-userspace/buffer-format.txt
@@ -0,0 +1,112 @@
+ initramfs buffer format
+ -----------------------
+
+ Al Viro, H. Peter Anvin
+ Last revision: 2002-01-13
+
+Starting with kernel 2.5.x, the old "initial ramdisk" protocol is
+getting {replaced/complemented} with the new "initial ramfs"
+(initramfs) protocol. The initramfs contents is passed using the same
+memory buffer protocol used by the initrd protocol, but the contents
+is different. The initramfs buffer contains an archive which is
+expanded into a ramfs filesystem; this document details the format of
+the initramfs buffer format.
+
+The initramfs buffer format is based around the "newc" or "crc" CPIO
+formats, and can be created with the cpio(1) utility. The cpio
+archive can be compressed using gzip(1). One valid version of an
+initramfs buffer is thus a single .cpio.gz file.
+
+The full format of the initramfs buffer is defined by the following
+grammar, where:
+ * is used to indicate "0 or more occurrences of"
+ (|) indicates alternatives
+ + indicates concatenation
+ GZIP() indicates the gzip(1) of the operand
+ ALGN(n) means padding with null bytes to an n-byte boundary
+
+ initramfs := ("\0" | cpio_archive | cpio_gzip_archive)*
+
+ cpio_gzip_archive := GZIP(cpio_archive)
+
+ cpio_archive := cpio_file* + (<nothing> | cpio_trailer)
+
+ cpio_file := ALGN(4) + cpio_header + filename + "\0" + ALGN(4) + data
+
+ cpio_trailer := ALGN(4) + cpio_header + "TRAILER!!!\0" + ALGN(4)
+
+
+In human terms, the initramfs buffer contains a collection of
+compressed and/or uncompressed cpio archives (in the "newc" or "crc"
+formats); arbitrary amounts of zero bytes (for padding) can be added
+between members.
+
+The cpio "TRAILER!!!" entry (cpio end-of-archive) is optional, but is
+not ignored; see "handling of hard links" below.
+
+The structure of the cpio_header is as follows (all fields contain
+hexadecimal ASCII numbers fully padded with '0' on the left to the
+full width of the field, for example, the integer 4780 is represented
+by the ASCII string "000012ac"):
+
+Field name Field size Meaning
+c_magic 6 bytes The string "070701" or "070702"
+c_ino 8 bytes File inode number
+c_mode 8 bytes File mode and permissions
+c_uid 8 bytes File uid
+c_gid 8 bytes File gid
+c_nlink 8 bytes Number of links
+c_mtime 8 bytes Modification time
+c_filesize 8 bytes Size of data field
+c_maj 8 bytes Major part of file device number
+c_min 8 bytes Minor part of file device number
+c_rmaj 8 bytes Major part of device node reference
+c_rmin 8 bytes Minor part of device node reference
+c_namesize 8 bytes Length of filename, including final \0
+c_chksum 8 bytes Checksum of data field if c_magic is 070702;
+ otherwise zero
+
+The c_mode field matches the contents of st_mode returned by stat(2)
+on Linux, and encodes the file type and file permissions.
+
+The c_filesize should be zero for any file which is not a regular file
+or symlink.
+
+The c_chksum field contains a simple 32-bit unsigned sum of all the
+bytes in the data field. cpio(1) refers to this as "crc", which is
+clearly incorrect (a cyclic redundancy check is a different and
+significantly stronger integrity check), however, this is the
+algorithm used.
+
+If the filename is "TRAILER!!!" this is actually an end-of-archive
+marker; the c_filesize for an end-of-archive marker must be zero.
+
+
+*** Handling of hard links
+
+When a nondirectory with c_nlink > 1 is seen, the (c_maj,c_min,c_ino)
+tuple is looked up in a tuple buffer. If not found, it is entered in
+the tuple buffer and the entry is created as usual; if found, a hard
+link rather than a second copy of the file is created. It is not
+necessary (but permitted) to include a second copy of the file
+contents; if the file contents is not included, the c_filesize field
+should be set to zero to indicate no data section follows. If data is
+present, the previous instance of the file is overwritten; this allows
+the data-carrying instance of a file to occur anywhere in the sequence
+(GNU cpio is reported to attach the data to the last instance of a
+file only.)
+
+c_filesize must not be zero for a symlink.
+
+When a "TRAILER!!!" end-of-archive marker is seen, the tuple buffer is
+reset. This permits archives which are generated independently to be
+concatenated.
+
+To combine file data from different sources (without having to
+regenerate the (c_maj,c_min,c_ino) fields), therefore, either one of
+the following techniques can be used:
+
+a) Separate the different file data sources with a "TRAILER!!!"
+ end-of-archive marker, or
+
+b) Make sure c_nlink == 1 for all nondirectory entries.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
new file mode 100644
index 000000000000..435ea29bb676
--- /dev/null
+++ b/Documentation/power/devices.txt
@@ -0,0 +1,143 @@
+
+Device Power Management
+
+
+Device power management encompasses two areas - the ability to save
+state and transition a device to a low-power state when the system is
+entering a low-power state; and the ability to transition a device to
+a low-power state while the system is running (and independently of
+any other power management activity).
+
+
+Methods
+
+The methods to suspend and resume devices reside in struct bus_type:
+
+struct bus_type {
+ ...
+ int (*suspend)(struct device * dev, u32 state);
+ int (*resume)(struct device * dev);
+};
+
+Each bus driver is responsible for implementing these methods, translating
+the call into a bus-specific request and forwarding the call to the
+bus-specific drivers. For example, PCI drivers implement suspend() and
+resume() methods in struct pci_driver. The PCI core is simply
+responsible for translating the pointers to PCI-specific ones and
+calling the low-level driver.
+
+This is done to a) ease transition to the new power management methods
+and leverage the existing PM code in various bus drivers; b) allow
+buses to implement generic and default PM routines for devices, and c)
+make the flow of execution obvious to the reader.
+
+
+System Power Management
+
+When the system enters a low-power state, the device tree is walked in
+a depth-first fashion to transition each device into a low-power
+state. The ordering of the device tree is guaranteed by the order in
+which devices get registered - children are never registered before
+their ancestors, and devices are placed at the back of the list when
+registered. By walking the list in reverse order, we are guaranteed to
+suspend devices in the proper order.
+
+Devices are suspended once with interrupts enabled. Drivers are
+expected to stop I/O transactions, save device state, and place the
+device into a low-power state. Drivers may sleep, allocate memory,
+etc. at will.
+
+Some devices are broken and will inevitably have problems powering
+down or disabling themselves with interrupts enabled. For these
+special cases, they may return -EAGAIN. This will put the device on a
+list to be taken care of later. When interrupts are disabled, before
+we enter the low-power state, their drivers are called again to put
+their device to sleep.
+
+On resume, the devices that returned -EAGAIN will be called to power
+themselves back on with interrupts disabled. Once interrupts have been
+re-enabled, the rest of the drivers will be called to resume their
+devices. On resume, a driver is responsible for powering back on each
+device, restoring state, and re-enabling I/O transactions for that
+device.
+
+System devices follow a slightly different API, which can be found in
+
+ include/linux/sysdev.h
+ drivers/base/sys.c
+
+System devices will only be suspended with interrupts disabled, and
+after all other devices have been suspended. On resume, they will be
+resumed before any other devices, and also with interrupts disabled.
+
+
+Runtime Power Management
+
+Many devices are able to dynamically power down while the system is
+still running. This feature is useful for devices that are not being
+used, and can offer significant power savings on a running system.
+
+In each device's directory, there is a 'power' directory, which
+contains at least a 'state' file. Reading from this file displays what
+power state the device is currently in. Writing to this file initiates
+a transition to the specified power state, which must be a decimal in
+the range 1-3, inclusive; or 0 for 'On'.
+
+The PM core will call the ->suspend() method in the bus_type object
+that the device belongs to if the specified state is not 0, or
+->resume() if it is.
+
+Nothing will happen if the specified state is the same state the
+device is currently in.
+
+If the device is already in a low-power state, and the specified state
+is another, but different, low-power state, the ->resume() method will
+first be called to power the device back on, then ->suspend() will be
+called again with the new state.
+
+The driver is responsible for saving the working state of the device
+and putting it into the low-power state specified. If this was
+successful, it returns 0, and the device's power_state field is
+updated.
+
+The driver must take care to know whether or not it is able to
+properly resume the device, including all steps of reinitialization
+necessary. (This is the hardest part, and the one most protected by
+NDA'd documents).
+
+The driver must also take care not to suspend a device that is
+currently in use. It is their responsibility to provide their own
+exclusion mechanisms.
+
+The runtime power transition happens with interrupts enabled. If a
+device cannot support being powered down with interrupts, it may
+return -EAGAIN (as it would during a system power management
+transition), but it will _not_ be called again, and the transaction
+will fail.
+
+There is currently no way to know what states a device or driver
+supports a priori. This will change in the future.
+
+
+Driver Detach Power Management
+
+The kernel now supports the ability to place a device in a low-power
+state when it is detached from its driver, which happens when its
+module is removed.
+
+Each device contains a 'detach_state' file in its sysfs directory
+which can be used to control this state. Reading from this file
+displays what the current detach state is set to. This is 0 (On) by
+default. A user may write a positive integer value to this file in the
+range of 1-4 inclusive.
+
+A value of 1-3 will indicate the device should be placed in that
+low-power state, which will cause ->suspend() to be called for that
+device. A value of 4 indicates that the device should be shutdown, so
+->shutdown() will be called for that device.
+
+The driver is responsible for reinitializing the device when the
+module is re-inserted during its ->probe() (or equivalent) method.
+The driver core will not call any extra functions when binding the
+device to the driver.
+
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
new file mode 100644
index 000000000000..f5ebda5f4276
--- /dev/null
+++ b/Documentation/power/interface.txt
@@ -0,0 +1,43 @@
+Power Management Interface
+
+
+The power management subsystem provides a unified sysfs interface to
+userspace, regardless of what architecture or platform one is
+running. The interface exists in /sys/power/ directory (assuming sysfs
+is mounted at /sys).
+
+/sys/power/state controls system power state. Reading from this file
+returns what states are supported, which is hard-coded to 'standby'
+(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+(Suspend-to-Disk).
+
+Writing one of those strings to this file causes the system to
+transition into that state. Please see the file
+Documentation/power/states.txt for a description of each of those
+states.
+
+
+/sys/power/disk controls the operating mode of the suspend-to-disk
+mechanism. Suspend-to-disk can be handled in several ways. The
+greatest distinction is who writes memory to disk - the firmware or
+the kernel. If the firmware does it, we assume that it also handles
+suspending the system.
+
+If the kernel does it, then we have three options for putting the system
+to sleep - using the platform driver (e.g. ACPI or other PM
+registers), powering off the system or rebooting the system (for
+testing). The system will support either 'firmware' or 'platform', and
+that is known a priori. But, the user may choose 'shutdown' or
+'reboot' as alternatives.
+
+Reading from this file will display what the mode is currently set
+to. Writing to this file will accept one of
+
+ 'firmware'
+ 'platform'
+ 'shutdown'
+ 'reboot'
+
+It will only change to 'firmware' or 'platform' if the system supports
+it.
+
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
new file mode 100644
index 000000000000..3e5e5d3ff419
--- /dev/null
+++ b/Documentation/power/states.txt
@@ -0,0 +1,79 @@
+
+System Power Management States
+
+
+The kernel supports three power management states generically, though
+each is dependent on platform support code to implement the low-level
+details for each state. This file describes each state, what they are
+commonly called, what ACPI state they map to, and what string to write
+to /sys/power/state to enter that state.
+
+
+State: Standby / Power-On Suspend
+ACPI State: S1
+String: "standby"
+
+This state offers minimal, though real, power savings, while providing
+a very low-latency transition back to a working system. No operating
+state is lost (the CPU retains power), so the system easily starts up
+again where it left off.
+
+We try to put devices in a low-power state equivalent to D1, which
+also offers low power savings, but low resume latency. Not all devices
+support D1, and those that don't are left on.
+
+A transition from Standby to the On state should take about 1-2
+seconds.
+
+
+State: Suspend-to-RAM
+ACPI State: S3
+String: "mem"
+
+This state offers significant power savings as everything in the
+system is put into a low-power state, except for memory, which is
+placed in self-refresh mode to retain its contents.
+
+System and device state is saved and kept in memory. All devices are
+suspended and put into D3. In many cases, all peripheral buses lose
+power when entering STR, so devices must be able to handle the
+transition back to the On state.
+
+For at least ACPI, STR requires some minimal boot-strapping code to
+resume the system from STR. This may be true on other platforms.
+
+A transition from Suspend-to-RAM to the On state should take about
+3-5 seconds.
+
+
+State: Suspend-to-disk
+ACPI State: S4
+String: "disk"
+
+This state offers the greatest power savings, and can be used even in
+the absence of low-level platform support for power management. This
+state operates similarly to Suspend-to-RAM, but includes a final step
+of writing memory contents to disk. On resume, this is read and memory
+is restored to its pre-suspend state.
+
+STD can be handled by the firmware or the kernel. If it is handled by
+the firmware, it usually requires a dedicated partition that must be
+setup via another operating system for it to use. Despite the
+inconvenience, this method requires minimal work by the kernel, since
+the firmware will also handle restoring memory contents on resume.
+
+If the kernel is responsible for persistently saving state, a mechanism
+called 'swsusp' (Swap Suspend) is used to write memory contents to
+free swap space. swsusp has some restrictive requirements, but should
+work in most cases. Some, albeit outdated, documentation can be found
+in Documentation/power/swsusp.txt.
+
+Once memory state is written to disk, the system may either enter a
+low-power state (like ACPI S4), or it may simply power down. Powering
+down offers greater savings, and allows this mechanism to work on any
+system. However, entering a real low-power state allows the user to
+trigger wake up events (e.g. pressing a key or opening a laptop lid).
+
+A transition from Suspend-to-Disk to the On state should take about 30
+seconds, though it's typically a bit more with the current
+implementation.
diff --git a/Documentation/swsusp.txt b/Documentation/power/swsusp.txt
index c59eec748e13..c59eec748e13 100644
--- a/Documentation/swsusp.txt
+++ b/Documentation/power/swsusp.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index 8648ee7035a6..7ba394c423ae 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -363,6 +363,13 @@ L: linux-scsi@vger.kernel.org
W: http://www.dandelion.com/Linux/
S: Maintained
+COMMON INTERNET FILE SYSTEM (CIFS)
+P: Steve French
+M: sfrench@samba.org
+L: samba-technical@lists.samba.org
+W: http://us1.samba.org/samba/Linux_CIFS_client.html
+S: Supported
+
CIRRUS LOGIC GENERIC FBDEV DRIVER
P: Jeff Garzik
M: jgarzik@pobox.com
@@ -2162,6 +2169,12 @@ W: http://www.ic.nec.co.jp/micro/uclinux/eng/
W: http://www.ee.nec.de/uclinux/
S: Supported
+UCLINUX FOR RENESAS H8/300
+P: Yoshinori Sato
+M: ysato@users.sourceforge.jp
+W: http://uclinux-h8.sourceforge.jp/
+S: Supported
+
USB DIAMOND RIO500 DRIVER
P: Cesar Miquel
M: miquel@df.uba.ar
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index a03881ed53a7..2037d74a96c5 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -190,6 +190,8 @@ endmenu
source "drivers/base/Kconfig"
+source "drivers/mtd/Kconfig"
+
source "drivers/block/Kconfig"
source "drivers/ide/Kconfig"
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 8d754ce470c4..5d7bb4b74bf7 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -5,7 +5,7 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# (C) Copyright 2002, Yoshinori Sato <ysato@users.sourceforge.jp>
+# (C) Copyright 2002,2003 Yoshinori Sato <ysato@users.sourceforge.jp>
#
ifndef include-config
-include $(TOPDIR)/.config
@@ -37,8 +37,8 @@ CFLAGS += $(cflags-y)
CFLAGS += -mint32 -fno-builtin -Os
CFLAGS += -g
CFLAGS += -D__linux__
-CFLAGS += -DUTS_SYSNAME=\"uClinux\" -DTARGET=$(BOARD)
-AFLAGS += -DPLATFORM=$(PLATFORM) -DTARGET=$(BOARD) -DMODEL=$(MODEL) $(cflags-y)
+CFLAGS += -DUTS_SYSNAME=\"uClinux\"
+AFLAGS += -DPLATFORM=$(PLATFORM) -DMODEL=$(MODEL) $(cflags-y)
LDFLAGS += $(ldflags-y)
CROSS_COMPILE = h8300-elf-
@@ -53,28 +53,32 @@ core-y += arch/$(ARCH)/kernel/ \
libs-y += arch/$(ARCH)/lib/ $(LIBGCC)
-export MODEL
+boot := arch/h8300/boot
+
+export MODEL PLATFORM BOARD
archmrproper:
archclean:
- $(call descend arch/$(ARCH), subdirclean)
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+prepare: include/asm-$(ARCH)/machine-depend.h include/asm-$(ARCH)/asm-offsets.h
-prepare: include/asm-$(ARCH)/asm-offsets.h
+include/asm-$(ARCH)/machine-depend.h: include/asm-$(ARCH)/$(BOARD)/machine-depend.h
+ $(Q)ln -sf $(BOARD)/machine-depend.h \
+ include/asm-$(ARCH)/machine-depend.h
+ @echo ' Create include/asm-$(ARCH)/machine-depend.h'
include/asm-$(ARCH)/asm-offsets.h: arch/$(ARCH)/kernel/asm-offsets.s \
include/asm include/linux/version.h
$(call filechk,gen-asm-offsets)
-vmlinux.bin: vmlinux
- $(OBJCOPY) -Obinary $< $@
-
-vmlinux.srec: vmlinux
- $(OBJCOPY) -Osrec $< $@
+vmlinux.srec vmlinux.bin: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp
echo 'vmlinux.bin - Create raw binary'
echo 'vmlinux.srec - Create srec binary'
endef
-CLEAN_FILES += arch/$(ARCH)/vmlinux.bin arch/$(ARCH)/vmlinux.srec
+CLEAN_FILES += include/asm-$(ARCH)/asm-offsets.h include/asm-$(ARCH)/machine-depend.h
diff --git a/arch/h8300/boot/Makefile b/arch/h8300/boot/Makefile
new file mode 100644
index 000000000000..65086d925ca7
--- /dev/null
+++ b/arch/h8300/boot/Makefile
@@ -0,0 +1,12 @@
+# arch/h8300/boot/Makefile
+
+targets := vmlinux.srec vmlinux.bin
+
+OBJCOPYFLAGS_vmlinux.srec := -Osrec
+OBJCOPYFLAGS_vmlinux.bin := -Obinary
+
+$(obj)/vmlinux.srec $(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+ @echo ' Kernel: $@ is ready'
+
+CLEAN_FILES += arch/$(ARCH)/vmlinux.bin arch/$(ARCH)/vmlinux.srec
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 47b60d493ff8..8db25723b077 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -157,6 +157,7 @@ struct sigframe
#if defined(CONFIG_CPU_H8S)
short dummy_exr;
#endif
+ long dummy_pc;
char *pretcode;
unsigned char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
@@ -170,6 +171,7 @@ struct rt_sigframe
#if defined(CONFIG_CPU_H8S)
short dummy_exr;
#endif
+ long dummy_pc;
char *pretcode;
unsigned char retcode[8];
struct siginfo info;
@@ -241,7 +243,7 @@ badframe:
asmlinkage int do_sigreturn(unsigned long __unused,...)
{
- struct pt_regs *regs = (struct pt_regs *) &__unused;
+ struct pt_regs *regs = (struct pt_regs *) (&__unused - 1);
unsigned long usp = rdusp();
struct sigframe *frame = (struct sigframe *)(usp - 4);
sigset_t set;
@@ -416,7 +418,6 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. */
err |= __put_user(frame->retcode, &frame->pretcode);
- /* moveq #,d0; notb d0; movea.l #,a5; trap #0 */
/* sub.l er0,er0; mov.b #__NR_rt_sigreturn,r0l; trapa #0 */
err != __put_user(0x1a80f800 + (__NR_rt_sigreturn & 0xff),
(long *)(frame->retcode + 0));
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index de7cc243e196..e77bc055fb5a 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -275,14 +275,18 @@ SYMBOL_NAME_LABEL(sys_call_table)
.long SYMBOL_NAME(sys_ni_syscall) /* sys_remap_file_pages */
.long SYMBOL_NAME(sys_set_tid_address)
.long SYMBOL_NAME(sys_timer_create)
- .long SYMBOL_NAME(sys_timer_settime) /* 260 */
+ .long SYMBOL_NAME(sys_timer_settime) /* 260 */
.long SYMBOL_NAME(sys_timer_gettime)
.long SYMBOL_NAME(sys_timer_getoverrun)
.long SYMBOL_NAME(sys_timer_delete)
.long SYMBOL_NAME(sys_clock_settime)
- .long SYMBOL_NAME(sys_clock_gettime) /* 265 */
+ .long SYMBOL_NAME(sys_clock_gettime) /* 265 */
.long SYMBOL_NAME(sys_clock_getres)
.long SYMBOL_NAME(sys_clock_nanosleep)
+ .long SYMBOL_NAME(sys_statfs64)
+ .long SYMBOL_NAME(sys_fstatfs64)
+ .long SYMBOL_NAME(sys_tgkill) /* 270 */
+ .long SYMBOL_NAME(sys_utimes)
.rept NR_syscalls-(.-SYMBOL_NAME(sys_call_table))/4
.long SYMBOL_NAME(sys_ni_syscall)
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 179b2132ecd3..60787f07eb2b 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -2,58 +2,62 @@
#ifdef CONFIG_H8300H_GENERIC
#ifdef CONFIG_ROMKERNEL
-#include "platform/h8300h/generic/rom.ld"
+#include "../platform/h8300h/generic/rom.ld"
#endif
#ifdef CONFIG_RAMKERNEL
-#include "platform/h8300h/generic/ram.ld"
+#include "../platform/h8300h/generic/ram.ld"
#endif
#endif
#ifdef CONFIG_H8300H_AKI3068NET
#ifdef CONFIG_ROMKERNEL
-#include "platform/h8300h/aki3068net/rom.ld"
+#include "../platform/h8300h/aki3068net/rom.ld"
#endif
#ifdef CONFIG_RAMKERNEL
-#include "platform/h8300h/aki3068net/ram.ld"
+#include "../platform/h8300h/aki3068net/ram.ld"
#endif
#endif
#ifdef CONFIG_H8300H_H8MAX
#ifdef CONFIG_ROMKERNEL
-#include "platform/h8300h/h8max/rom.ld"
+#include "../platform/h8300h/h8max/rom.ld"
#endif
#ifdef CONFIG_RAMKERNEL
-#include "platform/h8300h/h8max/ram.ld"
+#include "../platform/h8300h/h8max/ram.ld"
#endif
#endif
#ifdef CONFIG_H8300H_SIM
#ifdef CONFIG_ROMKERNEL
-#include "platform/h8300h/generic/rom.ld"
+#include "../platform/h8300h/generic/rom.ld"
#endif
#ifdef CONFIG_RAMKERNEL
-#include "platform/h8300h/generic/ram.ld"
+#include "../platform/h8300h/generic/ram.ld"
#endif
#endif
#ifdef CONFIG_H8S_SIM
#ifdef CONFIG_ROMKERNEL
-#include "platform/h8s/generic/rom.ld"
+#include "../platform/h8s/generic/rom.ld"
#endif
#ifdef CONFIG_RAMKERNEL
-#include "platform/h8s/generic/ram.ld"
+#include "../platform/h8s/generic/ram.ld"
#endif
#endif
#ifdef CONFIG_H8S_EDOSK2674
#ifdef CONFIG_ROMKERNEL
-#include "platform/h8s/edosk2674/rom.ld"
+#include "../platform/h8s/edosk2674/rom.ld"
#endif
#ifdef CONFIG_RAMKERNEL
-#include "platform/h8s/edosk2674/ram.ld"
+#include "../platform/h8s/edosk2674/ram.ld"
#endif
#endif
+#if defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)
+INPUT(romfs.o)
+#endif
+
_jiffies = _jiffies_64 + 4;
SECTIONS
@@ -169,6 +173,10 @@ SECTIONS
__end = . ;
__ramstart = .;
} > ram
+ .romfs :
+ {
+ *(.romfs*)
+ } > ram
.dummy :
{
COMMAND_START = . - 0x200 ;
diff --git a/arch/h8300/lib/romfs.S b/arch/h8300/lib/romfs.S
new file mode 100644
index 000000000000..844f1169719e
--- /dev/null
+++ b/arch/h8300/lib/romfs.S
@@ -0,0 +1,52 @@
+/* romfs move to __ebss */
+
+#include <asm/linkage.h>
+
+#if defined(__H8300H__)
+ .h8300h
+#endif
+#if defined(__H8300S__)
+ .h8300s
+#endif
+
+ .text
+.globl __move_romfs
+_romfs_sig_len = 8
+
+__move_romfs:
+ mov.l #__sbss,er0
+ mov.l #_romfs_sig,er1
+ mov.b #_romfs_sig_len,r3l
+1: /* check romfs image */
+ mov.b @er0+,r2l
+ mov.b @er1+,r2h
+ cmp.b r2l,r2h
+ bne 2f
+ dec.b r3l
+ bne 1b
+
+ /* find romfs image */
+ mov.l @__sbss+8,er0 /* romfs length(be) */
+ mov.l #__sbss,er1
+ add.l er0,er1 /* romfs image end */
+ mov.l #__ebss,er2
+ add.l er0,er2 /* destination address */
+ adds #2,er0
+ adds #1,er0
+ shlr er0
+ shlr er0 /* transfer length */
+1:
+ mov.l @er1,er3 /* copy image */
+ mov.l er3,@er2
+ subs #4,er1
+ subs #4,er2
+ dec.l #1,er0
+ bpl 1b
+2:
+ rts
+
+ .section .rodata
+_romfs_sig:
+ .ascii "-rom1fs-"
+
+ .end
diff --git a/arch/h8300/platform/h8300h/aki3068net/timer.c b/arch/h8300/platform/h8300h/aki3068net/timer.c
index 05fa22144094..8dd603b7b4cb 100644
--- a/arch/h8300/platform/h8300h/aki3068net/timer.c
+++ b/arch/h8300/platform/h8300h/aki3068net/timer.c
@@ -25,15 +25,11 @@
#define CMFA 6
-extern int request_irq_boot(unsigned int,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long, const char *, void *);
-
void __init platform_timer_setup(irqreturn_t (*timer_int)(int, void *, struct pt_regs *))
{
outb(H8300_TIMER_COUNT_DATA,TCORA2);
outb(0x00,_8TCSR2);
- request_irq_boot(40,timer_int,0,"timer",0);
+ request_irq(40,timer_int,0,"timer",0);
outb(0x40|0x08|0x03,_8TCR2);
}
diff --git a/arch/h8300/platform/h8300h/entry.S b/arch/h8300/platform/h8300h/entry.S
index b156053d9178..9bd8e636cbb4 100644
--- a/arch/h8300/platform/h8300h/entry.S
+++ b/arch/h8300/platform/h8300h/entry.S
@@ -111,7 +111,7 @@ LRET = 38
mov.l er1,@(8:16,er0)
mov.l @sp+,er1
- add.l #(LRET-LORIG),sp /* remove LORIG - LRET */
+ add.l #(LRET-LER1),sp /* remove LER1 - LRET */
mov.l sp,@SYMBOL_NAME(sw_ksp)
mov.l er0,sp
bra 8f
@@ -255,6 +255,7 @@ SYMBOL_NAME_LABEL(ret_from_exception)
btst #TIF_NEED_RESCHED,r1l
bne @SYMBOL_NAME(reschedule):16
mov.l sp,er1
+ subs #4,er1 /* adjust retpc */
mov.l er2,er0
jsr @SYMBOL_NAME(do_signal)
3:
diff --git a/arch/h8300/platform/h8300h/generic/crt0_rom.S b/arch/h8300/platform/h8300h/generic/crt0_rom.S
index 48079f558654..b1e036b457d4 100644
--- a/arch/h8300/platform/h8300h/generic/crt0_rom.S
+++ b/arch/h8300/platform/h8300h/generic/crt0_rom.S
@@ -44,14 +44,19 @@ SYMBOL_NAME_LABEL(_start)
/* copy .data */
#if !defined(CONFIG_H8300H_SIM)
+ /* copy .data */
mov.l #__begin_data,er5
mov.l #__sdata,er6
mov.l #__edata,er4
- sub.l er6,er4
+ sub.l er6,er4
+ shlr.l er4
+ shlr.l er4
1:
- eepmov.w
- dec.w #1,e4
- bpl 1b
+ mov.l @er5+,er0
+ mov.l er0,@er6
+ adds #4,er6
+ dec.l #1,er4
+ bne 1b
#endif
/* copy kernel commandline */
diff --git a/arch/h8300/platform/h8300h/h8max/timer.c b/arch/h8300/platform/h8300h/h8max/timer.c
index fb38a782eb68..cfa83b017e33 100644
--- a/arch/h8300/platform/h8300h/h8max/timer.c
+++ b/arch/h8300/platform/h8300h/h8max/timer.c
@@ -25,15 +25,11 @@
#define CMFA 6
-extern int request_irq_boot(unsigned int,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long, const char *, void *);
-
void __init platform_timer_setup(irqreturn_t (*timer_int)(int, void *, struct pt_regs *))
{
outb(H8300_TIMER_COUNT_DATA,TCORA2);
outb(0x00,_8TCSR2);
- request_irq_boot(40,timer_int,0,"timer",0);
+ request_irq(40,timer_int,0,"timer",0);
outb(0x40|0x08|0x03,_8TCR2);
}
diff --git a/arch/h8300/platform/h8300h/ints.c b/arch/h8300/platform/h8300h/ints.c
index ce8c70406fb2..b16726a7d2a6 100644
--- a/arch/h8300/platform/h8300h/ints.c
+++ b/arch/h8300/platform/h8300h/ints.c
@@ -52,7 +52,8 @@ typedef struct irq_handler {
const char *devname;
} irq_handler_t;
-irq_handler_t *irq_list[NR_IRQS];
+static irq_handler_t *irq_list[NR_IRQS];
+static int use_kmalloc;
extern unsigned long *interrupt_redirect_table;
@@ -119,20 +120,6 @@ void __init init_IRQ(void)
#endif
}
-void __init request_irq_boot(unsigned int irq,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long flags, const char *devname, void *dev_id)
-{
- irq_handler_t *irq_handle;
- irq_handle = alloc_bootmem(sizeof(irq_handler_t));
- irq_handle->handler = handler;
- irq_handle->flags = flags;
- irq_handle->count = 0;
- irq_handle->dev_id = dev_id;
- irq_handle->devname = devname;
- irq_list[irq] = irq_handle;
-}
-
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long flags, const char *devname, void *dev_id)
@@ -154,7 +141,14 @@ int request_irq(unsigned int irq,
return -EBUSY;
H8300_GPIO_DDR(H8300_GPIO_P9, (irq - EXT_IRQ0), 0);
}
- irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
+
+ if (use_kmalloc)
+ irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
+ else {
+ irq_handle = alloc_bootmem(sizeof(irq_handler_t));
+ irq_handle = (irq_handler_t *)((unsigned long)irq_handle | 0x80000000); /* bootmem allocator */
+ }
+
+
if (irq_handle == NULL)
return -ENOMEM;
@@ -177,8 +171,10 @@ void free_irq(unsigned int irq, void *dev_id)
irq, irq_list[irq]->devname);
if (irq >= EXT_IRQ0 && irq <= EXT_IRQ5)
*(volatile unsigned char *)IER &= ~(1 << (irq - EXT_IRQ0));
- kfree(irq_list[irq]);
- irq_list[irq] = NULL;
+ if (((unsigned long)irq_list[irq] & 0x80000000) == 0) {
+ kfree(irq_list[irq]);
+ irq_list[irq] = NULL;
+ }
}
/*
@@ -244,3 +240,10 @@ int show_interrupts(struct seq_file *p, void *v)
void init_irq_proc(void)
{
}
+
+static int __init enable_kmalloc(void)
+{
+ use_kmalloc = 1;
+ return 0;
+}
+__initcall(enable_kmalloc);
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
index d4b7e0641390..8105dc17d735 100644
--- a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
+++ b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
@@ -37,7 +37,8 @@
/* CPU Reset entry */
SYMBOL_NAME_LABEL(_start)
mov.l #RAMEND,sp
- ldc #0x07,exr
+ ldc #0x80,ccr
+ ldc #0x00,exr
/* Peripheral Setup */
bclr #4,@INTCR:8 /* interrupt mode 2 */
@@ -46,7 +47,7 @@ SYMBOL_NAME_LABEL(_start)
bset #1,@ISCRL+1:16 /* IRQ0 Positive Edge */
bclr #0,@ISCRL+1:16
-#if defined(CONFIG_BLK_DEV_BLKMEM)
+#if defined(CONFIG_MTD_UCLINUX)
/* move romfs image */
jsr @__move_romfs
#endif
@@ -71,7 +72,7 @@ SYMBOL_NAME_LABEL(_start)
eepmov.w
/* uClinux kernel start */
- ldc #0x10,ccr /* running kernel */
+ ldc #0x90,ccr /* running kernel */
mov.l #SYMBOL_NAME(init_thread_union),sp
add.l #0x2000,sp
jsr @_start_kernel
diff --git a/arch/h8300/platform/h8s/edosk2674/timer.c b/arch/h8300/platform/h8s/edosk2674/timer.c
index efea48ef4acf..4d3392dc0f5d 100644
--- a/arch/h8300/platform/h8s/edosk2674/timer.c
+++ b/arch/h8300/platform/h8s/edosk2674/timer.c
@@ -26,10 +26,6 @@
#define REGS(regs) __REGS(regs)
#define __REGS(regs) #regs
-extern int request_irq_boot(unsigned int,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long, const char *, void *);
-
int __init platform_timer_setup(irqreturn_t (*timer_int)(int, void *, struct pt_regs *))
{
unsigned char mstpcrl;
@@ -38,7 +34,7 @@ int __init platform_timer_setup(irqreturn_t (*timer_int)(int, void *, struct pt_
outb(mstpcrl,MSTPCRL);
outb(H8300_TIMER_COUNT_DATA,_8TCORA1);
outb(0x00,_8TCSR1);
- request_irq_boot(76,timer_int,0,"timer",0);
+ request_irq(76,timer_int,0,"timer",0);
outb(0x40|0x08|0x03,_8TCR1);
return 0;
}
diff --git a/arch/h8300/platform/h8s/entry.S b/arch/h8300/platform/h8s/entry.S
index 1326e84483e0..b701d67609a0 100644
--- a/arch/h8300/platform/h8s/entry.S
+++ b/arch/h8300/platform/h8s/entry.S
@@ -112,7 +112,7 @@ LRET = 40
mov.l er1,@(10:16,er0)
mov.l @sp+,er1
- add.l #(LRET-LORIG),sp /* remove LORIG - LRET */
+ add.l #(LRET-LER1),sp /* remove LER1 - LRET */
mov.l sp,@SYMBOL_NAME(sw_ksp)
mov.l er0,sp
bra 8f
@@ -252,6 +252,7 @@ SYMBOL_NAME_LABEL(ret_from_exception)
btst #TIF_NEED_RESCHED,r1l
bne @SYMBOL_NAME(reschedule):16
mov.l sp,er1
+ subs #4,er1 /* adjust retpc */
mov.l er2,er0
jsr @SYMBOL_NAME(do_signal)
3:
diff --git a/arch/h8300/platform/h8s/generic/crt0_ram.S b/arch/h8300/platform/h8s/generic/crt0_ram.S
index 9310cd57a928..86f450178466 100644
--- a/arch/h8300/platform/h8s/generic/crt0_ram.S
+++ b/arch/h8300/platform/h8s/generic/crt0_ram.S
@@ -37,13 +37,14 @@
/* CPU Reset entry */
SYMBOL_NAME_LABEL(_start)
mov.l #RAMEND,sp
- ldc #0x07,exr
+ ldc #0x80,ccr
+ ldc #0x00,exr
/* Peripheral Setup */
bclr #4,@INTCR:8 /* interrupt mode 2 */
bset #5,@INTCR:8
-#if defined(CONFIG_BLK_DEV_BLKMEM)
+#if defined(CONFIG_MTD_UCLINUX)
/* move romfs image */
jsr @__move_romfs
#endif
@@ -68,7 +69,7 @@ SYMBOL_NAME_LABEL(_start)
eepmov.w
/* uClinux kernel start */
- ldc #0x10,ccr /* running kernel */
+ ldc #0x90,ccr /* running kernel */
mov.l #SYMBOL_NAME(init_thread_union),sp
add.l #0x2000,sp
jsr @_start_kernel
diff --git a/arch/h8300/platform/h8s/generic/crt0_rom.S b/arch/h8300/platform/h8s/generic/crt0_rom.S
index b49d22ceb8e8..f345e090523e 100644
--- a/arch/h8300/platform/h8s/generic/crt0_rom.S
+++ b/arch/h8300/platform/h8s/generic/crt0_rom.S
@@ -33,36 +33,32 @@ SYMBOL_NAME_LABEL(_start)
/* Peripheral Setup */
- /* .bss clear */
- mov.l #__sbss,er5
- mov.l er5,er6
- inc.l #1,er6
- mov.l #__ebss,er4
- sub.l er5,er4
- sub.w r0,r0
- mov.b r0l,@er5
-1:
- eepmov.w
- dec.w #1,e4
- bpl 1b
-
/* copy .data */
#if !defined(CONFIG_H8S_SIM)
mov.l #__begin_data,er5
mov.l #__sdata,er6
mov.l #__edata,er4
- sub.l er6,er4
+ sub.l er6,er4
+ shlr.l #2,er4
1:
- eepmov.w
- dec.w #1,e4
- bpl 1b
+ mov.l @er5+,er0
+ mov.l er0,@er6
+ adds #4,er6
+ dec.l #1,er4
+ bne 1b
#endif
- /* copy kernel commandline */
- mov.l #COMMAND_START,er5
- mov.l #SYMBOL_NAME(_command_line),er6
- mov.w #512,r4
- eepmov.w
+ /* .bss clear */
+ mov.l #__sbss,er5
+ mov.l #__ebss,er4
+ sub.l er5,er4
+ shlr.l #2,er4
+ sub.l er0,er0
+1:
+ mov.l er0,@er5
+ adds #4,er5
+ dec.l #1,er4
+ bne 1b
/* linux kernel start */
ldc #0x90,ccr /* running kernel */
diff --git a/arch/h8300/platform/h8s/generic/rom.ld b/arch/h8300/platform/h8s/generic/rom.ld
index f451d2ee1df4..68cfd1767917 100644
--- a/arch/h8300/platform/h8s/generic/rom.ld
+++ b/arch/h8300/platform/h8s/generic/rom.ld
@@ -6,6 +6,6 @@ MEMORY
vector : ORIGIN = 0x000000, LENGTH = 0x000200
rom : ORIGIN = 0x000200, LENGTH = 0x200000-0x000200
erom : ORIGIN = 0x200000, LENGTH = 0
- ram : ORIGIN = 0x200000, LENGTH = 0x200000
- eram : ORIGIN = 0x400000, LENGTH = 0
+ ram : ORIGIN = 0x200000, LENGTH = 0x400000
+ eram : ORIGIN = 0x600000, LENGTH = 0
}
diff --git a/arch/h8300/platform/h8s/generic/timer.c b/arch/h8300/platform/h8s/generic/timer.c
index 8a2e799695b7..5367e3e66bf5 100644
--- a/arch/h8300/platform/h8s/generic/timer.c
+++ b/arch/h8300/platform/h8s/generic/timer.c
@@ -23,15 +23,11 @@
#include <asm/irq.h>
#include <asm/regs267x.h>
-extern int request_irq_boot(unsigned int,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long, const char *, void *);
-
int platform_timer_setup(irqreturn_t (*timer_int)(int, void *, struct pt_regs *))
{
outb(H8300_TIMER_COUNT_DATA,_8TCORA1);
outb(0x00,_8TCSR1);
- request_irq_boot(76,timer_int,0,"timer",0);
+ request_irq(76,timer_int,0,"timer",0);
outb(0x40|0x08|0x03,_8TCR1);
return 0;
}
diff --git a/arch/h8300/platform/h8s/ints.c b/arch/h8300/platform/h8s/ints.c
index 73409bcac4a5..0e1e66be714a 100644
--- a/arch/h8300/platform/h8s/ints.c
+++ b/arch/h8300/platform/h8s/ints.c
@@ -91,6 +91,8 @@ const static struct irq_pins irq_assign_table1[16]={
{H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7},
};
+static int use_kmalloc;
+
extern unsigned long *interrupt_redirect_table;
static inline unsigned long *get_vector_address(void)
@@ -159,22 +161,6 @@ void __init init_IRQ(void)
#endif
}
-/* special request_irq */
-/* used bootmem allocater */
-void __init request_irq_boot(unsigned int irq,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long flags, const char *devname, void *dev_id)
-{
- irq_handler_t *irq_handle;
- irq_handle = alloc_bootmem(sizeof(irq_handler_t));
- irq_handle->handler = handler;
- irq_handle->flags = flags;
- irq_handle->count = 0;
- irq_handle->dev_id = dev_id;
- irq_handle->devname = devname;
- irq_list[irq] = irq_handle;
-}
-
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long flags, const char *devname, void *dev_id)
@@ -202,7 +188,14 @@ int request_irq(unsigned int irq,
H8300_GPIO_DDR(port_no, bit_no, H8300_GPIO_INPUT);
*(volatile unsigned short *)ISR &= ~ptn; /* ISR clear */
}
- irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
+
+ if (use_kmalloc)
+ irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
+ else {
+ irq_handle = alloc_bootmem(sizeof(irq_handler_t));
+ irq_handle = (irq_handler_t *)((unsigned long)irq_handle | 0x80000000); /* bootmem allocator */
+ }
+
if (irq_handle == NULL)
return -ENOMEM;
@@ -243,8 +236,10 @@ void free_irq(unsigned int irq, void *dev_id)
}
H8300_GPIO_FREE(port_no, bit_no);
}
- kfree(irq_list[irq]);
- irq_list[irq] = NULL;
+ if (((unsigned long)irq_list[irq] & 0x80000000) == 0) {
+ kfree(irq_list[irq]);
+ irq_list[irq] = NULL;
+ }
}
unsigned long probe_irq_on (void)
@@ -306,3 +301,10 @@ int show_interrupts(struct seq_file *p, void *v)
void init_irq_proc(void)
{
}
+
+static int __init enable_kmalloc(void)
+{
+ use_kmalloc = 1;
+ return 0;
+}
+__initcall(enable_kmalloc);
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 1486324a3d06..38bb8a5d976a 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -27,6 +27,7 @@
#include <linux/config.h>
#include <linux/acpi.h>
#include <asm/pgalloc.h>
+#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
@@ -42,6 +43,9 @@
extern int acpi_disabled;
extern int acpi_ht;
+int acpi_lapic = 0;
+int acpi_ioapic = 0;
+
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@@ -91,8 +95,6 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
#ifdef CONFIG_X86_LOCAL_APIC
-int acpi_lapic;
-
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -159,8 +161,6 @@ acpi_parse_lapic_addr_ovr (
return 0;
}
-#ifdef CONFIG_ACPI
-
static int __init
acpi_parse_lapic_nmi (
acpi_table_entry_header *header)
@@ -179,15 +179,11 @@ acpi_parse_lapic_nmi (
return 0;
}
-#endif /*CONFIG_ACPI*/
#endif /*CONFIG_X86_LOCAL_APIC*/
#ifdef CONFIG_X86_IO_APIC
-int acpi_ioapic;
-
-#ifdef CONFIG_ACPI
static int __init
acpi_parse_ioapic (
@@ -249,7 +245,6 @@ acpi_parse_nmi_src (
return 0;
}
-#endif /*CONFIG_ACPI*/
#endif /*CONFIG_X86_IO_APIC*/
@@ -332,14 +327,12 @@ acpi_boot_init (void)
if (result)
return result;
-#ifdef CONFIG_ACPI
result = acpi_blacklisted();
if (result) {
printk(KERN_WARNING PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
acpi_disabled = 1;
return result;
}
-#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -390,21 +383,18 @@ acpi_boot_init (void)
return result;
}
-#ifdef CONFIG_ACPI
result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
if (result < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return result;
}
-#endif /*CONFIG_ACPI*/
acpi_lapic = 1;
#endif /*CONFIG_X86_LOCAL_APIC*/
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_ACPI
/*
* I/O APIC
@@ -424,7 +414,7 @@ acpi_boot_init (void)
/*
* if "noapic" boot option, don't look for IO-APICs
*/
- if (skip_ioapic_setup) {
+ if (ioapic_setup_disabled()) {
printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
"due to 'noapic' option.\n");
return 1;
@@ -460,8 +450,6 @@ acpi_boot_init (void)
acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
acpi_ioapic = 1;
-
-#endif /*CONFIG_ACPI*/
#endif /*CONFIG_X86_IO_APIC*/
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index b534bd53bc67..94ff91ebe2e0 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1198,7 +1198,7 @@ static int suspend(int vetoable)
printk(KERN_CRIT "apm: suspend was vetoed, but suspending anyway.\n");
}
- device_suspend(3, SUSPEND_POWER_DOWN);
+ device_suspend(3);
/* serialize with the timer interrupt */
write_seqlock_irq(&xtime_lock);
@@ -1232,7 +1232,7 @@ static int suspend(int vetoable)
if (err != APM_SUCCESS)
apm_error("suspend", err);
err = (err == APM_SUCCESS) ? 0 : -EIO;
- device_resume(RESUME_POWER_ON);
+ device_resume();
pm_send_all(PM_RESUME, (void *)0);
queue_event(APM_NORMAL_RESUME, NULL);
out:
@@ -1346,7 +1346,7 @@ static void check_events(void)
write_seqlock_irq(&xtime_lock);
set_time();
write_sequnlock_irq(&xtime_lock);
- device_resume(RESUME_POWER_ON);
+ device_resume();
pm_send_all(PM_RESUME, (void *)0);
queue_event(event, NULL);
}
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 0a2ca6584a2d..109564522fe8 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -237,9 +237,12 @@ static void __init init_intel(struct cpuinfo_x86 *c)
c->x86_cache_size = l2 ? l2 : (l1i+l1d);
}
- /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
- if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
- clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
+ if ( c->x86 == 6) {
+ unsigned model_mask = (c->x86_model << 8) + c->x86_mask;
+ if (model_mask < 0x0303)
+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ }
/* Names for the Pentium II/Celeron processors
detectable only by also checking the cache size.
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 6f1563f6f784..73eb9664eedb 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -574,7 +574,7 @@ static int mtrr_save(struct sys_device * sysdev, u32 state)
int i;
int size = num_var_ranges * sizeof(struct mtrr_value);
- mtrr_state = kmalloc(size,GFP_KERNEL);
+ mtrr_state = kmalloc(size,GFP_ATOMIC);
if (mtrr_state)
memset(mtrr_state,0,size);
else
@@ -607,8 +607,8 @@ static int mtrr_restore(struct sys_device * sysdev)
static struct sysdev_driver mtrr_sysdev_driver = {
- .save = mtrr_save,
- .restore = mtrr_restore,
+ .suspend = mtrr_save,
+ .resume = mtrr_restore,
};
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index ef91655c2568..e19e3bab3cf4 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -162,24 +162,6 @@ enum
static char *dmi_ident[DMI_STRING_MAX];
-#ifdef CONFIG_ACPI_BOOT
-
-/* print some information suitable for a blacklist entry. */
-static void dmi_dump_system(void)
-{
- printk("DMI: BIOS: %.40s, %.40s, %.40s\n",
- dmi_ident[DMI_BIOS_VENDOR], dmi_ident[DMI_BIOS_VERSION],
- dmi_ident[DMI_BIOS_DATE]);
- printk("DMI: System: %.40s, %.40s, %.40s\n",
- dmi_ident[DMI_SYS_VENDOR], dmi_ident[DMI_PRODUCT_NAME],
- dmi_ident[DMI_PRODUCT_VERSION]);
- printk("DMI: Board: %.40s, %.40s, %.40s\n",
- dmi_ident[DMI_BOARD_VENDOR], dmi_ident[DMI_BOARD_NAME],
- dmi_ident[DMI_BOARD_VERSION]);
-}
-
-#endif
-
/*
* Save a DMI string
*/
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index b57846ce7dbe..bd105ba58219 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1013,7 +1013,6 @@ void __init mp_config_acpi_legacy_irqs (void)
panic("Max # of irq sources exceeded!\n");
}
}
-#endif /* CONFIG_X86_IO_APIC */
#ifdef CONFIG_ACPI
@@ -1150,5 +1149,5 @@ void __init mp_parse_prt (void)
}
#endif /*CONFIG_ACPI_PCI*/
-
+#endif /* CONFIG_X86_IO_APIC */
#endif /*CONFIG_ACPI_BOOT*/
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 8cb06f26888c..00341dbcc655 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -546,9 +546,8 @@ static void __init parse_cmdline_early (char ** cmdline_p)
#ifdef CONFIG_X86_LOCAL_APIC
/* disable IO-APIC */
- else if (!memcmp(from, "noapic", 6)) {
- skip_ioapic_setup = 1;
- }
+ else if (!memcmp(from, "noapic", 6))
+ disable_ioapic_setup();
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* CONFIG_ACPI_BOOT */
@@ -1006,12 +1005,11 @@ void __init setup_arch(char **cmdline_p)
generic_apic_probe(*cmdline_p);
#endif
-#ifdef CONFIG_ACPI_BOOT
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
- (void) acpi_boot_init();
-#endif
+ acpi_boot_init();
+
#ifdef CONFIG_X86_LOCAL_APIC
if (smp_found_config)
get_smp_config();
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 18655f44b88c..7e404741f7d1 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -12,6 +12,7 @@
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
+#include <linux/slab.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>
@@ -91,24 +92,66 @@ static void nmi_save_registers(struct op_msrs * msrs)
{
unsigned int const nr_ctrs = model->num_counters;
unsigned int const nr_ctrls = model->num_controls;
- struct op_msr_group * counters = &msrs->counters;
- struct op_msr_group * controls = &msrs->controls;
+ struct op_msr * counters = msrs->counters;
+ struct op_msr * controls = msrs->controls;
unsigned int i;
for (i = 0; i < nr_ctrs; ++i) {
- rdmsr(counters->addrs[i],
- counters->saved[i].low,
- counters->saved[i].high);
+ rdmsr(counters[i].addr,
+ counters[i].saved.low,
+ counters[i].saved.high);
}
for (i = 0; i < nr_ctrls; ++i) {
- rdmsr(controls->addrs[i],
- controls->saved[i].low,
- controls->saved[i].high);
+ rdmsr(controls[i].addr,
+ controls[i].saved.low,
+ controls[i].saved.high);
}
}
-
+
+static void free_msrs(void)
+{
+ int i;
+ for (i = 0; i < NR_CPUS; ++i) {
+ kfree(cpu_msrs[i].counters);
+ cpu_msrs[i].counters = NULL;
+ kfree(cpu_msrs[i].controls);
+ cpu_msrs[i].controls = NULL;
+ }
+}
+
+
+static int allocate_msrs(void)
+{
+ int success = 1;
+ size_t controls_size = sizeof(struct op_msr) * model->num_controls;
+ size_t counters_size = sizeof(struct op_msr) * model->num_counters;
+
+ int i;
+ for (i = 0; i < NR_CPUS; ++i) {
+ if (!cpu_online(i))
+ continue;
+
+ cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
+ if (!cpu_msrs[i].counters) {
+ success = 0;
+ break;
+ }
+ cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
+ if (!cpu_msrs[i].controls) {
+ success = 0;
+ break;
+ }
+ }
+
+ if (!success)
+ free_msrs();
+
+ return success;
+}
+
+
static void nmi_cpu_setup(void * dummy)
{
int cpu = smp_processor_id();
@@ -125,6 +168,9 @@ static void nmi_cpu_setup(void * dummy)
static int nmi_setup(void)
{
+ if (!allocate_msrs())
+ return -ENOMEM;
+
/* We walk a thin line between law and rape here.
* We need to be careful to install our NMI handler
* without actually triggering any NMIs as this will
@@ -142,20 +188,20 @@ static void nmi_restore_registers(struct op_msrs * msrs)
{
unsigned int const nr_ctrs = model->num_counters;
unsigned int const nr_ctrls = model->num_controls;
- struct op_msr_group * counters = &msrs->counters;
- struct op_msr_group * controls = &msrs->controls;
+ struct op_msr * counters = msrs->counters;
+ struct op_msr * controls = msrs->controls;
unsigned int i;
for (i = 0; i < nr_ctrls; ++i) {
- wrmsr(controls->addrs[i],
- controls->saved[i].low,
- controls->saved[i].high);
+ wrmsr(controls[i].addr,
+ controls[i].saved.low,
+ controls[i].saved.high);
}
for (i = 0; i < nr_ctrs; ++i) {
- wrmsr(counters->addrs[i],
- counters->saved[i].low,
- counters->saved[i].high);
+ wrmsr(counters[i].addr,
+ counters[i].saved.low,
+ counters[i].saved.high);
}
}
@@ -185,6 +231,7 @@ static void nmi_shutdown(void)
on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
unset_nmi_callback();
enable_lapic_nmi_watchdog();
+ free_msrs();
}
@@ -285,6 +332,9 @@ static int __init ppro_init(void)
{
__u8 cpu_model = current_cpu_data.x86_model;
+ if (cpu_model > 0xd)
+ return 0;
+
if (cpu_model > 5) {
nmi_ops.cpu_type = "i386/piii";
} else if (cpu_model > 2) {
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c
index 4f55d1da6f60..5f6cc84abfa7 100644
--- a/arch/i386/oprofile/op_model_athlon.c
+++ b/arch/i386/oprofile/op_model_athlon.c
@@ -20,12 +20,12 @@
#define NUM_COUNTERS 4
#define NUM_CONTROLS 4
-#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters.addrs[(c)], (l), (h));} while (0)
-#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters.addrs[(c)], -(unsigned int)(l), -1);} while (0)
+#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0)
+#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0)
#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls.addrs[(c)], (l), (h));} while (0)
-#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls.addrs[(c)], (l), (h));} while (0)
+#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
+#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
#define CTRL_CLEAR(x) (x &= (1<<21))
@@ -39,15 +39,15 @@ static unsigned long reset_value[NUM_COUNTERS];
static void athlon_fill_in_addresses(struct op_msrs * const msrs)
{
- msrs->counters.addrs[0] = MSR_K7_PERFCTR0;
- msrs->counters.addrs[1] = MSR_K7_PERFCTR1;
- msrs->counters.addrs[2] = MSR_K7_PERFCTR2;
- msrs->counters.addrs[3] = MSR_K7_PERFCTR3;
-
- msrs->controls.addrs[0] = MSR_K7_EVNTSEL0;
- msrs->controls.addrs[1] = MSR_K7_EVNTSEL1;
- msrs->controls.addrs[2] = MSR_K7_EVNTSEL2;
- msrs->controls.addrs[3] = MSR_K7_EVNTSEL3;
+ msrs->counters[0].addr = MSR_K7_PERFCTR0;
+ msrs->counters[1].addr = MSR_K7_PERFCTR1;
+ msrs->counters[2].addr = MSR_K7_PERFCTR2;
+ msrs->counters[3].addr = MSR_K7_PERFCTR3;
+
+ msrs->controls[0].addr = MSR_K7_EVNTSEL0;
+ msrs->controls[1].addr = MSR_K7_EVNTSEL1;
+ msrs->controls[2].addr = MSR_K7_EVNTSEL2;
+ msrs->controls[3].addr = MSR_K7_EVNTSEL3;
}
diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c
index 1b81fc4299cd..7e41dc9826a1 100644
--- a/arch/i386/oprofile/op_model_p4.c
+++ b/arch/i386/oprofile/op_model_p4.c
@@ -366,8 +366,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_READ(low, high, i) do {rdmsr (p4_counters[(i)].cccr_address, (low), (high));} while (0)
-#define CCCR_WRITE(low, high, i) do {wrmsr (p4_counters[(i)].cccr_address, (low), (high));} while (0)
+#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
+#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
@@ -410,7 +410,7 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
/* the counter registers we pay attention to */
for (i = 0; i < num_counters; ++i) {
- msrs->counters.addrs[i] =
+ msrs->counters[i].addr =
p4_counters[VIRT_CTR(stag, i)].counter_address;
}
@@ -419,42 +419,42 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
/* 18 CCCR registers */
for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag;
addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) {
- msrs->controls.addrs[i] = addr;
+ msrs->controls[i].addr = addr;
}
/* 43 ESCR registers in three discontiguous group */
for (addr = MSR_P4_BSU_ESCR0 + stag;
addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
- msrs->controls.addrs[i] = addr;
+ msrs->controls[i].addr = addr;
}
for (addr = MSR_P4_MS_ESCR0 + stag;
addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
- msrs->controls.addrs[i] = addr;
+ msrs->controls[i].addr = addr;
}
for (addr = MSR_P4_IX_ESCR0 + stag;
addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
- msrs->controls.addrs[i] = addr;
+ msrs->controls[i].addr = addr;
}
/* there are 2 remaining non-contiguously located ESCRs */
if (num_counters == NUM_COUNTERS_NON_HT) {
/* standard non-HT CPUs handle both remaining ESCRs*/
- msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
- msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR4;
+ msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
+ msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
} else if (stag == 0) {
/* HT CPUs give the first remainder to the even thread, as
the 32nd control register */
- msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR4;
+ msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
} else {
/* and two copies of the second to the odd thread,
for the 22st and 23nd control registers */
- msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
- msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
+ msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
+ msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
}
}
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c
index d9f38d685395..68aed25442a5 100644
--- a/arch/i386/oprofile/op_model_ppro.c
+++ b/arch/i386/oprofile/op_model_ppro.c
@@ -20,12 +20,12 @@
#define NUM_COUNTERS 2
#define NUM_CONTROLS 2
-#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters.addrs[(c)], (l), (h));} while (0)
-#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters.addrs[(c)], -(u32)(l), -1);} while (0)
+#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0)
+#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), -1);} while (0)
#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls.addrs[(c)]), (l), (h));} while (0)
-#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls.addrs[(c)]), (l), (h));} while (0)
+#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0)
+#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0)
#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
#define CTRL_CLEAR(x) (x &= (1<<21))
@@ -39,11 +39,11 @@ static unsigned long reset_value[NUM_COUNTERS];
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
- msrs->counters.addrs[0] = MSR_P6_PERFCTR0;
- msrs->counters.addrs[1] = MSR_P6_PERFCTR1;
+ msrs->counters[0].addr = MSR_P6_PERFCTR0;
+ msrs->counters[1].addr = MSR_P6_PERFCTR1;
- msrs->controls.addrs[0] = MSR_P6_EVNTSEL0;
- msrs->controls.addrs[1] = MSR_P6_EVNTSEL1;
+ msrs->controls[0].addr = MSR_P6_EVNTSEL0;
+ msrs->controls[1].addr = MSR_P6_EVNTSEL1;
}
diff --git a/arch/i386/oprofile/op_x86_model.h b/arch/i386/oprofile/op_x86_model.h
index e0da54abf7b8..5cc2514670f3 100644
--- a/arch/i386/oprofile/op_x86_model.h
+++ b/arch/i386/oprofile/op_x86_model.h
@@ -11,22 +11,19 @@
#ifndef OP_X86_MODEL_H
#define OP_X86_MODEL_H
-/* Pentium IV needs all these */
-#define MAX_MSR 63
-
struct op_saved_msr {
unsigned int high;
unsigned int low;
};
-struct op_msr_group {
- unsigned int addrs[MAX_MSR];
- struct op_saved_msr saved[MAX_MSR];
+struct op_msr {
+ unsigned long addr;
+ struct op_saved_msr saved;
};
struct op_msrs {
- struct op_msr_group counters;
- struct op_msr_group controls;
+ struct op_msr * counters;
+ struct op_msr * controls;
};
struct pt_regs;
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 2a1b67297597..286291e13860 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -66,7 +66,7 @@ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
-boot := arch/ia64/boot
+boot := arch/ia64/hp/sim/boot
.PHONY: boot compressed check
diff --git a/arch/ia64/hp/sim/Makefile b/arch/ia64/hp/sim/Makefile
index e8fba4e6f774..d10da47931d7 100644
--- a/arch/ia64/hp/sim/Makefile
+++ b/arch/ia64/hp/sim/Makefile
@@ -7,7 +7,7 @@
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
-obj-y := hpsim_irq.o hpsim_setup.o
+obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_HP_SIMETH) += simeth.o
diff --git a/arch/ia64/boot/Makefile b/arch/ia64/hp/sim/boot/Makefile
index b18c6323ee9b..df6e9968c845 100644
--- a/arch/ia64/boot/Makefile
+++ b/arch/ia64/hp/sim/boot/Makefile
@@ -5,7 +5,7 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (C) 1998 by David Mosberger-Tang <davidm@hpl.hp.com>
+# Copyright (C) 1998, 2003 by David Mosberger-Tang <davidm@hpl.hp.com>
#
targets-$(CONFIG_IA64_HP_SIM) += bootloader
@@ -32,6 +32,6 @@ $(obj)/vmlinux.bin: vmlinux FORCE
LDFLAGS_bootloader = -static -T
-$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o \
+$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
lib/lib.a arch/ia64/lib/lib.a FORCE
$(call if_changed,ld)
diff --git a/arch/ia64/hp/sim/boot/boot_head.S b/arch/ia64/hp/sim/boot/boot_head.S
new file mode 100644
index 000000000000..92c20ce3c404
--- /dev/null
+++ b/arch/ia64/hp/sim/boot/boot_head.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/asmmacro.h>
+
+ .bss
+ .align 16
+stack_mem:
+ .skip 16834
+
+ .text
+
+/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
+GLOBAL_ENTRY(printk)
+ break 0
+END(printk)
+
+GLOBAL_ENTRY(_start)
+ .prologue
+ .save rp, r0
+ .body
+ movl gp = __gp
+ movl sp = stack_mem
+ bsw.1
+ br.call.sptk.many rp=start_bootloader
+END(_start)
+
+GLOBAL_ENTRY(ssc)
+ .regstk 5,0,0,0
+ mov r15=in4
+ break 0x80001
+ br.ret.sptk.many b0
+END(ssc)
+
+GLOBAL_ENTRY(jmp_to_kernel)
+ .regstk 2,0,0,0
+ mov r28=in0
+ mov b7=in1
+ br.sptk.few b7
+END(jmp_to_kernel)
+
+
+GLOBAL_ENTRY(pal_emulator_static)
+ mov r8=-1
+ mov r9=256
+ ;;
+ cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
+(p6) br.cond.sptk.few static
+ ;;
+ mov r9=512
+ ;;
+ cmp.gtu p6,p7=r9,r28
+(p6) br.cond.sptk.few stacked
+ ;;
+static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
+(p7) br.cond.sptk.few 1f
+ ;;
+ mov r8=0 /* status = 0 */
+ movl r9=0x100000000 /* tc.base */
+ movl r10=0x0000000200000003 /* count[0], count[1] */
+ movl r11=0x1000000000002000 /* stride[0], stride[1] */
+ br.cond.sptk.few rp
+1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */
+(p7) br.cond.sptk.few 1f
+ mov r8=0 /* status = 0 */
+ movl r9 =0x100000064 /* proc_ratio (1/100) */
+ movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
+ movl r11=0x100000064 /* itc_ratio<<32 (1/100) */
+ ;;
+1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */
+(p7) br.cond.sptk.few 1f
+ mov r8=0 /* status = 0 */
+ mov r9=96 /* num phys stacked */
+ mov r10=0 /* hints */
+ mov r11=0
+ br.cond.sptk.few rp
+1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */
+(p7) br.cond.sptk.few 1f
+ mov r9=ar.lc
+ movl r8=524288 /* flush 512k million cache lines (16MB) */
+ ;;
+ mov ar.lc=r8
+ movl r8=0xe000000000000000
+ ;;
+.loop: fc r8
+ add r8=32,r8
+ br.cloop.sptk.few .loop
+ sync.i
+ ;;
+ srlz.i
+ ;;
+ mov ar.lc=r9
+ mov r8=r0
+ ;;
+1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */
+(p7) br.cond.sptk.few 1f
+ mov r8=0 /* status = 0 */
+ movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */
+ mov r10=0 /* reserved */
+ mov r11=0 /* reserved */
+ mov r16=0xffff /* implemented PMC */
+ mov r17=0xffff /* implemented PMD */
+ add r18=8,r29 /* second index */
+ ;;
+ st8 [r29]=r16,16 /* store implemented PMC */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ ;;
+ st8 [r29]=r0,16 /* store implemented PMC */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ ;;
+ st8 [r29]=r17,16 /* store implemented PMD */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ mov r16=0xf0 /* cycles count capable PMC */
+ ;;
+ st8 [r29]=r0,16 /* store implemented PMC */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ mov r17=0x10 /* retired bundles capable PMC */
+ ;;
+ st8 [r29]=r16,16 /* store cycles capable */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ ;;
+ st8 [r29]=r0,16 /* store implemented PMC */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ ;;
+ st8 [r29]=r17,16 /* store retired bundle capable */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ ;;
+ st8 [r29]=r0,16 /* store implemented PMC */
+ st8 [r18]=r0,16 /* clear remaining bits */
+ ;;
+1: br.cond.sptk.few rp
+stacked:
+ br.ret.sptk.few rp
+END(pal_emulator_static)
diff --git a/arch/ia64/boot/bootloader.c b/arch/ia64/hp/sim/boot/bootloader.c
index 593667cbb74d..f49cde5de1f9 100644
--- a/arch/ia64/boot/bootloader.c
+++ b/arch/ia64/hp/sim/boot/bootloader.c
@@ -1,9 +1,9 @@
/*
- * arch/ia64/boot/bootloader.c
+ * arch/ia64/hp/sim/boot/bootloader.c
*
* Loads an ELF kernel.
*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
@@ -17,31 +17,13 @@ struct task_struct; /* forward declaration for elf.h */
#include <linux/kernel.h>
#include <asm/elf.h>
+#include <asm/intrinsics.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <asm/sal.h>
#include <asm/system.h>
-/* Simulator system calls: */
-
-#define SSC_CONSOLE_INIT 20
-#define SSC_GETCHAR 21
-#define SSC_PUTCHAR 31
-#define SSC_OPEN 50
-#define SSC_CLOSE 51
-#define SSC_READ 52
-#define SSC_WRITE 53
-#define SSC_GET_COMPLETION 54
-#define SSC_WAIT_COMPLETION 55
-#define SSC_CONNECT_INTERRUPT 58
-#define SSC_GENERATE_INTERRUPT 59
-#define SSC_SET_PERIODIC_INTERRUPT 60
-#define SSC_GET_RTC 65
-#define SSC_EXIT 66
-#define SSC_LOAD_SYMBOLS 69
-#define SSC_GET_TOD 74
-
-#define SSC_GET_ARGS 75
+#include "ssc.h"
struct disk_req {
unsigned long addr;
@@ -53,10 +35,8 @@ struct disk_stat {
unsigned count;
};
-#include "../kernel/fw-emu.c"
-
-/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
-asm (".global printk; printk = 0");
+extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
+extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen);
/*
* Set a break point on this function so that symbols are available to set breakpoints in
@@ -82,9 +62,8 @@ cons_write (const char *buf)
#define MAX_ARGS 32
void
-_start (void)
+start_bootloader (void)
{
- static char stack[16384] __attribute__ ((aligned (16)));
static char mem[4096];
static char buffer[1024];
unsigned long off;
@@ -98,10 +77,6 @@ _start (void)
char *kpath, *args;
long arglen = 0;
- asm volatile ("movl gp=__gp;;" ::: "memory");
- asm volatile ("mov sp=%0" :: "r"(stack) : "memory");
- asm volatile ("bsw.1;;");
-
ssc(0, 0, 0, 0, SSC_CONSOLE_INIT);
/*
@@ -195,15 +170,14 @@ _start (void)
cons_write("starting kernel...\n");
/* fake an I/O base address: */
- asm volatile ("mov ar.k0=%0" :: "r"(0xffffc000000UL));
+ ia64_setreg(_IA64_REG_AR_KR0, 0xffffc000000UL);
bp = sys_fw_init(args, arglen);
ssc(0, (long) kpath, 0, 0, SSC_LOAD_SYMBOLS);
debug_break();
- asm volatile ("mov sp=%2; mov r28=%1; br.sptk.few %0"
- :: "b"(e_entry), "r"(bp), "r"(__pa(&stack)));
+ jmp_to_kernel((unsigned long) bp, e_entry);
cons_write("kernel returned!\n");
ssc(-1, 0, 0, 0, SSC_EXIT);
diff --git a/arch/ia64/boot/bootloader.lds b/arch/ia64/hp/sim/boot/bootloader.lds
index 69ae58531033..69ae58531033 100644
--- a/arch/ia64/boot/bootloader.lds
+++ b/arch/ia64/hp/sim/boot/bootloader.lds
diff --git a/arch/ia64/kernel/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 138203f0c511..95d71afbd279 100644
--- a/arch/ia64/kernel/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -3,9 +3,6 @@
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * For the HP simulator, this file gets include in boot/bootloader.c.
- * For SoftSDV, this file gets included in sys_softsdv.c.
*/
#include <linux/config.h>
@@ -18,6 +15,8 @@
#include <asm/pal.h>
#include <asm/sal.h>
+#include "ssc.h"
+
#define MB (1024*1024UL)
#define SIMPLE_MEMMAP 1
@@ -37,27 +36,6 @@ static char fw_mem[( sizeof(struct ia64_boot_param)
+ NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
+ 1024)] __attribute__ ((aligned (8)));
-#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
-
-/* Simulator system calls: */
-
-#define SSC_EXIT 66
-
-/*
- * Simulator system call.
- */
-static long
-ssc (long arg0, long arg1, long arg2, long arg3, int nr)
-{
- register long r8 asm ("r8");
-
- asm volatile ("mov r15=%1\n\t"
- "break 0x80001"
- : "=r"(r8)
- : "r"(nr), "r"(arg0), "r"(arg1), "r"(arg2), "r"(arg3));
- return r8;
-}
-
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
@@ -119,109 +97,8 @@ offtime (unsigned long t, efi_time_t *tp)
return 1;
}
-#endif /* CONFIG_IA64_HP_SIM */
-
-/*
- * Very ugly, but we need this in the simulator only. Once we run on
- * real hw, this can all go away.
- */
extern void pal_emulator_static (void);
-asm (
-" .proc pal_emulator_static\n"
-"pal_emulator_static:"
-" mov r8=-1\n"
-" mov r9=256\n"
-" ;;\n"
-" cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */\n"
-"(p6) br.cond.sptk.few static\n"
-" ;;\n"
-" mov r9=512\n"
-" ;;\n"
-" cmp.gtu p6,p7=r9,r28\n"
-"(p6) br.cond.sptk.few stacked\n"
-" ;;\n"
-"static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" ;;\n"
-" mov r8=0 /* status = 0 */\n"
-" movl r9=0x100000000 /* tc.base */\n"
-" movl r10=0x0000000200000003 /* count[0], count[1] */\n"
-" movl r11=0x1000000000002000 /* stride[0], stride[1] */\n"
-" br.cond.sptk.few rp\n"
-"1: cmp.eq p6,p7=14,r28 /* PAL_FREQ_RATIOS */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r8=0 /* status = 0 */\n"
-" movl r9 =0x100000064 /* proc_ratio (1/100) */\n"
-" movl r10=0x100000100 /* bus_ratio<<32 (1/256) */\n"
-" movl r11=0x100000064 /* itc_ratio<<32 (1/100) */\n"
-" ;;\n"
-"1: cmp.eq p6,p7=19,r28 /* PAL_RSE_INFO */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r8=0 /* status = 0 */\n"
-" mov r9=96 /* num phys stacked */\n"
-" mov r10=0 /* hints */\n"
-" mov r11=0\n"
-" br.cond.sptk.few rp\n"
-"1: cmp.eq p6,p7=1,r28 /* PAL_CACHE_FLUSH */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r9=ar.lc\n"
-" movl r8=524288 /* flush 512k million cache lines (16MB) */\n"
-" ;;\n"
-" mov ar.lc=r8\n"
-" movl r8=0xe000000000000000\n"
-" ;;\n"
-".loop: fc r8\n"
-" add r8=32,r8\n"
-" br.cloop.sptk.few .loop\n"
-" sync.i\n"
-" ;;\n"
-" srlz.i\n"
-" ;;\n"
-" mov ar.lc=r9\n"
-" mov r8=r0\n"
-" ;;\n"
-"1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */\n"
-"(p7) br.cond.sptk.few 1f\n"
-" mov r8=0 /* status = 0 */\n"
-" movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */\n"
-" mov r10=0 /* reserved */\n"
-" mov r11=0 /* reserved */\n"
-" mov r16=0xffff /* implemented PMC */\n"
-" mov r17=0xffff /* implemented PMD */\n"
-" add r18=8,r29 /* second index */\n"
-" ;;\n"
-" st8 [r29]=r16,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r17,16 /* store implemented PMD */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" mov r16=0xf0 /* cycles count capable PMC */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" mov r17=0x10 /* retired bundles capable PMC */\n"
-" ;;\n"
-" st8 [r29]=r16,16 /* store cycles capable */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r17,16 /* store retired bundle capable */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-" st8 [r29]=r0,16 /* store implemented PMC */\n"
-" st8 [r18]=r0,16 /* clear remaining bits */\n"
-" ;;\n"
-"1: br.cond.sptk.few rp\n"
-"stacked:\n"
-" br.ret.sptk.few rp\n"
-" .endp pal_emulator_static\n");
-
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
#define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3)
@@ -268,14 +145,14 @@ efi_unimplemented (void)
return EFI_UNSUPPORTED;
}
-static long
+static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
- register long r9 asm ("r9") = 0;
- register long r10 asm ("r10") = 0;
- register long r11 asm ("r11") = 0;
+ long r9 = 0;
+ long r10 = 0;
+ long r11 = 0;
long status;
/*
@@ -357,8 +234,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
} else {
status = -1;
}
- asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
- return status;
+ return ((struct sal_ret_values) {status, r9, r10, r11});
}
@@ -427,7 +303,7 @@ sys_fw_init (const char *args, int arglen)
efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");
efi_systab->fw_revision = 1;
- efi_systab->runtime = __pa(efi_runtime);
+ efi_systab->runtime = (void *) __pa(efi_runtime);
efi_systab->nr_tables = 1;
efi_systab->tables = __pa(efi_tables);
diff --git a/arch/ia64/hp/sim/boot/ssc.h b/arch/ia64/hp/sim/boot/ssc.h
new file mode 100644
index 000000000000..3b94c03e43a9
--- /dev/null
+++ b/arch/ia64/hp/sim/boot/ssc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ */
+#ifndef ssc_h
+#define ssc_h
+
+/* Simulator system calls: */
+
+#define SSC_CONSOLE_INIT 20
+#define SSC_GETCHAR 21
+#define SSC_PUTCHAR 31
+#define SSC_OPEN 50
+#define SSC_CLOSE 51
+#define SSC_READ 52
+#define SSC_WRITE 53
+#define SSC_GET_COMPLETION 54
+#define SSC_WAIT_COMPLETION 55
+#define SSC_CONNECT_INTERRUPT 58
+#define SSC_GENERATE_INTERRUPT 59
+#define SSC_SET_PERIODIC_INTERRUPT 60
+#define SSC_GET_RTC 65
+#define SSC_EXIT 66
+#define SSC_LOAD_SYMBOLS 69
+#define SSC_GET_TOD 74
+
+#define SSC_GET_ARGS 75
+
+/*
+ * Simulator system call.
+ */
+extern long ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
+#endif /* ssc_h */
diff --git a/arch/ia64/hp/sim/hpsim.S b/arch/ia64/hp/sim/hpsim.S
new file mode 100644
index 000000000000..ff16e8a857d1
--- /dev/null
+++ b/arch/ia64/hp/sim/hpsim.S
@@ -0,0 +1,10 @@
+#include <asm/asmmacro.h>
+
+/*
+ * Simulator system call.
+ */
+GLOBAL_ENTRY(ia64_ssc)
+ mov r15=r36
+ break 0x80001
+ br.ret.sptk.many rp
+END(ia64_ssc)
diff --git a/arch/ia64/hp/sim/hpsim_setup.c b/arch/ia64/hp/sim/hpsim_setup.c
index 64ff3a9c6e3c..694fc86bfbd5 100644
--- a/arch/ia64/hp/sim/hpsim_setup.c
+++ b/arch/ia64/hp/sim/hpsim_setup.c
@@ -25,19 +25,6 @@
#include "hpsim_ssc.h"
-/*
- * Simulator system call.
- */
-asm (".text\n"
- ".align 32\n"
- ".global ia64_ssc\n"
- ".proc ia64_ssc\n"
- "ia64_ssc:\n"
- "mov r15=r36\n"
- "break 0x80001\n"
- "br.ret.sptk.many rp\n"
- ".endp\n");
-
void
ia64_ssc_connect_irq (long intr, long irq)
{
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 19de74e1edae..aba19859d2f7 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/compat.h>
+#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
@@ -41,6 +42,11 @@
#define __IA32_NR_sigreturn 119
#define __IA32_NR_rt_sigreturn 173
+#ifdef ASM_SUPPORTED
+/*
+ * Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and
+ * restore_ia32_fpstate_live() can be sure the live register contain user-level state.
+ */
register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21");
@@ -50,6 +56,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
+#endif
struct sigframe_ia32
{
@@ -198,30 +205,6 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
* All other fields unused...
*/
-#define __ldfe(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- __asm__ __volatile__ ("ldfe %0=[%1] ;;" :"=f"(__f__): "r"(x)); \
-})
-
-#define __ldf8(regnum, x) \
-({ \
- register double __f__ asm ("f"#regnum); \
- __asm__ __volatile__ ("ldf8 %0=[%1] ;;" :"=f"(__f__): "r"(x)); \
-})
-
-#define __stfe(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- __asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
-#define __stf8(x, regnum) \
-({ \
- register double __f__ asm ("f"#regnum); \
- __asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
-})
-
static int
save_ia32_fpstate_live (struct _fpstate_ia32 *save)
{
@@ -238,18 +221,19 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
return -EFAULT;
- /* Readin fsr, fcr, fir, fdr and copy onto fpstate */
- asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
- asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
- asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
- asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+ /* Read in fsr, fcr, fir, fdr and copy onto fpstate */
+ fsr = ia64_getreg(_IA64_REG_AR_FSR);
+ fcr = ia64_getreg(_IA64_REG_AR_FCR);
+ fir = ia64_getreg(_IA64_REG_AR_FIR);
+ fdr = ia64_getreg(_IA64_REG_AR_FDR);
+
/*
* We need to clear the exception state before calling the signal handler. Clear
* the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex
* instruction.
*/
new_fsr = fsr & ~0x80ff;
- asm volatile ( "mov ar.fsr=%0;" :: "r"(new_fsr));
+ ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
__put_user(fcr & 0xffff, &save->cw);
__put_user(fsr & 0xffff, &save->sw);
@@ -286,45 +270,45 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
- __stfe(fpregp, 12);
+ ia64_stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
- __stfe(fpregp, 13);
+ ia64_stfe(fpregp, 13);
copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
- __stfe(fpregp, 14);
+ ia64_stfe(fpregp, 14);
copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
- __stfe(fpregp, 15);
+ ia64_stfe(fpregp, 15);
copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
- __stf8(&num128[0], 16);
- __stf8(&num128[1], 17);
+ ia64_stf8(&num128[0], 16);
+ ia64_stf8(&num128[1], 17);
copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 18);
- __stf8(&num128[1], 19);
+ ia64_stf8(&num128[0], 18);
+ ia64_stf8(&num128[1], 19);
copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 20);
- __stf8(&num128[1], 21);
+ ia64_stf8(&num128[0], 20);
+ ia64_stf8(&num128[1], 21);
copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 22);
- __stf8(&num128[1], 23);
+ ia64_stf8(&num128[0], 22);
+ ia64_stf8(&num128[1], 23);
copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 24);
- __stf8(&num128[1], 25);
+ ia64_stf8(&num128[0], 24);
+ ia64_stf8(&num128[1], 25);
copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 26);
- __stf8(&num128[1], 27);
+ ia64_stf8(&num128[0], 26);
+ ia64_stf8(&num128[1], 27);
copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 28);
- __stf8(&num128[1], 29);
+ ia64_stf8(&num128[0], 28);
+ ia64_stf8(&num128[1], 29);
copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
- __stf8(&num128[0], 30);
- __stf8(&num128[1], 31);
+ ia64_stf8(&num128[0], 30);
+ ia64_stf8(&num128[1], 31);
copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
return 0;
}
@@ -354,10 +338,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
* should remain same while writing.
* So, we do a read, change specific fields and write.
*/
- asm volatile ( "mov %0=ar.fsr;" : "=r"(fsr));
- asm volatile ( "mov %0=ar.fcr;" : "=r"(fcr));
- asm volatile ( "mov %0=ar.fir;" : "=r"(fir));
- asm volatile ( "mov %0=ar.fdr;" : "=r"(fdr));
+ fsr = ia64_getreg(_IA64_REG_AR_FSR);
+ fcr = ia64_getreg(_IA64_REG_AR_FCR);
+ fir = ia64_getreg(_IA64_REG_AR_FIR);
+ fdr = ia64_getreg(_IA64_REG_AR_FDR);
__get_user(mxcsr, (unsigned int *)&save->mxcsr);
/* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */
@@ -391,10 +375,10 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
num64 = (num64 << 32) | lo;
fdr = (fdr & (~0xffffffffffff)) | num64;
- asm volatile ( "mov ar.fsr=%0;" :: "r"(fsr));
- asm volatile ( "mov ar.fcr=%0;" :: "r"(fcr));
- asm volatile ( "mov ar.fir=%0;" :: "r"(fir));
- asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
+ ia64_setreg(_IA64_REG_AR_FSR, fsr);
+ ia64_setreg(_IA64_REG_AR_FCR, fcr);
+ ia64_setreg(_IA64_REG_AR_FIR, fir);
+ ia64_setreg(_IA64_REG_AR_FDR, fdr);
/*
* restore f8..f11 onto pt_regs
@@ -420,45 +404,45 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia32f2ia64f(&ptp->f11, fpregp);
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
- __ldfe(12, fpregp);
+ ia64_ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
- __ldfe(13, fpregp);
+ ia64_ldfe(13, fpregp);
copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
- __ldfe(14, fpregp);
+ ia64_ldfe(14, fpregp);
copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
- __ldfe(15, fpregp);
+ ia64_ldfe(15, fpregp);
copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
- __ldf8(16, &num128[0]);
- __ldf8(17, &num128[1]);
+ ia64_ldf8(16, &num128[0]);
+ ia64_ldf8(17, &num128[1]);
copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
- __ldf8(18, &num128[0]);
- __ldf8(19, &num128[1]);
+ ia64_ldf8(18, &num128[0]);
+ ia64_ldf8(19, &num128[1]);
copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
- __ldf8(20, &num128[0]);
- __ldf8(21, &num128[1]);
+ ia64_ldf8(20, &num128[0]);
+ ia64_ldf8(21, &num128[1]);
copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
- __ldf8(22, &num128[0]);
- __ldf8(23, &num128[1]);
+ ia64_ldf8(22, &num128[0]);
+ ia64_ldf8(23, &num128[1]);
copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
- __ldf8(24, &num128[0]);
- __ldf8(25, &num128[1]);
+ ia64_ldf8(24, &num128[0]);
+ ia64_ldf8(25, &num128[1]);
copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
- __ldf8(26, &num128[0]);
- __ldf8(27, &num128[1]);
+ ia64_ldf8(26, &num128[0]);
+ ia64_ldf8(27, &num128[1]);
copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
- __ldf8(28, &num128[0]);
- __ldf8(29, &num128[1]);
+ ia64_ldf8(28, &num128[0]);
+ ia64_ldf8(29, &num128[1]);
copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
- __ldf8(30, &num128[0]);
- __ldf8(31, &num128[1]);
+ ia64_ldf8(30, &num128[0]);
+ ia64_ldf8(31, &num128[1]);
return 0;
}
@@ -705,7 +689,7 @@ setup_sigcontext_ia32 (struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate
/*
* `eflags' is in an ar register for this context
*/
- asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+ flag = ia64_getreg(_IA64_REG_AR_EFLAG);
err |= __put_user((unsigned int)flag, &sc->eflags);
err |= __put_user(regs->r12, &sc->esp_at_signal);
err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);
@@ -790,10 +774,10 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
* IA32 process's context.
*/
err |= __get_user(tmpflags, &sc->eflags);
- asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+ flag = ia64_getreg(_IA64_REG_AR_EFLAG);
flag &= ~0x40DD5;
flag |= (tmpflags & 0x40DD5);
- asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
+ ia64_setreg(_IA64_REG_AR_EFLAG, flag);
regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
}
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index df53dcdb72bb..4d373270fe3b 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/sched.h>
+#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -68,19 +69,11 @@ ia32_load_segment_descriptors (struct task_struct *task)
void
ia32_save_state (struct task_struct *t)
{
- unsigned long eflag, fsr, fcr, fir, fdr;
-
- asm ("mov %0=ar.eflag;"
- "mov %1=ar.fsr;"
- "mov %2=ar.fcr;"
- "mov %3=ar.fir;"
- "mov %4=ar.fdr;"
- : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
- t->thread.eflag = eflag;
- t->thread.fsr = fsr;
- t->thread.fcr = fcr;
- t->thread.fir = fir;
- t->thread.fdr = fdr;
+ t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
+ t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
+ t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
+ t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
+ t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
@@ -99,12 +92,11 @@ ia32_load_state (struct task_struct *t)
fdr = t->thread.fdr;
tssd = load_desc(_TSS(nr)); /* TSSD */
- asm volatile ("mov ar.eflag=%0;"
- "mov ar.fsr=%1;"
- "mov ar.fcr=%2;"
- "mov ar.fir=%3;"
- "mov ar.fdr=%4;"
- :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
+ ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
+ ia64_setreg(_IA64_REG_AR_FSR, fsr);
+ ia64_setreg(_IA64_REG_AR_FCR, fcr);
+ ia64_setreg(_IA64_REG_AR_FIR, fir);
+ ia64_setreg(_IA64_REG_AR_FDR, fdr);
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
@@ -178,7 +170,7 @@ void
ia32_cpu_init (void)
{
/* initialize global ia32 state - CR0 and CR4 */
- asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+ ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}
static int __init
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c
index fbdb10a580dc..e486042672f1 100644
--- a/arch/ia64/ia32/ia32_traps.c
+++ b/arch/ia64/ia32/ia32_traps.c
@@ -14,6 +14,7 @@
#include "ia32priv.h"
+#include <asm/intrinsics.h>
#include <asm/ptrace.h>
int
@@ -93,9 +94,8 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
{
unsigned long fsr, fcr;
- asm ("mov %0=ar.fsr;"
- "mov %1=ar.fcr;"
- : "=r"(fsr), "=r"(fcr));
+ fsr = ia64_getreg(_IA64_REG_AR_FSR);
+ fcr = ia64_getreg(_IA64_REG_AR_FCR);
siginfo.si_signo = SIGFPE;
/*
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index e830969c84ea..50620e5c2d4d 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -445,17 +445,19 @@ extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
extern void ia32_load_segment_descriptors (struct task_struct *task);
-#define ia32f2ia64f(dst,src) \
- do { \
- register double f6 asm ("f6"); \
- asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
- } while(0)
-
-#define ia64f2ia32f(dst,src) \
- do { \
- register double f6 asm ("f6"); \
- asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
- } while(0)
+#define ia32f2ia64f(dst,src) \
+do { \
+ ia64_ldfe(6,src); \
+ ia64_stop(); \
+ ia64_stf_spill(dst, 6); \
+} while(0)
+
+#define ia64f2ia32f(dst,src) \
+do { \
+ ia64_ldf_fill(6, src); \
+ ia64_stop(); \
+ ia64_stfe(dst, 6); \
+} while(0)
struct user_regs_struct32 {
__u32 ebx, ecx, edx, esi, edi, ebp, eax;
@@ -468,11 +470,8 @@ struct user_regs_struct32 {
};
/* Prototypes for use in elfcore32.h */
-int save_ia32_fpstate (struct task_struct *tsk,
- struct ia32_user_i387_struct *save);
-
-int save_ia32_fpxstate (struct task_struct *tsk,
- struct ia32_user_fxsr_struct *save);
+extern int save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save);
+extern int save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save);
#endif /* !CONFIG_IA32_SUPPORT */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 5cd378191619..10727d0026ff 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -51,9 +51,10 @@
#include <linux/compat.h>
#include <linux/vfs.h>
+#include <asm/intrinsics.h>
+#include <asm/semaphore.h>
#include <asm/types.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include "ia32priv.h"
@@ -2192,7 +2193,7 @@ sys32_iopl (int level)
if (level != 3)
return(-EINVAL);
/* Trying to gain more privileges? */
- asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
+ old = ia64_getreg(_IA64_REG_AR_EFLAG);
if ((unsigned int) level > ((old >> 12) & 3)) {
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -2216,7 +2217,7 @@ sys32_iopl (int level)
if (addr >= 0) {
old = (old & ~0x3000) | (level << 12);
- asm volatile ("mov ar.eflag=%0;;" :: "r"(old));
+ ia64_setreg(_IA64_REG_AR_EFLAG, old);
}
fput(file);
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 44461263aadd..fe9556272d98 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -471,6 +471,18 @@ GLOBAL_ENTRY(__ia64_syscall)
br.ret.sptk.many rp
END(__ia64_syscall)
+GLOBAL_ENTRY(execve)
+ mov r15=__NR_execve // put syscall number in place
+ break __BREAK_SYSCALL
+ br.ret.sptk.many rp
+END(execve)
+
+GLOBAL_ENTRY(clone)
+ mov r15=__NR_clone // put syscall number in place
+ break __BREAK_SYSCALL
+ br.ret.sptk.many rp
+END(clone)
+
/*
* We invoke syscall_trace through this intermediate function to
* ensure that the syscall input arguments are not clobbered. We
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 05b2c6b580e2..ab79b199aadf 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -39,4 +39,4 @@ static union {
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
-asm (".global init_task; init_task = init_task_mem");
+extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 261b3d3bddcb..9e30b08ba40e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -495,7 +495,7 @@ iosapic_register_intr (unsigned int gsi,
unsigned long polarity, unsigned long trigger)
{
int vector;
- unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+ unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
vector = gsi_to_vector(gsi);
if (vector < 0)
@@ -572,7 +572,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
unsigned long trigger)
{
int vector;
- unsigned int dest = (ia64_get_lid() >> 16) & 0xffff;
+ unsigned int dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
vector = isa_irq_to_vector(isa_irq);
@@ -666,11 +666,11 @@ iosapic_enable_intr (unsigned int vector)
* Direct the interrupt vector to the current cpu, platform redirection
* will distribute them.
*/
- dest = (ia64_get_lid() >> 16) & 0xffff;
+ dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
}
#else
/* direct the interrupt vector to the running cpu id */
- dest = (ia64_get_lid() >> 16) & 0xffff;
+ dest = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif
set_rte(vector, dest);
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index fd56c30ed308..82d4c9891a07 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -30,6 +30,7 @@
#include <asm/bitops.h>
#include <asm/delay.h>
+#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
@@ -93,8 +94,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
* because the register and the memory stack are not
* switched atomically.
*/
- asm ("mov %0=ar.bsp" : "=r"(bsp));
- asm ("mov %0=sp" : "=r"(sp));
+ bsp = ia64_getreg(_IA64_REG_AR_BSP);
+ sp = ia64_getreg(_IA64_REG_SP);
if ((sp - bsp) < 1024) {
static unsigned char count;
@@ -117,11 +118,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
* 16 (without this, it would be ~240, which could easily lead
* to kernel stack overflows).
*/
- saved_tpr = ia64_get_tpr();
+ saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
if (!IS_RESCHEDULE(vector)) {
- ia64_set_tpr(vector);
+ ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
do_IRQ(local_vector_to_irq(vector), regs);
@@ -130,7 +131,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
* Disable interrupts and send EOI:
*/
local_irq_disable();
- ia64_set_tpr(saved_tpr);
+ ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
}
ia64_eoi();
vector = ia64_get_ivr();
@@ -193,7 +194,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
#ifdef CONFIG_SMP
phys_cpu_id = cpu_physical_id(cpu);
#else
- phys_cpu_id = (ia64_get_lid() >> 16) & 0xffff;
+ phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif
/*
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 49bf7bfb8a85..3626ec5ef210 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -505,14 +505,14 @@ ia64_mca_cmc_vector_setup (void)
cmcv.cmcv_regval = 0;
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
cmcv.cmcv_vector = IA64_CMC_VECTOR;
- ia64_set_cmcv(cmcv.cmcv_regval);
+ ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected "
"machine check vector %#x setup and enabled.\n",
smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n",
- smp_processor_id(), ia64_get_cmcv());
+ smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}
/*
@@ -531,11 +531,11 @@ void
ia64_mca_cmc_vector_disable (void *dummy)
{
cmcv_reg_t cmcv;
-
- cmcv = (cmcv_reg_t)ia64_get_cmcv();
+
+ cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
- ia64_set_cmcv(cmcv.cmcv_regval);
+ ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
"machine check vector %#x disabled.\n",
@@ -558,11 +558,11 @@ void
ia64_mca_cmc_vector_enable (void *dummy)
{
cmcv_reg_t cmcv;
-
- cmcv = (cmcv_reg_t)ia64_get_cmcv();
+
+ cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
- ia64_set_cmcv(cmcv.cmcv_regval);
+ ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
"machine check vector %#x enabled.\n",
@@ -727,10 +727,10 @@ ia64_mca_init(void)
/* Register the os init handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
- ia64_tpa(ia64_get_gp()),
+ ia64_tpa(ia64_getreg(_IA64_REG_GP)),
ia64_mc_info.imi_monarch_init_handler_size,
ia64_mc_info.imi_slave_init_handler,
- ia64_tpa(ia64_get_gp()),
+ ia64_tpa(ia64_getreg(_IA64_REG_GP)),
ia64_mc_info.imi_slave_init_handler_size)))
{
printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
@@ -816,16 +816,16 @@ ia64_mca_wakeup_ipi_wait(void)
do {
switch(irr_num) {
case 0:
- irr = ia64_get_irr0();
+ irr = ia64_getreg(_IA64_REG_CR_IRR0);
break;
case 1:
- irr = ia64_get_irr1();
+ irr = ia64_getreg(_IA64_REG_CR_IRR1);
break;
case 2:
- irr = ia64_get_irr2();
+ irr = ia64_getreg(_IA64_REG_CR_IRR2);
break;
case 3:
- irr = ia64_get_irr3();
+ irr = ia64_getreg(_IA64_REG_CR_IRR3);
break;
}
} while (!(irr & (1 << irr_bit))) ;
@@ -1146,7 +1146,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
ia64_mca_cmc_int_handler(cpe_irq, arg, ptregs);
for (++cpuid ; !cpu_online(cpuid) && cpuid < NR_CPUS ; cpuid++);
-
+
if (cpuid < NR_CPUS) {
platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
} else {
@@ -1176,7 +1176,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
start_count = -1;
}
-
+
return IRQ_HANDLED;
}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 03a49b53ac4d..76c39b7d511a 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -39,6 +39,7 @@
#include <asm/bitops.h>
#include <asm/errno.h>
+#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
@@ -255,6 +256,8 @@ typedef struct {
/*
* 64-bit software counter structure
+ *
+ * the next_reset_type is applied to the next call to pfm_reset_regs()
*/
typedef struct {
unsigned long val; /* virtual 64bit counter value */
@@ -266,7 +269,7 @@ typedef struct {
unsigned long seed; /* seed for random-number generator */
unsigned long mask; /* mask for random-number generator */
unsigned int flags; /* notify/do not notify */
- unsigned int reserved; /* for future use */
+ int next_reset_type;/* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */
unsigned long eventid; /* overflow event identifier */
} pfm_counter_t;
@@ -556,7 +559,6 @@ static struct vm_operations_struct pfm_vm_ops={
close: pfm_vm_close
};
-
#define pfm_wait_task_inactive(t) wait_task_inactive(t)
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -647,7 +649,6 @@ DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
-
/* forward declaration */
static struct file_operations pfm_file_ops;
@@ -671,39 +672,45 @@ static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
- __asm__ __volatile__ ("rsm psr.pp;; srlz.i;;"::: "memory");
+ ia64_rsm(IA64_PSR_PP);
+ ia64_srlz_i();
}
static inline void
pfm_set_psr_pp(void)
{
- __asm__ __volatile__ ("ssm psr.pp;; srlz.i;;"::: "memory");
+ ia64_ssm(IA64_PSR_PP);
+ ia64_srlz_i();
}
static inline void
pfm_clear_psr_up(void)
{
- __asm__ __volatile__ ("rsm psr.up;; srlz.i;;"::: "memory");
+ ia64_rsm(IA64_PSR_UP);
+ ia64_srlz_i();
}
static inline void
pfm_set_psr_up(void)
{
- __asm__ __volatile__ ("ssm psr.up;; srlz.i;;"::: "memory");
+ ia64_ssm(IA64_PSR_UP);
+ ia64_srlz_i();
}
static inline unsigned long
pfm_get_psr(void)
{
unsigned long tmp;
- __asm__ __volatile__ ("mov %0=psr;;": "=r"(tmp) :: "memory");
+ tmp = ia64_getreg(_IA64_REG_PSR);
+ ia64_srlz_i();
return tmp;
}
static inline void
pfm_set_psr_l(unsigned long val)
{
- __asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;"::"r"(val): "memory");
+ ia64_setreg(_IA64_REG_PSR_L, val);
+ ia64_srlz_i();
}
static inline void
@@ -970,7 +977,7 @@ pfm_restore_monitoring(struct task_struct *task)
*/
if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
/* disable dcr pp */
- ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
pfm_clear_psr_pp();
} else {
pfm_clear_psr_up();
@@ -1017,7 +1024,7 @@ pfm_restore_monitoring(struct task_struct *task)
*/
if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
/* enable dcr pp */
- ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i();
}
pfm_set_psr_l(psr);
@@ -1525,7 +1532,7 @@ pfm_lseek(struct file *file, loff_t offset, int whence)
}
static ssize_t
-pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
+pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
pfm_context_t *ctx;
pfm_msg_t *msg;
@@ -1622,18 +1629,6 @@ abort:
}
static ssize_t
-pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
-{
- int oldvar, ret;
-
- oldvar = pfm_debug_var;
- pfm_debug_var = pfm_sysctl.debug_pfm_read;
- ret = pfm_do_read(filp, buf, size, ppos);
- pfm_debug_var = oldvar;
- return ret;
-}
-
-static ssize_t
pfm_write(struct file *file, const char *ubuf,
size_t size, loff_t *ppos)
{
@@ -1773,7 +1768,7 @@ pfm_syswide_force_stop(void *info)
/*
* Update local PMU
*/
- ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
ia64_srlz_i();
/*
* update local cpuinfo
@@ -2752,20 +2747,18 @@ pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
DPRINT_ovfl(("ovfl_regs=0x%lx flag=%d\n", ovfl_regs[0], flag));
- if (flag == PFM_PMD_NO_RESET) return;
-
/*
* now restore reset value on sampling overflowed counters
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
- if (mask & 0x1) {
- ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
- reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
- DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
- is_long_reset ? "long" : "short", i, val));
- }
+ if ((mask & 0x1UL) == 0UL) continue;
+
+ ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
+ reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
+
+ DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
}
/*
@@ -2804,15 +2797,15 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
- if (mask & 0x1) {
- val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
- reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
- DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
- is_long_reset ? "long" : "short", i, val));
+ if ((mask & 0x1UL) == 0UL) continue;
- pfm_write_soft_counter(ctx, i, val);
- }
+ val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
+ reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
+
+ DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
+
+ pfm_write_soft_counter(ctx, i, val);
}
/*
@@ -2854,7 +2847,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
if (is_loaded) {
thread = &ctx->ctx_task->thread;
- can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0;
+ can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
@@ -3562,51 +3555,49 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
struct task_struct *task;
pfm_buffer_fmt_t *fmt;
pfm_ovfl_ctrl_t rst_ctrl;
- int is_loaded;
+ int state, is_system;
int ret = 0;
+ state = ctx->ctx_state;
fmt = ctx->ctx_buf_fmt;
- is_loaded = CTX_IS_LOADED(ctx);
-
- if (is_loaded && CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) goto proceed;
+ is_system = ctx->ctx_fl_system;
+ task = PFM_CTX_TASK(ctx);
- /*
- * restarting a terminated context is a nop
- */
- if (unlikely(CTX_IS_TERMINATED(ctx))) {
- DPRINT(("context is terminated, nothing to do\n"));
- return 0;
+ switch(state) {
+ case PFM_CTX_MASKED:
+ break;
+ case PFM_CTX_LOADED:
+ if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
+ /* fall through */
+ case PFM_CTX_UNLOADED:
+ case PFM_CTX_ZOMBIE:
+ DPRINT(("invalid state=%d\n", state));
+ return -EBUSY;
+ case PFM_CTX_TERMINATED:
+ DPRINT(("context is terminated, nothing to do\n"));
+ return 0;
+ default:
+ DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
+ return -EINVAL;
}
-
- /*
- * LOADED, UNLOADED, ZOMBIE
- */
- if (CTX_IS_MASKED(ctx) == 0) return -EBUSY;
-
-proceed:
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
- if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
return -EBUSY;
}
- task = PFM_CTX_TASK(ctx);
-
/* sanity check */
if (unlikely(task == NULL)) {
printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
return -EINVAL;
}
- /*
- * this test is always true in system wide mode
- */
- if (task == current) {
+ if (task == current || is_system) {
fmt = ctx->ctx_buf_fmt;
@@ -3618,25 +3609,23 @@ proceed:
prefetch(ctx->ctx_smpl_hdr);
- rst_ctrl.stop_monitoring = 0;
- rst_ctrl.reset_pmds = PFM_PMD_NO_RESET;
+ rst_ctrl.bits.mask_monitoring = 0;
+ rst_ctrl.bits.reset_ovfl_pmds = 1;
- if (is_loaded)
+ if (state == PFM_CTX_LOADED)
ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
else
ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
-
-
} else {
- rst_ctrl.stop_monitoring = 0;
- rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET;
+ rst_ctrl.bits.mask_monitoring = 0;
+ rst_ctrl.bits.reset_ovfl_pmds = 1;
}
if (ret == 0) {
- if (rst_ctrl.reset_pmds)
- pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, rst_ctrl.reset_pmds);
+ if (rst_ctrl.bits.reset_ovfl_pmds)
+ pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
- if (rst_ctrl.stop_monitoring == 0) {
+ if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring for [%d]\n", task->pid));
if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task);
@@ -3679,7 +3668,6 @@ proceed:
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
-
PFM_SET_WORK_PENDING(task, 1);
pfm_set_task_notify(task);
@@ -3700,10 +3688,9 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_debug_var = pfm_sysctl.debug;
- printk(KERN_ERR "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
-
+ printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
- if (m==0) {
+ if (m == 0) {
memset(pfm_stats, 0, sizeof(pfm_stats));
for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
}
@@ -3711,7 +3698,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
return 0;
}
-
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
@@ -3919,6 +3905,7 @@ static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct pt_regs *tregs;
+ struct task_struct *task = PFM_CTX_TASK(ctx);
if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL;
@@ -3944,7 +3931,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*
* disable dcr pp
*/
- ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
ia64_srlz_i();
/*
@@ -3968,7 +3955,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* per-task mode
*/
- if (ctx->ctx_task == current) {
+ if (task == current) {
/* stop monitoring at kernel level */
pfm_clear_psr_up();
@@ -3977,7 +3964,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/
ia64_psr(regs)->up = 0;
} else {
- tregs = ia64_task_regs(ctx->ctx_task);
+ tregs = ia64_task_regs(task);
/*
* stop monitoring at the user level
@@ -3988,7 +3975,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* monitoring disabled in kernel at next reschedule
*/
ctx->ctx_saved_psr_up = 0;
- printk("pfm_stop: current [%d] task=[%d]\n", current->pid, ctx->ctx_task->pid);
+ DPRINT(("pfm_stop: current [%d] task=[%d]\n", current->pid, task->pid));
}
return 0;
}
@@ -4034,7 +4021,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_set_psr_pp();
/* enable dcr pp */
- ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i();
return 0;
@@ -4100,6 +4087,28 @@ abort_mission:
}
static int
+pfm_check_task_exist(pfm_context_t *ctx)
+{
+ struct task_struct *g, *t;
+ int ret = -ESRCH;
+
+ read_lock(&tasklist_lock);
+
+ do_each_thread (g, t) {
+ if (t->thread.pfm_context == ctx) {
+ ret = 0;
+ break;
+ }
+ } while_each_thread (g, t);
+
+ read_unlock(&tasklist_lock);
+
+ DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
+
+ return ret;
+}
+
+static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
@@ -4199,7 +4208,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
current->pid,
thread->pfm_context, ctx));
- old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
+ old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
if (old != NULL) {
DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
goto error_unres;
@@ -4309,8 +4318,17 @@ error:
/*
* release task, there is now a link with the context
*/
- if (ctx->ctx_fl_system == 0 && task != current) pfm_put_task(task);
+ if (ctx->ctx_fl_system == 0 && task != current) {
+ pfm_put_task(task);
+ if (ret == 0) {
+ ret = pfm_check_task_exist(ctx);
+ if (ret) {
+ CTX_UNLOADED(ctx);
+ ctx->ctx_task = NULL;
+ }
+ }
+ }
return ret;
}
@@ -4327,7 +4345,7 @@ static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- struct task_struct *task = ctx->ctx_task;
+ struct task_struct *task = PFM_CTX_TASK(ctx);
struct pt_regs *tregs;
DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
@@ -4409,8 +4427,8 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
* cancel user level control
*/
ia64_psr(regs)->sp = 1;
- DPRINT(("setting psr.sp for [%d]\n", task->pid));
+ DPRINT(("setting psr.sp for [%d]\n", task->pid));
}
/*
* save PMDs to context
@@ -4483,7 +4501,7 @@ pfm_exit_thread(struct task_struct *task)
pfm_context_t *ctx;
unsigned long flags;
struct pt_regs *regs = ia64_task_regs(task);
- int ret;
+ int ret, state;
int free_ok = 0;
ctx = PFM_GET_CTX(task);
@@ -4492,38 +4510,42 @@ pfm_exit_thread(struct task_struct *task)
DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
- /*
- * come here only if attached
- */
- if (unlikely(CTX_IS_UNLOADED(ctx))) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
- goto skip_all;
- }
-
- if (CTX_IS_LOADED(ctx) || CTX_IS_MASKED(ctx)) {
-
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
- }
- CTX_TERMINATED(ctx);
- DPRINT(("ctx terminated by [%d]\n", task->pid));
-
- pfm_end_notify_user(ctx);
+ state = ctx->ctx_state;
+ switch(state) {
+ case PFM_CTX_UNLOADED:
+ /*
+ * come here only if attached
+ */
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
+ break;
+ case PFM_CTX_LOADED:
+ case PFM_CTX_MASKED:
+ ret = pfm_context_unload(ctx, NULL, 0, regs);
+ if (ret) {
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
+ }
+ CTX_TERMINATED(ctx);
+ DPRINT(("ctx terminated by [%d]\n", task->pid));
- } else if (CTX_IS_ZOMBIE(ctx)) {
- pfm_clear_psr_up();
+ pfm_end_notify_user(ctx);
+ break;
+ case PFM_CTX_ZOMBIE:
+ pfm_clear_psr_up();
- BUG_ON(ctx->ctx_smpl_hdr);
+ BUG_ON(ctx->ctx_smpl_hdr);
- pfm_force_cleanup(ctx, regs);
+ pfm_force_cleanup(ctx, regs);
- free_ok = 1;
+ free_ok = 1;
+ break;
+ default:
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
+ break;
}
{ u64 psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
+ BUG_ON(GET_PMU_OWNER());
}
-skip_all:
UNPROTECT_CTX(ctx, flags);
/*
@@ -4653,7 +4675,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
/*
* reject any call if perfmon was disabled at initialization time
- */
+ */
if (PFM_IS_DISABLED()) return -ENOSYS;
if (unlikely(PFM_CMD_IS_VALID(cmd) == 0)) {
@@ -4773,6 +4795,8 @@ abort_locked:
error_args:
if (args_k) kfree(args_k);
+ DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
+
return ret;
}
@@ -4789,22 +4813,22 @@ pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_reg
*/
if (CTX_HAS_SMPL(ctx)) {
- rst_ctrl.stop_monitoring = 1;
- rst_ctrl.reset_pmds = PFM_PMD_NO_RESET;
+ rst_ctrl.bits.mask_monitoring = 0;
+ rst_ctrl.bits.reset_ovfl_pmds = 1;
/* XXX: check return value */
if (fmt->fmt_restart)
ret = (*fmt->fmt_restart)(current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else {
- rst_ctrl.stop_monitoring = 0;
- rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET;
+ rst_ctrl.bits.mask_monitoring = 0;
+ rst_ctrl.bits.reset_ovfl_pmds = 1;
}
if (ret == 0) {
- if (rst_ctrl.reset_pmds != PFM_PMD_NO_RESET)
- pfm_reset_regs(ctx, &ovfl_regs, rst_ctrl.reset_pmds);
-
- if (rst_ctrl.stop_monitoring == 0) {
+ if (rst_ctrl.bits.reset_ovfl_pmds) {
+ pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
+ }
+ if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring\n"));
if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current);
} else {
@@ -4974,11 +4998,12 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
- msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
msg->pfm_ovfl_msg.msg_active_set = 0;
msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
- msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
-
+ msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
+ msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
+ msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
+ msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
}
DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n",
@@ -5024,9 +5049,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
pfm_ovfl_arg_t ovfl_arg;
unsigned long mask;
unsigned long old_val;
- unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL;
+ unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
+ unsigned long tstamp;
pfm_ovfl_ctrl_t ovfl_ctrl;
- unsigned int i, j, has_smpl, first_pmd = ~0U;
+ unsigned int i, has_smpl;
int must_notify = 0;
if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring;
@@ -5036,9 +5062,11 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
*/
if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
+ tstamp = ia64_get_itc();
+
mask = pmc0 >> PMU_FIRST_COUNTER;
- DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s"
+ DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
"used_pmds=0x%lx reload_pmcs=0x%lx\n",
pmc0,
task ? task->pid: -1,
@@ -5059,7 +5087,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
if ((mask & 0x1) == 0) continue;
DPRINT_ovfl(("pmd[%d] overflowed hw_pmd=0x%lx ctx_pmd=0x%lx\n",
- i, ia64_get_pmd(i), ctx->ctx_pmds[i].val));
+ i, ia64_get_pmd(i), ctx->ctx_pmds[i].val));
/*
* Note that the pmd is not necessarily 0 at this point as qualified events
@@ -5074,91 +5102,132 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
* check for overflow condition
*/
if (likely(old_val > ctx->ctx_pmds[i].val)) {
-
ovfl_pmds |= 1UL << i;
-
- /*
- * keep track of pmds of interest for samples
- */
- if (has_smpl) {
- if (first_pmd == ~0U) first_pmd = i;
- smpl_pmds |= ctx->ctx_pmds[i].smpl_pmds[0];
- }
-
if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
}
- DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx first_pmd=%u smpl_pmds=0x%lx\n",
- i, ctx->ctx_pmds[i].val, old_val,
- ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify, first_pmd, smpl_pmds));
+ DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx "
+ "ovfl_notify=0x%lx\n",
+ i, ctx->ctx_pmds[i].val, old_val,
+ ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify));
}
- ovfl_ctrl.notify_user = ovfl_notify ? 1 : 0;
- ovfl_ctrl.reset_pmds = ovfl_pmds && ovfl_notify == 0UL ? 1 : 0;
- ovfl_ctrl.block = ovfl_notify ? 1 : 0;
- ovfl_ctrl.stop_monitoring = ovfl_notify ? 1 : 0;
+ /*
+ * there was no 64-bit overflow, nothing else to do
+ */
+ if (ovfl_pmds == 0UL) return;
+
+ /*
+ * reset all control bits
+ */
+ ovfl_ctrl.val = 0;
/*
- * when a overflow is detected, check for sampling buffer, if present, invoke
- * record() callback.
+ * if a sampling format module exists, then we "cache" the overflow by
+ * calling the module's handler() routine.
*/
- if (ovfl_pmds && has_smpl) {
- unsigned long start_cycles;
+ if (has_smpl) {
+ unsigned long start_cycles, end_cycles;
+ unsigned long pmd_mask, smpl_pmds;
+ int j, k, ret = 0;
int this_cpu = smp_processor_id();
- ovfl_arg.ovfl_pmds[0] = ovfl_pmds;
- ovfl_arg.ovfl_notify[0] = ovfl_notify;
- ovfl_arg.ovfl_ctrl = ovfl_ctrl;
- ovfl_arg.smpl_pmds[0] = smpl_pmds;
+ pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
prefetch(ctx->ctx_smpl_hdr);
- ovfl_arg.pmd_value = ctx->ctx_pmds[first_pmd].val;
- ovfl_arg.pmd_last_reset = ctx->ctx_pmds[first_pmd].lval;
- ovfl_arg.pmd_eventid = ctx->ctx_pmds[first_pmd].eventid;
+ for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
- /*
- * copy values of pmds of interest. Sampling format may copy them
- * into sampling buffer.
- */
- if (smpl_pmds) {
- for(i=0, j=0; smpl_pmds; i++, smpl_pmds >>=1) {
- if ((smpl_pmds & 0x1) == 0) continue;
- ovfl_arg.smpl_pmds_values[j++] = PMD_IS_COUNTING(i) ? pfm_read_soft_counter(ctx, i) : ia64_get_pmd(i);
+ mask = 1UL << i;
+
+ if ((pmd_mask & 0x1) == 0) continue;
+
+ ovfl_arg.ovfl_pmd = (unsigned char )i;
+ ovfl_arg.ovfl_notify = ovfl_notify & mask ? 1 : 0;
+ ovfl_arg.active_set = 0;
+ ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */
+ ovfl_arg.smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
+
+ ovfl_arg.pmd_value = ctx->ctx_pmds[i].val;
+ ovfl_arg.pmd_last_reset = ctx->ctx_pmds[i].lval;
+ ovfl_arg.pmd_eventid = ctx->ctx_pmds[i].eventid;
+
+ /*
+ * copy values of pmds of interest. Sampling format may copy them
+ * into sampling buffer.
+ */
+ if (smpl_pmds) {
+ for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
+ if ((smpl_pmds & 0x1) == 0) continue;
+ ovfl_arg.smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
+ }
}
- }
- pfm_stats[this_cpu].pfm_smpl_handler_calls++;
- start_cycles = ia64_get_itc();
+ pfm_stats[this_cpu].pfm_smpl_handler_calls++;
- /*
- * call custom buffer format record (handler) routine
- */
- (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs);
+ start_cycles = ia64_get_itc();
+
+ /*
+ * call custom buffer format record (handler) routine
+ */
+ ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs, tstamp);
- pfm_stats[this_cpu].pfm_smpl_handler_cycles += ia64_get_itc() - start_cycles;
+ end_cycles = ia64_get_itc();
- ovfl_pmds = ovfl_arg.ovfl_pmds[0];
- ovfl_notify = ovfl_arg.ovfl_notify[0];
- ovfl_ctrl = ovfl_arg.ovfl_ctrl;
+ /*
+ * For those controls, we take the union because they have
+ * an all or nothing behavior.
+ */
+ ovfl_ctrl.bits.notify_user |= ovfl_arg.ovfl_ctrl.bits.notify_user;
+ ovfl_ctrl.bits.block_task |= ovfl_arg.ovfl_ctrl.bits.block_task;
+ ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring;
+ ovfl_ctrl.bits.reset_ovfl_pmds |= ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds; /* yes or no */
+
+ pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
+ }
+ /*
+ * when the module cannot handle the rest of the overflows, we abort right here
+ */
+ if (ret && pmd_mask) {
+ DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n",
+ current->pid,
+ pmd_mask<<PMU_FIRST_COUNTER));
+ }
+ } else {
+ /*
+ * when no sampling module is used, then the default
+ * is to notify on overflow if requested by user
+ */
+ ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
+ ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
+ ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
+ ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
}
- if (ovfl_pmds && ovfl_ctrl.reset_pmds) {
- pfm_reset_regs(ctx, &ovfl_pmds, ovfl_ctrl.reset_pmds);
+ /*
+ * if we (still) have some overflowed PMD but no notification is requested
+ * then we use the short reset period.
+ */
+ if (ovfl_ctrl.bits.reset_ovfl_pmds) {
+ unsigned long bm = ovfl_pmds;
+ pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
}
- if (ovfl_notify && ovfl_ctrl.notify_user) {
+ if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
/*
* keep track of what to reset when unblocking
*/
ctx->ctx_ovfl_regs[0] = ovfl_pmds;
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.block) {
+ /*
+ * check for blocking context
+ */
+ if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
/*
- * set the perfmon specific checking pending work
+ * set the perfmon specific checking pending work for the task
*/
PFM_SET_WORK_PENDING(task, 1);
@@ -5175,21 +5244,22 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
must_notify = 1;
}
- DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx stopped=%d\n",
+ DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
current->pid,
GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
PFM_GET_WORK_PENDING(task),
ctx->ctx_fl_trap_reason,
ovfl_pmds,
ovfl_notify,
- ovfl_ctrl.stop_monitoring ? 1 : 0));
+ ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
/*
* in case monitoring must be stopped, we toggle the psr bits
*/
- if (ovfl_ctrl.stop_monitoring) {
+ if (ovfl_ctrl.bits.mask_monitoring) {
pfm_mask_monitoring(task);
CTX_MASKED(ctx);
}
+
/*
* send notification now
*/
@@ -5197,7 +5267,6 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
return;
-
sanity_check:
printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
smp_processor_id(),
@@ -5305,7 +5374,7 @@ report_spurious:
static pfm_irq_handler_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
- unsigned long m;
+ unsigned long start_cycles, total_cycles;
unsigned long min, max;
int this_cpu;
int ret;
@@ -5314,19 +5383,22 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
- m = ia64_get_itc();
+ start_cycles = ia64_get_itc();
ret = pfm_do_interrupt_handler(irq, arg, regs);
- m = ia64_get_itc() - m;
+ total_cycles = ia64_get_itc();
/*
* don't measure spurious interrupts
*/
- if (ret == 0) {
- if (m < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = m;
- if (m > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = m;
- pfm_stats[this_cpu].pfm_ovfl_intr_cycles += m;
+ if (likely(ret == 0)) {
+ total_cycles -= start_cycles;
+
+ if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+ if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+
+ pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
}
PFM_IRQ_HANDLER_RET();
}
@@ -5459,13 +5531,13 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
* if monitoring has started
*/
if (dcr_pp) {
- dcr = ia64_get_dcr();
+ dcr = ia64_getreg(_IA64_REG_CR_DCR);
/*
* context switching in?
*/
if (is_ctxswin) {
/* mask monitoring for the idle task */
- ia64_set_dcr(dcr & ~IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
pfm_clear_psr_pp();
ia64_srlz_i();
return;
@@ -5477,10 +5549,14 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
* Due to inlining this odd if-then-else construction generates
* better code.
*/
- ia64_set_dcr(dcr |IA64_DCR_PP);
+ ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
pfm_set_psr_pp();
ia64_srlz_i();
}
+ { unsigned long val;
+ val = ia64_get_pmc(4);
+ if ((val & (1UL<<23)) == 0UL) printk("perfmon: PMU off: pmc4=0x%lx\n", val);
+ }
}
void
@@ -5743,13 +5819,6 @@ pfm_load_regs (struct task_struct *task)
BUG_ON(GET_PMU_OWNER());
t = &task->thread;
- psr = pfm_get_psr();
-
-#if 1
- BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
- BUG_ON(psr & IA64_PSR_I);
-#endif
-
/*
* possible on unload
*/
@@ -5764,6 +5833,12 @@ pfm_load_regs (struct task_struct *task)
* access, not CPU concurrency.
*/
flags = pfm_protect_ctx_ctxsw(ctx);
+ psr = pfm_get_psr();
+
+#if 1
+ BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
+ BUG_ON(psr & IA64_PSR_I);
+#endif
if (unlikely(CTX_IS_ZOMBIE(ctx))) {
struct pt_regs *regs = ia64_task_regs(task);
@@ -6126,6 +6201,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));
if (is_self) task->thread.pmds[i] = pmd_val;
+
ctx->ctx_pmds[i].val = val;
}
}
@@ -6257,7 +6333,7 @@ pfm_init_percpu (void)
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
- ia64_set_pmv(IA64_PERFMON_VECTOR);
+ ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
ia64_srlz_d();
/*
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 4ee7b1379efe..aedf99212c82 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -109,21 +109,15 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v
}
static int
-default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs)
+default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp)
{
pfm_default_smpl_hdr_t *hdr;
pfm_default_smpl_entry_t *ent;
void *cur, *last;
unsigned long *e;
- unsigned long ovfl_mask;
- unsigned long ovfl_notify;
- unsigned long stamp;
unsigned int npmds, i;
-
- /*
- * some time stamp
- */
- stamp = ia64_get_itc();
+ unsigned char ovfl_pmd;
+ unsigned char ovfl_notify;
if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) {
DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
@@ -133,8 +127,8 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr = (pfm_default_smpl_hdr_t *)buf;
cur = hdr->hdr_cur_pos;
last = hdr->hdr_last_pos;
- ovfl_mask = arg->ovfl_pmds[0];
- ovfl_notify = arg->ovfl_notify[0];
+ ovfl_pmd = arg->ovfl_pmd;
+ ovfl_notify = arg->ovfl_notify;
/*
* check for space against largest possibly entry.
@@ -153,12 +147,12 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr->hdr_count++;
- DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmds=0x%lx ovfl_notify=0x%lx npmds=%u\n",
+ DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n",
task->pid,
hdr->hdr_count,
cur, last,
last-cur,
- ovfl_mask,
+ ovfl_pmd,
ovfl_notify, npmds));
/*
@@ -172,7 +166,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
* - this is not necessarily the task controlling the session
*/
ent->pid = current->pid;
- ent->cpu = smp_processor_id();
+ ent->ovfl_pmd = ovfl_pmd;
ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
/*
@@ -180,13 +174,9 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
*/
ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
- /*
- * which registers overflowed
- */
- ent->ovfl_pmds = ovfl_mask;
ent->tstamp = stamp;
+ ent->cpu = smp_processor_id();
ent->set = arg->active_set;
- ent->reserved1 = 0;
/*
* selectively store PMDs in increasing index number
@@ -206,14 +196,14 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
/*
* keep same ovfl_pmds, ovfl_notify
*/
- arg->ovfl_ctrl.notify_user = 0;
- arg->ovfl_ctrl.block = 0;
- arg->ovfl_ctrl.stop_monitoring = 0;
- arg->ovfl_ctrl.reset_pmds = 1;
+ arg->ovfl_ctrl.bits.notify_user = 0;
+ arg->ovfl_ctrl.bits.block_task = 0;
+ arg->ovfl_ctrl.bits.mask_monitoring = 0;
+ arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */
return 0;
full:
- DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=0x%lx\n", last-cur, hdr->hdr_count, ovfl_notify));
+ DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify));
/*
* increment number of buffer overflow.
@@ -222,22 +212,21 @@ full:
hdr->hdr_overflows++;
/*
- * if no notification is needed, then we just reset the buffer index.
+ * if no notification is needed, then we saturate the buffer
*/
- if (ovfl_notify == 0UL) {
+ if (ovfl_notify == 0) {
hdr->hdr_count = 0UL;
- arg->ovfl_ctrl.notify_user = 0;
- arg->ovfl_ctrl.block = 0;
- arg->ovfl_ctrl.stop_monitoring = 0;
- arg->ovfl_ctrl.reset_pmds = 1;
+ arg->ovfl_ctrl.bits.notify_user = 0;
+ arg->ovfl_ctrl.bits.block_task = 0;
+ arg->ovfl_ctrl.bits.mask_monitoring = 1;
+ arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0;
} else {
- /* keep same ovfl_pmds, ovfl_notify */
- arg->ovfl_ctrl.notify_user = 1;
- arg->ovfl_ctrl.block = 1;
- arg->ovfl_ctrl.stop_monitoring = 1;
- arg->ovfl_ctrl.reset_pmds = 0;
+ arg->ovfl_ctrl.bits.notify_user = 1;
+ arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */
+ arg->ovfl_ctrl.bits.mask_monitoring = 1;
+ arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */
}
- return 0;
+ return -1; /* we are full, sorry */
}
static int
@@ -250,8 +239,8 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru
hdr->hdr_count = 0UL;
hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
- ctrl->stop_monitoring = 0;
- ctrl->reset_pmds = PFM_PMD_LONG_RESET;
+ ctrl->bits.mask_monitoring = 0;
+ ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
return 0;
}
@@ -264,15 +253,16 @@ default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
}
static pfm_buffer_fmt_t default_fmt={
- .fmt_name = "default_format",
- .fmt_uuid = PFM_DEFAULT_SMPL_UUID,
- .fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
- .fmt_validate = default_validate,
- .fmt_getsize = default_get_size,
- .fmt_init = default_init,
- .fmt_handler = default_handler,
- .fmt_restart = default_restart,
- .fmt_exit = default_exit,
+ .fmt_name = "default_format",
+ .fmt_uuid = PFM_DEFAULT_SMPL_UUID,
+ .fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
+ .fmt_validate = default_validate,
+ .fmt_getsize = default_get_size,
+ .fmt_init = default_init,
+ .fmt_handler = default_handler,
+ .fmt_restart = default_restart,
+ .fmt_restart_active = default_restart,
+ .fmt_exit = default_exit,
};
static int __init
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1348ea9c3b56..01bd7fe3437e 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -741,8 +741,8 @@ cpu_init (void)
* shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
* be fine).
*/
- ia64_set_dcr( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
- | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
+ ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
+ | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
if (current->mm)
@@ -758,11 +758,11 @@ cpu_init (void)
ia64_set_itv(1 << 16);
ia64_set_lrr0(1 << 16);
ia64_set_lrr1(1 << 16);
- ia64_set_pmv(1 << 16);
- ia64_set_cmcv(1 << 16);
+ ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+ ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
/* clear TPR & XTP to enable all interrupt classes: */
- ia64_set_tpr(0);
+ ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
normal_xtp();
#endif
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 21f97a864a39..8d94b0c0f074 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -1,7 +1,7 @@
/*
* Architecture-specific signal handling support.
*
- * Copyright (C) 1999-2002 Hewlett-Packard Co
+ * Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Derived from i386 and Alpha versions.
@@ -23,6 +23,7 @@
#include <linux/wait.h>
#include <asm/ia32.h>
+#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
@@ -41,6 +42,12 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
+#ifdef ASM_SUPPORTED
+/*
+ * Don't let GCC uses f16-f31 so that when we setup/restore the registers in the signal
+ * context in __kernel_sigtramp(), we can be sure that registers f16-f31 contain user-level
+ * values.
+ */
register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21");
@@ -50,6 +57,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
+#endif
long
ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
@@ -192,7 +200,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_value, &to->si_value);
+ err |= __put_user(from->si_ptr, &to->si_ptr);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
@@ -592,10 +600,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
if (IS_IA32_PROCESS(&scr->pt)) {
scr->pt.r8 = scr->pt.r1;
scr->pt.cr_iip -= 2;
- if (errno == ERESTART_RESTARTBLOCK) {
+ if (errno == ERESTART_RESTARTBLOCK)
scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */
- scr->pt.cr_iip -= 2;
- }
} else {
/*
* Note: the syscall number is in r15 which is saved in
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 70df2828e837..2e6cd77d0350 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -7,6 +7,20 @@
* 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
*/
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+
+#include <asm/fpswa.h>
+#include <asm/hardirq.h>
+#include <asm/ia32.h>
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
/*
* fp_emulate() needs to be able to access and update all floating point registers. Those
* saved in pt_regs can be accessed through that structure, but those not saved, will be
@@ -15,6 +29,7 @@
* by declaring preserved registers that are not marked as "fixed" as global register
* variables.
*/
+#ifdef ASM_SUPPORTED
register double f2 asm ("f2"); register double f3 asm ("f3");
register double f4 asm ("f4"); register double f5 asm ("f5");
@@ -27,20 +42,7 @@ register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-
-#include <asm/hardirq.h>
-#include <asm/ia32.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-
-#include <asm/fpswa.h>
+#endif
extern spinlock_t timerlist_lock;
@@ -357,6 +359,10 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x11) {
siginfo.si_code = FPE_FLTINV;
+ } else if (isr & 0x22) {
+ /* denormal operand gets the same si_code as underflow
+ * see arch/i386/kernel/traps.c:math_error() */
+ siginfo.si_code = FPE_FLTUND;
} else if (isr & 0x44) {
siginfo.si_code = FPE_FLTDIV;
}
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index c7f8012eb757..95f77b1dfa11 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -18,9 +18,10 @@
#include <linux/smp_lock.h>
#include <linux/tty.h>
-#include <asm/uaccess.h>
-#include <asm/rse.h>
+#include <asm/intrinsics.h>
#include <asm/processor.h>
+#include <asm/rse.h>
+#include <asm/uaccess.h>
#include <asm/unaligned.h>
extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
@@ -231,7 +232,7 @@ static u16 fr_info[32]={
static void
invala_gr (int regno)
{
-# define F(reg) case reg: __asm__ __volatile__ ("invala.e r%0" :: "i"(reg)); break
+# define F(reg) case reg: ia64_invala_gr(reg); break
switch (regno) {
F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
@@ -258,7 +259,7 @@ invala_gr (int regno)
static void
invala_fr (int regno)
{
-# define F(reg) case reg: __asm__ __volatile__ ("invala.e f%0" :: "i"(reg)); break
+# define F(reg) case reg: ia64_invala_fr(reg); break
switch (regno) {
F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7);
@@ -554,13 +555,13 @@ setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
static inline void
float_spill_f0 (struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("stf.spill [%0]=f0" :: "r"(final) : "memory");
+ ia64_stf_spill(final, 0);
}
static inline void
float_spill_f1 (struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("stf.spill [%0]=f1" :: "r"(final) : "memory");
+ ia64_stf_spill(final, 1);
}
static void
@@ -954,57 +955,65 @@ static const unsigned char float_fsz[4]={
static inline void
mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldfe f6=[%0];; stf.spill [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldfe(6, init);
+ ia64_stop();
+ ia64_stf_spill(final, 6);
}
static inline void
mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldf8 f6=[%0];; stf.spill [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldf8(6, init);
+ ia64_stop();
+ ia64_stf_spill(final, 6);
}
static inline void
mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldfs f6=[%0];; stf.spill [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldfs(6, init);
+ ia64_stop();
+ ia64_stf_spill(final, 6);
}
static inline void
mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldfd f6=[%0];; stf.spill [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldfd(6, init);
+ ia64_stop();
+ ia64_stf_spill(final, 6);
}
static inline void
float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldf.fill f6=[%0];; stfe [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldf_fill(6, init);
+ ia64_stop();
+ ia64_stfe(final, 6);
}
static inline void
float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldf.fill f6=[%0];; stf8 [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldf_fill(6, init);
+ ia64_stop();
+ ia64_stf8(final, 6);
}
static inline void
float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldf.fill f6=[%0];; stfs [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldf_fill(6, init);
+ ia64_stop();
+ ia64_stfs(final, 6);
}
static inline void
float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
- __asm__ __volatile__ ("ldf.fill f6=[%0];; stfd [%1]=f6"
- :: "r"(init), "r"(final) : "f6","memory");
+ ia64_ldf_fill(6, init);
+ ia64_stop();
+ ia64_stfd(final, 6);
}
static int
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index a2f212131083..0ac35a6b78e4 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -35,6 +35,7 @@ SECTIONS
{
*(.text.ivt)
*(.text)
+ *(.gnu.linkonce.t*)
}
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
@@ -183,7 +184,7 @@ SECTIONS
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
.data : AT(ADDR(.data) - LOAD_OFFSET)
- { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
+ { *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
@@ -194,7 +195,7 @@ SECTIONS
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
- { *(.sdata) }
+ { *(.sdata) *(.sdata1) *(.srdata) }
_edata = .;
_bss = .;
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index a9235e8567de..ec89adf23c6a 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -96,8 +96,8 @@ ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbi
/*
* Flush ALAT entries also.
*/
- asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2)
- : "memory");
+ ia64_ptcga(start, (nbits<<2));
+ ia64_srlz_i();
start += (1UL << nbits);
} while (start < end);
}
@@ -118,15 +118,13 @@ local_flush_tlb_all (void)
local_irq_save(flags);
for (i = 0; i < count0; ++i) {
for (j = 0; j < count1; ++j) {
- asm volatile ("ptc.e %0" :: "r"(addr));
+ ia64_ptce(addr);
addr += stride1;
}
addr += stride0;
}
local_irq_restore(flags);
- ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
- ia64_insn_group_barrier();
}
void
@@ -157,14 +155,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
platform_global_tlb_purge(start, end, nbits);
# else
do {
- asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+ ia64_ptcl(start, (nbits<<2));
start += (1UL << nbits);
} while (start < end);
# endif
- ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
- ia64_insn_group_barrier();
}
void __init
diff --git a/arch/ia64/sn/fakeprom/fw-emu.c b/arch/ia64/sn/fakeprom/fw-emu.c
index ce5c919b2f0a..c332d63d1167 100644
--- a/arch/ia64/sn/fakeprom/fw-emu.c
+++ b/arch/ia64/sn/fakeprom/fw-emu.c
@@ -200,7 +200,7 @@ efi_unimplemented (void)
#ifdef SGI_SN2
#undef cpu_physical_id
-#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff)
+#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
void
fprom_send_cpei(void) {
@@ -224,14 +224,14 @@ fprom_send_cpei(void) {
#endif
-static long
+static struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
- register long r9 asm ("r9") = 0;
- register long r10 asm ("r10") = 0;
- register long r11 asm ("r11") = 0;
+ long r9 = 0;
+ long r10 = 0;
+ long r11 = 0;
long status;
/*
@@ -338,7 +338,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
}
asm volatile ("" :: "r"(r9), "r"(r10), "r"(r11));
- return status;
+ return ((struct sal_ret_values) {status, r9, r10, r11});
}
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 09cbbb65ab13..3f5553ba36f8 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -292,16 +292,16 @@ sn_check_intr(int irq, pcibr_intr_t intr) {
irr_bit = irq_to_vector(irq) % 64;
switch (irr_reg_num) {
case 0:
- irr_reg = ia64_get_irr0();
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
break;
case 1:
- irr_reg = ia64_get_irr1();
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
break;
case 2:
- irr_reg = ia64_get_irr2();
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
break;
case 3:
- irr_reg = ia64_get_irr3();
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
break;
}
if (!test_bit(irr_bit, &irr_reg) ) {
@@ -354,9 +354,9 @@ sn_get_next_bit(void) {
void
sn_set_tpr(int vector) {
if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
- ia64_set_tpr(vector);
+ ia64_setreg(_IA64_REG_CR_TPR, vector);
} else {
- ia64_set_tpr(IA64_LAST_DEVICE_VECTOR);
+ ia64_setreg(_IA64_REG_CR_TPR, IA64_LAST_DEVICE_VECTOR);
}
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index db8925491663..f687b6d14c0f 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -395,7 +395,7 @@ sn_cpu_init(void)
return;
cpuid = smp_processor_id();
- cpuphyid = ((ia64_get_lid() >> 16) & 0xffff);
+ cpuphyid = ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff);
nasid = cpu_physical_id_to_nasid(cpuphyid);
cnode = nasid_to_cnodeid(nasid);
slice = cpu_physical_id_to_slice(cpuphyid);
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c
index 59423708d30c..92764186fd06 100644
--- a/arch/ia64/sn/kernel/sn2/io.c
+++ b/arch/ia64/sn/kernel/sn2/io.c
@@ -11,81 +11,73 @@
#include <asm/sn/sn2/io.h>
+#undef __sn_inb
+#undef __sn_inw
+#undef __sn_inl
+#undef __sn_outb
+#undef __sn_outw
+#undef __sn_outl
+#undef __sn_readb
+#undef __sn_readw
+#undef __sn_readl
+#undef __sn_readq
+
unsigned int
-sn_inb (unsigned long port)
+__sn_inb (unsigned long port)
{
- return __sn_inb(port);
+ return ___sn_inb(port);
}
unsigned int
-sn_inw (unsigned long port)
+__sn_inw (unsigned long port)
{
- return __sn_inw(port);
+ return ___sn_inw(port);
}
unsigned int
-sn_inl (unsigned long port)
+__sn_inl (unsigned long port)
{
- return __sn_inl(port);
+ return ___sn_inl(port);
}
void
-sn_outb (unsigned char val, unsigned long port)
+__sn_outb (unsigned char val, unsigned long port)
{
- __sn_outb(val, port);
+ ___sn_outb(val, port);
}
void
-sn_outw (unsigned short val, unsigned long port)
+__sn_outw (unsigned short val, unsigned long port)
{
- __sn_outw(val, port);
+ ___sn_outw(val, port);
}
void
-sn_outl (unsigned int val, unsigned long port)
+__sn_outl (unsigned int val, unsigned long port)
{
- __sn_outl(val, port);
+ ___sn_outl(val, port);
}
unsigned char
-sn_readb (void *addr)
+__sn_readb (void *addr)
{
- return __sn_readb (addr);
+ return ___sn_readb (addr);
}
unsigned short
-sn_readw (void *addr)
+__sn_readw (void *addr)
{
- return __sn_readw (addr);
+ return ___sn_readw (addr);
}
unsigned int
-sn_readl (void *addr)
+__sn_readl (void *addr)
{
- return __sn_readl (addr);
+ return ___sn_readl (addr);
}
unsigned long
-sn_readq (void *addr)
+__sn_readq (void *addr)
{
- return __sn_readq (addr);
+ return ___sn_readq (addr);
}
-
-
-/* define aliases: */
-
-asm (".global __sn_inb, __sn_inw, __sn_inl");
-asm ("__sn_inb = sn_inb");
-asm ("__sn_inw = sn_inw");
-asm ("__sn_inl = sn_inl");
-
-asm (".global __sn_outb, __sn_outw, __sn_outl");
-asm ("__sn_outb = sn_outb");
-asm ("__sn_outw = sn_outw");
-asm ("__sn_outl = sn_outl");
-
-asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
-asm ("__sn_readb = sn_readb");
-asm ("__sn_readw = sn_readw");
-asm ("__sn_readl = sn_readl");
-asm ("__sn_readq = sn_readq");
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 99b9fa1f80e1..d9e1a83ab8ef 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -30,7 +30,7 @@ config ACPI
bool "Full ACPI Support"
depends on !X86_VISWS
depends on !IA64_HP_SIM
- depends on IA64 || (X86 && ACPI_HT)
+ depends on IA64 || (X86 || ACPI_HT)
default y
---help---
Advanced Configuration and Power Interface (ACPI) support for
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index ba7294275354..d3f29ce924ae 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -1,11 +1,11 @@
/*
* sleep.c - ACPI sleep support.
- *
- * Copyright (c) 2000-2003 Patrick Mochel
*
- * Portions are
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (c) 2000-2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * This file is released under the GPLv2.
+ *
*/
#include <linux/delay.h>
@@ -16,274 +16,151 @@
#include <acpi/acpi_drivers.h>
#include "sleep.h"
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME ("sleep")
-
u8 sleep_states[ACPI_S_STATE_COUNT];
+static struct pm_ops acpi_pm_ops;
+
extern void do_suspend_lowlevel_s4bios(int);
extern void do_suspend_lowlevel(int);
-/**
- * acpi_system_restore_state - OS-specific restoration of state
- * @state: sleep state we're exiting
- *
- * Note that if we're coming back from S4, the memory image should have already
- * been loaded from the disk and is already in place. (Otherwise how else would we
- * be here?).
- */
-acpi_status
-acpi_system_restore_state (
- u32 state)
-{
- /* restore processor state
- * We should only be here if we're coming back from STR or STD.
- * And, in the case of the latter, the memory image should have already
- * been loaded from disk.
- */
- if (state > ACPI_STATE_S1)
- acpi_restore_state_mem();
-
- /* wait for power to come back */
- mdelay(10);
-
- /* turn all the devices back on */
- device_resume(RESUME_POWER_ON);
-
- /* enable interrupts once again */
- ACPI_ENABLE_IRQS();
-
- /* restore device context */
- device_resume(RESUME_RESTORE_STATE);
-
- if (dmi_broken & BROKEN_INIT_AFTER_S1) {
- printk("Broken toshiba laptop -> kicking interrupts\n");
- init_8259A(0);
- }
-
- return AE_OK;
-}
+static u32 acpi_suspend_states[] = {
+ [PM_SUSPEND_ON] = ACPI_STATE_S0,
+ [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
+ [PM_SUSPEND_MEM] = ACPI_STATE_S3,
+ [PM_SUSPEND_DISK] = ACPI_STATE_S4,
+};
/**
- * acpi_system_save_state - save OS specific state and power down devices
- * @state: sleep state we're entering.
+ * acpi_pm_prepare - Do preliminary suspend work.
+ * @state: suspend state we're entering.
*
- * This handles saving all context to memory, and possibly disk.
- * First, we call to the device driver layer to save device state.
- * Once we have that, we save whatevery processor and kernel state we
- * need to memory.
+ * Make sure we support the state. If we do, and we need it, set the
+ * firmware waking vector and do arch-specific nastiness to get the
+ * wakeup code to the waking vector.
*/
-acpi_status
-acpi_system_save_state(
- u32 state)
-{
- int error = 0;
- /* Send notification to devices that they will be suspended.
- * If any device or driver cannot make the transition, either up
- * or down, we'll get an error back.
- */
- error = device_suspend(state, SUSPEND_NOTIFY);
- if (error)
- return AE_ERROR;
-
- if (state < ACPI_STATE_S5) {
-
- /* Tell devices to stop I/O and actually save their state.
- * It is theoretically possible that something could fail,
- * so handle that gracefully..
- */
- error = device_suspend(state, SUSPEND_SAVE_STATE);
- if (error) {
- /* tell devices to restore state if they have
- * it saved and to start taking I/O requests.
- */
- device_resume(RESUME_RESTORE_STATE);
- return error;
- }
-
- /* flush caches */
- ACPI_FLUSH_CPU_CACHE();
-
- /* Do arch specific saving of state. */
- if (state > ACPI_STATE_S1) {
- error = acpi_save_state_mem();
+static int acpi_pm_prepare(u32 state)
+{
+ int error = 0;
+ u32 acpi_state = acpi_suspend_states[state];
- if (!error && (state == ACPI_STATE_S4))
- error = acpi_save_state_disk();
+ if (!sleep_states[acpi_state])
+ return -EPERM;
- if (error) {
- device_resume(RESUME_RESTORE_STATE);
- return error;
- }
- }
+ /* do we have a wakeup address for S2 and S3? */
+ /* Here, we support only S4BIOS, those we set the wakeup address */
+ /* S4OS is only supported for now via swsusp.. */
+ if (state == PM_SUSPEND_MEM || state == PM_SUSPEND_DISK) {
+ if (!acpi_wakeup_address)
+ return -EFAULT;
+ acpi_set_firmware_waking_vector(
+ (acpi_physical_address) acpi_wakeup_address);
}
- /* disable interrupts
- * Note that acpi_suspend -- our caller -- will do this once we return.
- * But, we want it done early, so we don't get any suprises during
- * the device suspend sequence.
- */
- ACPI_DISABLE_IRQS();
+ ACPI_FLUSH_CPU_CACHE();
- /* Unconditionally turn off devices.
- * Obvious if we enter a sleep state.
- * If entering S5 (soft off), this should put devices in a
- * quiescent state.
- */
- error = device_suspend(state, SUSPEND_POWER_DOWN);
+ /* Do arch specific saving of state. */
+ if (state > PM_SUSPEND_STANDBY) {
+ if ((error = acpi_save_state_mem()))
+ goto Err;
+ }
- /* We're pretty screwed if we got an error from this.
- * We try to recover by simply calling our own restore_state
- * function; see above for definition.
- *
- * If it's S5 though, go through with it anyway..
- */
- if (error && state != ACPI_STATE_S5)
- acpi_system_restore_state(state);
+ acpi_enter_sleep_state_prep(acpi_state);
- return error ? AE_ERROR : AE_OK;
+ return 0;
+ Err:
+ acpi_set_firmware_waking_vector(0);
+ return error;
}
-/****************************************************************************
- *
- * FUNCTION: acpi_system_suspend
- *
- * PARAMETERS: %state: Sleep state to enter.
- *
- * RETURN: acpi_status, whether or not we successfully entered and
- * exited sleep.
- *
- * DESCRIPTION: Perform OS-specific action to enter sleep state.
- * This is the final step in going to sleep, per spec. If we
- * know we're coming back (i.e. not entering S5), we save the
- * processor flags. [ We'll have to save and restore them anyway,
- * so we use the arch-agnostic save_flags and restore_flags
- * here.] We then set the place to return to in arch-specific
- * globals using arch_set_return_point. Finally, we call the
- * ACPI function to write the proper values to I/O ports.
+/**
+ * acpi_pm_enter - Actually enter a sleep state.
+ * @state: State we're entering.
*
- ****************************************************************************/
+ * Flush caches and go to sleep. For STR or STD, we have to call
+ * arch-specific assembly, which in turn call acpi_enter_sleep_state().
+ * It's unfortunate, but it works. Please fix if you're feeling frisky.
+ */
-acpi_status
-acpi_system_suspend(
- u32 state)
+static int acpi_pm_enter(u32 state)
{
- acpi_status status = AE_ERROR;
- unsigned long flags = 0;
+ acpi_status status = AE_OK;
+ unsigned long flags = 0;
+ u32 acpi_state = acpi_suspend_states[state];
+ ACPI_FLUSH_CPU_CACHE();
local_irq_save(flags);
-
switch (state)
{
- case ACPI_STATE_S1:
+ case PM_SUSPEND_STANDBY:
barrier();
- status = acpi_enter_sleep_state(state);
+ status = acpi_enter_sleep_state(acpi_state);
break;
- case ACPI_STATE_S2:
- case ACPI_STATE_S3:
+ case PM_SUSPEND_MEM:
do_suspend_lowlevel(0);
break;
- case ACPI_STATE_S4:
- do_suspend_lowlevel_s4bios(0);
+ case PM_SUSPEND_DISK:
+ if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
+ status = acpi_enter_sleep_state(acpi_state);
+ else
+ do_suspend_lowlevel_s4bios(0);
break;
default:
- printk(KERN_WARNING PREFIX "don't know how to handle %d state.\n", state);
- break;
+ return -EINVAL;
}
local_irq_restore(flags);
printk(KERN_DEBUG "Back to C!\n");
- return status;
+ return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
/**
- * acpi_suspend - OS-agnostic system suspend/resume support (S? states)
- * @state: state we're entering
+ * acpi_pm_finish - Finish up suspend sequence.
+ * @state: State we're coming out of.
*
+ * This is called after we wake back up (or if entering the sleep state
+ * failed).
*/
-acpi_status
-acpi_suspend (
- u32 state)
-{
- acpi_status status;
-
- /* Suspend is hard to get right on SMP. */
- if (num_online_cpus() != 1)
- return AE_ERROR;
-
- /* get out if state is invalid */
- if (state < ACPI_STATE_S1 || state > ACPI_STATE_S5)
- return AE_ERROR;
-
- /* Since we handle S4OS via a different path (swsusp), give up if no s4bios. */
- if (state == ACPI_STATE_S4 && !acpi_gbl_FACS->S4bios_f)
- return AE_ERROR;
-
- pm_prepare_console();
-
- /*
- * TBD: S1 can be done without device_suspend. Make a CONFIG_XX
- * to handle however when S1 failed without device_suspend.
- */
- if (freeze_processes()) {
- status = AE_ERROR;
- goto Done;
- }
-
- /* do we have a wakeup address for S2 and S3? */
- /* Here, we support only S4BIOS, those we set the wakeup address */
- /* S4OS is only supported for now via swsusp.. */
- if (state == ACPI_STATE_S2 || state == ACPI_STATE_S3 || state == ACPI_STATE_S4) {
- if (!acpi_wakeup_address)
- return AE_ERROR;
- acpi_set_firmware_waking_vector((acpi_physical_address) acpi_wakeup_address);
- }
-
- status = acpi_system_save_state(state);
- if (!ACPI_SUCCESS(status))
- return status;
- acpi_enter_sleep_state_prep(state);
-
- /* disable interrupts and flush caches */
- ACPI_DISABLE_IRQS();
- ACPI_FLUSH_CPU_CACHE();
-
- /* perform OS-specific sleep actions */
- status = acpi_system_suspend(state);
-
- /* Even if we failed to go to sleep, all of the devices are in an suspended
- * mode. So, we run these unconditionaly to make sure we have a usable system
- * no matter what.
- */
+static int acpi_pm_finish(u32 state)
+{
acpi_leave_sleep_state(state);
- acpi_system_restore_state(state);
- /* make sure interrupts are enabled */
- ACPI_ENABLE_IRQS();
+ /* restore processor state
+ * We should only be here if we're coming back from STR or STD.
+ * And, in the case of the latter, the memory image should have already
+ * been loaded from disk.
+ */
+ if (state > ACPI_STATE_S1)
+ acpi_restore_state_mem();
/* reset firmware waking vector */
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
- Done:
- thaw_processes();
- pm_restore_console();
- return status;
+ if (dmi_broken & BROKEN_INIT_AFTER_S1) {
+ printk("Broken toshiba laptop -> kicking interrupts\n");
+ init_8259A(0);
+ }
+ return 0;
}
+
+static struct pm_ops acpi_pm_ops = {
+ .prepare = acpi_pm_prepare,
+ .enter = acpi_pm_enter,
+ .finish = acpi_pm_finish,
+};
+
static int __init acpi_sleep_init(void)
{
int i = 0;
- ACPI_FUNCTION_TRACE("acpi_system_add_fs");
-
if (acpi_disabled)
- return_VALUE(0);
+ return 0;
printk(KERN_INFO PREFIX "(supports");
for (i=0; i<ACPI_S_STATE_COUNT; i++) {
@@ -294,14 +171,19 @@ static int __init acpi_sleep_init(void)
sleep_states[i] = 1;
printk(" S%d", i);
}
- if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f) {
- sleep_states[i] = 1;
- printk(" S4bios");
+ if (i == ACPI_STATE_S4) {
+ if (acpi_gbl_FACS->S4bios_f) {
+ sleep_states[i] = 1;
+ printk(" S4bios");
+ acpi_pm_ops.pm_disk_mode = PM_DISK_FIRMWARE;
+ } else if (sleep_states[i])
+ acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
}
}
printk(")\n");
- return_VALUE(0);
+ pm_set_ops(&acpi_pm_ops);
+ return 0;
}
late_initcall(acpi_sleep_init);
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 50f0bdfd0992..41cbde00b785 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -13,80 +13,12 @@
#include "sleep.h"
-#define ACPI_SYSTEM_FILE_SLEEP "sleep"
#define ACPI_SYSTEM_FILE_ALARM "alarm"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME ("sleep")
-static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
-{
- int i;
-
- ACPI_FUNCTION_TRACE("acpi_system_sleep_seq_show");
-
- for (i = 0; i <= ACPI_STATE_S5; i++) {
- if (sleep_states[i]) {
- seq_printf(seq,"S%d ", i);
- if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f)
- seq_printf(seq, "S4bios ");
- }
- }
-
- seq_puts(seq, "\n");
-
- return 0;
-}
-
-static int acpi_system_sleep_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_sleep_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_system_write_sleep (
- struct file *file,
- const char *buffer,
- size_t count,
- loff_t *ppos)
-{
- acpi_status status = AE_ERROR;
- char state_string[12] = {'\0'};
- u32 state = 0;
-
- ACPI_FUNCTION_TRACE("acpi_system_write_sleep");
-
- if (count > sizeof(state_string) - 1)
- goto Done;
-
- if (copy_from_user(state_string, buffer, count))
- return_VALUE(-EFAULT);
-
- state_string[count] = '\0';
-
- state = simple_strtoul(state_string, NULL, 0);
-
- if (state < 1 || state > 4)
- goto Done;
-
- if (!sleep_states[state])
- goto Done;
-
-#ifdef CONFIG_SOFTWARE_SUSPEND
- if (state == 4) {
- software_suspend();
- goto Done;
- }
-#endif
- status = acpi_suspend(state);
- Done:
- if (ACPI_FAILURE(status))
- return_VALUE(-EINVAL);
- else
- return_VALUE(count);
-}
-
static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
{
u32 sec, min, hr;
@@ -362,14 +294,6 @@ end:
}
-static struct file_operations acpi_system_sleep_fops = {
- .open = acpi_system_sleep_open_fs,
- .read = seq_read,
- .write = acpi_system_write_sleep,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static struct file_operations acpi_system_alarm_fops = {
.open = acpi_system_alarm_open_fs,
.read = seq_read,
@@ -383,12 +307,6 @@ static int acpi_sleep_proc_init(void)
{
struct proc_dir_entry *entry = NULL;
- /* 'sleep' [R/W]*/
- entry = create_proc_entry(ACPI_SYSTEM_FILE_SLEEP,
- S_IFREG|S_IRUGO|S_IWUSR, acpi_root_dir);
- if (entry)
- entry->proc_fops = &acpi_system_sleep_fops;
-
/* 'alarm' [R/W] */
entry = create_proc_entry(ACPI_SYSTEM_FILE_ALARM,
S_IFREG|S_IRUGO|S_IWUSR, acpi_root_dir);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 6f53840c6b39..611a69accdd0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -25,7 +25,6 @@
#include "power.h"
LIST_HEAD(dpm_active);
-LIST_HEAD(dpm_suspended);
LIST_HEAD(dpm_off);
LIST_HEAD(dpm_off_irq);
@@ -76,6 +75,7 @@ int device_pm_add(struct device * dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev->kobj.name);
+ atomic_set(&dev->power.pm_users,0);
down(&dpm_sem);
list_add_tail(&dev->power.entry,&dpm_active);
device_pm_set_parent(dev,dev->parent);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 8130b04ffe5f..fde72b37f938 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -31,7 +31,6 @@ extern struct semaphore dpm_sem;
* The PM lists.
*/
extern struct list_head dpm_active;
-extern struct list_head dpm_suspended;
extern struct list_head dpm_off;
extern struct list_head dpm_off_irq;
@@ -61,15 +60,12 @@ extern void dpm_sysfs_remove(struct device *);
*/
extern int dpm_resume(void);
extern void dpm_power_up(void);
-extern void dpm_power_up_irq(void);
-extern void power_up_device(struct device *);
extern int resume_device(struct device *);
/*
* suspend.c
*/
extern int suspend_device(struct device *, u32);
-extern int power_down_device(struct device *, u32);
/*
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
index 6c8653205128..9db84a9e41e3 100644
--- a/drivers/base/power/resume.c
+++ b/drivers/base/power/resume.c
@@ -12,7 +12,6 @@
#include "power.h"
extern int sysdev_resume(void);
-extern int sysdev_restore(void);
/**
@@ -23,59 +22,33 @@ extern int sysdev_restore(void);
int resume_device(struct device * dev)
{
- struct device_driver * drv = dev->driver;
-
- if (drv && drv->resume)
- return drv->resume(dev,RESUME_RESTORE_STATE);
+ if (dev->bus && dev->bus->resume)
+ return dev->bus->resume(dev);
return 0;
}
+
/**
- * dpm_resume - Restore all device state.
+ * device_resume - Restore state of each device in system.
*
- * Walk the dpm_suspended list and restore each device. As they are
- * resumed, move the devices to the dpm_active list.
+ * Walk the dpm_off list, remove each entry, resume the device,
+ * then add it to the dpm_active list.
*/
-int dpm_resume(void)
+void device_resume(void)
{
- while(!list_empty(&dpm_suspended)) {
- struct list_head * entry = dpm_suspended.next;
+ down(&dpm_sem);
+ while(!list_empty(&dpm_off)) {
+ struct list_head * entry = dpm_off.next;
struct device * dev = to_device(entry);
list_del_init(entry);
resume_device(dev);
list_add_tail(entry,&dpm_active);
}
- return 0;
-}
-
-
-/**
- * device_pm_resume - Restore state of each device in system.
- *
- * Restore system device state, then common device state. Finally,
- * release dpm_sem, as we're done with device PM.
- */
-
-void device_pm_resume(void)
-{
- sysdev_restore();
- dpm_resume();
up(&dpm_sem);
}
-
-/**
- * power_up_device - Power one device on.
- * @dev: Device.
- */
-
-void power_up_device(struct device * dev)
-{
- struct device_driver * drv = dev->driver;
- if (drv && drv->resume)
- drv->resume(dev,RESUME_POWER_ON);
-}
+EXPORT_SYMBOL(device_resume);
/**
@@ -89,65 +62,31 @@ void power_up_device(struct device * dev)
* Interrupts must be disabled when calling this.
*/
-void dpm_power_up_irq(void)
+void dpm_power_up(void)
{
while(!list_empty(&dpm_off_irq)) {
struct list_head * entry = dpm_off_irq.next;
list_del_init(entry);
- power_up_device(to_device(entry));
- list_add_tail(entry,&dpm_suspended);
- }
-}
-
-
-/**
- * dpm_power_up - Power on most devices.
- *
- * Walk the dpm_off list and power each device up. This is used
- * to power on devices that were able to power down with interrupts
- * enabled.
- */
-
-void dpm_power_up(void)
-{
- while (!list_empty(&dpm_off)) {
- struct list_head * entry = dpm_off.next;
- list_del_init(entry);
- power_up_device(to_device(entry));
- list_add_tail(entry,&dpm_suspended);
+ resume_device(to_device(entry));
+ list_add_tail(entry,&dpm_active);
}
}
/**
- * device_pm_power_up - Turn on all devices.
+ * device_pm_power_up - Turn on all devices that need special attention.
*
- * First, power on system devices, which must happen with interrupts
- * disbled. Then, power on devices that also require interrupts disabled.
- * Turn interrupts back on, and finally power up the rest of the normal
- * devices.
+ * Power on system devices then devices that required we shut them down
+ * with interrupts disabled.
+ * Called with interrupts disabled.
*/
-void device_pm_power_up(void)
+void device_power_up(void)
{
sysdev_resume();
- dpm_power_up_irq();
- local_irq_enable();
dpm_power_up();
}
-/**
- * device_resume - resume all the devices in the system
- * @level: stage of resume process we're at
- *
- * This function is deprecated, and should be replaced with appropriate
- * calls to device_pm_power_up() and device_pm_resume() above.
- */
+EXPORT_SYMBOL(device_power_up);
-void device_resume(u32 level)
-{
- printk("%s is deprecated. Called from:\n",__FUNCTION__);
- dump_stack();
-}
-EXPORT_SYMBOL(device_resume);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 4a4ac9f7764d..05ef979a3791 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -14,8 +14,6 @@ static void runtime_resume(struct device * dev)
{
if (!dev->power.power_state)
return;
-
- power_up_device(dev);
resume_device(dev);
}
@@ -55,19 +53,11 @@ int dpm_runtime_suspend(struct device * dev, u32 state)
if (dev->power.power_state)
dpm_runtime_resume(dev);
- error = suspend_device(dev,state);
- if (!error) {
- error = power_down_device(dev,state);
- if (error)
- goto ErrResume;
+ if (!(error = suspend_device(dev,state)))
dev->power.power_state = state;
- }
Done:
up(&dpm_sem);
return error;
- ErrResume:
- resume_device(dev);
- goto Done;
}
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index cf1f6c0b7b0e..6da8cdd69dce 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include "power.h"
-extern int sysdev_save(u32 state);
extern int sysdev_suspend(u32 state);
/*
@@ -38,41 +37,43 @@ extern int sysdev_suspend(u32 state);
int suspend_device(struct device * dev, u32 state)
{
- struct device_driver * drv = dev->driver;
int error = 0;
- if (drv && drv->suspend)
- error = drv->suspend(dev,state,SUSPEND_SAVE_STATE);
+ if (dev->bus && dev->bus->suspend)
+ error = dev->bus->suspend(dev,state);
if (!error) {
list_del(&dev->power.entry);
- list_add(&dev->power.entry,&dpm_suspended);
+ list_add(&dev->power.entry,&dpm_off);
+ } else if (error == -EAGAIN) {
+ list_del(&dev->power.entry);
+ list_add(&dev->power.entry,&dpm_off_irq);
}
return error;
}
/**
- * device_pm_suspend - Save state and stop all devices in system.
+ * device_suspend - Save state and stop all devices in system.
* @state: Power state to put each device in.
*
* Walk the dpm_active list, call ->suspend() for each device, and move
- * it to dpm_suspended. If we hit a failure with any of the devices, call
- * dpm_resume() above to bring the suspended devices back to life.
+ * it to dpm_off.
+ * Check the return value for each. If it returns 0, then we move the
+ * the device to the dpm_off list. If it returns -EAGAIN, we move it to
+ * the dpm_off_irq list. If we get a different error, try and back out.
*
- * Have system devices save state last.
+ * If we hit a failure with any of the devices, call device_resume()
+ * above to bring the suspended devices back to life.
*
* Note this function leaves dpm_sem held to
* a) block other devices from registering.
* b) prevent other PM operations from happening after we've begun.
* c) make sure we're exclusive when we disable interrupts.
*
- * device_pm_resume() will release dpm_sem after restoring state to
- * all devices (as will this on error). You must call it once you've
- * called device_pm_suspend().
*/
-int device_pm_suspend(u32 state)
+int device_suspend(u32 state)
{
int error = 0;
@@ -83,153 +84,45 @@ int device_pm_suspend(u32 state)
if ((error = suspend_device(dev,state)))
goto Error;
}
-
- if ((error = sysdev_save(state)))
- goto Error;
Done:
+ up(&dpm_sem);
return error;
Error:
- dpm_resume();
- up(&dpm_sem);
+ device_resume();
goto Done;
}
-
-/**
- * power_down_device - Put one device in low power state.
- * @dev: Device.
- * @state: Power state to enter.
- */
-
-int power_down_device(struct device * dev, u32 state)
-{
- struct device_driver * drv = dev->driver;
- int error = 0;
-
- if (drv && drv->suspend)
- error = drv->suspend(dev,state,SUSPEND_POWER_DOWN);
- if (!error) {
- list_del(&dev->power.entry);
- list_add(&dev->power.entry,&dpm_off);
- }
- return error;
-}
-
-
-/**
- * dpm_power_down - Put all devices in low power state.
- * @state: Power state to enter.
- *
- * Walk the dpm_suspended list (with interrupts enabled) and try
- * to power down each each. If any fail with -EAGAIN, they require
- * the call to be done with interrupts disabled. So, we move them to
- * the dpm_off_irq list.
- *
- * If the call succeeds, we move each device to the dpm_off list.
- */
-
-static int dpm_power_down(u32 state)
-{
- while(!list_empty(&dpm_suspended)) {
- struct list_head * entry = dpm_suspended.prev;
- int error;
- error = power_down_device(to_device(entry),state);
- if (error) {
- if (error == -EAGAIN) {
- list_del(entry);
- list_add(entry,&dpm_off_irq);
- continue;
- }
- return error;
- }
- }
- return 0;
-}
+EXPORT_SYMBOL(device_suspend);
/**
- * dpm_power_down_irq - Power down devices without interrupts.
- * @state: State to enter.
+ * device_power_down - Shut down special devices.
+ * @state: Power state to enter.
*
- * Walk the dpm_off_irq list (built by dpm_power_down) and power
- * down each device that requires the call to be made with interrupts
- * disabled.
+ * Walk the dpm_off_irq list, calling ->power_down() for each device that
+ * couldn't power down the device with interrupts enabled. When we're
+ * done, power down system devices.
*/
-static int dpm_power_down_irq(u32 state)
+int device_power_down(u32 state)
{
- struct device * dev;
int error = 0;
+ struct device * dev;
list_for_each_entry_reverse(dev,&dpm_off_irq,power.entry) {
- if ((error = power_down_device(dev,state)))
+ if ((error = suspend_device(dev,state)))
break;
- }
- return error;
-}
-
-
-/**
- * device_pm_power_down - Put all devices in low power state.
- * @state: Power state to enter.
- *
- * Walk the dpm_suspended list, calling ->power_down() for each device.
- * Check the return value for each. If it returns 0, then we move the
- * the device to the dpm_off list. If it returns -EAGAIN, we move it to
- * the dpm_off_irq list. If we get a different error, try and back out.
- *
- * dpm_irq_off is for devices that require interrupts to be disabled to
- * either to power down the device or power it back on.
- *
- * When we're done, we disable interrrupts (!!) and walk the dpm_off_irq
- * list to shut down the devices that need interrupts disabled.
- *
- * This function leaves interrupts disabled on exit, since powering down
- * devices should be the very last thing before the system is put into a
- * low-power state.
- *
- * device_pm_power_on() should be called to re-enable interrupts and power
- * the devices back on.
- */
-
-int device_pm_power_down(u32 state)
-{
- int error = 0;
-
- if ((error = dpm_power_down(state)))
- goto ErrorIRQOn;
- local_irq_disable();
- if ((error = dpm_power_down_irq(state)))
- goto ErrorIRQOff;
-
- sysdev_suspend(state);
+ }
+ if (error)
+ goto Error;
+ if ((error = sysdev_suspend(state)))
+ goto Error;
Done:
return error;
-
- ErrorIRQOff:
- dpm_power_up_irq();
- local_irq_enable();
- ErrorIRQOn:
+ Error:
dpm_power_up();
goto Done;
}
+EXPORT_SYMBOL(device_power_down);
-/**
- * device_suspend - suspend all devices on the device ree
- * @state: state we're entering
- * @level: Stage of suspend sequence we're in.
- *
- *
- * This function is deprecated. Calls should be replaced with
- * appropriate calls to device_pm_suspend() and device_pm_power_down().
- */
-
-int device_suspend(u32 state, u32 level)
-{
-
- printk("%s Called from:\n",__FUNCTION__);
- dump_stack();
- return -EFAULT;
-}
-EXPORT_SYMBOL(device_suspend);
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index e306d2e26363..299b390e243b 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -283,61 +283,16 @@ void sysdev_shutdown(void)
/**
- * sysdev_save - Save system device state
- * @state: Power state we're entering.
- *
- * This is called when the system is going to sleep, but before interrupts
- * have been disabled. This allows system device drivers to allocate and
- * save device state, including sleeping during the process..
- */
-
-int sysdev_save(u32 state)
-{
- struct sysdev_class * cls;
-
- pr_debug("Saving System Device State\n");
-
- down_write(&system_subsys.rwsem);
-
- list_for_each_entry_reverse(cls,&system_subsys.kset.list,
- kset.kobj.entry) {
- struct sys_device * sysdev;
- pr_debug("Saving state for type '%s':\n",cls->kset.kobj.name);
-
- list_for_each_entry(sysdev,&cls->kset.list,kobj.entry) {
- struct sysdev_driver * drv;
-
- pr_debug(" %s\n",sysdev->kobj.name);
-
- list_for_each_entry(drv,&global_drivers,entry) {
- if (drv->save)
- drv->save(sysdev,state);
- }
-
- list_for_each_entry(drv,&cls->drivers,entry) {
- if (drv->save)
- drv->save(sysdev,state);
- }
-
- if (cls->save)
- cls->save(sysdev,state);
- }
- }
- up_write(&system_subsys.rwsem);
- return 0;
-}
-
-
-/**
* sysdev_suspend - Suspend all system devices.
* @state: Power state to enter.
*
* We perform an almost identical operation as sys_device_shutdown()
- * above, though calling ->suspend() instead.
+ * above, though calling ->suspend() instead. Interrupts are disabled
+ * when this called. Devices are responsible for both saving state and
+ * quiescing or powering down the device.
*
- * Note: Interrupts are disabled when called, so we can't sleep when
- * trying to get the subsystem's rwsem. If that happens, print a nasty
- * warning and return an error.
+ * This is only called by the device PM core, so we let them handle
+ * all synchronization.
*/
int sysdev_suspend(u32 state)
@@ -346,11 +301,6 @@ int sysdev_suspend(u32 state)
pr_debug("Suspending System Devices\n");
- if (!down_write_trylock(&system_subsys.rwsem)) {
- printk("%s: Cannot acquire semaphore; Failing\n",__FUNCTION__);
- return -EFAULT;
- }
-
list_for_each_entry_reverse(cls,&system_subsys.kset.list,
kset.kobj.entry) {
struct sys_device * sysdev;
@@ -378,8 +328,6 @@ int sysdev_suspend(u32 state)
cls->suspend(sysdev,state);
}
}
- up_write(&system_subsys.rwsem);
-
return 0;
}
@@ -390,7 +338,7 @@ int sysdev_suspend(u32 state)
* Similar to sys_device_suspend(), but we iterate the list forwards
* to guarantee that parent devices are resumed before their children.
*
- * Note: Interrupts are disabled when called.
+ * Note: Interrupts are disabled when called.
*/
int sysdev_resume(void)
@@ -399,9 +347,6 @@ int sysdev_resume(void)
pr_debug("Resuming System Devices\n");
- if(!down_write_trylock(&system_subsys.rwsem))
- return -EFAULT;
-
list_for_each_entry(cls,&system_subsys.kset.list,kset.kobj.entry) {
struct sys_device * sysdev;
@@ -429,50 +374,6 @@ int sysdev_resume(void)
}
}
- up_write(&system_subsys.rwsem);
- return 0;
-}
-
-
-/**
- * sysdev_restore - Restore system device state
- *
- * This is called during a suspend/resume cycle last, after interrupts
- * have been re-enabled. This is intended for auxillary drivers, etc,
- * that may sleep when restoring state.
- */
-
-int sysdev_restore(void)
-{
- struct sysdev_class * cls;
-
- down_write(&system_subsys.rwsem);
- pr_debug("Restoring System Device State\n");
-
- list_for_each_entry(cls,&system_subsys.kset.list,kset.kobj.entry) {
- struct sys_device * sysdev;
-
- pr_debug("Restoring state for type '%s':\n",cls->kset.kobj.name);
- list_for_each_entry(sysdev,&cls->kset.list,kobj.entry) {
- struct sysdev_driver * drv;
- pr_debug(" %s\n",sysdev->kobj.name);
-
- if (cls->restore)
- cls->restore(sysdev);
-
- list_for_each_entry(drv,&cls->drivers,entry) {
- if (drv->restore)
- drv->restore(sysdev);
- }
-
- list_for_each_entry(drv,&global_drivers,entry) {
- if (drv->restore)
- drv->restore(sysdev);
- }
- }
- }
-
- up_write(&system_subsys.rwsem);
return 0;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d16c54b65102..60f1508b24ab 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,7 +6,7 @@ menu "Character devices"
config VT
bool "Virtual terminal" if EMBEDDED
- requires INPUT=y
+ select INPUT
default y
---help---
If you say Y here, you will get support for terminal devices with
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index cf0b5307a8de..4e7a197f6611 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3330,10 +3330,6 @@ static ide_driver_t ide_cdrom_driver = {
.drives = LIST_HEAD_INIT(ide_cdrom_driver.drives),
.start_power_step = ide_cdrom_start_power_step,
.complete_power_step = ide_cdrom_complete_power_step,
- .gen_driver = {
- .suspend = generic_ide_suspend,
- .resume = generic_ide_resume,
- }
};
static int idecd_open(struct inode * inode, struct file * file)
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 30865145af3e..1217e840ac02 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -1732,10 +1732,6 @@ static ide_driver_t idedisk_driver = {
.drives = LIST_HEAD_INIT(idedisk_driver.drives),
.start_power_step = idedisk_start_power_step,
.complete_power_step = idedisk_complete_power_step,
- .gen_driver = {
- .suspend = generic_ide_suspend,
- .resume = generic_ide_resume,
- }
};
static int idedisk_open(struct inode *inode, struct file *filp)
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 5d19aa20abcd..dd0ad3ff074c 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1534,16 +1534,13 @@ int ata_attach(ide_drive_t *drive)
EXPORT_SYMBOL(ata_attach);
-int generic_ide_suspend(struct device *dev, u32 state, u32 level)
+static int generic_ide_suspend(struct device *dev, u32 state)
{
ide_drive_t *drive = dev->driver_data;
struct request rq;
struct request_pm_state rqpm;
ide_task_t args;
- if (level == dev->power_state || level != SUSPEND_SAVE_STATE)
- return 0;
-
memset(&rq, 0, sizeof(rq));
memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args));
@@ -1556,18 +1553,13 @@ int generic_ide_suspend(struct device *dev, u32 state, u32 level)
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
-EXPORT_SYMBOL(generic_ide_suspend);
-
-int generic_ide_resume(struct device *dev, u32 level)
+static int generic_ide_resume(struct device *dev)
{
ide_drive_t *drive = dev->driver_data;
struct request rq;
struct request_pm_state rqpm;
ide_task_t args;
- if (level == dev->power_state || level != RESUME_RESTORE_STATE)
- return 0;
-
memset(&rq, 0, sizeof(rq));
memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args));
@@ -1580,8 +1572,6 @@ int generic_ide_resume(struct device *dev, u32 level)
return ide_do_drive_cmd(drive, &rq, ide_head_wait);
}
-EXPORT_SYMBOL(generic_ide_resume);
-
int generic_ide_ioctl(struct block_device *bdev, unsigned int cmd,
unsigned long arg)
{
@@ -2594,6 +2584,8 @@ EXPORT_SYMBOL(ide_probe);
struct bus_type ide_bus_type = {
.name = "ide",
+ .suspend = generic_ide_suspend,
+ .resume = generic_ide_resume,
};
/*
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 63ceffd78748..a73b16eefd58 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -647,7 +647,6 @@ static void __exit exit_sedlbauer_cs(void)
/* XXX: this really needs to move into generic code.. */
while (dev_list != NULL) {
- del_timer(&dev_list->release);
if (dev_list->state & DEV_CONFIG)
sedlbauer_release(dev_list);
sedlbauer_detach(dev_list);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ad105b881410..2dac57b618a3 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -308,8 +308,10 @@ static void add_us_sample(struct mm_struct * mm, struct op_sample * s)
cookie = lookup_dcookie(mm, s->eip, &offset);
- if (!cookie)
+ if (!cookie) {
+ atomic_inc(&oprofile_stats.sample_lost_no_mapping);
return;
+ }
if (cookie != last_cookie) {
add_cookie_switch(cookie);
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 21ae41a54e90..b7466377afd4 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -19,6 +19,17 @@ unsigned long fs_cpu_buffer_size = 8192;
unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+static ssize_t pointer_size_read(struct file * file, char * buf, size_t count, loff_t * offset)
+{
+ return oprofilefs_ulong_to_user((unsigned long)sizeof(void *), buf, count, offset);
+}
+
+
+static struct file_operations pointer_size_fops = {
+ .read = pointer_size_read,
+};
+
+
static ssize_t cpu_type_read(struct file * file, char * buf, size_t count, loff_t * offset)
{
return oprofilefs_str_to_user(oprofile_ops->cpu_type, buf, count, offset);
@@ -32,7 +43,7 @@ static struct file_operations cpu_type_fops = {
static ssize_t enable_read(struct file * file, char * buf, size_t count, loff_t * offset)
{
- return oprofilefs_ulong_to_user(&oprofile_started, buf, count, offset);
+ return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
}
@@ -85,6 +96,7 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root)
oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
+ oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
oprofile_create_stats_files(sb, root);
if (oprofile_ops->create_files)
oprofile_ops->create_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 5624c1e017e0..f01e1935072c 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -32,6 +32,7 @@ void oprofile_reset_stats(void)
}
atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+ atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
}
@@ -70,6 +71,8 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
&oprofile_stats.sample_lost_no_mm);
+ oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
+ &oprofile_stats.sample_lost_no_mapping);
oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
&oprofile_stats.event_lost_overflow);
}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index e3f67d5c0910..9f4d4d2046db 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -14,6 +14,7 @@
struct oprofile_stat_struct {
atomic_t sample_lost_no_mm;
+ atomic_t sample_lost_no_mapping;
atomic_t event_lost_overflow;
};
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index c82630ec1819..ed1efe61f6e3 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -69,7 +69,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char * buf, size_t count, loff_
#define TMPBUFSIZE 50
-ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char * buf, size_t count, loff_t * offset)
{
char tmpbuf[TMPBUFSIZE];
size_t maxlen;
@@ -78,7 +78,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count,
return 0;
spin_lock(&oprofilefs_lock);
- maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", *val);
+ maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
spin_unlock(&oprofilefs_lock);
if (maxlen > TMPBUFSIZE)
maxlen = TMPBUFSIZE;
@@ -122,7 +122,8 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const * buf, size_t cou
static ssize_t ulong_read_file(struct file * file, char * buf, size_t count, loff_t * offset)
{
- return oprofilefs_ulong_to_user(file->private_data, buf, count, offset);
+ unsigned long * val = file->private_data;
+ return oprofilefs_ulong_to_user(*val, buf, count, offset);
}
@@ -212,9 +213,8 @@ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
static ssize_t atomic_read_file(struct file * file, char * buf, size_t count, loff_t * offset)
{
- atomic_t * aval = file->private_data;
- unsigned long val = atomic_read(aval);
- return oprofilefs_ulong_to_user(&val, buf, count, offset);
+ atomic_t * val = file->private_data;
+ return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f0f3d80ce60d..640681c93bdc 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -158,32 +158,23 @@ static int pci_device_remove(struct device * dev)
return 0;
}
-static int pci_device_suspend(struct device * dev, u32 state, u32 level)
+static int pci_device_suspend(struct device * dev, u32 state)
{
struct pci_dev * pci_dev = to_pci_dev(dev);
- int error = 0;
+ struct pci_driver * drv = pci_dev->driver;
- if (pci_dev->driver) {
- if (level == SUSPEND_SAVE_STATE && pci_dev->driver->save_state)
- error = pci_dev->driver->save_state(pci_dev,state);
- else if (level == SUSPEND_POWER_DOWN && pci_dev->driver->suspend)
- error = pci_dev->driver->suspend(pci_dev,state);
- }
- return error;
+ if (drv && drv->suspend)
+ return drv->suspend(pci_dev,state);
+ return 0;
}
-static int pci_device_resume(struct device * dev, u32 level)
+static int pci_device_resume(struct device * dev)
{
struct pci_dev * pci_dev = to_pci_dev(dev);
+ struct pci_driver * drv = pci_dev->driver;
- if (pci_dev->driver) {
- /* We may not call PCI drivers resume at
- RESUME_POWER_ON because interrupts are not yet
- working at that point. Calling resume at
- RESUME_RESTORE_STATE seems like solution. */
- if (level == RESUME_RESTORE_STATE && pci_dev->driver->resume)
- pci_dev->driver->resume(pci_dev);
- }
+ if (drv && drv->resume)
+ drv->resume(pci_dev);
return 0;
}
@@ -349,8 +340,6 @@ pci_register_driver(struct pci_driver *drv)
drv->driver.name = drv->name;
drv->driver.bus = &pci_bus_type;
drv->driver.probe = pci_device_probe;
- drv->driver.resume = pci_device_resume;
- drv->driver.suspend = pci_device_suspend;
drv->driver.remove = pci_device_remove;
drv->driver.kobj.ktype = &pci_driver_kobj_type;
pci_init_dynids(&drv->dynids);
@@ -496,6 +485,8 @@ struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
.hotplug = pci_hotplug,
+ .suspend = pci_device_suspend,
+ .resume = pci_device_resume,
};
static int __init pci_driver_init(void)
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 2a58939fb9ba..153a4b0ff288 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,14 @@
+Version 0.89
+------------
+Fix oops on write to dead tcp session. Remove error log write for case when file open
+O_CREAT but not O_EXCL
+
+Version 0.88
+------------
+Fix non-POSIX behavior on rename of open file and delete of open file by taking
+advantage of trans2 SetFileInfo rename facility if available on target server.
+Retry on ENOSPC and EAGAIN socket errors.
+
Version 0.87
------------
Fix oops on big endian readdir. Set blksize to be even power of two (2**blkbits) to fix
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index a45b55dfeb0d..decc2fdbffaf 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -197,6 +197,8 @@ static read_proc_t ntlmv2_enabled_read;
static write_proc_t ntlmv2_enabled_write;
static read_proc_t packet_signing_enabled_read;
static write_proc_t packet_signing_enabled_write;
+static read_proc_t quotaEnabled_read;
+static write_proc_t quotaEnabled_write;
void
cifs_proc_init(void)
@@ -233,6 +235,11 @@ cifs_proc_init(void)
if (pde)
pde->write_proc = oplockEnabled_write;
+ pde = create_proc_read_entry("QuotaEnabled", 0, proc_fs_cifs,
+ quotaEnabled_read, 0);
+ if (pde)
+ pde->write_proc = quotaEnabled_write;
+
pde =
create_proc_read_entry("MultiuserMount", 0, proc_fs_cifs,
multiuser_mount_read, 0);
@@ -362,6 +369,47 @@ oplockEnabled_write(struct file *file, const char *buffer,
}
static int
+quotaEnabled_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+
+ len = sprintf(page, "%d\n", quotaEnabled);
+/* could also check if quotas are enabled in kernel
+ as a whole first */
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+static int
+quotaEnabled_write(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char c;
+ int rc;
+
+ rc = get_user(c, buffer);
+ if (rc)
+ return rc;
+ if (c == '0' || c == 'n' || c == 'N')
+ quotaEnabled = 0;
+ else if (c == '1' || c == 'y' || c == 'Y')
+ quotaEnabled = 1;
+
+ return count;
+}
+
+
+static int
lookupFlag_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 85b6c1ea67d1..c25067dac20b 100755
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -94,9 +94,14 @@ int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
return 0;
+ if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
+ struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)cifs_pdu;
+ if(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
+ return 0;
+ }
+
/* BB what if signatures are supposed to be on for session but server does not
send one? BB */
- /* BB also do not verify oplock breaks for signature */
/* Do not need to verify session setups with signature "BSRSPYL " */
if(memcmp(cifs_pdu->Signature.SecuritySignature,"BSRSPYL ",8)==0)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0f9af8182c51..f600f27df21f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -42,12 +42,17 @@
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of all SMB PDUs */
+#ifdef CIFS_QUOTA
+static struct quotactl_ops cifs_quotactl_ops;
+#endif
+
extern struct file_system_type cifs_fs_type;
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
+unsigned int quotaEnabled = 0;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = 0;
@@ -92,7 +97,9 @@ cifs_read_super(struct super_block *sb, void *data,
sb->s_op = &cifs_super_ops;
/* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
-
+#ifdef CIFS_QUOTA
+ sb->s_qcop = &cifs_quotactl_ops;
+#endif
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
inode = iget(sb, ROOT_I);
@@ -247,6 +254,110 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
return 0;
}
+#ifdef CIFS_QUOTA
+int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
+ struct fs_disk_quota * pdquota)
+{
+ int xid;
+ int rc = 0;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsTconInfo *pTcon;
+
+ if(cifs_sb)
+ pTcon = cifs_sb->tcon;
+ else
+ return -EIO;
+
+
+ xid = GetXid();
+ if(pTcon) {
+ cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+ } else {
+ return -EIO;
+ }
+
+ FreeXid(xid);
+ return rc;
+}
+
+int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
+ struct fs_disk_quota * pdquota)
+{
+ int xid;
+ int rc = 0;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsTconInfo *pTcon;
+
+ if(cifs_sb)
+ pTcon = cifs_sb->tcon;
+ else
+ return -EIO;
+
+ xid = GetXid();
+ if(pTcon) {
+ cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+ } else {
+ rc = -EIO;
+ }
+
+ FreeXid(xid);
+ return rc;
+}
+
+int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
+{
+ int xid;
+ int rc = 0;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsTconInfo *pTcon;
+
+ if(cifs_sb)
+ pTcon = cifs_sb->tcon;
+ else
+ return -EIO;
+
+ xid = GetXid();
+ if(pTcon) {
+ cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
+ } else {
+ rc = -EIO;
+ }
+
+ FreeXid(xid);
+ return rc;
+}
+
+int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
+{
+ int xid;
+ int rc = 0;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsTconInfo *pTcon;
+
+ if(cifs_sb) {
+ pTcon = cifs_sb->tcon;
+ } else {
+ return -EIO;
+ }
+ xid = GetXid();
+ if(pTcon) {
+ cFYI(1,("pqstats %p",qstats));
+ } else {
+ rc = -EIO;
+ }
+
+ FreeXid(xid);
+ return rc;
+}
+
+static struct quotactl_ops cifs_quotactl_ops = {
+ .set_xquota = cifs_xquota_set,
+ .get_xquota = cifs_xquota_set,
+ .set_xstate = cifs_xstate_set,
+ .get_xstate = cifs_xstate_get,
+};
+#endif
+
struct super_operations cifs_super_ops = {
.read_inode = cifs_read_inode,
.put_super = cifs_put_super,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 64289691f60c..1ff8862d39e9 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -347,6 +347,7 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
have the uid/password or Kerberos credential
or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled;
+GLOBAL_EXTERN unsigned int quotaEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 6d0c5eb08a9c..b68d917a6b1f 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -68,6 +68,8 @@
#define NT_TRANSACT_NOTIFY_CHANGE 0x04
#define NT_TRANSACT_RENAME 0x05
#define NT_TRANSACT_QUERY_SECURITY_DESC 0x06
+#define NT_TRANSACT_GET_USER_QUOTA 0x07
+#define NT_TRANSACT_SET_USER_QUOTA 0x08
#define MAX_CIFS_HDR_SIZE 256 /* chained NTCreateXReadX will probably be biggest */
@@ -867,6 +869,52 @@ typedef struct smb_com_transaction_ioctl_rsp {
__u8 Pad[3];
} TRANSACT_IOCTL_RSP;
+typedef struct smb_com_transaction_change_notify_req {
+ struct smb_hdr hdr; /* wct = 23 */
+ __u8 MaxSetupCount;
+ __u16 Reserved;
+ __u32 TotalParameterCount;
+ __u32 TotalDataCount;
+ __u32 MaxParameterCount;
+ __u32 MaxDataCount;
+ __u32 ParameterCount;
+ __u32 ParameterOffset;
+ __u32 DataCount;
+ __u32 DataOffset;
+ __u8 SetupCount; /* four setup words follow subcommand */
+ /* SNIA spec incorrectly included spurious pad here */
+ __u16 SubCommand;/* 4 = Change Notify */
+ __u32 CompletionFilter; /* operation to monitor */
+ __u16 Fid;
+ __u8 WatchTree; /* 1 = Monitor subdirectories */
+ __u16 ByteCount;
+ __u8 Pad[3];
+ __u8 Data[1];
+} TRANSACT_CHANGE_NOTIFY_REQ;
+
+/* Completion Filter flags */
+#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001
+#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002
+#define FILE_NOTIFY_CHANGE_NAME 0x00000003
+#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004
+#define FILE_NOTIFY_CHANGE_SIZE 0x00000008
+#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010
+#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020
+#define FILE_NOTIFY_CHANGE_CREATION 0x00000040
+#define FILE_NOTIFY_CHANGE_EA 0x00000080
+#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100
+#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200
+#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400
+#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
+
+/* response contains array of the following structures */
+struct file_notify_information {
+ __u32 NextEntryOffset;
+ __u32 Action;
+ __u32 FileNameLength;
+ __u8 FileName[1];
+};
+
struct reparse_data {
__u32 ReparseTag;
__u16 ReparseDataLength;
@@ -878,6 +926,21 @@ struct reparse_data {
char LinkNamesBuf[1];
};
+struct cifs_quota_data {
+ __u32 rsrvd1; /* 0 */
+ __u32 sid_size;
+ __u64 rsrvd2; /* 0 */
+ __u64 space_used;
+ __u64 soft_limit;
+ __u64 hard_limit;
+ char sid[1]; /* variable size? */
+};
+
+/* quota sub commands */
+#define QUOTA_LIST_CONTINUE 0
+#define QUOTA_LIST_START 0x100
+#define QUOTA_FOR_SID 0x101
+
typedef union smb_com_transaction2 {
struct {
struct smb_hdr hdr; /* wct = 14+ */
@@ -919,35 +982,36 @@ typedef union smb_com_transaction2 {
} TRANSACTION2;
/* PathInfo/FileInfo infolevels */
-#define SMB_INFO_STANDARD 1
-#define SMB_INFO_IS_NAME_VALID 6
-#define SMB_QUERY_FILE_BASIC_INFO 0x101
-#define SMB_QUERY_FILE_STANDARD_INFO 0x102
-#define SMB_QUERY_FILE_NAME_INFO 0x104
-#define SMB_QUERY_FILE_ALLOCATION_INFO 0x105
-#define SMB_QUERY_FILE_END_OF_FILEINFO 0x106
-#define SMB_QUERY_FILE_ALL_INFO 0x107
-#define SMB_QUERY_ALT_NAME_INFO 0x108
-#define SMB_QUERY_FILE_STREAM_INFO 0x109
+#define SMB_INFO_STANDARD 1
+#define SMB_INFO_IS_NAME_VALID 6
+#define SMB_QUERY_FILE_BASIC_INFO 0x101
+#define SMB_QUERY_FILE_STANDARD_INFO 0x102
+#define SMB_QUERY_FILE_NAME_INFO 0x104
+#define SMB_QUERY_FILE_ALLOCATION_INFO 0x105
+#define SMB_QUERY_FILE_END_OF_FILEINFO 0x106
+#define SMB_QUERY_FILE_ALL_INFO 0x107
+#define SMB_QUERY_ALT_NAME_INFO 0x108
+#define SMB_QUERY_FILE_STREAM_INFO 0x109
#define SMB_QUERY_FILE_COMPRESSION_INFO 0x10B
-#define SMB_QUERY_FILE_UNIX_BASIC 0x200
-#define SMB_QUERY_FILE_UNIX_LINK 0x201
+#define SMB_QUERY_FILE_UNIX_BASIC 0x200
+#define SMB_QUERY_FILE_UNIX_LINK 0x201
-#define SMB_SET_FILE_BASIC_INFO 0x101
-#define SMB_SET_FILE_DISPOSITION_INFO 0x102
-#define SMB_SET_FILE_ALLOCATION_INFO 0x103
-#define SMB_SET_FILE_END_OF_FILE_INFO 0x104
+#define SMB_SET_FILE_BASIC_INFO 0x101
+#define SMB_SET_FILE_DISPOSITION_INFO 0x102
+#define SMB_SET_FILE_ALLOCATION_INFO 0x103
+#define SMB_SET_FILE_END_OF_FILE_INFO 0x104
#define SMB_SET_FILE_UNIX_BASIC 0x200
#define SMB_SET_FILE_UNIX_LINK 0x201
#define SMB_SET_FILE_UNIX_HLINK 0x203
#define SMB_SET_FILE_BASIC_INFO2 0x3ec
-#define SMB_SET_FILE_ALLOCATION_INFO2 0x3fb
-#define SMB_SET_FILE_END_OF_FILE_INFO2 0x3fc
+#define SMB_SET_FILE_RENAME_INFORMATION 0x3f2
+#define SMB_SET_FILE_ALLOCATION_INFO2 0x3fb
+#define SMB_SET_FILE_END_OF_FILE_INFO2 0x3fc
/* Find File infolevels */
-#define SMB_FIND_FILE_DIRECTORY_INFO 0x101
+#define SMB_FIND_FILE_DIRECTORY_INFO 0x101
#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
-#define SMB_FIND_FILE_NAMES_INFO 0x103
+#define SMB_FIND_FILE_NAMES_INFO 0x103
#define SMB_FIND_FILE_BOTH_DIRECTORY_INFO 0x104
#define SMB_FIND_FILE_UNIX 0x202
@@ -1036,6 +1100,13 @@ typedef struct smb_com_transaction2_spi_rsp {
__u16 Reserved2; /* parameter word reserved - present for infolevels > 100 */
} TRANSACTION2_SPI_RSP;
+struct set_file_rename {
+ __u32 overwrite; /* 1 = overwrite dest */
+ __u32 root_fid; /* zero */
+ __u32 target_name_len;
+ char target_name[0]; /* Must be unicode */
+};
+
struct smb_com_transaction2_sfi_req {
struct smb_hdr hdr; /* wct = 15 */
__u16 TotalParameterCount;
@@ -1057,7 +1128,7 @@ struct smb_com_transaction2_sfi_req {
__u16 ByteCount;
__u8 Pad;
__u16 Pad1;
- __u16 Fid;
+ __u16 Fid;
__u16 InformationLevel;
__u16 Reserved4;
};
@@ -1395,6 +1466,9 @@ typedef struct {
__u16 MinorVersionNumber;
__u64 Capability;
} FILE_SYSTEM_UNIX_INFO; /* Unix extensions info, level 0x200 */
+/* Linux/Unix extensions capability flags */
+#define CIFS_UNIX_FCNTL_CAP 0x00000001 /* support for fcntl locks */
+#define CIFS_UNIX_POSIX_ACL_CAP 0x00000002
/* DeviceType Flags */
#define FILE_DEVICE_CD_ROM 0x00000002
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 3c8e4cf8f4ed..1c9d8ae664ab 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -170,6 +170,8 @@ extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage);
+extern int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
+ int netfid, char * target_name, const struct nls_table *nls_codepage);
extern int CIFSCreateHardLink(const int xid,
struct cifsTconInfo *tcon,
const char *fromName, const char *toName,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index bdf8b82c7c96..9643932c9df2 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -526,6 +526,10 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
if (rc)
return rc;
+ /* tcon and ses pointer are checked in smb_init */
+ if (tcon->ses->server == NULL)
+ return -ECONNABORTED;
+
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
@@ -584,6 +588,9 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
(void **) &pSMBr);
if (rc)
return rc;
+ /* tcon and ses pointer are checked in smb_init */
+ if (tcon->ses->server == NULL)
+ return -ECONNABORTED;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
@@ -639,8 +646,9 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
if (rc)
return rc;
- if(lockType == LOCKING_ANDX_OPLOCK_RELEASE)
+ if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
timeout = -1; /* no response expected */
+ }
pSMB->NumberOfLocks = cpu_to_le32(numLock);
pSMB->NumberOfUnlocks = cpu_to_le32(numUnlock);
@@ -754,7 +762,7 @@ CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in RMDir = %d", rc));
+ cFYI(1, ("Send error in rename = %d", rc));
}
if (pSMB)
buf_release(pSMB);
@@ -762,6 +770,81 @@ CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
return rc;
}
+int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
+ int netfid, char * target_name, const struct nls_table * nls_codepage)
+{
+ struct smb_com_transaction2_sfi_req *pSMB = NULL;
+ struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+ struct set_file_rename * rename_info;
+ char *data_offset;
+ char dummy_string[30];
+ int rc = 0;
+ int bytes_returned = 0;
+ int len_of_str;
+
+ cFYI(1, ("Rename to File by handle"));
+
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ pSMB->ParameterCount = 6;
+ pSMB->MaxSetupCount = 0;
+ pSMB->Reserved = 0;
+ pSMB->Flags = 0;
+ pSMB->Timeout = 0;
+ pSMB->Reserved2 = 0;
+ pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_sfi_req,
+ Fid) - 4;
+ pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
+
+ data_offset = (char *) (&pSMB->hdr.Protocol) + pSMB->DataOffset;
+ rename_info = (struct set_file_rename *) data_offset;
+ pSMB->MaxParameterCount = cpu_to_le16(2);
+ pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+ pSMB->SetupCount = 1;
+ pSMB->Reserved3 = 0;
+ pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+ pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount;
+ pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
+ pSMB->TotalParameterCount = pSMB->ParameterCount;
+ pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
+ pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
+ /* construct random name ".cifs_tmp<inodenum><mid>" */
+ rename_info->overwrite = cpu_to_le32(1);
+ rename_info->root_fid = 0;
+ /* unicode only call */
+ if(target_name == NULL) {
+ sprintf(dummy_string,"cifs%x",pSMB->hdr.Mid);
+ len_of_str = cifs_strtoUCS((wchar_t *) rename_info->target_name, dummy_string, 24, nls_codepage);
+ } else {
+ len_of_str = cifs_strtoUCS((wchar_t *) rename_info->target_name, target_name, 530, nls_codepage);
+ }
+ cFYI(1,("len of str: %d", len_of_str)); /* BB removeme BB */
+ rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
+ pSMB->DataCount = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str) + 2;
+ pSMB->ByteCount += pSMB->DataCount;
+ pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
+ pSMB->TotalDataCount = pSMB->DataCount;
+ pSMB->Fid = netfid;
+ pSMB->InformationLevel =
+ cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += pSMB->ByteCount;
+ pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
+ rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc) {
+ cFYI(1,("Send error in Rename (by file handle) = %d", rc));
+ }
+
+ if (pSMB)
+ buf_release(pSMB);
+ return rc;
+}
+
+
int
CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
const char *fromName, const char *toName,
@@ -1979,7 +2062,7 @@ CIFSSMBQFSDeviceInfo(int xid, struct cifsTconInfo *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in QFSDeviceInfo = %d", rc));
+ cFYI(1, ("Send error in QFSDeviceInfo = %d", rc));
} else { /* decode response */
pSMBr->DataOffset = le16_to_cpu(pSMBr->DataOffset);
if ((pSMBr->ByteCount < sizeof (FILE_SYSTEM_DEVICE_INFO))
@@ -2202,10 +2285,10 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
- pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
parm_data =
(struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
pSMB->DataOffset);
+ pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset); /* now safe to change to le */
parm_data->FileSize = size;
pSMB->Fid = fid;
if(SetAllocation) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e3ecff7187b9..81151b425694 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -102,7 +102,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
list_for_each(tmp, &GlobalTreeConnectionList) {
tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
- if(tcon->ses->server == server) {
+ if((tcon) && (tcon->ses) && (tcon->ses->server == server)) {
tcon->tidStatus = CifsNeedReconnect;
}
}
@@ -233,9 +233,9 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
(checkSMBhdr
(smb_buffer, smb_buffer->Mid))) {
cERROR(1,
- (KERN_ERR
- "Invalid size or format for SMB found with length %d and pdu_lenght %d",
+ ("Invalid size or format for SMB found with length %d and pdu_lenght %d",
length, pdu_length));
+ cifs_dump_mem("Received Data is: ",temp,sizeof(struct smb_hdr));
/* BB fix by finding next smb signature - and reading off data until next smb ? BB */
/* BB add reconnect here */
@@ -728,6 +728,7 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket)
*csocket = NULL;
return rc;
} else {
+ /* BB other socket options to set KEEPALIVE, timeouts? NODELAY? */
cFYI(1,("Socket created"));
}
}
@@ -783,6 +784,7 @@ ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket)
(struct sockaddr *) psin_server,
sizeof (struct sockaddr_in6),0);
if (rc >= 0) {
+ /* BB other socket options to set KEEPALIVE, timeouts? NODELAY? */
return rc;
}
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ba00aa2672e3..7795f24d90b4 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -51,7 +51,6 @@ build_path_from_dentry(struct dentry *direntry)
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
- cFYI(1, (" len %d ", namelen));
temp = temp->d_parent;
}
namelen += 1; /* allow for trailing null */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a5153c3e3c6f..49f9c88c65cc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -76,13 +76,14 @@ cifs_open(struct inode *inode, struct file *file)
FreeXid(xid);
return rc;
} else {
- cERROR(1,("could not find file instance for new file %p ",file));
+ if(file->f_flags & O_EXCL)
+ cERROR(1,("could not find file instance for new file %p ",file));
}
}
full_path = build_path_from_dentry(file->f_dentry);
- cFYI(1, (" inode = 0x%p file flags are %x for %s", inode, file->f_flags,full_path));
+ cFYI(1, (" inode = 0x%p file flags are 0x%x for %s", inode, file->f_flags,full_path));
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
desiredAccess = GENERIC_READ;
else if ((file->f_flags & O_ACCMODE) == O_WRONLY)
@@ -1121,9 +1122,6 @@ construct_dentry(struct qstr *qstring, struct file *file,
}
tmp_dentry->d_time = jiffies;
- (*ptmp_inode)->i_blksize =
- (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;
- cFYI(1, ("i_blksize = %ld", (*ptmp_inode)->i_blksize));
*pnew_dentry = tmp_dentry;
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f8bece08c6aa..62625e532132 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -353,8 +353,9 @@ cifs_unlink(struct inode *inode, struct dentry *direntry)
CREATE_NOT_DIR | CREATE_DELETE_ON_CLOSE,
&netfid, &oplock, NULL, cifs_sb->local_nls);
if(rc==0) {
+ CIFSSMBRenameOpenFile(xid,pTcon,netfid,
+ NULL, cifs_sb->local_nls);
CIFSSMBClose(xid, pTcon, netfid);
- /* BB In the future chain close with the NTCreateX to narrow window */
direntry->d_inode->i_nlink--;
}
} else if (rc == -EACCES) {
@@ -370,8 +371,22 @@ cifs_unlink(struct inode *inode, struct dentry *direntry)
}
if(rc==0) {
rc = CIFSSMBDelFile(xid, pTcon, full_path, cifs_sb->local_nls);
- if (!rc)
+ if (!rc) {
direntry->d_inode->i_nlink--;
+ } else if (rc == -ETXTBSY) {
+ int oplock = FALSE;
+ __u16 netfid;
+
+ rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, DELETE,
+ CREATE_NOT_DIR | CREATE_DELETE_ON_CLOSE,
+ &netfid, &oplock, NULL, cifs_sb->local_nls);
+ if(rc==0) {
+ CIFSSMBRenameOpenFile(xid,pTcon,netfid,NULL,cifs_sb->local_nls);
+ CIFSSMBClose(xid, pTcon, netfid);
+ direntry->d_inode->i_nlink--;
+ }
+ /* BB if rc = -ETXTBUSY goto the rename logic BB */
+ }
}
}
cifsInode = CIFS_I(direntry->d_inode);
@@ -511,6 +526,20 @@ cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
rc = CIFSSMBRename(xid, pTcon, fromName, toName,
cifs_sb_source->local_nls);
}
+
+ if((rc == -EIO)||(rc == -EEXIST)) {
+ int oplock = FALSE;
+ __u16 netfid;
+
+ rc = CIFSSMBOpen(xid, pTcon, fromName, FILE_OPEN, GENERIC_READ,
+ CREATE_NOT_DIR,
+ &netfid, &oplock, NULL, cifs_sb_source->local_nls);
+ if(rc==0) {
+ CIFSSMBRenameOpenFile(xid,pTcon,netfid,
+ toName, cifs_sb_source->local_nls);
+ CIFSSMBClose(xid, pTcon, netfid);
+ }
+ }
if (fromName)
kfree(fromName);
if (toName)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index aef84d40a5aa..1e037749e8f4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -169,7 +169,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
temp_fs = get_fs(); /* we must turn off socket api parm checking */
set_fs(get_ds());
rc = sock_sendmsg(ssocket, &smb_msg, smb_buf_length + 4);
-
+ while((rc == -ENOSPC) || (rc == -EAGAIN)) {
+ schedule_timeout(HZ/2);
+ rc = sock_sendmsg(ssocket, &smb_msg, smb_buf_length + 4);
+ }
set_fs(temp_fs);
if (rc < 0) {
diff --git a/include/asm-h8300/aki3068net/machine-depend.h b/include/asm-h8300/aki3068net/machine-depend.h
new file mode 100644
index 000000000000..e2e5f6a523ac
--- /dev/null
+++ b/include/asm-h8300/aki3068net/machine-depend.h
@@ -0,0 +1,35 @@
+/* AE-3068 board depend header */
+
+/* TIMER rate define */
+#ifdef H8300_TIMER_DEFINE
+#include <linux/config.h>
+#define H8300_TIMER_COUNT_DATA 20000*10/8192
+#define H8300_TIMER_FREQ 20000*1000/8192
+#endif
+
+/* AE-3068 RTL8019AS Config */
+#ifdef H8300_NE_DEFINE
+
+#define NE2000_ADDR 0x200000
+#define NE2000_IRQ 5
+#define NE2000_IRQ_VECTOR (12 + NE2000_IRQ)
+#define NE2000_BYTE volatile unsigned short
+
+#define IER 0xfee015
+#define ISR 0xfee016
+#define IRQ_MASK (1 << NE2000_IRQ)
+
+#define WCRL 0xfee023
+#define MAR0A 0xffff20
+#define ETCR0A 0xffff24
+#define DTCR0A 0xffff27
+#define MAR0B 0xffff28
+#define DTCR0B 0xffff2f
+
+#define H8300_INIT_NE() \
+do { \
+ wordlength = 1; \
+ outb_p(0x48, ioaddr + EN0_DCFG); \
+} while(0)
+
+#endif
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index 3af502772b6f..e9595c099c58 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -71,56 +71,27 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
return ret == 0;
}
-#if defined(__H8300H__)
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
- __asm__ __volatile__("stc ccr,r2l\n\t"
+ __asm__ __volatile__("stc ccr,r1l\n\t"
"orc #0x80,ccr\n\t"
"mov.l %0,er0\n\t"
- "mov.l %1,er1\n\t"
- "and.l er1,er0\n\t"
+ "and.l %1,er0\n\t"
"mov.l er0,%0\n\t"
- "ldc r2l,ccr"
- : "=m" (*v) : "ir" (~(mask)) :"er0","er1","er2");
+ "ldc r1l,ccr"
+ : "=m" (*v) : "g" (~(mask)) :"er0","er1");
}
static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
- __asm__ __volatile__("stc ccr,r2l\n\t"
+ __asm__ __volatile__("stc ccr,r1l\n\t"
"orc #0x80,ccr\n\t"
"mov.l %0,er0\n\t"
- "mov.l %1,er1\n\t"
- "or.l er1,er0\n\t"
+ "or.l %1,er0\n\t"
"mov.l er0,%0\n\t"
- "ldc r2l,ccr"
- : "=m" (*v) : "ir" (mask) :"er0","er1","er2");
+ "ldc r1l,ccr"
+ : "=m" (*v) : "g" (mask) :"er0","er1");
}
-#endif
-#if defined(__H8300S__)
-static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
- __asm__ __volatile__("stc exr,r2l\n\t"
- "orc #0x07,exr\n\t"
- "mov.l %0,er0\n\t"
- "mov.l %1,er1\n\t"
- "and.l er1,er0\n\t"
- "mov.l er0,%0\n\t"
- "ldc r2l,exr"
- : "=m" (*v) : "ir" (~(mask)) :"er0","er1","er2");
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
- __asm__ __volatile__("stc exr,r2l\n\t"
- "orc #0x07,exr\n\t"
- "mov.l %0,er0\n\t"
- "mov.l %1,er1\n\t"
- "or.l er1,er0\n\t"
- "mov.l er0,%0\n\t"
- "ldc r2l,exr"
- : "=m" (*v) : "ir" (mask) :"er0","er1","er2");
-}
-#endif
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index 703024599da6..87068f245d5c 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -39,16 +39,18 @@ static __inline__ unsigned long ffz(unsigned long word)
static __inline__ void set_bit(int nr, volatile unsigned long* addr)
{
- unsigned char *a = (unsigned char *) addr;
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
+ volatile unsigned char *b_addr;
+ b_addr = &(((volatile unsigned char *) addr)
+ [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]);
__asm__("mov.l %1,er0\n\t"
- "mov.l %0,er1\n\t"
- "bset r0l,@er1"
- :"=m"(a):"g"(nr & 7):"er0","er1","memory");
+ "bset r0l,%0"
+ :"+m"(*b_addr)
+ :"g"(nr & 7),"m"(*b_addr)
+ :"er0");
}
-/* Bigendian is complexed... */
-#define __set_bit(nr, addr) set_bit(nr, addr)
+/* Bigendian is complexed... */
+#define __set_bit(nr, addr) set_bit((nr), (addr))
/*
* clear_bit() doesn't provide any barrier for the compiler.
@@ -58,261 +60,158 @@ static __inline__ void set_bit(int nr, volatile unsigned long* addr)
static __inline__ void clear_bit(int nr, volatile unsigned long* addr)
{
- unsigned char *a = (unsigned char *) addr;
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
+ volatile unsigned char *b_addr;
+ b_addr = &(((volatile unsigned char *) addr)
+ [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]);
__asm__("mov.l %1,er0\n\t"
- "mov.l %0,er1\n\t"
- "bclr r0l,@er1"
- :"=m"(a):"g"(nr & 7):"er0","er1","memory");
+ "bclr r0l,%0"
+ :"+m"(*b_addr)
+ :"g"(nr & 7),"m"(*b_addr)
+ :"er0");
}
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
+#define __clear_bit(nr, addr) clear_bit((nr), (addr))
static __inline__ void change_bit(int nr, volatile unsigned long* addr)
{
- unsigned char *a = (unsigned char *) addr;
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
+ volatile unsigned char *b_addr;
+ b_addr = &(((volatile unsigned char *) addr)
+ [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)]);
__asm__("mov.l %1,er0\n\t"
- "mov.l %0,er1\n\t"
- "bnot r0l,@er1"
- :"=m"(a):"g"(nr & 7):"er0","er1","memory");
+ "bnot r0l,%0"
+ :"+m"(*b_addr)
+ :"g"(nr & 7),"m"(*b_addr)
+ :"er0");
}
-#define __change_bit(nr, addr) change_bit(nr, addr)
+#define __change_bit(nr, addr) change_bit((nr), (addr))
-#if defined(__H8300H__)
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long* addr)
+static __inline__ int test_bit(int nr, const unsigned long* addr)
{
- int retval;
- unsigned char *a;
- a = (unsigned char *) addr;
-
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "stc ccr,r0h\n\t"
- "orc #0x80,ccr\n\t"
- "mov.b %1,r1l\n\t"
- "btst r0l,r1l\n\t"
- "bset r0l,r1l\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r1l,%1\n\t"
- "ldc r0h,ccr\n\t"
- "sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory");
- return retval;
+ return ((1UL << (nr & 7)) &
+ (((const volatile unsigned char *) addr)
+ [((nr >> 3) & ~3) + 3 - ((nr >> 3) & 3)])) != 0;
}
-#endif
-#if defined(__H8300S__)
+
+#define __test_bit(nr, addr) test_bit(nr, addr)
+
static __inline__ int test_and_set_bit(int nr, volatile unsigned long* addr)
{
- int retval;
- unsigned char *a;
- a = (unsigned char *) addr;
+ register int retval __asm__("er0");
+ volatile unsigned char *a;
+ a = (volatile unsigned char *)addr;
a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "stc exr,r0h\n\t"
- "orc #0x07,exr\n\t"
- "mov.b %1,r1l\n\t"
- "btst r0l,r1l\n\t"
- "bset r0l,r1l\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r1l,%1\n\t"
- "ldc r0h,exr\n\t"
+ __asm__("mov.l %2,er3\n\t"
"sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory");
+ "stc ccr,r3h\n\t"
+ "orc #0x80,ccr\n\t"
+ "btst r3l,%1\n\t"
+ "bset r3l,%1\n\t"
+ "beq 1f\n\t"
+ "inc.l #1,%0\n\t"
+ "1:"
+ "ldc r3h,ccr"
+ : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory");
return retval;
}
-#endif
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long* addr)
{
- int retval;
- unsigned char *a = (unsigned char *) addr;
+ register int retval __asm__("er0");
+ volatile unsigned char *a;
+ a = (volatile unsigned char *)addr;
a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "mov.b %1,r0h\n\t"
- "btst r0l,r0h\n\t"
- "bset r0l,r0h\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r0h,%1\n\t"
+ __asm__("mov.l %2,er3\n\t"
"sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory");
+ "btst r3l,%1\n\t"
+ "bset r3l,%1\n\t"
+ "beq 1f\n\t"
+ "inc.l #1,%0\n\t"
+ "1:"
+ : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory");
return retval;
}
-#if defined(__H8300H__)
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long* addr)
{
- int retval;
- unsigned char *a = (unsigned char *) addr;
+ register int retval __asm__("er0");
+ volatile unsigned char *a;
+ a = (volatile unsigned char *)addr;
a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "stc ccr,r0h\n\t"
- "orc #0x80,ccr\n\t"
- "mov.b %1,r1l\n\t"
- "btst r0l,r1l\n\t"
- "bclr r0l,r1l\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r1l,%1\n\t"
- "ldc r0h,ccr\n\t"
+ __asm__("mov.l %2,er3\n\t"
"sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory");
- return retval;
-}
-#endif
-#if defined(__H8300S__)
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long* addr)
-{
- int retval;
- unsigned char *a = (unsigned char *) addr;
-
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "stc exr,r0h\n\t"
- "orc #0x07,exr\n\t"
- "mov.b %1,r1l\n\t"
- "btst r0l,r1l\n\t"
- "bclr r0l,r1l\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r1l,%1\n\t"
- "ldc r0h,exr\n\t"
- "sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory");
+ "stc ccr,r3h\n\t"
+ "orc #0x80,ccr\n\t"
+ "btst r3l,%1\n\t"
+ "bclr r3l,%1\n\t"
+ "beq 1f\n\t"
+ "inc.l #1,%0\n\t"
+ "1:"
+ "ldc r3h,ccr"
+ : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er3","memory");
return retval;
}
-#endif
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long* addr)
{
- int retval;
- unsigned char *a = (unsigned char *) addr;
+ register int retval __asm__("er0");
+ volatile unsigned char *a;
+ a = (volatile unsigned char *)addr;
a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "mov.b %1,r0h\n\t"
- "btst r0l,r0h\n\t"
- "bclr r0l,r0h\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r0h,%1\n\t"
+ __asm__("mov.l %2,er3\n\t"
"sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory");
+ "btst r3l,%1\n\t"
+ "bclr r3l,%1\n\t"
+ "beq 1f\n\t"
+ "inc.l #1,%0\n\t"
+ "1:"
+ : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory");
return retval;
}
-#if defined(__H8300H__)
static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr)
{
- int retval;
- unsigned char *a = (unsigned char *) addr;
+ register int retval __asm__("er0");
+ volatile unsigned char *a;
+ a = (volatile unsigned char *)addr;
a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "stc ccr,r0h\n\t"
- "orc #0x80,ccr\n\t"
- "mov.b %1,r1l\n\t"
- "btst r0l,r1l\n\t"
- "bnot r0l,r1l\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r1l,%1\n\t"
- "ldc r0h,ccr\n\t"
+ __asm__("mov.l %2,er3\n\t"
"sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory");
- return retval;
-}
-#endif
-#if defined(__H8300S__)
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr)
-{
- int retval;
- unsigned char *a = (unsigned char *) addr;
-
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "stc exr,r0h\n\t"
- "orc #0x07,exr\n\t"
- "mov.b %1,r1l\n\t"
- "btst r0l,r1l\n\t"
- "bnot r0l,r1l\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r1l,%1\n\t"
- "ldc r0h,exr\n\t"
- "sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","er1","memory");
+ "stc ccr,r3h\n\t"
+ "orc #0x80,ccr\n\t"
+ "btst r3l,%1\n\t"
+ "bnot r3l,%1\n\t"
+ "beq 1f\n\t"
+ "inc.l #1,%0\n\t"
+ "1:"
+ "ldc r3h,ccr"
+ : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory");
return retval;
}
-#endif
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long* addr)
{
- int retval;
- unsigned char *a = (unsigned char *) addr;
+ register int retval __asm__("er0");
+ volatile unsigned char *a;
+ a = (volatile unsigned char *)addr;
a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %2,er0\n\t"
- "mov.b %1,r0h\n\t"
- "btst r0l,r0h\n\t"
- "bnot r0l,r0h\n\t"
- "stc ccr,r0l\n\t"
- "mov.b r0h,%1\n\t"
+ __asm__("mov.l %2,er3\n\t"
"sub.l %0,%0\n\t"
- "bild #2,r0l\n\t"
- "rotxl.l %0"
- : "=r"(retval),"=m"(*a) :"g"(nr & 7):"er0","memory");
- return retval;
-}
-
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const volatile unsigned long* addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const unsigned long* addr)
-{
- int retval;
- unsigned char *a = (unsigned char *) addr;
-
- a += ((nr >> 3) & ~3) + (3 - ((nr >> 3) & 3));
- __asm__("mov.l %1,er0\n\t"
- "btst r0l,@%2\n\t"
+ "btst r3l,%1\n\t"
+ "bnot r3l,%1\n\t"
"beq 1f\n\t"
- "sub.l %0,%0\n\t"
- "inc.l #1,%0\n"
- "bra 2f\n"
- "1:\n\t"
- "sub.l %0,%0\n"
- "2:"
- : "=r"(retval) :"g"(nr & 7),"r"(a):"er0");
+ "inc.l #1,%0\n\t"
+ "1:"
+ : "=r"(retval),"+m"(*a) :"g"(nr & 7):"er3","memory");
return retval;
}
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
-
-
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
diff --git a/include/asm-h8300/edosk2674/machine-depend.h b/include/asm-h8300/edosk2674/machine-depend.h
new file mode 100644
index 000000000000..1e98b40e5f4e
--- /dev/null
+++ b/include/asm-h8300/edosk2674/machine-depend.h
@@ -0,0 +1,70 @@
+/* EDOSK2674 board depend header */
+
+/* TIMER rate define */
+#ifdef H8300_TIMER_DEFINE
+#define H8300_TIMER_COUNT_DATA 33000*10/8192
+#define H8300_TIMER_FREQ 33000*1000/8192
+#endif
+
+/* EDOSK-2674R SMSC Network Controller Target Depend implementation */
+#ifdef H8300_SMSC_DEFINE
+
+#define SMSC_BASE 0xf80000
+#define SMSC_IRQ 16
+
+/* sorry quick hack */
+#if defined(outw)
+# undef outw
+#endif
+#define outw(d,a) edosk2674_smsc_outw(d,(volatile unsigned short *)(a))
+#if defined(inw)
+# undef inw
+#endif
+#define inw(a) edosk2674_smsc_inw((volatile unsigned short *)(a))
+#if defined(outsw)
+# undef outsw
+#endif
+#define outsw(a,p,l) edosk2674_smsc_outsw((volatile unsigned short *)(a),p,l)
+#if defined(insw)
+# undef insw
+#endif
+#define insw(a,p,l) edosk2674_smsc_insw((volatile unsigned short *)(a),p,l)
+
+static inline void edosk2674_smsc_outw(
+ unsigned short d,
+ volatile unsigned short *a
+ )
+{
+ *a = (d >> 8) | (d << 8);
+}
+
+static inline unsigned short edosk2674_smsc_inw(
+ volatile unsigned short *a
+ )
+{
+ unsigned short d;
+ d = *a;
+ return (d >> 8) | (d << 8);
+}
+
+static inline void edosk2674_smsc_outsw(
+ volatile unsigned short *a,
+ unsigned short *p,
+ unsigned long l
+ )
+{
+ for (; l != 0; --l, p++)
+ *a = *p;
+}
+
+static inline void edosk2674_smsc_insw(
+ volatile unsigned short *a,
+ unsigned short *p,
+ unsigned long l
+ )
+{
+ for (; l != 0; --l, p++)
+ *p = *a;
+}
+
+#endif
diff --git a/include/asm-h8300/generic/machine-depend.h b/include/asm-h8300/generic/machine-depend.h
new file mode 100644
index 000000000000..2d78096e54c8
--- /dev/null
+++ b/include/asm-h8300/generic/machine-depend.h
@@ -0,0 +1,17 @@
+/* machine depend header */
+
+/* TIMER rate define */
+#ifdef H8300_TIMER_DEFINE
+#include <linux/config.h>
+#if defined(CONFIG_H83007) || defined(CONFIG_H83068) || defined(CONFIG_H8S2678)
+#define H8300_TIMER_COUNT_DATA CONFIG_CPU_CLOCK*10/8192
+#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*1000/8192
+#endif
+
+#if defined(CONFIG_H8_3002) || defined(CONFIG_H83048)
+#define H8300_TIMER_COUNT_DATA CONFIG_CPU_CLOCK*10/8
+#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*1000/8
+#endif
+
+#endif
+
diff --git a/include/asm-h8300/h8300_ne.h b/include/asm-h8300/h8300_ne.h
index debef6ae7140..c0350b6ea6e3 100644
--- a/include/asm-h8300/h8300_ne.h
+++ b/include/asm-h8300/h8300_ne.h
@@ -11,9 +11,9 @@
#define h8300ne_h
/****************************************************************************/
-/* Such a description is OK ? */
-#define DEPEND_HEADER(target) <asm/target/ne.h>
-#include DEPEND_HEADER(TARGET)
+#define H8300_NE_DEFINE
+#include <asm/machine-depend.h>
+#undef H8300_NE_DEFINE
/****************************************************************************/
#endif /* h8300ne_h */
diff --git a/include/asm-h8300/h8max/ide.h b/include/asm-h8300/h8max/ide.h
deleted file mode 100644
index e7d75ac57974..000000000000
--- a/include/asm-h8300/h8max/ide.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* H8MAX IDE I/F Config */
-
-#define H8300_IDE_BASE 0x200000
-#define H8300_IDE_CTRL 0x60000c
-#define H8300_IDE_IRQ 5
-#define H8300_IDE_REG_OFFSET 2
-
-#undef outb
-#undef inb
-#undef outb_p
-#undef inb_p
-#undef outsw
-#undef insw
-
-#define outb(d,a) h8max_outb(d,(unsigned short *)a)
-#define inb(a) h8max_inb((unsigned char *)a)
-#define outb_p(d,a) h8max_outb(d,(unsigned short *)a)
-#define inb_p(a) h8max_inb((unsigned char *)a)
-#define outsw(addr,buf,len) h8max_outsw(addr,buf,len);
-#define insw(addr,buf,len) h8max_insw(addr,buf,len);
-
-static inline void h8max_outb(unsigned short d,unsigned short *a)
-{
- *a = d;
-}
-
-static inline unsigned char h8max_inb(unsigned char *a)
-{
- return *(a+1);
-}
-
-static inline void h8max_outsw(void *addr, void *buf, int len)
-{
- unsigned volatile short *ap = (unsigned volatile short *)addr;
- unsigned short *bp = (unsigned short *)buf;
- unsigned short d;
- while(len--) {
- d = *bp++;
- *ap = (d >> 8) | (d << 8);
- }
-}
-
-static inline void h8max_insw(void *addr, void *buf, int len)
-{
- unsigned volatile short *ap = (unsigned volatile short *)addr;
- unsigned short *bp = (unsigned short *)buf;
- unsigned short d;
- while(len--) {
- d = *ap;
- *bp++ = (d >> 8) | (d << 8);
- }
-}
-
-static inline void target_ide_fix_driveid(struct hd_driveid *id)
-{
- int c;
- unsigned short *p = (unsigned short *)id;
- for (c = 0; c < SECTOR_WORDS; c++, p++)
- *p = (*p >> 8) | (*p << 8);
-}
diff --git a/include/asm-h8300/h8max/machine-depend.h b/include/asm-h8300/h8max/machine-depend.h
new file mode 100644
index 000000000000..1a2218f9d3bf
--- /dev/null
+++ b/include/asm-h8300/h8max/machine-depend.h
@@ -0,0 +1,167 @@
+/* H8MAX board depend header */
+
+/* TIMER rate define */
+#ifdef H8300_TIMER_DEFINE
+#define H8300_TIMER_COUNT_DATA 25000*10/8192
+#define H8300_TIMER_FREQ 25000*1000/8192
+#endif
+
+/* H8MAX RTL8019AS Config */
+#ifdef H8300_NE_DEFINE
+
+#define NE2000_ADDR 0x800600
+#define NE2000_IRQ 4
+#define NE2000_IRQ_VECTOR (12 + NE2000_IRQ)
+#define NE2000_BYTE volatile unsigned short
+
+#define IER 0xfee015
+#define ISR 0xfee016
+#define IRQ_MASK (1 << NE2000_IRQ)
+/* sorry quick hack */
+#if defined(outb)
+# undef outb
+#endif
+#define outb(d,a) h8max_outb((d),(a) - NE2000_ADDR)
+#if defined(inb)
+# undef inb
+#endif
+#define inb(a) h8max_inb((a) - NE2000_ADDR)
+#if defined(outb_p)
+# undef outb_p
+#endif
+#define outb_p(d,a) h8max_outb((d),(a) - NE2000_ADDR)
+#if defined(inb_p)
+# undef inb_p
+#endif
+#define inb_p(a) h8max_inb((a) - NE2000_ADDR)
+#if defined(outsw)
+# undef outsw
+#endif
+#define outsw(a,p,l) h8max_outsw((a) - NE2000_ADDR,(unsigned short *)p,l)
+#if defined(insw)
+# undef insw
+#endif
+#define insw(a,p,l) h8max_insw((a) - NE2000_ADDR,(unsigned short *)p,l)
+#if defined(outsb)
+# undef outsb
+#endif
+#define outsb(a,p,l) h8max_outsb((a) - NE2000_ADDR,(unsigned char *)p,l)
+#if defined(insb)
+# undef insb
+#endif
+#define insb(a,p,l) h8max_insb((a) - NE2000_ADDR,(unsigned char *)p,l)
+
+#define H8300_INIT_NE() \
+do { \
+ wordlength = 2; \
+ h8max_outb(0x49, ioaddr + EN0_DCFG); \
+ SA_prom[14] = SA_prom[15] = 0x57;\
+} while(0)
+
+static inline void h8max_outb(unsigned char d,unsigned char a)
+{
+ *(unsigned short *)(NE2000_ADDR + (a << 1)) = d;
+}
+
+static inline unsigned char h8max_inb(unsigned char a)
+{
+ return *(unsigned char *)(NE2000_ADDR + (a << 1) +1);
+}
+
+static inline void h8max_outsw(unsigned char a,unsigned short *p,unsigned long l)
+{
+ unsigned short d;
+ for (; l != 0; --l, p++) {
+ d = (((*p) >> 8) & 0xff) | ((*p) << 8);
+ *(unsigned short *)(NE2000_ADDR + (a << 1)) = d;
+ }
+}
+
+static inline void h8max_insw(unsigned char a,unsigned short *p,unsigned long l)
+{
+ unsigned short d;
+ for (; l != 0; --l, p++) {
+ d = *(unsigned short *)(NE2000_ADDR + (a << 1));
+ *p = (d << 8)|((d >> 8) & 0xff);
+ }
+}
+
+static inline void h8max_outsb(unsigned char a,unsigned char *p,unsigned long l)
+{
+ for (; l != 0; --l, p++) {
+ *(unsigned short *)(NE2000_ADDR + (a << 1)) = *p;
+ }
+}
+
+static inline void h8max_insb(unsigned char a,unsigned char *p,unsigned long l)
+{
+ for (; l != 0; --l, p++) {
+ *p = *((unsigned char *)(NE2000_ADDR + (a << 1))+1);
+ }
+}
+
+#endif
+
+/* H8MAX IDE I/F Config */
+#ifdef H8300_IDE_DEFINE
+
+#define H8300_IDE_BASE 0x200000
+#define H8300_IDE_CTRL 0x60000c
+#define H8300_IDE_IRQ 5
+#define H8300_IDE_REG_OFFSET 2
+
+#undef outb
+#undef inb
+#undef outb_p
+#undef inb_p
+#undef outsw
+#undef insw
+
+#define outb(d,a) h8max_outb(d,(unsigned short *)a)
+#define inb(a) h8max_inb((unsigned char *)a)
+#define outb_p(d,a) h8max_outb(d,(unsigned short *)a)
+#define inb_p(a) h8max_inb((unsigned char *)a)
+#define outsw(addr,buf,len) h8max_outsw(addr,buf,len);
+#define insw(addr,buf,len) h8max_insw(addr,buf,len);
+
+static inline void h8max_outb(unsigned short d,unsigned short *a)
+{
+ *a = d;
+}
+
+static inline unsigned char h8max_inb(unsigned char *a)
+{
+ return *(a+1);
+}
+
+static inline void h8max_outsw(void *addr, void *buf, int len)
+{
+ unsigned volatile short *ap = (unsigned volatile short *)addr;
+ unsigned short *bp = (unsigned short *)buf;
+ unsigned short d;
+ while(len--) {
+ d = *bp++;
+ *ap = (d >> 8) | (d << 8);
+ }
+}
+
+static inline void h8max_insw(void *addr, void *buf, int len)
+{
+ unsigned volatile short *ap = (unsigned volatile short *)addr;
+ unsigned short *bp = (unsigned short *)buf;
+ unsigned short d;
+ while(len--) {
+ d = *ap;
+ *bp++ = (d >> 8) | (d << 8);
+ }
+}
+
+static inline void target_ide_fix_driveid(struct hd_driveid *id)
+{
+ int c;
+ unsigned short *p = (unsigned short *)id;
+ for (c = 0; c < SECTOR_WORDS; c++, p++)
+ *p = (*p >> 8) | (*p << 8);
+}
+
+#endif
diff --git a/include/asm-h8300/hardirq.h b/include/asm-h8300/hardirq.h
index 20f3571cc299..ccab235b9f83 100644
--- a/include/asm-h8300/hardirq.h
+++ b/include/asm-h8300/hardirq.h
@@ -75,12 +75,6 @@ typedef struct {
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#ifdef CONFIG_PREEMPT
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
-#else
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
-#endif
-
-#ifdef CONFIG_PREEMPT
# define in_atomic() (preempt_count() != kernel_locked())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
diff --git a/include/asm-h8300/ide.h b/include/asm-h8300/ide.h
index 3ebf8e262324..3669f106312b 100644
--- a/include/asm-h8300/ide.h
+++ b/include/asm-h8300/ide.h
@@ -70,9 +70,10 @@ typedef union {
* Our list of ports/irq's for different boards.
*/
-/* Such a description is OK ? */
-#define DEPEND_HEADER(target) <asm/target/ide.h>
-#include DEPEND_HEADER(TARGET)
+/* machine depend header include */
+#define H8300_IDE_DEFINE
+#include <asm/machine-depend.h>
+#undef H8300_IDE_DEFINE
/****************************************************************************/
diff --git a/include/asm-h8300/io.h b/include/asm-h8300/io.h
index 69efa4f2c0de..42f91752b920 100644
--- a/include/asm-h8300/io.h
+++ b/include/asm-h8300/io.h
@@ -51,21 +51,12 @@ static inline unsigned int _swapl(volatile unsigned long v)
#define writew(b,addr) (void)((*(volatile unsigned short *) (addr & 0x00ffffff)) = (b))
#define writel(b,addr) (void)((*(volatile unsigned int *) (addr & 0x00ffffff)) = (b))
-/*
- * The following are some defines we need for MTD with our
- * COBRA5272 board.
- * Because I don't know if they break something I have
- * #ifdef'd them.
- * (020325 - hede)
- */
-#ifdef CONFIG_senTec
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
-#endif /* CONFIG_senTec */
static inline void io_outsb(unsigned int addr, void *buf, int len)
{
diff --git a/include/asm-h8300/machine-depend.h b/include/asm-h8300/machine-depend.h
new file mode 100644
index 000000000000..1e98b40e5f4e
--- /dev/null
+++ b/include/asm-h8300/machine-depend.h
@@ -0,0 +1,70 @@
+/* EDOSK2674 board depend header */
+
+/* TIMER rate define */
+#ifdef H8300_TIMER_DEFINE
+#define H8300_TIMER_COUNT_DATA 33000*10/8192
+#define H8300_TIMER_FREQ 33000*1000/8192
+#endif
+
+/* EDOSK-2674R SMSC Network Controller Target Depend implementation */
+#ifdef H8300_SMSC_DEFINE
+
+#define SMSC_BASE 0xf80000
+#define SMSC_IRQ 16
+
+/* sorry quick hack */
+#if defined(outw)
+# undef outw
+#endif
+#define outw(d,a) edosk2674_smsc_outw(d,(volatile unsigned short *)(a))
+#if defined(inw)
+# undef inw
+#endif
+#define inw(a) edosk2674_smsc_inw((volatile unsigned short *)(a))
+#if defined(outsw)
+# undef outsw
+#endif
+#define outsw(a,p,l) edosk2674_smsc_outsw((volatile unsigned short *)(a),p,l)
+#if defined(insw)
+# undef insw
+#endif
+#define insw(a,p,l) edosk2674_smsc_insw((volatile unsigned short *)(a),p,l)
+
+static inline void edosk2674_smsc_outw(
+ unsigned short d,
+ volatile unsigned short *a
+ )
+{
+ *a = (d >> 8) | (d << 8);
+}
+
+static inline unsigned short edosk2674_smsc_inw(
+ volatile unsigned short *a
+ )
+{
+ unsigned short d;
+ d = *a;
+ return (d >> 8) | (d << 8);
+}
+
+static inline void edosk2674_smsc_outsw(
+ volatile unsigned short *a,
+ unsigned short *p,
+ unsigned long l
+ )
+{
+ for (; l != 0; --l, p++)
+ *a = *p;
+}
+
+static inline void edosk2674_smsc_insw(
+ volatile unsigned short *a,
+ unsigned short *p,
+ unsigned long l
+ )
+{
+ for (; l != 0; --l, p++)
+ *p = *a;
+}
+
+#endif
diff --git a/include/asm-h8300/processor.h b/include/asm-h8300/processor.h
index a945b8bede2f..819c9b34e152 100644
--- a/include/asm-h8300/processor.h
+++ b/include/asm-h8300/processor.h
@@ -70,12 +70,12 @@ struct thread_struct {
* pass the data segment into user programs if it exists,
* it can't hurt anything as far as I can tell
*/
-#if defined(__H8300S__)
+#if defined(__H8300H__)
#define start_thread(_regs, _pc, _usp) \
do { \
set_fs(USER_DS); /* reads from user space */ \
(_regs)->pc = (_pc); \
- (_regs)->ccr &= ~0x10; /* clear kernel flag */ \
+ (_regs)->ccr &= 0x00; /* clear kernel flag */ \
} while(0)
#endif
#if defined(__H8300S__)
@@ -83,7 +83,7 @@ do { \
do { \
set_fs(USER_DS); /* reads from user space */ \
(_regs)->pc = (_pc); \
- (_regs)->ccr &= ~0x10; /* clear kernel flag */ \
+ (_regs)->ccr = 0x00; /* clear kernel flag */ \
(_regs)->exr = 0x78; /* enable all interrupts */ \
/* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \
wrusp(((unsigned long)(_usp)) - 14); \
diff --git a/include/asm-h8300/regs267x.h b/include/asm-h8300/regs267x.h
new file mode 100644
index 000000000000..1bff731a9f77
--- /dev/null
+++ b/include/asm-h8300/regs267x.h
@@ -0,0 +1,336 @@
+/* internal Peripherals Register address define */
+/* CPU: H8S/267x */
+
+#if !defined(__REGS_H8S267x__)
+#define __REGS_H8S267x__
+
+#if defined(__KERNEL__)
+
+#define DASTCR 0xFEE01A
+#define DADR0 0xFFFFA4
+#define DADR1 0xFFFFA5
+#define DACR01 0xFFFFA6
+#define DADR2 0xFFFFA8
+#define DADR3 0xFFFFA9
+#define DACR23 0xFFFFAA
+
+#define ADDRA 0xFFFF90
+#define ADDRAH 0xFFFF90
+#define ADDRAL 0xFFFF91
+#define ADDRB 0xFFFF92
+#define ADDRBH 0xFFFF92
+#define ADDRBL 0xFFFF93
+#define ADDRC 0xFFFF94
+#define ADDRCH 0xFFFF94
+#define ADDRCL 0xFFFF95
+#define ADDRD 0xFFFF96
+#define ADDRDH 0xFFFF96
+#define ADDRDL 0xFFFF97
+#define ADDRE 0xFFFF98
+#define ADDREH 0xFFFF98
+#define ADDREL 0xFFFF99
+#define ADDRF 0xFFFF9A
+#define ADDRFH 0xFFFF9A
+#define ADDRFL 0xFFFF9B
+#define ADDRG 0xFFFF9C
+#define ADDRGH 0xFFFF9C
+#define ADDRGL 0xFFFF9D
+#define ADDRH 0xFFFF9E
+#define ADDRHH 0xFFFF9E
+#define ADDRHL 0xFFFF9F
+
+#define ADCSR 0xFFFFA0
+#define ADCR 0xFFFFA1
+
+#define ABWCR 0xFFFEC0
+#define ASTCR 0xFFFEC1
+#define WTCRAH 0xFFFEC2
+#define WTCRAL 0xFFFEC3
+#define WTCRBH 0xFFFEC4
+#define WTCRBL 0xFFFEC5
+#define RDNCR 0xFFFEC6
+#define CSACRH 0xFFFEC8
+#define CSACRL 0xFFFEC9
+#define BROMCRH 0xFFFECA
+#define BROMCRL 0xFFFECB
+#define BCR 0xFFFECC
+#define DRAMCR 0xFFFED0
+#define DRACCR 0xFFFED2
+#define REFCR 0xFFFED4
+#define RTCNT 0xFFFED6
+#define RTCOR 0xFFFED7
+
+#define MAR0AH 0xFFFEE0
+#define MAR0AL 0xFFFEE2
+#define IOAR0A 0xFFFEE4
+#define ETCR0A 0xFFFEE6
+#define MAR0BH 0xFFFEE8
+#define MAR0BL 0xFFFEEA
+#define IOAR0B 0xFFFEEC
+#define ETCR0B 0xFFFEEE
+#define MAR1AH 0xFFFEF0
+#define MAR1AL 0xFFFEF2
+#define IOAR1A 0xFFFEF4
+#define ETCR1A 0xFFFEF6
+#define MAR1BH 0xFFFEF8
+#define MAR1BL 0xFFFEFA
+#define IOAR1B 0xFFFEFC
+#define ETCR1B 0xFFFEFE
+#define DMAWER 0xFFFF20
+#define DMATCR 0xFFFF21
+#define DMACR0A 0xFFFF22
+#define DMACR0B 0xFFFF23
+#define DMACR1A 0xFFFF24
+#define DMACR1B 0xFFFF25
+#define DMABCRH 0xFFFF26
+#define DMABCRL 0xFFFF27
+
+#define EDSAR0 0xFFFDC0
+#define EDDAR0 0xFFFDC4
+#define EDTCR0 0xFFFDC8
+#define EDMDR0 0xFFFDCC
+#define EDMDR0H 0xFFFDCC
+#define EDMDR0L 0xFFFDCD
+#define EDACR0 0xFFFDCE
+#define EDSAR1 0xFFFDD0
+#define EDDAR1 0xFFFDD4
+#define EDTCR1 0xFFFDD8
+#define EDMDR1 0xFFFDDC
+#define EDMDR1H 0xFFFDDC
+#define EDMDR1L 0xFFFDDD
+#define EDACR1 0xFFFDDE
+#define EDSAR2 0xFFFDE0
+#define EDDAR2 0xFFFDE4
+#define EDTCR2 0xFFFDE8
+#define EDMDR2 0xFFFDEC
+#define EDMDR2H 0xFFFDEC
+#define EDMDR2L 0xFFFDED
+#define EDACR2 0xFFFDEE
+#define EDSAR3 0xFFFDF0
+#define EDDAR3 0xFFFDF4
+#define EDTCR3 0xFFFDF8
+#define EDMDR3 0xFFFDFC
+#define EDMDR3H 0xFFFDFC
+#define EDMDR3L 0xFFFDFD
+#define EDACR3 0xFFFDFE
+
+#define IPRA 0xFFFE00
+#define IPRB 0xFFFE02
+#define IPRC 0xFFFE04
+#define IPRD 0xFFFE06
+#define IPRE 0xFFFE08
+#define IPRF 0xFFFE0A
+#define IPRG 0xFFFE0C
+#define IPRH 0xFFFE0E
+#define IPRI 0xFFFE10
+#define IPRJ 0xFFFE12
+#define IPRK 0xFFFE14
+#define ITSR 0xFFFE16
+#define SSIER 0xFFFE18
+#define ISCRH 0xFFFE1A
+#define ISCRL 0xFFFE1C
+
+#define INTCR 0xFFFF31
+#define IER 0xFFFF32
+#define IERH 0xFFFF32
+#define IERL 0xFFFF33
+#define ISR 0xFFFF34
+#define ISRH 0xFFFF34
+#define ISRL 0xFFFF35
+
+#define P1DDR 0xFFFE20
+#define P2DDR 0xFFFE21
+#define P3DDR 0xFFFE22
+#define P4DDR 0xFFFE23
+#define P5DDR 0xFFFE24
+#define P6DDR 0xFFFE25
+#define P7DDR 0xFFFE26
+#define P8DDR 0xFFFE27
+#define P9DDR 0xFFFE28
+#define PADDR 0xFFFE29
+#define PBDDR 0xFFFE2A
+#define PCDDR 0xFFFE2B
+#define PDDDR 0xFFFE2C
+#define PEDDR 0xFFFE2D
+#define PFDDR 0xFFFE2E
+#define PGDDR 0xFFFE2F
+#define PHDDR 0xFFFF74
+
+#define PFCR0 0xFFFE32
+#define PFCR1 0xFFFE33
+#define PFCR2 0xFFFE34
+
+#define PAPCR 0xFFFE36
+#define PBPCR 0xFFFE37
+#define PCPCR 0xFFFE38
+#define PDPCR 0xFFFE39
+#define PEPCR 0xFFFE3A
+
+#define P3ODR 0xFFFE3C
+#define PAODR 0xFFFE3D
+
+#define P1DR 0xFFFF60
+#define P2DR 0xFFFF61
+#define P3DR 0xFFFF62
+#define P4DR 0xFFFF63
+#define P5DR 0xFFFF64
+#define P6DR 0xFFFF65
+#define P7DR 0xFFFF66
+#define P8DR 0xFFFF67
+#define P9DR 0xFFFF68
+#define PADR 0xFFFF69
+#define PBDR 0xFFFF6A
+#define PCDR 0xFFFF6B
+#define PDDR 0xFFFF6C
+#define PEDR 0xFFFF6D
+#define PFDR 0xFFFF6E
+#define PGDR 0xFFFF6F
+#define PHDR 0xFFFF72
+
+#define PORT1 0xFFFF50
+#define PORT2 0xFFFF51
+#define PORT3 0xFFFF52
+#define PORT4 0xFFFF53
+#define PORT5 0xFFFF54
+#define PORT6 0xFFFF55
+#define PORT7 0xFFFF56
+#define PORT8 0xFFFF57
+#define PORT9 0xFFFF58
+#define PORTA 0xFFFF59
+#define PORTB 0xFFFF5A
+#define PORTC 0xFFFF5B
+#define PORTD 0xFFFF5C
+#define PORTE 0xFFFF5D
+#define PORTF 0xFFFF5E
+#define PORTG 0xFFFF5F
+#define PORTH 0xFFFF70
+
+#define PCR 0xFFFF46
+#define PMR 0xFFFF47
+#define NDERH 0xFFFF48
+#define NDERL 0xFFFF49
+#define PODRH 0xFFFF4A
+#define PODRL 0xFFFF4B
+#define NDRH1 0xFFFF4C
+#define NDRL1 0xFFFF4D
+#define NDRH2 0xFFFF4E
+#define NDRL2 0xFFFF4F
+
+#define SMR0 0xFFFF78
+#define BRR0 0xFFFF79
+#define SCR0 0xFFFF7A
+#define TDR0 0xFFFF7B
+#define SSR0 0xFFFF7C
+#define RDR0 0xFFFF7D
+#define SCMR0 0xFFFF7E
+#define SMR1 0xFFFF80
+#define BRR1 0xFFFF81
+#define SCR1 0xFFFF82
+#define TDR1 0xFFFF83
+#define SSR1 0xFFFF84
+#define RDR1 0xFFFF85
+#define SCMR1 0xFFFF86
+#define SMR2 0xFFFF88
+#define BRR2 0xFFFF89
+#define SCR2 0xFFFF8A
+#define TDR2 0xFFFF8B
+#define SSR2 0xFFFF8C
+#define RDR2 0xFFFF8D
+#define SCMR2 0xFFFF8E
+
+#define IRCR0 0xFFFE1E
+#define SEMR 0xFFFDA8
+
+#define MDCR 0xFFFF3E
+#define SYSCR 0xFFFF3D
+#define MSTPCRH 0xFFFF40
+#define MSTPCRL 0xFFFF41
+#define FLMCR1 0xFFFFC8
+#define FLMCR2 0xFFFFC9
+#define EBR1 0xFFFFCA
+#define EBR2 0xFFFFCB
+#define CTGARC_RAMCR 0xFFFECE
+#define SBYCR 0xFFFF3A
+#define SCKCR 0xFFFF3B
+#define PLLCR 0xFFFF45
+
+#define TSTR 0xFFFFC0
+#define TSNC 0XFFFFC1
+
+#define TCR0 0xFFFFD0
+#define TMDR0 0xFFFFD1
+#define TIORH0 0xFFFFD2
+#define TIORL0 0xFFFFD3
+#define TIER0 0xFFFFD4
+#define TSR0 0xFFFFD5
+#define TCNT0 0xFFFFD6
+#define GRA0 0xFFFFD8
+#define GRB0 0xFFFFDA
+#define GRC0 0xFFFFDC
+#define GRD0 0xFFFFDE
+#define TCR1 0xFFFFE0
+#define TMDR1 0xFFFFE1
+#define TIORH1 0xFFFFE2
+#define TIORL1 0xFFFFE3
+#define TIER1 0xFFFFE4
+#define TSR1 0xFFFFE5
+#define TCNT1 0xFFFFE6
+#define GRA1 0xFFFFE8
+#define GRB1 0xFFFFEA
+#define TCR2 0xFFFFF0
+#define TMDR2 0xFFFFF1
+#define TIORH2 0xFFFFF2
+#define TIORL2 0xFFFFF3
+#define TIER2 0xFFFFF4
+#define TSR2 0xFFFFF5
+#define TCNT2 0xFFFFF6
+#define GRA2 0xFFFFF8
+#define GRB2 0xFFFFFA
+#define TCR3 0xFFFE80
+#define TMDR3 0xFFFE81
+#define TIORH3 0xFFFE82
+#define TIORL3 0xFFFE83
+#define TIER3 0xFFFE84
+#define TSR3 0xFFFE85
+#define TCNT3 0xFFFE86
+#define GRA3 0xFFFE88
+#define GRB3 0xFFFE8A
+#define GRC3 0xFFFE8C
+#define GRD3 0xFFFE8E
+#define TCR4 0xFFFE90
+#define TMDR4 0xFFFE91
+#define TIORH4 0xFFFE92
+#define TIORL4 0xFFFE93
+#define TIER4 0xFFFE94
+#define TSR4 0xFFFE95
+#define TCNT4 0xFFFE96
+#define GRA4 0xFFFE98
+#define GRB4 0xFFFE9A
+#define TCR5 0xFFFEA0
+#define TMDR5 0xFFFEA1
+#define TIORH5 0xFFFEA2
+#define TIORL5 0xFFFEA3
+#define TIER5 0xFFFEA4
+#define TSR5 0xFFFEA5
+#define TCNT5 0xFFFEA6
+#define GRA5 0xFFFEA8
+#define GRB5 0xFFFEAA
+
+#define _8TCR0 0xFFFFB0
+#define _8TCR1 0xFFFFB1
+#define _8TCSR0 0xFFFFB2
+#define _8TCSR1 0xFFFFB3
+#define _8TCORA0 0xFFFFB4
+#define _8TCORA1 0xFFFFB5
+#define _8TCORB0 0xFFFFB6
+#define _8TCORB1 0xFFFFB7
+#define _8TCNT0 0xFFFFB8
+#define _8TCNT1 0xFFFFB9
+
+#define TCSR 0xFFFFBC
+#define TCNT 0xFFFFBD
+#define RSTCSRW 0xFFFFBE
+#define RSTCSRR 0xFFFFBF
+
+#endif /* __KERNEL__ */
+#endif /* __REGS_H8S267x__ */
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index 8fdd9e2e8833..962f5eb32d16 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -83,7 +83,6 @@ extern spinlock_t semaphore_wake_lock;
* "down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/m68k/lib/semaphore.S
*/
-#if defined(__H8300H__)
static inline void down(struct semaphore * sem)
{
register atomic_t *count asm("er0");
@@ -96,9 +95,9 @@ static inline void down(struct semaphore * sem)
__asm__ __volatile__(
"stc ccr,r3l\n\t"
"orc #0x80,ccr\n\t"
- "mov.l @%1, er1\n\t"
+ "mov.l %0, er1\n\t"
"dec.l #1,er1\n\t"
- "mov.l er1,@%1\n\t"
+ "mov.l er1,%0\n\t"
"bpl 1f\n\t"
"ldc r3l,ccr\n\t"
"jsr @___down\n\t"
@@ -106,38 +105,11 @@ static inline void down(struct semaphore * sem)
"1:\n\t"
"ldc r3l,ccr\n"
"2:"
- : "=m"(sem->count)
- : "g" (count)
- : "cc", "er1", "er2", "er3", "er4", "memory");
+ : "+m"(*count)
+ :
+ : "cc", "er1", "er2", "er3");
}
-#endif
-#if defined(__H8300S__)
-static inline void down(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc exr,r3l\n\t"
- "orc #0x07,exr\n\t"
- "mov.l @%1, er1\n\t"
- "dec.l #1,er1\n\t"
- "mov.l er1,@%1\n\t"
- "ldc r3l,exr\n\t"
- "bpl 1f\n\t"
- "jsr @___down\n"
- "1:"
- : "=m"(sem->count)
- : "r" (count)
- : "cc", "er1", "er2", "er3", "memory");
-}
-#endif
-#if defined(__H8300H__)
static inline int down_interruptible(struct semaphore * sem)
{
register atomic_t *count asm("er0");
@@ -148,56 +120,25 @@ static inline int down_interruptible(struct semaphore * sem)
count = &(sem->count);
__asm__ __volatile__(
- "stc ccr,r3l\n\t"
+ "stc ccr,r1l\n\t"
"orc #0x80,ccr\n\t"
- "mov.l @%2, er2\n\t"
+ "mov.l %1, er2\n\t"
"dec.l #1,er2\n\t"
- "mov.l er2,@%2\n\t"
+ "mov.l er2,%1\n\t"
"bpl 1f\n\t"
- "ldc r3l,ccr\n\t"
+ "ldc r1l,ccr\n\t"
"jsr @___down_interruptible\n\t"
"bra 2f\n"
"1:\n\t"
- "ldc r3l,ccr\n\t"
- "sub.l %0,%0\n"
- "2:"
- : "=r" (count),"=m"(sem->count)
- : "r" (count)
- : "cc", "er1", "er2", "er3", "memory");
- return (int)count;
-}
-#endif
-#if defined(__H8300S__)
-static inline int down_interruptible(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc exr,r3l\n\t"
- "orc #0x07,exr\n\t"
- "mov.l @%2, er2\n\t"
- "dec.l #1,er2\n\t"
- "mov.l er2,@%2\n\t"
- "ldc r3l,exr\n\t"
- "bmi 1f\n\t"
+ "ldc r1l,ccr\n\t"
"sub.l %0,%0\n\t"
- "bra 2f\n"
- "1:\n\t"
- "jsr @___down_interruptible\n"
- "2:"
- : "=r" (count),"=m"(sem->count)
- : "r" (count)
- : "cc", "er1", "er2", "er3", "memory");
+ "2:\n\t"
+ : "=r" (count),"+m" (*count)
+ :
+ : "cc", "er1", "er2", "er3");
return (int)count;
}
-#endif
-#if defined(__H8300H__)
static inline int down_trylock(struct semaphore * sem)
{
register atomic_t *count asm("er0");
@@ -210,60 +151,26 @@ static inline int down_trylock(struct semaphore * sem)
__asm__ __volatile__(
"stc ccr,r3l\n\t"
"orc #0x80,ccr\n\t"
- "mov.l @%2,er2\n\t"
+ "mov.l %0,er2\n\t"
"dec.l #1,er2\n\t"
- "mov.l er2,@%2\n\t"
+ "mov.l er2,%0\n\t"
"bpl 1f\n\t"
"ldc r3l,ccr\n\t"
- "jmp @3f\n"
- "1:\n\t"
- "ldc r3l,ccr\n\t"
- "sub.l %0,%0\n"
+ "jmp @3f\n\t"
LOCK_SECTION_START(".align 2\n\t")
"3:\n\t"
"jsr @___down_trylock\n\t"
"jmp @2f\n\t"
LOCK_SECTION_END
- "2:"
- : "=r" (count),"=m"(sem->count)
- : "r" (count)
- : "cc", "er2", "er3", "memory");
- return (int)count;
-}
-#endif
-#if defined(__H8300S__)
-static inline int down_trylock(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc exr,r3l\n\t"
- "orc #0x07,exr\n\t"
- "mov.l @%2,er2\n\t"
- "dec.l #1,er2\n\t"
- "mov.l er2,@%2\n\t"
- "ldc r3l,exr\n\t"
- "bpl 1f\n\t"
- "jmp @3f\n"
"1:\n\t"
- "sub.l %0,%0\n\t"
- LOCK_SECTION_START(".align 2\n\t")
- "3:\n\t"
- "jsr @___down_trylock\n\t"
- "jmp @2f\n\t"
- LOCK_SECTION_END
- "2:\n\t"
- : "=r" (count),"=m"(sem->count)
- : "r" (count)
- : "cc", "er1", "er2", "er3", "memory");
+ "ldc r3l,ccr\n\t"
+ "sub.l %1,%1\n"
+ "2:"
+ : "+m" (*count),"=r"(count)
+ :
+ : "cc", "er1","er2", "er3");
return (int)count;
}
-#endif
/*
* Note! This is subtle. We jump to wake people up only if
@@ -271,7 +178,6 @@ static inline int down_trylock(struct semaphore * sem)
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
-#if defined(__H8300H__)
static inline void up(struct semaphore * sem)
{
register atomic_t *count asm("er0");
@@ -284,47 +190,19 @@ static inline void up(struct semaphore * sem)
__asm__ __volatile__(
"stc ccr,r3l\n\t"
"orc #0x80,ccr\n\t"
- "mov.l @%1,er1\n\t"
+ "mov.l %0,er1\n\t"
"inc.l #1,er1\n\t"
- "mov.l er1,@%1\n\t"
+ "mov.l er1,%0\n\t"
"ldc r3l,ccr\n\t"
"sub.l er2,er2\n\t"
"cmp.l er2,er1\n\t"
"bgt 1f\n\t"
"jsr @___up\n"
"1:"
- : "=m"(sem->count)
- : "r" (count)
- : "cc", "er1", "er2", "er3", "memory");
+ : "+m"(*count)
+ :
+ : "cc", "er1", "er2", "er3");
}
-#endif
-#if defined(__H8300S__)
-static inline void up(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc exr,r3l\n\t"
- "orc #0x07,exr\n\t"
- "mov.l @%1,er1\n\t"
- "inc.l #1,er1\n\t"
- "mov.l er1,@%1\n\t"
- "ldc r3l,exr\n\t"
- "sub.l er2,er2\n\t"
- "cmp.l er2,er1\n\t"
- "bgt 1f\n\t"
- "jsr @___up\n"
- "1:"
- : "=m"(sem->count)
- : "r" (count)
- : "cc", "er1", "er2", "er3", "memory");
-}
-#endif
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h
index 2c187ff8e348..c2d2457b138c 100644
--- a/include/asm-h8300/system.h
+++ b/include/asm-h8300/system.h
@@ -35,6 +35,7 @@
*
* H8/300 Porting 2002/09/04 Yoshinori Sato
*/
+
asmlinkage void resume(void);
#define switch_to(prev,next,last) { \
void *_last; \
@@ -52,7 +53,6 @@ asmlinkage void resume(void);
(last) = _last; \
}
-#if defined(__H8300H__)
#define __sti() asm volatile ("andc #0x7f,ccr")
#define __cli() asm volatile ("orc #0x80,ccr")
@@ -69,25 +69,6 @@ asmlinkage void resume(void);
((flags & 0x80) == 0x80); \
})
-#endif
-#if defined(__H8300S__)
-#define __sti() asm volatile ("andc #0xf8,exr")
-#define __cli() asm volatile ("orc #0x07,exr")
-
-#define __save_flags(x) \
- asm volatile ("stc exr,r0l\n\tmov.l er0,%0":"=r" (x) : : "er0")
-
-#define __restore_flags(x) \
- asm volatile ("mov.l %0,er0\n\tldc r0l,exr": :"r" (x) : "er0")
-#endif
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- __save_flags(flags); \
- ((flags & 0x07) == 0x07); \
-})
-
#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
/* For spinlocks etc */
diff --git a/include/asm-h8300/timex.h b/include/asm-h8300/timex.h
index 99a472819dc1..4ea243a11566 100644
--- a/include/asm-h8300/timex.h
+++ b/include/asm-h8300/timex.h
@@ -6,9 +6,9 @@
#ifndef _ASM_H8300_TIMEX_H
#define _ASM_H8300_TIMEX_H
-/* Such a description is OK ? */
-#define TIMEX_DEPEND_HEADER(target) <asm/target/timer_rate.h>
-#include TIMEX_DEPEND_HEADER(TARGET)
+#define H8300_TIMER_DEFINE
+#include <asm/machine-depend.h>
+#undef H8300_TIMER_DEFINE
#define CLOCK_TICK_RATE H8300_TIMER_FREQ
#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 350048b1f39e..6b56aa3eaa39 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -106,21 +106,37 @@
:"0"(n_hi), "1"(n_lo))
-#if defined(CONFIG_ACPI_BOOT) && defined(CONFIG_X86_LOCAL_APIC)
- extern int acpi_lapic;
-#else
- #define acpi_lapic 0
-#endif
+#ifdef CONFIG_ACPI_BOOT
+extern int acpi_lapic;
+extern int acpi_ioapic;
-#if defined(CONFIG_ACPI_BOOT) && defined(CONFIG_X86_IO_APIC)
- extern int acpi_ioapic;
-#else
- #define acpi_ioapic 0
-#endif
-#ifdef CONFIG_ACPI_BOOT
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4
+
+#ifdef CONFIG_X86_IO_APIC
+extern int skip_ioapic_setup;
+
+static inline void disable_ioapic_setup(void)
+{
+ skip_ioapic_setup = 1;
+}
+
+static inline int ioapic_setup_disabled(void)
+{
+ return skip_ioapic_setup;
+}
+
+#else
+static inline void disable_ioapic_setup(void)
+{ }
+
+#endif
+
+#else /* CONFIG_ACPI_BOOT */
+# define acpi_lapic 0
+# define acpi_ioapic 0
+
#endif
#ifdef CONFIG_ACPI_SLEEP
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
index c21ed08175d5..dab6aa34c6fa 100644
--- a/include/asm-i386/mach-bigsmp/mach_apic.h
+++ b/include/asm-i386/mach-bigsmp/mach_apic.h
@@ -86,7 +86,10 @@ extern u8 bios_cpu_apicid[];
static inline int cpu_present_to_apicid(int mps_cpu)
{
- return (int) bios_cpu_apicid[mps_cpu];
+ if (mps_cpu < NR_CPUS)
+ return (int)bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index aa7fd107c1c9..b744ac27f6fc 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -106,8 +106,10 @@ static inline int cpu_present_to_apicid(int mps_cpu)
{
if (!mps_cpu)
return boot_cpu_physical_apicid;
- else
+ else if (mps_cpu < NR_CPUS)
return (int) bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h
index 2f9f19237460..98b4e5921aa8 100644
--- a/include/asm-i386/mach-numaq/mach_apic.h
+++ b/include/asm-i386/mach-numaq/mach_apic.h
@@ -65,9 +65,17 @@ static inline int cpu_to_logical_apicid(int cpu)
return (int)cpu_2_logical_apicid[cpu];
}
+/*
+ * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
+ * cpu to APIC ID relation to properly interact with the intelligent
+ * mode of the cluster controller.
+ */
static inline int cpu_present_to_apicid(int mps_cpu)
{
- return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
+ if (mps_cpu < 60)
+ return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
+ else
+ return BAD_APICID;
}
static inline int generate_logical_apicid(int quad, int phys_apicid)
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index f79d5df55e1a..73a4a1077e85 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -87,7 +87,10 @@ static inline int cpu_to_logical_apicid(int cpu)
static inline int cpu_present_to_apicid(int mps_cpu)
{
- return (int) bios_cpu_apicid[mps_cpu];
+ if (mps_cpu < NR_CPUS)
+ return (int)bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index b596438496a1..a4ee37cade68 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -37,8 +37,14 @@ extern void mp_register_lapic_address (u64 address);
extern void mp_register_ioapic (u8 id, u32 address, u32 irq_base);
extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 global_irq);
extern void mp_config_acpi_legacy_irqs (void);
-extern void mp_config_ioapic_for_sci(int irq);
extern void mp_parse_prt (void);
+
+#ifdef CONFIG_X86_IO_APIC
+extern void mp_config_ioapic_for_sci(int irq);
+#else
+static inline void mp_config_ioapic_for_sci(int irq)
+{ }
+#endif
#endif /*CONFIG_ACPI_BOOT*/
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index f1114b6b5275..0d22ec30019b 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -38,7 +38,7 @@ struct saved_context {
extern void save_processor_state(void);
extern void restore_processor_state(void);
-extern void do_magic(int resume);
+extern int do_magic(int resume);
#ifdef CONFIG_ACPI_SLEEP
extern unsigned long saved_eip;
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 5b88749e54b2..f2e179d4bb76 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -42,7 +42,7 @@ ia64_atomic_add (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
- } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
@@ -56,7 +56,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
- } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
@@ -70,7 +70,7 @@ ia64_atomic_sub (int i, atomic_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
- } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
@@ -84,7 +84,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
- } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index af15c6694522..502f51a1a0ee 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -292,7 +292,7 @@ ffz (unsigned long x)
{
unsigned long result;
- __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1)));
+ result = ia64_popcnt(x & (~x - 1));
return result;
}
@@ -307,7 +307,7 @@ __ffs (unsigned long x)
{
unsigned long result;
- __asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x));
+ result = ia64_popcnt((x-1) & ~x);
return result;
}
@@ -323,7 +323,7 @@ ia64_fls (unsigned long x)
long double d = x;
long exp;
- __asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
+ exp = ia64_getf_exp(d);
return exp - 0xffff;
}
@@ -349,7 +349,7 @@ static __inline__ unsigned long
hweight64 (unsigned long x)
{
unsigned long result;
- __asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x));
+ result = ia64_popcnt(x);
return result;
}
diff --git a/include/asm-ia64/byteorder.h b/include/asm-ia64/byteorder.h
index a4e3abfc3477..434686fccb95 100644
--- a/include/asm-ia64/byteorder.h
+++ b/include/asm-ia64/byteorder.h
@@ -7,13 +7,14 @@
*/
#include <asm/types.h>
+#include <asm/intrinsics.h>
static __inline__ __const__ __u64
__ia64_swab64 (__u64 x)
{
__u64 result;
- __asm__ ("mux1 %0=%1,@rev" : "=r" (result) : "r" (x));
+ result = ia64_mux1(x, ia64_mux1_rev);
return result;
}
diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h
index 73a5edf825b8..8e316f179815 100644
--- a/include/asm-ia64/current.h
+++ b/include/asm-ia64/current.h
@@ -6,8 +6,12 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-/* In kernel mode, thread pointer (r13) is used to point to the
- current task structure. */
-register struct task_struct *current asm ("r13");
+#include <asm/intrinsics.h>
+
+/*
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
+#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
#endif /* _ASM_IA64_CURRENT_H */
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
index da812415f634..74c542acc1e8 100644
--- a/include/asm-ia64/delay.h
+++ b/include/asm-ia64/delay.h
@@ -5,7 +5,7 @@
* Delay routines using a pre-computed "cycles/usec" value.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
@@ -17,12 +17,14 @@
#include <linux/sched.h>
#include <linux/compiler.h>
+#include <asm/intrinsics.h>
#include <asm/processor.h>
static __inline__ void
ia64_set_itm (unsigned long val)
{
- __asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory");
+ ia64_setreg(_IA64_REG_CR_ITM, val);
+ ia64_srlz_d();
}
static __inline__ unsigned long
@@ -30,20 +32,23 @@ ia64_get_itm (void)
{
unsigned long result;
- __asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory");
+ result = ia64_getreg(_IA64_REG_CR_ITM);
+ ia64_srlz_d();
return result;
}
static __inline__ void
ia64_set_itv (unsigned long val)
{
- __asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory");
+ ia64_setreg(_IA64_REG_CR_ITV, val);
+ ia64_srlz_d();
}
static __inline__ void
ia64_set_itc (unsigned long val)
{
- __asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
+ ia64_setreg(_IA64_REG_AR_ITC, val);
+ ia64_srlz_d();
}
static __inline__ unsigned long
@@ -51,10 +56,13 @@ ia64_get_itc (void)
{
unsigned long result;
- __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+ result = ia64_getreg(_IA64_REG_AR_ITC);
+ ia64_barrier();
#ifdef CONFIG_ITANIUM
- while (unlikely((__s32) result == -1))
- __asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
+ while (unlikely((__s32) result == -1)) {
+ result = ia64_getreg(_IA64_REG_AR_ITC);
+ ia64_barrier();
+ }
#endif
return result;
}
@@ -62,15 +70,11 @@ ia64_get_itc (void)
static __inline__ void
__delay (unsigned long loops)
{
- unsigned long saved_ar_lc;
-
if (loops < 1)
return;
- __asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc));
- __asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1));
- __asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
- __asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
+ while (loops--)
+ ia64_nop(0);
}
static __inline__ void
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
new file mode 100644
index 000000000000..5175f0345555
--- /dev/null
+++ b/include/asm-ia64/gcc_intrin.h
@@ -0,0 +1,584 @@
+#ifndef _ASM_IA64_GCC_INTRIN_H
+#define _ASM_IA64_GCC_INTRIN_H
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ *
+ */
+
+/* define this macro to get some asm stmts included in 'c' files */
+#define ASM_SUPPORTED
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define ia64_barrier() asm volatile ("":::"memory")
+
+#define ia64_stop() asm volatile (";;"::)
+
+#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
+
+#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+
+extern void ia64_bad_param_for_setreg (void);
+extern void ia64_bad_param_for_getreg (void);
+
+#define ia64_setreg(regnum, val) \
+({ \
+ switch (regnum) { \
+ case _IA64_REG_PSR_L: \
+ asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
+ break; \
+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+ asm volatile ("mov ar%0=%1" :: \
+ "i" (regnum - _IA64_REG_AR_KR0), \
+ "r"(val): "memory"); \
+ break; \
+ case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+ asm volatile ("mov cr%0=%1" :: \
+ "i" (regnum - _IA64_REG_CR_DCR), \
+ "r"(val): "memory" ); \
+ break; \
+ case _IA64_REG_SP: \
+ asm volatile ("mov r12=%0" :: \
+ "r"(val): "memory"); \
+ break; \
+ case _IA64_REG_GP: \
+ asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
+ break; \
+ default: \
+ ia64_bad_param_for_setreg(); \
+ break; \
+ } \
+})
+
+#define ia64_getreg(regnum) \
+({ \
+ __u64 ia64_intri_res; \
+ \
+ switch (regnum) { \
+ case _IA64_REG_GP: \
+ asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_IP: \
+ asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_PSR: \
+ asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_TP: /* for current() */ \
+ { \
+ register __u64 ia64_r13 asm ("r13"); \
+ ia64_intri_res = ia64_r13; \
+ } \
+ break; \
+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+ asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
+ : "i"(regnum - _IA64_REG_AR_KR0)); \
+ break; \
+ case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+ asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
+ : "i" (regnum - _IA64_REG_CR_DCR)); \
+ break; \
+ case _IA64_REG_SP: \
+ asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
+ break; \
+ default: \
+ ia64_bad_param_for_getreg(); \
+ break; \
+ } \
+ ia64_intri_res; \
+})
+
+#define ia64_hint_pause 0
+
+#define ia64_hint(mode) \
+({ \
+ switch (mode) { \
+ case ia64_hint_pause: \
+ asm volatile ("hint @pause" ::: "memory"); \
+ break; \
+ } \
+})
+
+
+/* Integer values for mux1 instruction */
+#define ia64_mux1_brcst 0
+#define ia64_mux1_mix 8
+#define ia64_mux1_shuf 9
+#define ia64_mux1_alt 10
+#define ia64_mux1_rev 11
+
+#define ia64_mux1(x, mode) \
+({ \
+ __u64 ia64_intri_res; \
+ \
+ switch (mode) { \
+ case ia64_mux1_brcst: \
+ asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_mix: \
+ asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_shuf: \
+ asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_alt: \
+ asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_rev: \
+ asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ } \
+ ia64_intri_res; \
+})
+
+#define ia64_popcnt(x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_getf_exp(x) \
+({ \
+ long ia64_intri_res; \
+ \
+ asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_shrp(a, b, count) \
+({ \
+ __u64 ia64_intri_res; \
+ asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
+ ia64_intri_res; \
+})
+
+#define ia64_ldfs(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldfd(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldfe(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldf8(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldf_fill(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_stfs(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfd(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfe(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf8(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf_spill(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_fetchadd4_acq(p, inc) \
+({ \
+ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd4.acq %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd4_rel(p, inc) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd4.rel %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd8_acq(p, inc) \
+({ \
+ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd8.acq %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd8_rel(p, inc) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd8.rel %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg1(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg2(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg4(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg8(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_mf() asm volatile ("mf" ::: "memory")
+#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
+
+#define ia64_invala() asm volatile ("invala" ::: "memory")
+
+#define ia64_thash(addr) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
+
+#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
+
+#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
+
+#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_tpa(addr) \
+({ \
+ __u64 ia64_pa; \
+ asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
+ ia64_pa; \
+})
+
+#define __ia64_set_dbr(index, val) \
+ asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_ibr(index, val) \
+ asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pkr(index, val) \
+ asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmc(index, val) \
+ asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmd(index, val) \
+ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_rr(index, val) \
+ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+#define ia64_get_cpuid(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
+ ia64_intri_res; \
+})
+
+#define __ia64_get_dbr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_ibr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_pkr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_pmc(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+
+#define ia64_get_pmd(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_rr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+
+#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
+
+#define ia64_ptcga(addr, size) \
+ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptcl(addr, size) \
+ asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptri(addr, size) \
+ asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptrd(addr, size) \
+ asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+
+#define ia64_lfhint_none 0
+#define ia64_lfhint_nt1 1
+#define ia64_lfhint_nt2 2
+#define ia64_lfhint_nta 3
+
+#define ia64_lfetch(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_excl(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_fault(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_fault_excl(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_intrin_local_irq_restore(x) \
+do { \
+ asm volatile (" cmp.ne p6,p7=%0,r0;;" \
+ "(p6) ssm psr.i;" \
+ "(p7) rsm psr.i;;" \
+ "(p6) srlz.d" \
+ :: "r"((x)) : "p6", "p7", "memory"); \
+} while (0)
+
+#endif /* _ASM_IA64_GCC_INTRIN_H */
diff --git a/include/asm-ia64/ia64regs.h b/include/asm-ia64/ia64regs.h
new file mode 100644
index 000000000000..1757f1c11ad4
--- /dev/null
+++ b/include/asm-ia64/ia64regs.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2002,2003 Intel Corp.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+
+#ifndef _ASM_IA64_IA64REGS_H
+#define _ASM_IA64_IA64REGS_H
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
+/* Special Registers */
+
+#define _IA64_REG_IP 1016 /* getreg only */
+#define _IA64_REG_PSR 1019
+#define _IA64_REG_PSR_L 1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP 1025 /* R1 */
+#define _IA64_REG_R8 1032 /* R8 */
+#define _IA64_REG_R9 1033 /* R9 */
+#define _IA64_REG_SP 1036 /* R12 */
+#define _IA64_REG_TP 1037 /* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0 3072
+#define _IA64_REG_AR_KR1 3073
+#define _IA64_REG_AR_KR2 3074
+#define _IA64_REG_AR_KR3 3075
+#define _IA64_REG_AR_KR4 3076
+#define _IA64_REG_AR_KR5 3077
+#define _IA64_REG_AR_KR6 3078
+#define _IA64_REG_AR_KR7 3079
+#define _IA64_REG_AR_RSC 3088
+#define _IA64_REG_AR_BSP 3089
+#define _IA64_REG_AR_BSPSTORE 3090
+#define _IA64_REG_AR_RNAT 3091
+#define _IA64_REG_AR_FCR 3093
+#define _IA64_REG_AR_EFLAG 3096
+#define _IA64_REG_AR_CSD 3097
+#define _IA64_REG_AR_SSD 3098
+#define _IA64_REG_AR_CFLAG 3099
+#define _IA64_REG_AR_FSR 3100
+#define _IA64_REG_AR_FIR 3101
+#define _IA64_REG_AR_FDR 3102
+#define _IA64_REG_AR_CCV 3104
+#define _IA64_REG_AR_UNAT 3108
+#define _IA64_REG_AR_FPSR 3112
+#define _IA64_REG_AR_ITC 3116
+#define _IA64_REG_AR_PFS 3136
+#define _IA64_REG_AR_LC 3137
+#define _IA64_REG_AR_EC 3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR 4096
+#define _IA64_REG_CR_ITM 4097
+#define _IA64_REG_CR_IVA 4098
+#define _IA64_REG_CR_PTA 4104
+#define _IA64_REG_CR_IPSR 4112
+#define _IA64_REG_CR_ISR 4113
+#define _IA64_REG_CR_IIP 4115
+#define _IA64_REG_CR_IFA 4116
+#define _IA64_REG_CR_ITIR 4117
+#define _IA64_REG_CR_IIPA 4118
+#define _IA64_REG_CR_IFS 4119
+#define _IA64_REG_CR_IIM 4120
+#define _IA64_REG_CR_IHA 4121
+#define _IA64_REG_CR_LID 4160
+#define _IA64_REG_CR_IVR 4161 /* getreg only */
+#define _IA64_REG_CR_TPR 4162
+#define _IA64_REG_CR_EOI 4163
+#define _IA64_REG_CR_IRR0 4164 /* getreg only */
+#define _IA64_REG_CR_IRR1 4165 /* getreg only */
+#define _IA64_REG_CR_IRR2 4166 /* getreg only */
+#define _IA64_REG_CR_IRR3 4167 /* getreg only */
+#define _IA64_REG_CR_ITV 4168
+#define _IA64_REG_CR_PMV 4169
+#define _IA64_REG_CR_CMCV 4170
+#define _IA64_REG_CR_LRR0 4176
+#define _IA64_REG_CR_LRR1 4177
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
+#define _IA64_REG_INDR_DBR 9001
+#define _IA64_REG_INDR_IBR 9002
+#define _IA64_REG_INDR_PKR 9003
+#define _IA64_REG_INDR_PMC 9004
+#define _IA64_REG_INDR_PMD 9005
+#define _IA64_REG_INDR_RR 9006
+
+#endif /* _ASM_IA64_IA64REGS_H */
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
index 19408747bd17..743049ca0851 100644
--- a/include/asm-ia64/intrinsics.h
+++ b/include/asm-ia64/intrinsics.h
@@ -8,8 +8,17 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#ifndef __ASSEMBLY__
#include <linux/config.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
/*
* Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value.
@@ -21,13 +30,11 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \
switch (sz) { \
case 4: \
- __asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \
- : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
+ tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
break; \
\
case 8: \
- __asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \
- : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
+ tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
break; \
\
default: \
@@ -61,43 +68,39 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
(__typeof__(*(v))) (_tmp); /* return old value */ \
})
-#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */
+#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalid xchg().
*/
-extern void __xchg_called_with_bad_pointer (void);
-
-static __inline__ unsigned long
-__xchg (unsigned long x, volatile void *ptr, int size)
-{
- unsigned long result;
-
- switch (size) {
- case 1:
- __asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result)
- : "r" (ptr), "r" (x) : "memory");
- return result;
-
- case 2:
- __asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result)
- : "r" (ptr), "r" (x) : "memory");
- return result;
-
- case 4:
- __asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result)
- : "r" (ptr), "r" (x) : "memory");
- return result;
-
- case 8:
- __asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result)
- : "r" (ptr), "r" (x) : "memory");
- return result;
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
+extern void ia64_xchg_called_with_bad_pointer (void);
+
+#define __xchg(x,ptr,size) \
+({ \
+ unsigned long __xchg_result; \
+ \
+ switch (size) { \
+ case 1: \
+ __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ break; \
+ \
+ case 2: \
+ __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ break; \
+ \
+ case 4: \
+ __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ break; \
+ \
+ case 8: \
+ __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ break; \
+ default: \
+ ia64_xchg_called_with_bad_pointer(); \
+ } \
+ __xchg_result; \
+})
#define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
@@ -114,12 +117,10 @@ __xchg (unsigned long x, volatile void *ptr, int size)
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
-extern long __cmpxchg_called_with_bad_pointer(void);
+extern long ia64_cmpxchg_called_with_bad_pointer (void);
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
- __typeof__(ptr) _p_ = (ptr); \
- __typeof__(new) _n_ = (new); \
__u64 _o_, _r_; \
\
switch (size) { \
@@ -129,37 +130,32 @@ extern long __cmpxchg_called_with_bad_pointer(void);
case 8: _o_ = (__u64) (long) (old); break; \
default: break; \
} \
- __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
switch (size) { \
case 1: \
- __asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv" \
- : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
+ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
break; \
\
case 2: \
- __asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv" \
- : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
+ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
break; \
\
case 4: \
- __asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
- : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
+ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
break; \
\
case 8: \
- __asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv" \
- : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
+ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
break; \
\
default: \
- _r_ = __cmpxchg_called_with_bad_pointer(); \
+ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \
} \
(__typeof__(old)) _r_; \
})
-#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
@@ -171,7 +167,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
if (_cmpxchg_bugcheck_count-- <= 0) { \
void *ip; \
extern int printk(const char *fmt, ...); \
- asm ("mov %0=ip" : "=r"(ip)); \
+ ip = ia64_getreg(_IA64_REG_IP); \
printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
break; \
} \
@@ -181,4 +177,5 @@ extern long __cmpxchg_called_with_bad_pointer(void);
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+#endif
#endif /* _ASM_IA64_INTRINSICS_H */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 1297c6bba42b..297efb06c347 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -52,6 +52,7 @@ extern unsigned int num_io_spaces;
# ifdef __KERNEL__
+#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -85,7 +86,7 @@ phys_to_virt (unsigned long address)
* Memory fence w/accept. This should never be used in code that is
* not IA-64 specific.
*/
-#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
+#define __ia64_mf_a() ia64_mfa()
static inline const unsigned long
__ia64_get_io_port_base (void)
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index a277c8ff9595..471a2c91cd29 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -155,7 +155,7 @@ struct ia64_machine_vector {
ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
-};
+} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
{ \
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 95e786212982..0255260f61bc 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -158,9 +158,7 @@ reload_context (mm_context_t context)
ia64_set_rr(0x4000000000000000, rr2);
ia64_set_rr(0x6000000000000000, rr3);
ia64_set_rr(0x8000000000000000, rr4);
- ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
- ia64_insn_group_barrier();
}
static inline void
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 44b3f419c854..56f5c49a4e95 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -9,6 +9,7 @@
#include <linux/config.h>
+#include <asm/intrinsics.h>
#include <asm/types.h>
/*
@@ -143,7 +144,7 @@ get_order (unsigned long size)
double d = size - 1;
long order;
- __asm__ ("getf.exp %0=%1" : "=r"(order) : "f"(d));
+ order = ia64_getf_exp(d);
order = order - PAGE_SHIFT - 0xffff + 1;
if (order < 0)
order = 0;
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 5640226e8a15..e3152bc4fb39 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -822,10 +822,10 @@ ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector
/* Initialize the processor controlled caches */
static inline s64
-ia64_pal_cache_init (u64 level, u64 cache_type, u64 restrict)
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, restrict);
+ PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
return iprv.status;
}
diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h
index 26afeeb46ea6..b8e81aa3bffa 100644
--- a/include/asm-ia64/perfmon.h
+++ b/include/asm-ia64/perfmon.h
@@ -70,64 +70,70 @@ typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type
* Request structure used to define a context
*/
typedef struct {
- pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
- unsigned long ctx_flags; /* noblock/block */
- unsigned int ctx_nextra_sets; /* number of extra event sets (you always get 1) */
- int ctx_fd; /* return arg: unique identification for context */
- void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */
- unsigned long ctx_reserved[11]; /* for future use */
+ pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
+ unsigned long ctx_flags; /* noblock/block */
+ unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
+ unsigned short ctx_reserved1; /* for future use */
+ int ctx_fd; /* return arg: unique identification for context */
+ void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */
+ unsigned long ctx_reserved2[11];/* for future use */
} pfarg_context_t;
/*
* Request structure used to write/read a PMC or PMD
*/
typedef struct {
- unsigned int reg_num; /* which register */
- unsigned int reg_set; /* event set for this register */
+ unsigned int reg_num; /* which register */
+ unsigned short reg_set; /* event set for this register */
+ unsigned short reg_reserved1; /* for future use */
- unsigned long reg_value; /* initial pmc/pmd value */
- unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
+ unsigned long reg_value; /* initial pmc/pmd value */
+ unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
- unsigned long reg_long_reset; /* reset after buffer overflow notification */
- unsigned long reg_short_reset; /* reset after counter overflow */
+ unsigned long reg_long_reset; /* reset after buffer overflow notification */
+ unsigned long reg_short_reset; /* reset after counter overflow */
- unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
- unsigned long reg_random_seed; /* seed value when randomization is used */
- unsigned long reg_random_mask; /* bitmask used to limit random value */
- unsigned long reg_last_reset_val;/* return: PMD last reset value */
+ unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
+ unsigned long reg_random_seed; /* seed value when randomization is used */
+ unsigned long reg_random_mask; /* bitmask used to limit random value */
+ unsigned long reg_last_reset_val;/* return: PMD last reset value */
unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
- unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
+ unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
- unsigned long reserved[3]; /* for future use */
+ unsigned long reg_reserved2[3]; /* for future use */
} pfarg_reg_t;
typedef struct {
- unsigned int dbreg_num; /* which debug register */
- unsigned int dbreg_set; /* event set for this register */
- unsigned long dbreg_value; /* value for debug register */
- unsigned long dbreg_flags; /* return: dbreg error */
- unsigned long dbreg_reserved[1]; /* for future use */
+ unsigned int dbreg_num; /* which debug register */
+ unsigned short dbreg_set; /* event set for this register */
+ unsigned short dbreg_reserved1; /* for future use */
+ unsigned long dbreg_value; /* value for debug register */
+ unsigned long dbreg_flags; /* return: dbreg error */
+ unsigned long dbreg_reserved2[1]; /* for future use */
} pfarg_dbreg_t;
typedef struct {
unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
- unsigned int ft_reserved; /* reserved for future use */
- unsigned long reserved[4]; /* for future use */
+ unsigned int ft_reserved; /* reserved for future use */
+ unsigned long reserved[4]; /* for future use */
} pfarg_features_t;
typedef struct {
- pid_t load_pid; /* process to load the context into */
- unsigned int load_set; /* first event set to load */
- unsigned long load_reserved[2]; /* for future use */
+ pid_t load_pid; /* process to load the context into */
+ unsigned short load_set; /* first event set to load */
+ unsigned short load_reserved1; /* for future use */
+ unsigned long load_reserved2[3]; /* for future use */
} pfarg_load_t;
typedef struct {
int msg_type; /* generic message header */
int msg_ctx_fd; /* generic message header */
- unsigned long msg_tstamp; /* for perf tuning */
- unsigned int msg_active_set; /* active set at the time of overflow */
unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
+ unsigned short msg_active_set; /* active set at the time of overflow */
+ unsigned short msg_reserved1; /* for future use */
+ unsigned int msg_reserved2; /* for future use */
+ unsigned long msg_tstamp; /* for perf tuning/debug */
} pfm_ovfl_msg_t;
typedef struct {
@@ -192,25 +198,28 @@ extern void pfm_handle_work(void);
#define PFM_PMD_LONG_RESET 1
#define PFM_PMD_SHORT_RESET 2
-typedef struct {
- unsigned int notify_user:1; /* notify user program of overflow */
- unsigned int reset_pmds :2; /* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */
- unsigned int block:1; /* block monitored task on kernel exit */
- unsigned int stop_monitoring:1; /* will mask monitoring via PMCx.plm */
- unsigned int reserved:26; /* for future use */
+typedef union {
+ unsigned int val;
+ struct {
+ unsigned int notify_user:1; /* notify user program of overflow */
+ unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
+ unsigned int block_task:1; /* block monitored task on kernel exit */
+ unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
+ unsigned int reserved:28; /* for future use */
+ } bits;
} pfm_ovfl_ctrl_t;
typedef struct {
- unsigned long ovfl_pmds[4]; /* bitmask of overflowed pmds */
- unsigned long ovfl_notify[4]; /* bitmask of overflow pmds which asked for notification */
- unsigned long pmd_value; /* current 64-bit value of 1st pmd which overflowed */
- unsigned long pmd_last_reset; /* last reset value of 1st pmd which overflowed */
- unsigned long pmd_eventid; /* eventid associated with 1st pmd which overflowed */
- unsigned int active_set; /* event set active at the time of the overflow */
- unsigned int reserved1;
- unsigned long smpl_pmds[4];
- unsigned long smpl_pmds_values[PMU_MAX_PMDS];
- pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
+ unsigned char ovfl_pmd; /* index of overflowed PMD */
+ unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
+ unsigned short active_set; /* event set active at the time of the overflow */
+ pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
+
+ unsigned long pmd_last_reset; /* last reset value of of the PMD */
+ unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
+ unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
+ unsigned long pmd_value; /* current 64-bit value of the PMD */
+ unsigned long pmd_eventid; /* eventid associated with PMD */
} pfm_ovfl_arg_t;
@@ -223,7 +232,7 @@ typedef struct _pfm_buffer_fmt_t {
int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
- int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs);
+ int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
diff --git a/include/asm-ia64/perfmon_default_smpl.h b/include/asm-ia64/perfmon_default_smpl.h
index 77709625f96f..1c63c7cf7f49 100644
--- a/include/asm-ia64/perfmon_default_smpl.h
+++ b/include/asm-ia64/perfmon_default_smpl.h
@@ -16,7 +16,9 @@
*/
typedef struct {
unsigned long buf_size; /* size of the buffer in bytes */
- unsigned long reserved[3]; /* for future use */
+ unsigned int flags; /* buffer specific flags */
+ unsigned int res1; /* for future use */
+ unsigned long reserved[2]; /* for future use */
} pfm_default_smpl_arg_t;
/*
@@ -46,28 +48,27 @@ typedef struct {
/*
* Entry header in the sampling buffer. The header is directly followed
- * with the PMDs saved in increasing index order: PMD4, PMD5, .... How
- * many PMDs are present depends on how the session was programmed.
+ * with the values of the PMD registers of interest saved in increasing
+ * index order: PMD4, PMD5, and so on. How many PMDs are present depends
+ * on how the session was programmed.
*
- * XXX: in this version of the entry, only up to 64 registers can be
- * recorded. This should be enough for quite some time. Always check
- * sampling format before parsing entries!
+ * In the case where multiple counters overflow at the same time, multiple
+ * entries are written consecutively.
*
- * In the case where multiple counters overflow at the same time, the
- * last_reset_value member indicates the initial value of the
- * overflowed PMD with the smallest index. For instance, if PMD2 and
- * PMD5 have overflowed, the last_reset_value member contains the
- * initial value of PMD2.
+ * last_reset_value member indicates the initial value of the overflowed PMD.
*/
typedef struct {
- int pid; /* current process at PMU interrupt point */
- int cpu; /* cpu on which the overfow occured */
- unsigned long last_reset_val; /* initial value of 1st overflowed PMD */
- unsigned long ip; /* where did the overflow interrupt happened */
- unsigned long ovfl_pmds; /* which PMDS registers overflowed (64 max) */
- unsigned long tstamp; /* ar.itc on the CPU that took the overflow */
- unsigned int set; /* event set active when overflow ocurred */
- unsigned int reserved1; /* for future use */
+ int pid; /* active process at PMU interrupt point */
+ unsigned char reserved1[3]; /* reserved for future use */
+ unsigned char ovfl_pmd; /* index of overflowed PMD */
+
+ unsigned long last_reset_val; /* initial value of overflowed PMD */
+ unsigned long ip; /* where did the overflow interrupt happened */
+ unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
+
+ unsigned short cpu; /* cpu on which the overfow occured */
+ unsigned short set; /* event set active when overflow ocurred */
+ unsigned int reserved2; /* for future use */
} pfm_default_smpl_entry_t;
#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long) */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 669e44bf8012..c6b4af2b3643 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -15,8 +15,9 @@
#include <linux/config.h>
-#include <asm/ptrace.h>
+#include <asm/intrinsics.h>
#include <asm/kregs.h>
+#include <asm/ptrace.h>
#include <asm/ustack.h>
#define IA64_NUM_DBG_REGS 8
@@ -356,38 +357,41 @@ extern unsigned long get_wchan (struct task_struct *p);
/* Return stack pointer of blocked task TSK. */
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
-static inline unsigned long
-ia64_get_kr (unsigned long regnum)
-{
- unsigned long r = 0;
-
- switch (regnum) {
- case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
- case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break;
- case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break;
- case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break;
- case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break;
- case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break;
- case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break;
- case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break;
- }
- return r;
-}
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum) \
+({ \
+ unsigned long r = 0; \
+ \
+ switch (regnum) { \
+ case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
+ case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
+ case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
+ case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
+ case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
+ case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
+ case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
+ case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
+ default: ia64_getreg_unknown_kr(); break; \
+ } \
+ r; \
+})
-static inline void
-ia64_set_kr (unsigned long regnum, unsigned long r)
-{
- switch (regnum) {
- case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break;
- case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break;
- case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break;
- case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break;
- case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break;
- case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break;
- case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break;
- case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break;
- }
-}
+#define ia64_set_kr(regnum, r) \
+({ \
+ switch (regnum) { \
+ case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
+ case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
+ case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
+ case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
+ case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
+ case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
+ case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
+ case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
+ default: ia64_setreg_unknown_kr(); break; \
+ } \
+})
/*
* The following three macros can't be inline functions because we don't have struct
@@ -423,8 +427,8 @@ extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif
-#define ia64_fph_enable() asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
-#define ia64_fph_disable() asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
+#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
/* load fp 0.0 into fph */
static inline void
@@ -450,78 +454,14 @@ ia64_load_fpu (struct ia64_fpreg *fph) {
ia64_fph_disable();
}
-static inline void
-ia64_fc (void *addr)
-{
- asm volatile ("fc %0" :: "r"(addr) : "memory");
-}
-
-static inline void
-ia64_sync_i (void)
-{
- asm volatile (";; sync.i" ::: "memory");
-}
-
-static inline void
-ia64_srlz_i (void)
-{
- asm volatile (";; srlz.i ;;" ::: "memory");
-}
-
-static inline void
-ia64_srlz_d (void)
-{
- asm volatile (";; srlz.d" ::: "memory");
-}
-
-static inline __u64
-ia64_get_rr (__u64 reg_bits)
-{
- __u64 r;
- asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
- return r;
-}
-
-static inline void
-ia64_set_rr (__u64 reg_bits, __u64 rr_val)
-{
- asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
-}
-
-static inline __u64
-ia64_get_dcr (void)
-{
- __u64 r;
- asm volatile ("mov %0=cr.dcr" : "=r"(r));
- return r;
-}
-
-static inline void
-ia64_set_dcr (__u64 val)
-{
- asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
- ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_lid (void)
-{
- __u64 r;
- asm volatile ("mov %0=cr.lid" : "=r"(r));
- return r;
-}
-
-static inline void
-ia64_invala (void)
-{
- asm volatile ("invala" ::: "memory");
-}
-
static inline __u64
ia64_clear_ic (void)
{
__u64 psr;
- asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
+ psr = ia64_getreg(_IA64_REG_PSR);
+ ia64_stop();
+ ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+ ia64_srlz_i();
return psr;
}
@@ -531,7 +471,9 @@ ia64_clear_ic (void)
static inline void
ia64_set_psr (__u64 psr)
{
- asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
+ ia64_stop();
+ ia64_setreg(_IA64_REG_PSR_L, psr);
+ ia64_srlz_d();
}
/*
@@ -543,14 +485,13 @@ ia64_itr (__u64 target_mask, __u64 tr_num,
__u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
- asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
- asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
if (target_mask & 0x1)
- asm volatile ("itr.i itr[%0]=%1"
- :: "r"(tr_num), "r"(pte) : "memory");
+ ia64_itri(tr_num, pte);
if (target_mask & 0x2)
- asm volatile (";;itr.d dtr[%0]=%1"
- :: "r"(tr_num), "r"(pte) : "memory");
+ ia64_itrd(tr_num, pte);
}
/*
@@ -561,13 +502,14 @@ static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
- asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
- asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
/* as per EAS2.6, itc must be the last instruction in an instruction group */
if (target_mask & 0x1)
- asm volatile ("itc.i %0;;" :: "r"(pte) : "memory");
+ ia64_itci(pte);
if (target_mask & 0x2)
- asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory");
+ ia64_itcd(pte);
}
/*
@@ -578,16 +520,17 @@ static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
if (target_mask & 0x1)
- asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+ ia64_ptri(vmaddr, (log_size << 2));
if (target_mask & 0x2)
- asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
+ ia64_ptrd(vmaddr, (log_size << 2));
}
/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
- asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
+ ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+ ia64_srlz_i();
}
/* Set the page table address and control bits. */
@@ -595,79 +538,33 @@ static inline void
ia64_set_pta (__u64 pta)
{
/* Note: srlz.i implies srlz.d */
- asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
-}
-
-static inline __u64
-ia64_get_cpuid (__u64 regnum)
-{
- __u64 r;
-
- asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
- return r;
+ ia64_setreg(_IA64_REG_CR_PTA, pta);
+ ia64_srlz_i();
}
static inline void
ia64_eoi (void)
{
- asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
+ ia64_setreg(_IA64_REG_CR_EOI, 0);
+ ia64_srlz_d();
}
-static inline void
-ia64_set_lrr0 (unsigned long val)
-{
- asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
-}
+#define cpu_relax() ia64_hint(ia64_hint_pause)
static inline void
-ia64_hint_pause (void)
+ia64_set_lrr0 (unsigned long val)
{
- asm volatile ("hint @pause" ::: "memory");
+ ia64_setreg(_IA64_REG_CR_LRR0, val);
+ ia64_srlz_d();
}
-#define cpu_relax() ia64_hint_pause()
-
static inline void
ia64_set_lrr1 (unsigned long val)
{
- asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory");
-}
-
-static inline void
-ia64_set_pmv (__u64 val)
-{
- asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
-}
-
-static inline __u64
-ia64_get_pmc (__u64 regnum)
-{
- __u64 retval;
-
- asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
- return retval;
-}
-
-static inline void
-ia64_set_pmc (__u64 regnum, __u64 value)
-{
- asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
-}
-
-static inline __u64
-ia64_get_pmd (__u64 regnum)
-{
- __u64 retval;
-
- asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
- return retval;
+ ia64_setreg(_IA64_REG_CR_LRR1, val);
+ ia64_srlz_d();
}
-static inline void
-ia64_set_pmd (__u64 regnum, __u64 value)
-{
- asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
-}
/*
* Given the address to which a spill occurred, return the unat bit
@@ -713,199 +610,58 @@ thread_saved_pc (struct task_struct *t)
* Get the current instruction/program counter value.
*/
#define current_text_addr() \
- ({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; })
-
-/*
- * Set the correctable machine check vector register
- */
-static inline void
-ia64_set_cmcv (__u64 val)
-{
- asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
-}
-
-/*
- * Read the correctable machine check vector register
- */
-static inline __u64
-ia64_get_cmcv (void)
-{
- __u64 val;
-
- asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
- return val;
-}
+ ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
static inline __u64
ia64_get_ivr (void)
{
__u64 r;
- asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
- return r;
-}
-
-static inline void
-ia64_set_tpr (__u64 val)
-{
- asm volatile ("mov cr.tpr=%0" :: "r"(val));
-}
-
-static inline __u64
-ia64_get_tpr (void)
-{
- __u64 r;
- asm volatile ("mov %0=cr.tpr" : "=r"(r));
- return r;
-}
-
-static inline void
-ia64_set_irr0 (__u64 val)
-{
- asm volatile("mov cr.irr0=%0;;" :: "r"(val) : "memory");
ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr0 (void)
-{
- __u64 val;
-
- /* this is volatile because irr may change unbeknownst to gcc... */
- asm volatile("mov %0=cr.irr0" : "=r"(val));
- return val;
-}
-
-static inline void
-ia64_set_irr1 (__u64 val)
-{
- asm volatile("mov cr.irr1=%0;;" :: "r"(val) : "memory");
+ r = ia64_getreg(_IA64_REG_CR_IVR);
ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr1 (void)
-{
- __u64 val;
-
- /* this is volatile because irr may change unbeknownst to gcc... */
- asm volatile("mov %0=cr.irr1" : "=r"(val));
- return val;
-}
-
-static inline void
-ia64_set_irr2 (__u64 val)
-{
- asm volatile("mov cr.irr2=%0;;" :: "r"(val) : "memory");
- ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr2 (void)
-{
- __u64 val;
-
- /* this is volatile because irr may change unbeknownst to gcc... */
- asm volatile("mov %0=cr.irr2" : "=r"(val));
- return val;
-}
-
-static inline void
-ia64_set_irr3 (__u64 val)
-{
- asm volatile("mov cr.irr3=%0;;" :: "r"(val) : "memory");
- ia64_srlz_d();
-}
-
-static inline __u64
-ia64_get_irr3 (void)
-{
- __u64 val;
-
- /* this is volatile because irr may change unbeknownst to gcc... */
- asm volatile ("mov %0=cr.irr3" : "=r"(val));
- return val;
-}
-
-static inline __u64
-ia64_get_gp(void)
-{
- __u64 val;
-
- asm ("mov %0=gp" : "=r"(val));
- return val;
-}
-
-static inline void
-ia64_set_ibr (__u64 regnum, __u64 value)
-{
- asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
+ return r;
}
static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
- asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
+ __ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
- asm volatile (";; srlz.d");
+ ia64_srlz_d();
#endif
}
static inline __u64
-ia64_get_ibr (__u64 regnum)
-{
- __u64 retval;
-
- asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum));
- return retval;
-}
-
-static inline __u64
ia64_get_dbr (__u64 regnum)
{
__u64 retval;
- asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum));
+ retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
- asm volatile (";; srlz.d");
+ ia64_srlz_d();
#endif
return retval;
}
/* XXX remove the handcoded version once we have a sufficiently clever compiler... */
#ifdef SMART_COMPILER
-# define ia64_rotr(w,n) \
- ({ \
- __u64 _w = (w), _n = (n); \
- \
- (_w >> _n) | (_w << (64 - _n)); \
+# define ia64_rotr(w,n) \
+ ({ \
+ __u64 __ia64_rotr_w = (w), _n = (n); \
+ \
+ (__ia64_rotr_w >> _n) | (__ia64_rotr_w << (64 - _n)); \
})
#else
-# define ia64_rotr(w,n) \
- ({ \
- __u64 result; \
- asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n)); \
- result; \
+# define ia64_rotr(w,n) \
+ ({ \
+ __u64 __ia64_rotr_w; \
+ __ia64_rotr_w = ia64_shrp((w), (w), (n)); \
+ __ia64_rotr_w; \
})
#endif
#define ia64_rotl(w,n) ia64_rotr((w),(64)-(n))
-static inline __u64
-ia64_thash (__u64 addr)
-{
- __u64 result;
- asm ("thash %0=%1" : "=r"(result) : "r" (addr));
- return result;
-}
-
-static inline __u64
-ia64_tpa (__u64 addr)
-{
- __u64 result;
- asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
- return result;
-}
-
/*
* Take a mapped kernel address and return the equivalent address
* in the region 7 identity mapped virtual area.
@@ -914,7 +670,7 @@ static inline void *
ia64_imva (void *addr)
{
void *result;
- asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
+ result = (void *) ia64_tpa(addr);
return __va(result);
}
@@ -926,13 +682,13 @@ ia64_imva (void *addr)
static inline void
prefetch (const void *x)
{
- __asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
+ ia64_lfetch(ia64_lfhint_none, x);
}
static inline void
prefetchw (const void *x)
{
- __asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
+ ia64_lfetch_excl(ia64_lfhint_none, x);
}
#define spin_lock_prefetch(x) prefetchw(x)
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index b0427fa5bccf..6ece5061dc19 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -23,6 +23,8 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <asm/intrinsics.h>
+
/*
* the semaphore definition
*/
@@ -81,9 +83,8 @@ init_rwsem (struct rw_semaphore *sem)
static inline void
__down_read (struct rw_semaphore *sem)
{
- int result;
- __asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
- "=r"(result) : "r"(&sem->count) : "memory");
+ int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+
if (result < 0)
rwsem_down_read_failed(sem);
}
@@ -111,9 +112,8 @@ __down_write (struct rw_semaphore *sem)
static inline void
__up_read (struct rw_semaphore *sem)
{
- int result;
- __asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
- "=r"(result) : "r"(&sem->count) : "memory");
+ int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+
if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index a2c7f1c09050..855c24712736 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -804,6 +804,10 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
extern unsigned long sal_platform_features;
+struct sal_ret_values {
+ long r8; long r9; long r10; long r11;
+};
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h
index 16de6f10c1e1..eca7d714a8fb 100644
--- a/include/asm-ia64/siginfo.h
+++ b/include/asm-ia64/siginfo.h
@@ -79,7 +79,6 @@ typedef struct siginfo {
* si_code is non-zero and __ISR_VALID is set in si_flags.
*/
#define si_isr _sifields._sigfault._isr
-#define si_pfm_ovfl _sifields._sigprof._pfm_ovfl_counters
/*
* Flag values for si_flags:
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 0f114d98c2ee..adb4716f352c 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -106,7 +106,7 @@ hard_smp_processor_id (void)
unsigned long bits;
} lid;
- lid.bits = ia64_get_lid();
+ lid.bits = ia64_getreg(_IA64_REG_CR_LID);
return lid.f.id << 8 | lid.f.eid;
}
diff --git a/include/asm-ia64/sn/sn2/io.h b/include/asm-ia64/sn/sn2/io.h
index fc30f1f4c5c8..3a3b1e214164 100644
--- a/include/asm-ia64/sn/sn2/io.h
+++ b/include/asm-ia64/sn/sn2/io.h
@@ -11,11 +11,23 @@
extern void * sn_io_addr(unsigned long port); /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */
+#include <asm/intrinsics.h>
-#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
+#define __sn_mf_a() ia64_mfa()
extern void sn_dma_flush(unsigned long);
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
+
/*
* The following routines are SN Platform specific, called when
* a reference is made to inX/outX set macros. SN Platform
@@ -26,7 +38,7 @@ extern void sn_dma_flush(unsigned long);
*/
static inline unsigned int
-__sn_inb (unsigned long port)
+___sn_inb (unsigned long port)
{
volatile unsigned char *addr;
unsigned char ret = -1;
@@ -40,7 +52,7 @@ __sn_inb (unsigned long port)
}
static inline unsigned int
-__sn_inw (unsigned long port)
+___sn_inw (unsigned long port)
{
volatile unsigned short *addr;
unsigned short ret = -1;
@@ -54,7 +66,7 @@ __sn_inw (unsigned long port)
}
static inline unsigned int
-__sn_inl (unsigned long port)
+___sn_inl (unsigned long port)
{
volatile unsigned int *addr;
unsigned int ret = -1;
@@ -68,7 +80,7 @@ __sn_inl (unsigned long port)
}
static inline void
-__sn_outb (unsigned char val, unsigned long port)
+___sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr;
@@ -79,7 +91,7 @@ __sn_outb (unsigned char val, unsigned long port)
}
static inline void
-__sn_outw (unsigned short val, unsigned long port)
+___sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr;
@@ -90,7 +102,7 @@ __sn_outw (unsigned short val, unsigned long port)
}
static inline void
-__sn_outl (unsigned int val, unsigned long port)
+___sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr;
@@ -110,7 +122,7 @@ __sn_outl (unsigned int val, unsigned long port)
*/
static inline unsigned char
-__sn_readb (void *addr)
+___sn_readb (void *addr)
{
unsigned char val;
@@ -121,7 +133,7 @@ __sn_readb (void *addr)
}
static inline unsigned short
-__sn_readw (void *addr)
+___sn_readw (void *addr)
{
unsigned short val;
@@ -132,7 +144,7 @@ __sn_readw (void *addr)
}
static inline unsigned int
-__sn_readl (void *addr)
+___sn_readl (void *addr)
{
unsigned int val;
@@ -143,7 +155,7 @@ __sn_readl (void *addr)
}
static inline unsigned long
-__sn_readq (void *addr)
+___sn_readq (void *addr)
{
unsigned long val;
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h
index 74dd5a6d2460..a2831ceb16a8 100644
--- a/include/asm-ia64/sn/sn_cpuid.h
+++ b/include/asm-ia64/sn/sn_cpuid.h
@@ -89,7 +89,7 @@
#ifndef CONFIG_SMP
#define cpu_logical_id(cpu) 0
-#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff)
+#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#endif
/*
@@ -98,8 +98,8 @@
*/
#define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff)
#define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3)
-#define get_nasid() ((ia64_get_lid() >> 16) & 0xfff)
-#define get_slice() ((ia64_get_lid() >> 28) & 0xf)
+#define get_nasid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff)
+#define get_slice() ((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf)
#define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff)
/*
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 3c0d89837b02..3a5f08f4c6f2 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -9,11 +9,13 @@
* This file is used for SMP configurations only.
*/
+#include <linux/compiler.h>
#include <linux/kernel.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/intrinsics.h>
+#include <asm/system.h>
typedef struct {
volatile unsigned int lock;
@@ -102,8 +104,8 @@ typedef struct {
do { \
rwlock_t *__read_lock_ptr = (rw); \
\
- while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \
- ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \
+ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
+ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
while (*(volatile int *)__read_lock_ptr < 0) \
cpu_relax(); \
} \
@@ -112,7 +114,7 @@ do { \
#define _raw_read_unlock(rw) \
do { \
rwlock_t *__read_lock_ptr = (rw); \
- ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \
+ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#define _raw_write_lock(rw) \
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index f4951838e69d..c0a638402858 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -55,12 +55,6 @@ extern struct ia64_boot_param {
__u64 initrd_size;
} *ia64_boot_param;
-static inline void
-ia64_insn_group_barrier (void)
-{
- __asm__ __volatile__ (";;" ::: "memory");
-}
-
/*
* Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all
@@ -83,7 +77,7 @@ ia64_insn_group_barrier (void)
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
-#define mb() __asm__ __volatile__ ("mf" ::: "memory")
+#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
@@ -119,22 +113,26 @@ ia64_insn_group_barrier (void)
/* clearing psr.i is implicitly serialized (visible by next insn) */
/* setting psr.i requires data serialization */
-#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \
- "rsm psr.i;;" \
- : "=r" (x) :: "memory")
-#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
-#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \
- "(p6) ssm psr.i;" \
- "(p7) rsm psr.i;;" \
- "(p6) srlz.d" \
- :: "r" ((x) & IA64_PSR_I) \
- : "p6", "p7", "memory")
+#define __local_irq_save(x) \
+do { \
+ (x) = ia64_getreg(_IA64_REG_PSR); \
+ ia64_stop(); \
+ ia64_rsm(IA64_PSR_I); \
+} while (0)
+
+#define __local_irq_disable() \
+do { \
+ ia64_stop(); \
+ ia64_rsm(IA64_PSR_I); \
+} while (0)
+
+#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
#ifdef CONFIG_IA64_DEBUG_IRQ
extern unsigned long last_cli_ip;
-# define __save_ip() __asm__ ("mov %0=ip" : "=r" (last_cli_ip))
+# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
# define local_irq_save(x) \
do { \
@@ -164,14 +162,14 @@ do { \
# define local_irq_restore(x) __local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */
-#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
-#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
+#define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags) ((flags) = ia64_getreg(_IA64_REG_PSR))
#define irqs_disabled() \
({ \
- unsigned long flags; \
- local_save_flags(flags); \
- (flags & IA64_PSR_I) == 0; \
+ unsigned long __ia64_id_flags; \
+ local_save_flags(__ia64_id_flags); \
+ (__ia64_id_flags & IA64_PSR_I) == 0; \
})
#ifdef __KERNEL__
diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h
index 5bf5bd8f148e..414aae060440 100644
--- a/include/asm-ia64/timex.h
+++ b/include/asm-ia64/timex.h
@@ -10,6 +10,7 @@
* Also removed cacheflush_time as it's entirely unused.
*/
+#include <asm/intrinsics.h>
#include <asm/processor.h>
typedef unsigned long cycles_t;
@@ -32,7 +33,7 @@ get_cycles (void)
{
cycles_t ret;
- __asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret));
+ ret = ia64_getreg(_IA64_REG_AR_ITC);
return ret;
}
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index dd49222e8f08..049c69845b23 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -10,6 +10,7 @@
#include <linux/mm.h>
+#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
@@ -77,7 +78,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
if (vma->vm_mm == current->active_mm)
- asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+ ia64_ptcl(addr, (PAGE_SHIFT << 2));
else
vma->vm_mm->context = 0;
#endif
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 09325eb6503d..f65623c70fb1 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -334,73 +334,20 @@ waitpid (int pid, int * wait_stat, int flags)
}
-static inline int
-execve (const char *filename, char *const av[], char *const ep[])
-{
- register long r8 asm("r8");
- register long r10 asm("r10");
- register long r15 asm("r15") = __NR_execve;
- register long out0 asm("out0") = (long)filename;
- register long out1 asm("out1") = (long)av;
- register long out2 asm("out2") = (long)ep;
-
- asm volatile ("break " __stringify(__BREAK_SYSCALL) ";;\n\t"
- : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1), "=r" (out2)
- : "2" (r15), "3" (out0), "4" (out1), "5" (out2)
- : "memory", "out3", "out4", "out5", "out6", "out7",
- /* Non-stacked integer registers, minus r8, r10, r15, r13 */
- "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
- "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
- "r28", "r29", "r30", "r31",
- /* Predicate registers. */
- "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
- /* Non-rotating fp registers. */
- "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
- /* Branch registers. */
- "b6", "b7" );
- return r8;
-}
-
-static inline pid_t
-clone (unsigned long flags, void *sp)
-{
- register long r8 asm("r8");
- register long r10 asm("r10");
- register long r15 asm("r15") = __NR_clone;
- register long out0 asm("out0") = (long)flags;
- register long out1 asm("out1") = (long)sp;
- long retval;
-
- /* clone clobbers current, hence the "r13" in the clobbers list */
- asm volatile ( "break " __stringify(__BREAK_SYSCALL) ";;\n\t"
- : "=r" (r8), "=r" (r10), "=r" (r15), "=r" (out0), "=r" (out1)
- : "2" (r15), "3" (out0), "4" (out1)
- : "memory", "out2", "out3", "out4", "out5", "out6", "out7", "r13",
- /* Non-stacked integer registers, minus r8, r10, r15, r13 */
- "r2", "r3", "r9", "r11", "r12", "r14", "r16", "r17", "r18",
- "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",
- "r28", "r29", "r30", "r31",
- /* Predicate registers. */
- "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
- /* Non-rotating fp registers. */
- "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
- /* Branch registers. */
- "b6", "b7" );
- retval = r8;
- return retval;;
-
-}
+extern int execve (const char *filename, char *const av[], char *const ep[]);
+extern pid_t clone (unsigned long flags, void *sp);
#endif /* __KERNEL_SYSCALLS__ */
/*
* "Conditional" syscalls
*
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on
- * all toolchains, so we just do it by hand. Note, this macro can only be used in the
- * file which defines sys_ni_syscall, i.e., in kernel/sys.c.
+ * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
+ * kernel/sys.c. This version causes warnings because the declaration isn't a
+ * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
+ * declarations have prototypes at the moment.
*/
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall")));
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3fd526160f1a..94a0f27e331c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -373,6 +373,11 @@ extern int acpi_mp_config;
#define acpi_mp_config 0
+static inline int acpi_boot_init(void)
+{
+ return 0;
+}
+
#endif /*!CONFIG_ACPI_BOOT*/
@@ -423,6 +428,13 @@ int ec_write(u8 addr, u8 val);
int acpi_blacklisted(void);
+#else
+
+static inline int acpi_blacklisted(void)
+{
+ return 0;
+}
+
#endif /*CONFIG_ACPI*/
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/device.h b/include/linux/device.h
index 7b49400adf31..8d6266f2e3c3 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -58,7 +58,8 @@ struct bus_type {
struct device * (*add) (struct device * parent, char * bus_id);
int (*hotplug) (struct device *dev, char **envp,
int num_envp, char *buffer, int buffer_size);
-
+ int (*suspend)(struct device * dev, u32 state);
+ int (*resume)(struct device * dev);
};
extern int bus_register(struct bus_type * bus);
@@ -372,8 +373,6 @@ extern struct bus_type platform_bus_type;
extern struct device legacy_bus;
/* drivers/base/power.c */
-extern int device_suspend(u32 state, u32 level);
-extern void device_resume(u32 level);
extern void device_shutdown(void);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 82ca6da75b3f..a3ee36b438ca 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1241,8 +1241,6 @@ typedef struct ide_driver_s {
#define DRIVER(drive) ((drive)->driver)
extern int generic_ide_ioctl(struct block_device *, unsigned, unsigned long);
-extern int generic_ide_suspend(struct device *dev, u32 state, u32 level);
-extern int generic_ide_resume(struct device *dev, u32 level);
/*
* IDE modules.
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index c2b4fd735f40..9555dd4d69fc 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -92,7 +92,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char * buf, size_t count, loff_
* Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
* updating *offset appropriately. Returns bytes written or -EFAULT.
*/
-ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset);
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char * buf, size_t count, loff_t * offset);
/**
* Read an ASCII string for a number from a userspace buffer and fill *val on success.
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index fec8ad62b567..d97edad0effc 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -45,7 +45,7 @@ struct tc_stats
struct tc_estimator
{
- char interval;
+ signed char interval;
unsigned char ewma_log;
};
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e4c795f71cea..3017bdef5f03 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -186,9 +186,46 @@ static inline void pm_dev_idle(struct pm_dev *dev) {}
#endif /* CONFIG_PM */
+
+/*
+ * Callbacks for platform drivers to implement.
+ */
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
+enum {
+ PM_SUSPEND_ON,
+ PM_SUSPEND_STANDBY,
+ PM_SUSPEND_MEM,
+ PM_SUSPEND_DISK,
+ PM_SUSPEND_MAX,
+};
+
+enum {
+ PM_DISK_FIRMWARE = 1,
+ PM_DISK_PLATFORM,
+ PM_DISK_SHUTDOWN,
+ PM_DISK_REBOOT,
+ PM_DISK_MAX,
+};
+
+
+struct pm_ops {
+ u32 pm_disk_mode;
+ int (*prepare)(u32 state);
+ int (*enter)(u32 state);
+ int (*finish)(u32 state);
+};
+
+extern void pm_set_ops(struct pm_ops *);
+
+extern int pm_suspend(u32 state);
+
+
+/*
+ * Device power management
+ */
+
struct device;
struct dev_pm_info {
@@ -203,10 +240,10 @@ struct dev_pm_info {
extern void device_pm_set_parent(struct device * dev, struct device * parent);
-extern int device_pm_suspend(u32 state);
-extern int device_pm_power_down(u32 state);
-extern void device_pm_power_up(void);
-extern void device_pm_resume(void);
+extern int device_suspend(u32 state);
+extern int device_power_down(u32 state);
+extern void device_power_up(void);
+extern void device_resume(void);
#endif /* __KERNEL__ */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 28788d8a65ff..132db86c961a 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -8,8 +8,7 @@
#include <linux/notifier.h>
#include <linux/config.h>
#include <linux/init.h>
-
-extern unsigned char software_suspend_enabled;
+#include <linux/pm.h>
#ifdef CONFIG_SOFTWARE_SUSPEND
/* page backup entry */
@@ -46,22 +45,9 @@ extern int shrink_mem(void);
/* mm/page_alloc.c */
extern void drain_local_pages(void);
-/* kernel/suspend.c */
-extern int software_suspend(void);
-
-extern int register_suspend_notifier(struct notifier_block *);
-extern int unregister_suspend_notifier(struct notifier_block *);
-
extern unsigned int nr_copy_pages __nosavedata;
extern suspend_pagedir_t *pagedir_nosave __nosavedata;
-/* Communication between kernel/suspend.c and arch/i386/suspend.c */
-
-extern void do_magic_resume_1(void);
-extern void do_magic_resume_2(void);
-extern void do_magic_suspend_1(void);
-extern void do_magic_suspend_2(void);
-
/* Communication between acpi and arch/i386/suspend.c */
extern void do_suspend_lowlevel(int resume);
@@ -72,32 +58,17 @@ static inline int software_suspend(void)
{
return -EPERM;
}
-#define register_suspend_notifier(a) do { } while(0)
-#define unregister_suspend_notifier(a) do { } while(0)
#endif /* CONFIG_SOFTWARE_SUSPEND */
#ifdef CONFIG_PM
extern void refrigerator(unsigned long);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
#else
static inline void refrigerator(unsigned long flag)
{
}
-static inline int freeze_processes(void)
-{
- return 0;
-}
-static inline void thaw_processes(void)
-{
-
-}
#endif /* CONFIG_PM */
#endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
index 4bc3e22b5104..2a90db8d41de 100644
--- a/include/linux/sysdev.h
+++ b/include/linux/sysdev.h
@@ -31,10 +31,8 @@ struct sysdev_class {
/* Default operations for these types of devices */
int (*shutdown)(struct sys_device *);
- int (*save)(struct sys_device *, u32 state);
int (*suspend)(struct sys_device *, u32 state);
int (*resume)(struct sys_device *);
- int (*restore)(struct sys_device *);
struct kset kset;
};
@@ -52,10 +50,8 @@ struct sysdev_driver {
int (*add)(struct sys_device *);
int (*remove)(struct sys_device *);
int (*shutdown)(struct sys_device *);
- int (*save)(struct sys_device *, u32 state);
int (*suspend)(struct sys_device *, u32 state);
int (*resume)(struct sys_device *);
- int (*restore)(struct sys_device *);
};
diff --git a/kernel/cpufreq.c b/kernel/cpufreq.c
index 3f2f06b6bead..7f80c321c785 100644
--- a/kernel/cpufreq.c
+++ b/kernel/cpufreq.c
@@ -469,28 +469,36 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
}
/**
- * cpufreq_restore - restore the CPU clock frequency after resume
+ * cpufreq_resume - restore the CPU clock frequency after resume
*
* Restore the CPU clock frequency so that our idea of the current
* frequency reflects the actual hardware.
*/
-static int cpufreq_restore(struct sys_device * sysdev)
+static int cpufreq_resume(struct sys_device * sysdev)
{
int cpu = sysdev->id;
unsigned int ret = 0;
- struct cpufreq_policy policy;
struct cpufreq_policy *cpu_policy;
if (!cpu_online(cpu))
return 0;
- cpu_policy = cpufreq_cpu_get(cpu);
+ /* we may be lax here as interrupts are off. Nonetheless
+ * we need to grab the correct cpu policy, as to check
+ * whether we really run on this CPU.
+ */
- down(&cpu_policy->lock);
- memcpy(&policy, cpu_policy, sizeof(struct cpufreq_policy));
- up(&cpu_policy->lock);
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ return -EINVAL;
- ret = cpufreq_set_policy(&policy);
+ if (cpufreq_driver->setpolicy)
+ ret = cpufreq_driver->setpolicy(cpu_policy);
+ else
+ /* CPUFREQ_RELATION_H or CPUFREQ_RELATION_L have the same effect here, as cpu_policy->cur is known
+ * to be a valid and exact target frequency
+ */
+ ret = cpufreq_driver->target(cpu_policy, cpu_policy->cur, CPUFREQ_RELATION_H);
cpufreq_cpu_put(cpu_policy);
@@ -500,7 +508,7 @@ static int cpufreq_restore(struct sys_device * sysdev)
static struct sysdev_driver cpufreq_sysdev_driver = {
.add = cpufreq_add_dev,
.remove = cpufreq_remove_dev,
- .restore = cpufreq_restore,
+ .resume = cpufreq_resume,
};
@@ -872,6 +880,10 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
*/
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
+ if (irqs_disabled())
+ return; /* Only valid if we're in the resume process where
+ * everyone knows what CPU frequency we are at */
+
down_read(&cpufreq_notifier_rwsem);
switch (state) {
case CPUFREQ_PRECHANGE:
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index dffcbfbd1343..b3fe9efb0866 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -252,6 +252,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
+ reset_iter(iter);
ret = seq_open(file, &kallsyms_op);
if (ret == 0)
diff --git a/kernel/power/console.c b/kernel/power/console.c
index c05d0e43675f..35b1f50d97de 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -41,6 +41,11 @@ void pm_restore_console(void)
console_loglevel = orig_loglevel;
#ifdef SUSPEND_CONSOLE
set_console(orig_fgconsole);
+
+ /* FIXME:
+ * This following part is left over from swsusp. Is it really needed?
+ */
+ update_screen(fg_console);
#endif
return;
}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 603936964815..1b92f13d9a77 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,48 +8,375 @@
*
*/
+#include <linux/suspend.h>
#include <linux/kobject.h>
+#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pm.h>
+#include <linux/fs.h>
+#include "power.h"
-static int standby(void)
+static DECLARE_MUTEX(pm_sem);
+
+static struct pm_ops * pm_ops = NULL;
+
+static u32 pm_disk_mode = PM_DISK_SHUTDOWN;
+
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static int have_swsusp = 1;
+#else
+static int have_swsusp = 0;
+#endif
+
+extern long sys_sync(void);
+
+
+/**
+ * pm_set_ops - Set the global power method table.
+ * @ops: Pointer to ops structure.
+ */
+
+void pm_set_ops(struct pm_ops * ops)
{
- return 0;
+ down(&pm_sem);
+ pm_ops = ops;
+ if (ops->pm_disk_mode && ops->pm_disk_mode < PM_DISK_MAX)
+ pm_disk_mode = ops->pm_disk_mode;
+ up(&pm_sem);
}
-static int suspend(void)
+
+/**
+ * pm_suspend_standby - Enter 'standby' state.
+ *
+ * 'standby' is also known as 'Power-On Suspend'. Here, we power down
+ * devices, disable interrupts, and enter the state.
+ */
+
+static int pm_suspend_standby(void)
{
- return 0;
+ int error = 0;
+ unsigned long flags;
+
+ if (!pm_ops || !pm_ops->enter)
+ return -EPERM;
+
+ local_irq_save(flags);
+ if ((error = device_power_down(PM_SUSPEND_STANDBY)))
+ goto Done;
+ error = pm_ops->enter(PM_SUSPEND_STANDBY);
+ local_irq_restore(flags);
+ device_power_up();
+ Done:
+ return error;
+}
+
+
+/**
+ * pm_suspend_mem - Enter suspend-to-RAM state.
+ *
+ * Identical to pm_suspend_standby() - we power down devices, disable
+ * interrupts, and enter the low-power state.
+ */
+
+static int pm_suspend_mem(void)
+{
+	int error = 0;
+	unsigned long flags;
+
+	if (!pm_ops || !pm_ops->enter)
+		return -EPERM;
+
+	local_irq_save(flags);
+	if ((error = device_power_down(PM_SUSPEND_MEM)))
+		goto Done;
+	error = pm_ops->enter(PM_SUSPEND_MEM);
+	local_irq_restore(flags);
+	device_power_up();
+ Done:
+	return error;
}
-static int hibernate(void)
+
+/**
+ * power_down - Shut machine down for hibernate.
+ * @mode: Suspend-to-disk mode
+ *
+ * Use the platform driver, if configured so, and return gracefully if it
+ * fails.
+ * Otherwise, try to power off and reboot. If they fail, halt the machine,
+ * there ain't no turning back.
+ */
+
+static int power_down(u32 mode)
{
+ unsigned long flags;
+ int error = 0;
+
+ local_irq_save(flags);
+ device_power_down(PM_SUSPEND_DISK);
+ switch(mode) {
+ case PM_DISK_PLATFORM:
+ error = pm_ops->enter(PM_SUSPEND_DISK);
+ if (error) {
+ device_power_up();
+ local_irq_restore(flags);
+ return error;
+ }
+ case PM_DISK_SHUTDOWN:
+ machine_power_off();
+ break;
+ case PM_DISK_REBOOT:
+ machine_restart(NULL);
+ break;
+ }
+ machine_halt();
return 0;
}
+
+static int in_suspend __nosavedata = 0;
+
+
+/**
+ * free_some_memory - Try to free as much memory as possible
+ *
+ * ... but do not OOM-kill anyone
+ *
+ * Notice: all userland should be stopped at this point, or
+ * livelock is possible.
+ */
+
+static void free_some_memory(void)
+{
+ printk("Freeing memory: ");
+ while (shrink_all_memory(10000))
+ printk(".");
+ printk("|\n");
+ blk_run_queues();
+}
+
+
+/**
+ * pm_suspend_disk - The granpappy of power management.
+ *
+ * If we're going through the firmware, then get it over with quickly.
+ *
+ * If not, then call swsusp to do its thing, then figure out how
+ * to power down the system.
+ */
+
+static int pm_suspend_disk(void)
+{
+ int error;
+
+ pr_debug("PM: Attempting to suspend to disk.\n");
+ if (pm_disk_mode == PM_DISK_FIRMWARE)
+ return pm_ops->enter(PM_SUSPEND_DISK);
+
+ if (!have_swsusp)
+ return -EPERM;
+
+ pr_debug("PM: snapshotting memory.\n");
+ in_suspend = 1;
+ if ((error = swsusp_save()))
+ goto Done;
+
+ if (in_suspend) {
+ pr_debug("PM: writing image.\n");
+ error = swsusp_write();
+ if (!error)
+ error = power_down(pm_disk_mode);
+ pr_debug("PM: Power down failed.\n");
+ } else
+ pr_debug("PM: Image restored successfully.\n");
+ swsusp_free();
+ Done:
+ return error;
+}
+
+
+
#define decl_state(_name) \
- { .name = __stringify(_name), .fn = _name }
+ { .name = __stringify(_name), .fn = pm_suspend_##_name }
struct pm_state {
char * name;
int (*fn)(void);
} pm_states[] = {
- decl_state(standby),
- decl_state(suspend),
- decl_state(hibernate),
+ [PM_SUSPEND_STANDBY] = decl_state(standby),
+ [PM_SUSPEND_MEM] = decl_state(mem),
+ [PM_SUSPEND_DISK] = decl_state(disk),
{ NULL },
};
-static int enter_state(struct pm_state * state)
+/**
+ * suspend_prepare - Do prep work before entering low-power state.
+ * @state: State we're entering.
+ *
+ * This is common code that is called for each state that we're
+ * entering. Allocate a console, stop all processes, then make sure
+ * the platform can enter the requested state.
+ */
+
+static int suspend_prepare(u32 state)
+{
+ int error = 0;
+
+ pm_prepare_console();
+
+ sys_sync();
+ if (freeze_processes()) {
+ error = -EAGAIN;
+ goto Thaw;
+ }
+
+ if (pm_ops && pm_ops->prepare) {
+ if ((error = pm_ops->prepare(state)))
+ goto Thaw;
+ }
+
+ /* Free memory before shutting down devices. */
+ if (state == PM_SUSPEND_DISK)
+ free_some_memory();
+
+ if ((error = device_suspend(state)))
+ goto Finish;
+
+ return 0;
+ Done:
+ pm_restore_console();
+ return error;
+ Finish:
+ if (pm_ops && pm_ops->finish)
+ pm_ops->finish(state);
+ Thaw:
+ thaw_processes();
+ goto Done;
+}
+
+
+/**
+ * suspend_finish - Do final work before exiting suspend sequence.
+ * @state: State we're coming out of.
+ *
+ * Call platform code to clean up, restart processes, and free the
+ * console that we've allocated.
+ */
+
+static void suspend_finish(u32 state)
+{
+ device_resume();
+ if (pm_ops && pm_ops->finish)
+ pm_ops->finish(state);
+ thaw_processes();
+ pm_restore_console();
+}
+
+
+/**
+ * enter_state - Do common work of entering low-power state.
+ * @state: pm_state structure for state we're entering.
+ *
+ * Make sure we're the only ones trying to enter a sleep state. Fail
+ * if someone has beat us to it, since we don't want anything weird to
+ * happen when we wake up.
+ * Then, do the setup for suspend, enter the state, and cleanup (after
+ * we've woken up).
+ */
+
+static int enter_state(u32 state)
+{
+ int error;
+ struct pm_state * s = &pm_states[state];
+
+ if (down_trylock(&pm_sem))
+ return -EBUSY;
+
+ /* Suspend is hard to get right on SMP. */
+ if (num_online_cpus() != 1) {
+ error = -EPERM;
+ goto Unlock;
+ }
+
+ pr_debug("PM: Preparing system for suspend.\n");
+ if ((error = suspend_prepare(state)))
+ goto Unlock;
+
+ pr_debug("PM: Entering state.\n");
+ error = s->fn();
+
+ pr_debug("PM: Finishing up.\n");
+ suspend_finish(state);
+ Unlock:
+ up(&pm_sem);
+ return error;
+}
+
+
+/**
+ * pm_suspend - Externally visible function for suspending system.
+ * @state: Enumerated value of state to enter.
+ *
+ * Determine whether or not value is within range, get state
+ * structure, and enter (above).
+ */
+
+int pm_suspend(u32 state)
+{
+ if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
+ return enter_state(state);
+ return -EINVAL;
+}
+
+
+/**
+ * pm_resume - Resume from a saved image.
+ *
+ * Called as a late_initcall (so all devices are discovered and
+ * initialized), we call swsusp to see if we have a saved image or not.
+ * If so, we quiesce devices, the restore the saved image. We will
+ * return above (in pm_suspend_disk() ) if everything goes well.
+ * Otherwise, we fail gracefully and return to the normally
+ * scheduled program.
+ *
+ */
+
+static int pm_resume(void)
{
- return state->fn();
+	int error;
+
+	if (!have_swsusp)
+		return 0;
+
+	pr_debug("PM: Reading swsusp image.\n");
+
+	if ((error = swsusp_read()))
+		goto Done;
+
+	pr_debug("PM: Preparing system for restore.\n");
+
+	if ((error = suspend_prepare(PM_SUSPEND_DISK)))
+		goto Free;
+
+	pr_debug("PM: Restoring saved image.\n");
+	swsusp_restore();
+
+	pr_debug("PM: Restore failed, recovering.\n");
+	suspend_finish(PM_SUSPEND_DISK);
+ Free:
+	swsusp_free();
+ Done:
+	pr_debug("PM: Resume from disk failed.\n");
+	return 0;
}
+late_initcall(pm_resume);
decl_subsys(power,NULL,NULL);
@@ -65,12 +392,87 @@ static struct subsys_attribute _name##_attr = { \
.store = _name##_store, \
}
+
+static char * pm_disk_modes[] = {
+ [PM_DISK_FIRMWARE] = "firmware",
+ [PM_DISK_PLATFORM] = "platform",
+ [PM_DISK_SHUTDOWN] = "shutdown",
+ [PM_DISK_REBOOT] = "reboot",
+};
+
+/**
+ * disk - Control suspend-to-disk mode
+ *
+ * Suspend-to-disk can be handled in several ways. The greatest
+ * distinction is who writes memory to disk - the firmware or the OS.
+ * If the firmware does it, we assume that it also handles suspending
+ * the system.
+ * If the OS does it, then we have three options for putting the system
+ * to sleep - using the platform driver (e.g. ACPI or other PM registers),
+ * powering off the system or rebooting the system (for testing).
+ *
+ * The system will support either 'firmware' or 'platform', and that is
+ * known a priori (and encoded in pm_ops). But, the user may choose
+ * 'shutdown' or 'reboot' as alternatives.
+ *
+ * show() will display what the mode is currently set to.
+ * store() will accept one of
+ *
+ * 'firmware'
+ * 'platform'
+ * 'shutdown'
+ * 'reboot'
+ *
+ * It will only change to 'firmware' or 'platform' if the system
+ * supports it (as determined from pm_ops->pm_disk_mode).
+ */
+
+static ssize_t disk_show(struct subsystem * subsys, char * buf)
+{
+ return sprintf(buf,"%s\n",pm_disk_modes[pm_disk_mode]);
+}
+
+
+static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
+{
+ int error = 0;
+ int i;
+ u32 mode = 0;
+
+ down(&pm_sem);
+ for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) {
+ if (!strcmp(buf,pm_disk_modes[i])) {
+ mode = i;
+ break;
+ }
+ }
+ if (mode) {
+ if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT)
+ pm_disk_mode = mode;
+ else {
+ if (pm_ops && pm_ops->enter &&
+ (mode == pm_ops->pm_disk_mode))
+ pm_disk_mode = mode;
+ else
+ error = -EINVAL;
+ }
+ } else
+ error = -EINVAL;
+
+ pr_debug("PM: suspend-to-disk mode set to '%s'\n",
+ pm_disk_modes[mode]);
+ up(&pm_sem);
+ return error ? error : n;
+}
+
+power_attr(disk);
+
/**
* state - control system power state.
*
* show() returns what states are supported, which is hard-coded to
- * 'standby' (Power-On Suspend), 'suspend' (Suspend-to-RAM), and
- * 'hibernate' (Suspend-to-Disk).
+ * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
+ * 'disk' (Suspend-to-Disk).
*
* store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
@@ -87,22 +489,21 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
return (s - buf);
}
-static ssize_t state_store(struct subsystem * s, const char * buf, size_t n)
+static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n)
{
- struct pm_state * state;
+ u32 state;
+ struct pm_state * s;
int error;
- char * end = strchr(buf,'\n');
-
- if (end)
- *end = '\0';
- for (state = &pm_states[0]; state; state++) {
- if (!strcmp(buf,state->name))
+ for (state = 0; state < PM_SUSPEND_MAX; state++) {
+ s = &pm_states[state];
+ if (s->name && !strcmp(buf,s->name))
break;
}
- if (!state)
- return -EINVAL;
- error = enter_state(state);
+ if (s)
+ error = enter_state(state);
+ else
+ error = -EINVAL;
return error ? error : n;
}
@@ -110,6 +511,7 @@ power_attr(state);
static struct attribute * g[] = {
&state_attr.attr,
+ &disk_attr.attr,
NULL,
};
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 0b7f86a3311e..e98de640155d 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -7,3 +7,40 @@
#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
#endif
+
+
+#ifdef CONFIG_SOFTWARE_SUSPEND
+extern int swsusp_save(void);
+extern int swsusp_write(void);
+extern int swsusp_read(void);
+extern int swsusp_restore(void);
+extern int swsusp_free(void);
+#else
+static inline int swsusp_save(void)
+{
+ return 0;
+}
+static inline int swsusp_write(void)
+{
+ return 0;
+}
+static inline int swsusp_read(void)
+{
+ return 0;
+}
+static inline int swsusp_restore(void)
+{
+ return 0;
+}
+static inline int swsusp_free(void)
+{
+ return 0;
+}
+#endif
+
+
+extern int freeze_processes(void);
+extern void thaw_processes(void);
+
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 3da9c0142676..c849a18c45f4 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -65,8 +65,6 @@
#include "power.h"
-extern long sys_sync(void);
-
unsigned char software_suspend_enabled = 1;
#define __ADDRESS(x) ((unsigned long) phys_to_virt(x))
@@ -90,8 +88,6 @@ static dev_t resume_device;
/* Local variables that should not be affected by save */
unsigned int nr_copy_pages __nosavedata = 0;
-static int pm_suspend_state;
-
/* Suspend pagedir is allocated before final copy, therefore it
must be freed after resume
@@ -439,81 +435,6 @@ static suspend_pagedir_t *create_suspend_pagedir(int nr_copy_pages)
return pagedir;
}
-static int prepare_suspend_processes(void)
-{
- sys_sync(); /* Syncing needs pdflushd, so do it before stopping processes */
- if (freeze_processes()) {
- printk( KERN_ERR "Suspend failed: Not all processes stopped!\n" );
- thaw_processes();
- return 1;
- }
- return 0;
-}
-
-/*
- * Try to free as much memory as possible, but do not OOM-kill anyone
- *
- * Notice: all userland should be stopped at this point, or livelock is possible.
- */
-static void free_some_memory(void)
-{
- printk("Freeing memory: ");
- while (shrink_all_memory(10000))
- printk(".");
- printk("|\n");
-}
-
-/* Make disk drivers accept operations, again */
-static void drivers_unsuspend(void)
-{
- device_resume(RESUME_RESTORE_STATE);
- device_resume(RESUME_ENABLE);
-}
-
-/* Called from process context */
-static int drivers_suspend(void)
-{
- if (device_suspend(4, SUSPEND_NOTIFY))
- return -EIO;
- if (device_suspend(4, SUSPEND_SAVE_STATE)) {
- device_resume(RESUME_RESTORE_STATE);
- return -EIO;
- }
- if (!pm_suspend_state) {
- if(pm_send_all(PM_SUSPEND,(void *)3)) {
- printk(KERN_WARNING "Problem while sending suspend event\n");
- return -EIO;
- }
- pm_suspend_state=1;
- } else
- printk(KERN_WARNING "PM suspend state already raised\n");
- device_suspend(4, SUSPEND_DISABLE);
-
- return 0;
-}
-
-#define RESUME_PHASE1 1 /* Called from interrupts disabled */
-#define RESUME_PHASE2 2 /* Called with interrupts enabled */
-#define RESUME_ALL_PHASES (RESUME_PHASE1 | RESUME_PHASE2)
-static void drivers_resume(int flags)
-{
- if (flags & RESUME_PHASE1) {
- device_resume(RESUME_RESTORE_STATE);
- device_resume(RESUME_ENABLE);
- }
- if (flags & RESUME_PHASE2) {
- if (pm_suspend_state) {
- if(pm_send_all(PM_RESUME,(void *)0))
- printk(KERN_WARNING "Problem while sending resume event\n");
- pm_suspend_state=0;
- } else
- printk(KERN_WARNING "PM suspend state wasn't raised\n");
-
-#ifdef SUSPEND_CONSOLE
- update_screen(fg_console); /* Hmm, is this the problem? */
-#endif
- }
-}
static int suspend_prepare_image(void)
{
@@ -567,12 +488,14 @@ static int suspend_prepare_image(void)
return 0;
}
-static void suspend_save_image(void)
+static int suspend_save_image(void)
{
- drivers_unsuspend();
+ int error;
+
+ device_resume();
lock_swapdevices();
- write_suspend_image();
+ error = write_suspend_image();
lock_swapdevices(); /* This will unlock ignored swap devices since writing is finished */
/* It is important _NOT_ to umount filesystems at this point. We want
@@ -580,29 +503,7 @@ static void suspend_save_image(void)
* filesystem clean: it is not. (And it does not matter, if we resume
* correctly, we'll mark system clean, anyway.)
*/
-}
-
-static void suspend_power_down(void)
-{
- extern int C_A_D;
- C_A_D = 0;
- printk(KERN_EMERG "%s%s Trying to power down.\n", name_suspend, TEST_SWSUSP ? "Disable TEST_SWSUSP. NOT ": "");
-#ifdef CONFIG_VT
- PRINTK(KERN_EMERG "shift_state: %04x\n", shift_state);
- mdelay(1000);
- if (TEST_SWSUSP ^ (!!(shift_state & (1 << KG_CTRL))))
- machine_restart(NULL);
- else
-#endif
- {
- device_shutdown();
- machine_power_off();
- }
-
- printk(KERN_EMERG "%sProbably not capable for powerdown. System halted.\n", name_suspend);
- machine_halt();
- while (1);
- /* NOTREACHED */
+ return error;
}
/*
@@ -614,32 +515,21 @@ void do_magic_resume_1(void)
barrier();
mb();
spin_lock_irq(&suspend_pagedir_lock); /* Done to disable interrupts */
-
PRINTK( "Waiting for DMAs to settle down...\n");
- mdelay(1000); /* We do not want some readahead with DMA to corrupt our memory, right?
- Do it with disabled interrupts for best effect. That way, if some
- driver scheduled DMA, we have good chance for DMA to finish ;-). */
+ /* We do not want some readahead with DMA to corrupt our memory, right?
+ Do it with disabled interrupts for best effect. That way, if some
+ driver scheduled DMA, we have good chance for DMA to finish ;-). */
+ mdelay(1000);
}
void do_magic_resume_2(void)
{
BUG_ON (nr_copy_pages_check != nr_copy_pages);
BUG_ON (pagedir_order_check != pagedir_order);
-
- __flush_tlb_global(); /* Even mappings of "global" things (vmalloc) need to be fixed */
-
- PRINTK( "Freeing prev allocated pagedir\n" );
- free_suspend_pagedir((unsigned long) pagedir_save);
+
+ /* Even mappings of "global" things (vmalloc) need to be fixed */
+ __flush_tlb_global();
spin_unlock_irq(&suspend_pagedir_lock);
- drivers_resume(RESUME_ALL_PHASES);
-
- PRINTK( "Fixing swap signatures... " );
- mark_swapfiles(((swp_entry_t) {0}), MARK_SWAP_RESUME);
- PRINTK( "ok\n" );
-
-#ifdef SUSPEND_CONSOLE
- update_screen(fg_console); /* Hmm, is this the problem? */
-#endif
}
/* do_magic() is implemented in arch/?/kernel/suspend_asm.S, and basically does:
@@ -664,106 +554,28 @@ void do_magic_suspend_1(void)
{
mb();
barrier();
- BUG_ON(in_atomic());
spin_lock_irq(&suspend_pagedir_lock);
}
-void do_magic_suspend_2(void)
+int do_magic_suspend_2(void)
{
int is_problem;
read_swapfiles();
is_problem = suspend_prepare_image();
spin_unlock_irq(&suspend_pagedir_lock);
- if (!is_problem) {
- kernel_fpu_end(); /* save_processor_state() does kernel_fpu_begin, and we need to revert it in order to pass in_atomic() checks */
- BUG_ON(in_atomic());
- suspend_save_image();
- suspend_power_down(); /* FIXME: if suspend_power_down is commented out, console is lost after few suspends ?! */
- }
-
+ if (!is_problem)
+ return suspend_save_image();
printk(KERN_EMERG "%sSuspend failed, trying to recover...\n", name_suspend);
- MDELAY(1000); /* So user can wait and report us messages if armageddon comes :-) */
-
barrier();
mb();
- spin_lock_irq(&suspend_pagedir_lock); /* Done to disable interrupts */
mdelay(1000);
-
- free_pages((unsigned long) pagedir_nosave, pagedir_order);
- spin_unlock_irq(&suspend_pagedir_lock);
- mark_swapfiles(((swp_entry_t) {0}), MARK_SWAP_RESUME);
-}
-
-static int do_software_suspend(void)
-{
- arch_prepare_suspend();
- if (pm_prepare_console())
- printk( "%sCan't allocate a console... proceeding\n", name_suspend);
- if (!prepare_suspend_processes()) {
-
- /* At this point, all user processes and "dangerous"
- kernel threads are stopped. Free some memory, as we
- need half of memory free. */
-
- free_some_memory();
-
- /* No need to invalidate any vfsmnt list --
- * they will be valid after resume, anyway.
- */
- blk_run_queues();
-
- /* Save state of all device drivers, and stop them. */
- if (drivers_suspend()==0)
- /* If stopping device drivers worked, we proceed basically into
- * suspend_save_image.
- *
- * do_magic(0) returns after system is resumed.
- *
- * do_magic() copies all "used" memory to "free" memory, then
- * unsuspends all device drivers, and writes memory to disk
- * using normal kernel mechanism.
- */
- do_magic(0);
- thaw_processes();
- }
- software_suspend_enabled = 1;
- MDELAY(1000);
- pm_restore_console();
- return 0;
-}
-
-
-/**
- * software_suspend - initiate suspend-to-swap transition.
- *
- * This is main interface to the outside world. It needs to be
- * called from process context.
- */
-
-int software_suspend(void)
-{
- if(!software_suspend_enabled)
- return -EINVAL;
-
- if (num_online_cpus() > 1) {
- printk(KERN_WARNING "swsusp does not support SMP.\n");
- return -EPERM;
- }
-
-#if defined (CONFIG_HIGHMEM) || defined (COFNIG_DISCONTIGMEM)
- printk("swsusp is not supported with high- or discontig-mem.\n");
- return -EPERM;
-#endif
-
- software_suspend_enabled = 0;
- might_sleep();
- return do_software_suspend();
+ return -EFAULT;
}
/* More restore stuff */
/* FIXME: Why not memcpy(to, from, 1<<pagedir_order*PAGE_SIZE)? */
-static void copy_pagedir(suspend_pagedir_t *to, suspend_pagedir_t *from)
+static void __init copy_pagedir(suspend_pagedir_t *to, suspend_pagedir_t *from)
{
int i;
char *topointer=(char *)to, *frompointer=(char *)from;
@@ -780,8 +592,8 @@ static void copy_pagedir(suspend_pagedir_t *to, suspend_pagedir_t *from)
/*
* Returns true if given address/order collides with any orig_address
*/
-static int does_collide_order(suspend_pagedir_t *pagedir, unsigned long addr,
- int order)
+static int __init does_collide_order(suspend_pagedir_t *pagedir,
+ unsigned long addr, int order)
{
int i;
unsigned long addre = addr + (PAGE_SIZE<<order);
@@ -798,7 +610,7 @@ static int does_collide_order(suspend_pagedir_t *pagedir, unsigned long addr,
* We check here that pagedir & pages it points to won't collide with pages
* where we're going to restore from the loaded pages later
*/
-static int check_pagedir(void)
+static int __init check_pagedir(void)
{
int i;
@@ -816,7 +628,7 @@ static int check_pagedir(void)
return 0;
}
-static int relocate_pagedir(void)
+static int __init relocate_pagedir(void)
{
/*
* We have to avoid recursion (not to overflow kernel stack),
@@ -866,13 +678,13 @@ static int relocate_pagedir(void)
* I really don't think that it's foolproof but more than nothing..
*/
-static int sanity_check_failed(char *reason)
+static int __init sanity_check_failed(char *reason)
{
printk(KERN_ERR "%s%s\n",name_resume,reason);
return -EPERM;
}
-static int sanity_check(struct suspend_header *sh)
+static int __init sanity_check(struct suspend_header *sh)
{
if(sh->version_code != LINUX_VERSION_CODE)
return sanity_check_failed("Incorrect kernel version");
@@ -889,7 +701,8 @@ static int sanity_check(struct suspend_header *sh)
return 0;
}
-static int bdev_read_page(struct block_device *bdev, long pos, void *buf)
+static int __init bdev_read_page(struct block_device *bdev,
+ long pos, void *buf)
{
struct buffer_head *bh;
BUG_ON (pos%PAGE_SIZE);
@@ -905,7 +718,8 @@ static int bdev_read_page(struct block_device *bdev, long pos, void *buf)
extern dev_t __init name_to_dev_t(const char *line);
-static int __read_suspend_image(struct block_device *bdev, union diskpage *cur)
+static int __init read_suspend_image(struct block_device *bdev,
+ union diskpage *cur)
{
swp_entry_t next;
int i, nr_pgdir_pages;
@@ -982,89 +796,106 @@ static int __read_suspend_image(struct block_device *bdev, union diskpage *cur)
return 0;
}
-static int read_suspend_image(const char * specialfile)
+/**
+ * swsusp_save - Snapshot memory
+ */
+
+int swsusp_save(void)
+{
+#if defined (CONFIG_HIGHMEM) || defined (CONFIG_DISCONTIGMEM)
+ printk("swsusp is not supported with high- or discontig-mem.\n");
+ return -EPERM;
+#endif
+ return 0;
+}
+
+
+/**
+ * swsusp_write - Write saved memory image to swap.
+ *
+ * do_magic(0) returns after system is resumed.
+ *
+ * do_magic() copies all "used" memory to "free" memory, then
+ * unsuspends all device drivers, and writes memory to disk
+ * using normal kernel mechanism.
+ */
+
+int swsusp_write(void)
+{
+ arch_prepare_suspend();
+ return do_magic(0);
+}
+
+
+/**
+ * swsusp_read - Read saved image from swap.
+ */
+
+int __init swsusp_read(void)
{
union diskpage *cur;
- unsigned long scratch_page = 0;
int error;
char b[BDEVNAME_SIZE];
- resume_device = name_to_dev_t(specialfile);
- scratch_page = get_zeroed_page(GFP_ATOMIC);
- cur = (void *) scratch_page;
+ if (!strlen(resume_file))
+ return -ENOENT;
+
+ resume_device = name_to_dev_t(resume_file);
+ printk("swsusp: Resume From Partition: %s, Device: %s\n",
+ resume_file, __bdevname(resume_device, b));
+
+ cur = (union diskpage *)get_zeroed_page(GFP_ATOMIC);
if (cur) {
struct block_device *bdev;
- printk("Resuming from device %s\n",
- __bdevname(resume_device, b));
bdev = open_by_devnum(resume_device, FMODE_READ, BDEV_RAW);
- if (IS_ERR(bdev)) {
- error = PTR_ERR(bdev);
- } else {
+ if (!IS_ERR(bdev)) {
set_blocksize(bdev, PAGE_SIZE);
- error = __read_suspend_image(bdev, cur);
+ error = read_suspend_image(bdev, cur);
blkdev_put(bdev, BDEV_RAW);
- }
- } else error = -ENOMEM;
+ } else
+ error = PTR_ERR(bdev);
+ free_page((unsigned long)cur);
+ } else
+ error = -ENOMEM;
- if (scratch_page)
- free_page(scratch_page);
- switch (error) {
- case 0:
- PRINTK("Reading resume file was successful\n");
- break;
- case -EINVAL:
- break;
- case -EIO:
- printk( "%sI/O error\n", name_resume);
- break;
- case -ENOENT:
- printk( "%s%s: No such file or directory\n", name_resume, specialfile);
- break;
- case -ENOMEM:
- printk( "%sNot enough memory\n", name_resume);
- break;
- default:
- printk( "%sError %d resuming\n", name_resume, error );
- }
+ if (!error)
+ PRINTK("Reading resume file was successful\n");
+ else
+ printk( "%sError %d resuming\n", name_resume, error );
MDELAY(1000);
return error;
}
+
/**
- * software_resume - Check and load saved image from swap.
- *
- * Defined as a late_initcall, so it gets called after all devices
- * have been probed and initialized, but before we've mounted anything.
+ * swsusp_restore - Replace running kernel with saved image.
*/
-static int software_resume(void)
+int __init swsusp_restore(void)
{
- if (!strlen(resume_file))
- return 0;
-
- if (pm_prepare_console())
- printk("swsusp: Can't allocate a console... proceeding\n");
+ return do_magic(1);
+}
- printk("swsusp: %s\n", name_resume );
- MDELAY(1000);
+/**
+ * swsusp_free - Free memory allocated to hold snapshot.
+ */
- printk("swsusp: resuming from %s\n", resume_file);
- if (read_suspend_image(resume_file))
- goto read_failure;
- do_magic(1);
- printk("swsusp: Resume failed. Continuing.\n");
+int swsusp_free(void)
+{
+ PRINTK( "Freeing prev allocated pagedir\n" );
+ free_suspend_pagedir((unsigned long) pagedir_save);
-read_failure:
- pm_restore_console();
- return -EFAULT;
+ PRINTK( "Fixing swap signatures... " );
+ mark_swapfiles(((swp_entry_t) {0}), MARK_SWAP_RESUME);
+ PRINTK( "ok\n" );
+ return 0;
}
-late_initcall(software_resume);
-
static int __init resume_setup(char *str)
{
- strncpy( resume_file, str, 255 );
+ if (strlen(str))
+ strncpy(resume_file, str, 255);
return 1;
}
@@ -1077,5 +908,3 @@ static int __init noresume_setup(char *str)
__setup("noresume", noresume_setup);
__setup("resume=", resume_setup);
-EXPORT_SYMBOL(software_suspend);
-EXPORT_SYMBOL(software_suspend_enabled);
diff --git a/kernel/sys.c b/kernel/sys.c
index 3497a1565a61..27c19703a1ea 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -456,11 +456,8 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
#ifdef CONFIG_SOFTWARE_SUSPEND
case LINUX_REBOOT_CMD_SW_SUSPEND:
- if (!software_suspend_enabled) {
- unlock_kernel();
- return -EAGAIN;
- }
- software_suspend();
+ if (!pm_suspend(PM_SUSPEND_DISK))
+ break;
do_exit(0);
break;
#endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4d2d3ce1d430..453f291437ab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -388,7 +388,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return allocated;
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_PM
int is_head_of_free_region(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -435,7 +435,7 @@ void drain_local_pages(void)
}
local_irq_restore(flags);
}
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_PM */
/*
* Free a 0-order page
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 860368990523..48c424295338 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1068,7 +1068,7 @@ void wakeup_kswapd(struct zone *zone)
wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_PM
/*
* Try to free `nr_pages' of memory, system-wide. Returns the number of freed
* pages.
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 51e47c4f0ab6..76d0edd929c2 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -854,15 +854,10 @@ int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
/*
* device multicast group del
*/
-int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
+static int __ipv6_dev_mc_dec(struct net_device *dev, struct inet6_dev *idev, struct in6_addr *addr)
{
- struct inet6_dev *idev;
struct ifmcaddr6 *ma, **map;
- idev = in6_dev_get(dev);
- if (idev == NULL)
- return -ENODEV;
-
write_lock_bh(&idev->lock);
for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0) {
@@ -873,20 +868,32 @@ int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
igmp6_group_dropped(ma);
ma_put(ma);
- in6_dev_put(idev);
return 0;
}
write_unlock_bh(&idev->lock);
- in6_dev_put(idev);
return 0;
}
}
write_unlock_bh(&idev->lock);
- in6_dev_put(idev);
return -ENOENT;
}
+int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
+{
+ struct inet6_dev *idev = in6_dev_get(dev);
+ int err;
+
+ if (!idev)
+ return -ENODEV;
+
+ err = __ipv6_dev_mc_dec(dev, idev, addr);
+
+ in6_dev_put(idev);
+
+ return err;
+}
+
/*
* check if the interface/address pair is valid
*/
@@ -2024,7 +2031,12 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
/* Delete all-nodes address. */
ipv6_addr_all_nodes(&maddr);
- ipv6_dev_mc_dec(idev->dev, &maddr);
+
+ /* We cannot call ipv6_dev_mc_dec() directly, our caller in
+ * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
+ * fail.
+ */
+ __ipv6_dev_mc_dec(idev->dev, idev, &maddr);
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {